 void           rpc_put_task(struct rpc_task *);
 void           rpc_release_task(struct rpc_task *);
 void           rpc_exit_task(struct rpc_task *);
+void           rpc_release_calldata(const struct rpc_call_ops *, void *);
 void           rpc_killall_tasks(struct rpc_clnt *);
 int            rpc_execute(struct rpc_task *);
 void           rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
 
        rpc_restore_sigmask(&oldset);
        return status;
 out_release:
-       if (tk_ops->rpc_release != NULL)
-               tk_ops->rpc_release(data);
+       rpc_release_calldata(tk_ops, data);
        return status;
 }
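
The out_release error path above (this hunk appears to sit in rpc_call_async()) now funnels through the new helper. That makes the ownership rule explicit: once the calldata has been handed to the RPC layer, the ->rpc_release() callback is the single cleanup point, on the setup error path as well as at normal task teardown. A minimal caller sketch under that assumption follows; the demo_* names are hypothetical, and only rpc_call_async(), struct rpc_call_ops and the new rpc_release_calldata() are real SUNRPC interfaces here:

#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Hypothetical per-call state and its release callback. */
static void demo_rpc_release(void *calldata)
{
	kfree(calldata);	/* runs under the BKL via rpc_release_calldata() */
}

static const struct rpc_call_ops demo_call_ops = {
	.rpc_release	= demo_rpc_release,
};

static int demo_submit(struct rpc_clnt *clnt, struct rpc_message *msg)
{
	void *calldata;
	int status;

	calldata = kzalloc(64, GFP_KERNEL);
	if (calldata == NULL)
		return -ENOMEM;
	status = rpc_call_async(clnt, msg, 0, &demo_call_ops, calldata);
	/*
	 * On failure, the out_release path has already invoked
	 * demo_rpc_release(calldata), so it must not be freed here.
	 */
	return status;
}
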
 
 
 }
 EXPORT_SYMBOL(rpc_exit_task);
 
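+/*
+ * If the calldata has a release callback, invoke it while holding
+ * the Big Kernel Lock.
+ */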
+void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
+{
+       if (ops->rpc_release != NULL) {
+               lock_kernel();
+               ops->rpc_release(calldata);
+               unlock_kernel();
+       }
+}
+
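Beyond removing the duplicated NULL check at the three call sites in this patch, the helper changes the callback's locking environment: every ->rpc_release() now runs with the Big Kernel Lock held. A compilable sketch of the conversion pattern (demo_release_site() is hypothetical; ops and data stand for whatever the surrounding function already has):

#include <linux/sunrpc/sched.h>

static void demo_release_site(const struct rpc_call_ops *ops, void *data)
{
	/*
	 * Before this patch, call sites open-coded the check and the
	 * callback ran without the BKL:
	 *
	 *	if (ops->rpc_release != NULL)
	 *		ops->rpc_release(data);
	 *
	 * After it, the helper centralizes the check and brackets the
	 * callback with lock_kernel()/unlock_kernel():
	 */
	rpc_release_calldata(ops, data);
}
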
 /*
  * This is the RPC `scheduler' (or rather, the finite state machine).
  */
        }
        if (task->tk_flags & RPC_TASK_DYNAMIC)
                call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
-       if (tk_ops->rpc_release)
-               tk_ops->rpc_release(calldata);
+       rpc_release_calldata(tk_ops, calldata);
 }
 EXPORT_SYMBOL(rpc_put_task);
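
Note the teardown ordering in rpc_put_task() above: the calldata is released synchronously, via rpc_release_calldata(), before the function returns, while a dynamically allocated task may only be freed later by the call_rcu_bh() callback. That is also why the hunk calls the helper with tk_ops and calldata saved from the task rather than dereferencing the task again. A hedged sketch of the resulting lifetime rules (demo_finish() is hypothetical):

#include <linux/sunrpc/sched.h>

static void demo_finish(struct rpc_task *task)
{
	rpc_put_task(task);
	/*
	 * If that dropped the last reference:
	 *  - the calldata has already been handed to ->rpc_release()
	 *    (under the BKL) by the time rpc_put_task() returns;
	 *  - the rpc_task itself may still be pending an RCU grace
	 *    period, so it must not be touched here in any case.
	 */
}
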
 
        struct rpc_task *task;
        task = rpc_new_task(clnt, flags, ops, data);
        if (task == NULL) {
-               if (ops->rpc_release != NULL)
-                       ops->rpc_release(data);
+               rpc_release_calldata(ops, data);
                return ERR_PTR(-ENOMEM);
        }
        atomic_inc(&task->tk_count);