X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=fs%2Ffuse%2Fdev.c;h=0c9a2ee54c91dfde34d502737e821e93d5a765ad;hb=576cfa934e357c44d6259f90c7d065de328a3691;hp=5cd43bf917a43b742976ffecdd7dd000e68328ca;hpb=9ba7cbba100bdaca7316d71d6c6298e61191f8b2;p=linux-2.6-omap-h63xx.git

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 5cd43bf917a..0c9a2ee54c9 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -66,6 +66,12 @@ static void restore_sigs(sigset_t *oldset)
 	sigprocmask(SIG_SETMASK, oldset, NULL);
 }
 
+/*
+ * Reset request, so that it can be reused
+ *
+ * The caller must be _very_ careful to make sure, that it is holding
+ * the only reference to req
+ */
 void fuse_reset_request(struct fuse_req *req)
 {
 	int preallocated = req->preallocated;
@@ -109,18 +115,24 @@ struct fuse_req *fuse_get_request(struct fuse_conn *fc)
 	int intr;
 	sigset_t oldset;
 
+	atomic_inc(&fc->num_waiting);
 	block_sigs(&oldset);
 	intr = down_interruptible(&fc->outstanding_sem);
 	restore_sigs(&oldset);
-	return intr ? NULL : do_get_request(fc);
+	if (intr) {
+		atomic_dec(&fc->num_waiting);
+		return NULL;
+	}
+	return do_get_request(fc);
 }
 
+/* Must be called with fuse_lock held */
 static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
 {
-	spin_lock(&fuse_lock);
-	if (req->preallocated)
+	if (req->preallocated) {
+		atomic_dec(&fc->num_waiting);
 		list_add(&req->list, &fc->unused_list);
-	else
+	} else
 		fuse_request_free(req);
 
 	/* If we are in debt decrease that first */
@@ -128,10 +140,18 @@ static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
 		fc->outstanding_debt--;
 	else
 		up(&fc->outstanding_sem);
-	spin_unlock(&fuse_lock);
 }
 
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
+{
+	if (atomic_dec_and_test(&req->count)) {
+		spin_lock(&fuse_lock);
+		fuse_putback_request(fc, req);
+		spin_unlock(&fuse_lock);
+	}
+}
+
+static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (atomic_dec_and_test(&req->count))
 		fuse_putback_request(fc, req);
@@ -148,34 +168,21 @@ void fuse_release_background(struct fuse_req *req)
 	spin_unlock(&fuse_lock);
 }
 
-static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
-{
-	int i;
-	struct fuse_init_out *arg = &req->misc.init_out;
-
-	if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
-		fc->conn_error = 1;
-	else {
-		fc->minor = arg->minor;
-		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
-	}
-
-	/* After INIT reply is received other requests can go
-	   out. So do (FUSE_MAX_OUTSTANDING - 1) number of
-	   up()s on outstanding_sem. The last up() is done in
-	   fuse_putback_request() */
-	for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
-		up(&fc->outstanding_sem);
-}
-
 /*
  * This function is called when a request is finished. Either a reply
  * has arrived or it was interrupted (and not yet sent) or some error
  * occurred during communication with userspace, or the device file
  * was closed. In case of a background request the reference to the
  * stored objects are released. The requester thread is woken up (if
- * still waiting), and finally the reference to the request is
- * released
+ * still waiting), the 'end' callback is called if given, else the
+ * reference to the request is released
+ *
+ * Releasing extra reference for foreground requests must be done
+ * within the same locked region as setting state to finished. This
+ * is because fuse_reset_request() may be called after request is
+ * finished and it must be the sole possessor. If request is
+ * interrupted and put in the background, it will return with an error
+ * and hence never be reset and reused.
  *
  * Called with fuse_lock, unlocks it
  */
@@ -183,24 +190,23 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
 	list_del(&req->list);
 	req->state = FUSE_REQ_FINISHED;
-	spin_unlock(&fuse_lock);
-	if (req->background) {
+	if (!req->background) {
+		wake_up(&req->waitq);
+		fuse_put_request_locked(fc, req);
+		spin_unlock(&fuse_lock);
+	} else {
+		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+		req->end = NULL;
+		spin_unlock(&fuse_lock);
 		down_read(&fc->sbput_sem);
 		if (fc->mounted)
 			fuse_release_background(req);
 		up_read(&fc->sbput_sem);
+		if (end)
+			end(fc, req);
+		else
+			fuse_put_request(fc, req);
 	}
-	wake_up(&req->waitq);
-	if (req->in.h.opcode == FUSE_INIT)
-		process_init_reply(fc, req);
-	else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
-		/* Special case for failed iget in CREATE */
-		u64 nodeid = req->in.h.nodeid;
-		fuse_reset_request(req);
-		fuse_send_forget(fc, req, nodeid, 1);
-		return;
-	}
-	fuse_put_request(fc, req);
 }
 
 /*
@@ -254,11 +260,13 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
 	restore_sigs(&oldset);
 	spin_lock(&fuse_lock);
-	if (req->state == FUSE_REQ_FINISHED)
+	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
 		return;
 
-	req->out.h.error = -EINTR;
-	req->interrupted = 1;
+	if (!req->interrupted) {
+		req->out.h.error = -EINTR;
+		req->interrupted = 1;
+	}
 	if (req->locked) {
 		/* This is uninterruptible sleep, because data is
 		   being copied to/from the buffers of req. During
@@ -360,28 +368,6 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 	request_send_nowait(fc, req);
 }
 
-void fuse_send_init(struct fuse_conn *fc)
-{
-	/* This is called from fuse_read_super() so there's guaranteed
-	   to be exactly one request available */
-	struct fuse_req *req = fuse_get_request(fc);
-	struct fuse_init_in *arg = &req->misc.init_in;
-	arg->major = FUSE_KERNEL_VERSION;
-	arg->minor = FUSE_KERNEL_MINOR_VERSION;
-	req->in.h.opcode = FUSE_INIT;
-	req->in.numargs = 1;
-	req->in.args[0].size = sizeof(*arg);
-	req->in.args[0].value = arg;
-	req->out.numargs = 1;
-	/* Variable length arguement used for backward compatibility
-	   with interface version < 7.5. Rest of init_out is zeroed
-	   by do_get_request(), so a short reply is not a problem */
-	req->out.argvar = 1;
-	req->out.args[0].size = sizeof(struct fuse_init_out);
-	req->out.args[0].value = &req->misc.init_out;
-	request_send_background(fc, req);
-}
-
 /*
  * Lock the request. Up to the next unlock_request() there mustn't be
  * anything that could cause a page-fault. If the request was already
@@ -764,6 +750,10 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 		goto err_finish;
 
 	spin_lock(&fuse_lock);
+	err = -ENOENT;
+	if (!fc->connected)
+		goto err_unlock;
+
 	req = request_find(fc, oh.unique);
 	err = -EINVAL;
 	if (!req)
@@ -830,7 +820,11 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 	return mask;
 }
 
-/* Abort all requests on the given list (pending or processing) */
+/*
+ * Abort all requests on the given list (pending or processing)
+ *
+ * This function releases and reacquires fuse_lock
+ */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
 {
 	while (!list_empty(head)) {
@@ -842,6 +836,74 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
 	}
 }
 
+/*
+ * Abort requests under I/O
+ *
+ * The requests are set to interrupted and finished, and the request
+ * waiter is woken up. This will make request_wait_answer() wait
+ * until the request is unlocked and then return.
+ *
+ * If the request is asynchronous, then the end function needs to be
+ * called after waiting for the request to be unlocked (if it was
+ * locked).
+ */
+static void end_io_requests(struct fuse_conn *fc)
+{
+	while (!list_empty(&fc->io)) {
+		struct fuse_req *req =
+			list_entry(fc->io.next, struct fuse_req, list);
+		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+
+		req->interrupted = 1;
+		req->out.h.error = -ECONNABORTED;
+		req->state = FUSE_REQ_FINISHED;
+		list_del_init(&req->list);
+		wake_up(&req->waitq);
+		if (end) {
+			req->end = NULL;
+			/* The end function will consume this reference */
+			__fuse_get_request(req);
+			spin_unlock(&fuse_lock);
+			wait_event(req->waitq, !req->locked);
+			end(fc, req);
+			spin_lock(&fuse_lock);
+		}
+	}
+}
+
+/*
+ * Abort all requests.
+ *
+ * Emergency exit in case of a malicious or accidental deadlock, or
+ * just a hung filesystem.
+ *
+ * The same effect is usually achievable through killing the
+ * filesystem daemon and all users of the filesystem. The exception
+ * is the combination of an asynchronous request and the tricky
+ * deadlock (see Documentation/filesystems/fuse.txt).
+ *
+ * During the aborting, progression of requests from the pending and
+ * processing lists onto the io list, and progression of new requests
+ * onto the pending list is prevented by req->connected being false.
+ *
+ * Progression of requests under I/O to the processing list is
+ * prevented by the req->interrupted flag being true for these
+ * requests. For this reason requests on the io list must be aborted
+ * first.
+ */
+void fuse_abort_conn(struct fuse_conn *fc)
+{
+	spin_lock(&fuse_lock);
+	if (fc->connected) {
+		fc->connected = 0;
+		end_io_requests(fc);
+		end_requests(fc, &fc->pending);
+		end_requests(fc, &fc->processing);
+		wake_up_all(&fc->waitq);
+	}
+	spin_unlock(&fuse_lock);
+}
+
 static int fuse_dev_release(struct inode *inode, struct file *file)
 {
 	struct fuse_conn *fc;
@@ -852,9 +914,11 @@ static int fuse_dev_release(struct inode *inode, struct file *file)
 		fc->connected = 0;
 		end_requests(fc, &fc->pending);
 		end_requests(fc, &fc->processing);
-		fuse_release_conn(fc);
 	}
 	spin_unlock(&fuse_lock);
+	if (fc)
+		kobject_put(&fc->kobj);
+
 	return 0;
 }
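
A pattern worth noting in the patch above: request_end() and end_io_requests() both claim the request's 'end' callback while holding fuse_lock (read it and clear req->end in one step) and only invoke it after the lock is dropped, so the callback runs at most once even when normal completion races with a connection abort. Below is a minimal userspace sketch of that idea, not kernel code; the names toy_request, toy_finish and toy_end are invented for illustration (build with -pthread).

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct toy_request {
	pthread_mutex_t lock;			/* stands in for fuse_lock */
	void (*end)(struct toy_request *);	/* stands in for req->end */
	int error;
};

static void toy_end(struct toy_request *req)
{
	printf("end callback invoked, error=%d\n", req->error);
}

/*
 * Finish the request.  Both the completion path and the abort path may
 * call this; only the caller that finds a non-NULL callback gets to run
 * it, and it runs outside the lock, exactly once.
 */
static void toy_finish(struct toy_request *req, int error)
{
	void (*end)(struct toy_request *);

	pthread_mutex_lock(&req->lock);
	end = req->end;
	req->end = NULL;		/* claim the callback under the lock */
	if (end)
		req->error = error;
	pthread_mutex_unlock(&req->lock);

	if (end)
		end(req);		/* invoked with the lock dropped */
}

int main(void)
{
	struct toy_request req;

	pthread_mutex_init(&req.lock, NULL);
	req.end = toy_end;
	req.error = 0;

	toy_finish(&req, 0);			/* normal completion: callback runs */
	toy_finish(&req, -ECONNABORTED);	/* simulated abort: callback already consumed */

	pthread_mutex_destroy(&req.lock);
	return 0;
}

In the patch itself the same claim happens under fuse_lock, and the callback is always called after the spinlock has been released (after spin_unlock() in request_end(), and after waiting for !req->locked in end_io_requests()).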