diff --git a/fs/file.c b/fs/file.c
index 55f4e702256..51aef675470 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -21,7 +21,6 @@
 struct fdtable_defer {
 	spinlock_t lock;
 	struct work_struct wq;
-	struct timer_list timer;
 	struct fdtable *next;
 };
 
@@ -75,24 +74,10 @@ static void __free_fdtable(struct fdtable *fdt)
 	kfree(fdt);
 }
 
-static void fdtable_timer(unsigned long data)
-{
-	struct fdtable_defer *fddef = (struct fdtable_defer *)data;
-
-	spin_lock(&fddef->lock);
-	/*
-	 * If someone already emptied the queue return.
-	 */
-	if (!fddef->next)
-		goto out;
-	if (!schedule_work(&fddef->wq))
-		mod_timer(&fddef->timer, 5);
-out:
-	spin_unlock(&fddef->lock);
-}
-
-static void free_fdtable_work(struct fdtable_defer *f)
+static void free_fdtable_work(struct work_struct *work)
 {
+	struct fdtable_defer *f =
+		container_of(work, struct fdtable_defer, wq);
 	struct fdtable *fdt;
 
 	spin_lock_bh(&f->lock);
@@ -142,13 +127,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
 	spin_lock(&fddef->lock);
 	fdt->next = fddef->next;
 	fddef->next = fdt;
-	/*
-	 * vmallocs are handled from the workqueue context.
-	 * If the per-cpu workqueue is running, then we
-	 * defer work scheduling through a timer.
-	 */
-	if (!schedule_work(&fddef->wq))
-		mod_timer(&fddef->timer, 5);
+	/* vmallocs are handled from the workqueue context */
+	schedule_work(&fddef->wq);
 	spin_unlock(&fddef->lock);
 	put_cpu_var(fdtable_defer_list);
 }
@@ -240,13 +220,9 @@ static struct fdtable *alloc_fdtable(int nr)
 	if (!fdt)
 		goto out;
 
-	nfds = 8 * L1_CACHE_BYTES;
-	/* Expand to the max in easy steps */
-	while (nfds <= nr) {
-		nfds = nfds * 2;
-		if (nfds > NR_OPEN)
-			nfds = NR_OPEN;
-	}
+	nfds = max_t(int, 8 * L1_CACHE_BYTES, roundup_pow_of_two(nr + 1));
+	if (nfds > NR_OPEN)
+		nfds = NR_OPEN;
 
 	new_openset = alloc_fdset(nfds);
 	new_execset = alloc_fdset(nfds);
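The hunk above collapses the old doubling loop into a single computation. The two agree for every nr this function can legally see (expand_files() returns -EMFILE before nr can reach NR_OPEN): the loop started from 8 * L1_CACHE_BYTES, a power of two for any architecture's cache-line size, and doubled until it exceeded nr, which is exactly max_t(int, 8 * L1_CACHE_BYTES, roundup_pow_of_two(nr + 1)). A stand-alone user-space sketch of that equivalence follows; L1_CACHE_BYTES and NR_OPEN are illustrative stand-ins and roundup_pow_of_two() is reimplemented locally rather than taken from <linux/log2.h>:

/*
 * User-space sketch only: checks that the one-shot sizing expression
 * matches the removed doubling loop for every nr expand_files() can
 * legally pass in (nr < NR_OPEN).  Constants are illustrative.
 */
#include <assert.h>
#include <stdio.h>

#define L1_CACHE_BYTES	128		/* illustrative stand-in */
#define NR_OPEN		(1024 * 1024)	/* illustrative stand-in */

static int roundup_pow_of_two(int n)	/* smallest power of two >= n */
{
	int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static int old_nfds(int nr)		/* the removed doubling loop */
{
	int nfds = 8 * L1_CACHE_BYTES;

	while (nfds <= nr) {
		nfds = nfds * 2;
		if (nfds > NR_OPEN)
			nfds = NR_OPEN;
	}
	return nfds;
}

static int new_nfds(int nr)		/* the max_t() replacement */
{
	int nfds = 8 * L1_CACHE_BYTES;

	if (nfds < roundup_pow_of_two(nr + 1))
		nfds = roundup_pow_of_two(nr + 1);
	if (nfds > NR_OPEN)
		nfds = NR_OPEN;
	return nfds;
}

int main(void)
{
	int nr;

	for (nr = 0; nr < NR_OPEN; nr++)
		assert(old_nfds(nr) == new_nfds(nr));
	printf("fdset sizing unchanged for all nr < NR_OPEN\n");
	return 0;
}

Moving the NR_OPEN clamp out of the loop also reflects that it can fire at most once: a single comparison after the rounding replaces a clamp re-checked on every doubling.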
@@ -277,96 +253,85 @@ static struct fdtable *alloc_fdtable(int nr)
 		}
 	} while (nfds <= nr);
 	new_fds = alloc_fd_array(nfds);
 	if (!new_fds)
-		goto out;
+		goto out2;
 	fdt->fd = new_fds;
 	fdt->max_fds = nfds;
 	fdt->free_files = NULL;
 	return fdt;
+out2:
+	nfds = fdt->max_fdset;
 out:
-	if (new_openset)
-		free_fdset(new_openset, nfds);
-	if (new_execset)
-		free_fdset(new_execset, nfds);
+	free_fdset(new_openset, nfds);
+	free_fdset(new_execset, nfds);
 	kfree(fdt);
 	return NULL;
 }
 
 /*
- * Expands the file descriptor table - it will allocate a new fdtable and
- * both fd array and fdset. It is expected to be called with the
- * files_lock held.
+ * Expand the file descriptor table.
+ * This function will allocate a new fdtable and both fd array and fdset, of
+ * the given size.
+ * Return <0 error code on error; 1 on successful completion.
+ * The files->file_lock should be held on entry, and will be held on exit.
  */
 static int expand_fdtable(struct files_struct *files, int nr)
 	__releases(files->file_lock)
 	__acquires(files->file_lock)
 {
-	int error = 0;
-	struct fdtable *fdt;
-	struct fdtable *nfdt = NULL;
+	struct fdtable *new_fdt, *cur_fdt;
 
 	spin_unlock(&files->file_lock);
-	nfdt = alloc_fdtable(nr);
-	if (!nfdt) {
-		error = -ENOMEM;
-		spin_lock(&files->file_lock);
-		goto out;
-	}
-
+	new_fdt = alloc_fdtable(nr);
 	spin_lock(&files->file_lock);
-	fdt = files_fdtable(files);
+	if (!new_fdt)
+		return -ENOMEM;
 	/*
-	 * Check again since another task may have expanded the
-	 * fd table while we dropped the lock
+	 * Check again since another task may have expanded the fd table while
+	 * we dropped the lock
 	 */
-	if (nr >= fdt->max_fds || nr >= fdt->max_fdset) {
-		copy_fdtable(nfdt, fdt);
+	cur_fdt = files_fdtable(files);
+	if (nr >= cur_fdt->max_fds || nr >= cur_fdt->max_fdset) {
+		/* Continue as planned */
+		copy_fdtable(new_fdt, cur_fdt);
+		rcu_assign_pointer(files->fdt, new_fdt);
+		free_fdtable(cur_fdt);
 	} else {
-		/* Somebody expanded while we dropped file_lock */
-		spin_unlock(&files->file_lock);
-		__free_fdtable(nfdt);
-		spin_lock(&files->file_lock);
-		goto out;
+		/* Somebody else expanded, so undo our attempt */
+		__free_fdtable(new_fdt);
 	}
-	rcu_assign_pointer(files->fdt, nfdt);
-	free_fdtable(fdt);
-out:
-	return error;
+	return 1;
 }
 
 /*
  * Expand files.
- * Return <0 on error; 0 nothing done; 1 files expanded, we may have blocked.
- * Should be called with the files->file_lock spinlock held for write.
+ * This function will expand the file structures, if the requested size exceeds
+ * the current capacity and there is room for expansion.
+ * Return <0 error code on error; 0 when nothing done; 1 when files were
+ * expanded and execution may have blocked.
+ * The files->file_lock should be held on entry, and will be held on exit.
  */
 int expand_files(struct files_struct *files, int nr)
 {
-	int err, expand = 0;
 	struct fdtable *fdt;
 
 	fdt = files_fdtable(files);
-	if (nr >= fdt->max_fdset || nr >= fdt->max_fds) {
-		if (fdt->max_fdset >= NR_OPEN ||
-			fdt->max_fds >= NR_OPEN || nr >= NR_OPEN) {
-			err = -EMFILE;
-			goto out;
-		}
-		expand = 1;
-		if ((err = expand_fdtable(files, nr)))
-			goto out;
-	}
-	err = expand;
-out:
-	return err;
+	/* Do we need to expand? */
+	if (nr < fdt->max_fdset && nr < fdt->max_fds)
+		return 0;
+	/* Can we expand? */
+	if (fdt->max_fdset >= NR_OPEN || fdt->max_fds >= NR_OPEN ||
+	    nr >= NR_OPEN)
+		return -EMFILE;
+
+	/* All good, so we try */
+	return expand_fdtable(files, nr);
 }
 
 static void __devinit fdtable_defer_list_init(int cpu)
 {
 	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
 	spin_lock_init(&fddef->lock);
-	INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef);
-	init_timer(&fddef->timer);
-	fddef->timer.data = (unsigned long)fddef;
-	fddef->timer.function = fdtable_timer;
+	INIT_WORK(&fddef->wq, free_fdtable_work);
 	fddef->next = NULL;
 }
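The timer removed by the first three hunks existed only to retry schedule_work() when it returned zero; but schedule_work() returns zero only when the work item is already queued, and a queued run of free_fdtable_work() drains the whole fddef->next list under fddef->lock, so entries added before a failed retry would have been freed anyway. The same hunks move free_fdtable_work() to the work_struct-pointer calling convention, where the handler recovers its context with container_of() on the embedded work_struct instead of receiving a cast void pointer. A user-space sketch of that recovery pattern follows; the types and container_of() here are stand-ins for the kernel's, only the names mirror the patch:

/*
 * User-space sketch only: invoking the handler by hand stands in for
 * schedule_work(); just the container_of() recovery is the real pattern.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {
	void (*func)(struct work_struct *work);	/* stand-in type */
};

struct fdtable_defer {			/* mirrors the patched struct */
	struct work_struct wq;
	int pending;			/* stand-in for the fdt list */
};

static void free_fdtable_work(struct work_struct *work)
{
	/* Recover the enclosing context from the embedded work_struct. */
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);

	printf("draining fdtable_defer, %d entries pending\n", f->pending);
}

int main(void)
{
	struct fdtable_defer fddef = {
		.wq = { .func = free_fdtable_work },	/* INIT_WORK() stand-in */
		.pending = 3,
	};

	/* What the workqueue would do after schedule_work(&fddef.wq): */
	fddef.wq.func(&fddef.wq);
	return 0;
}

Embedding the work_struct in fdtable_defer is what makes this work: the work item's address uniquely identifies its enclosing per-cpu structure, so no separate context pointer needs to be carried.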