diff --git a/fs/locks.c b/fs/locks.c
index c795eaaf6c4..8b8388eca05 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -199,7 +199,7 @@ EXPORT_SYMBOL(locks_init_lock);
  * Initialises the fields of the file lock which are invariant for
  * free file_locks.
  */
-static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
+static void init_once(struct kmem_cache *cache, void *foo)
 {
 	struct file_lock *lock = (struct file_lock *) foo;
 
@@ -534,7 +534,9 @@ static void locks_insert_block(struct file_lock *blocker,
 static void locks_wake_up_blocks(struct file_lock *blocker)
 {
 	while (!list_empty(&blocker->fl_block)) {
-		struct file_lock *waiter = list_entry(blocker->fl_block.next,
+		struct file_lock *waiter;
+
+		waiter = list_first_entry(&blocker->fl_block,
 				struct file_lock, fl_block);
 		__locks_delete_block(waiter);
 		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
@@ -668,7 +670,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
 	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
 		if (!IS_POSIX(cfl))
 			continue;
-		if (posix_locks_conflict(cfl, fl))
+		if (posix_locks_conflict(fl, cfl))
 			break;
 	}
 	if (cfl)
@@ -694,18 +696,28 @@ EXPORT_SYMBOL(posix_test_lock);
  * Note: the above assumption may not be true when handling lock requests
  * from a broken NFS client. But broken NFS clients have a lot more to
  * worry about than proper deadlock detection anyway... --okir
+ *
+ * However, the failure of this assumption (also possible in the case of
+ * multiple tasks sharing the same open file table) also means there's no
+ * guarantee that the loop below will terminate. As a hack, we give up
+ * after a few iterations.
  */
+
+#define MAX_DEADLK_ITERATIONS 10
+
 static int posix_locks_deadlock(struct file_lock *caller_fl,
 				struct file_lock *block_fl)
 {
-	struct list_head *tmp;
+	struct file_lock *fl;
+	int i = 0;
 
 next_task:
 	if (posix_same_owner(caller_fl, block_fl))
 		return 1;
-	list_for_each(tmp, &blocked_list) {
-		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
+	list_for_each_entry(fl, &blocked_list, fl_link) {
 		if (posix_same_owner(fl, block_fl)) {
+			if (i++ > MAX_DEADLK_ITERATIONS)
+				return 0;
 			fl = fl->fl_next;
 			block_fl = fl;
 			goto next_task;
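The capped walk that the posix_locks_deadlock() hunk above introduces trades completeness for guaranteed termination: once the owner chain has been followed more than MAX_DEADLK_ITERATIONS times, the checker gives up and reports "no deadlock". A minimal userspace sketch of the same idea, assuming illustrative stand-in types ("owner" for fl_owner, "waits_for" for following fl_next through blocked_list; none of this is kernel API):

/*
 * Userspace sketch of a capped deadlock walk over a wait-for chain.
 */
#include <stdio.h>

#define MAX_DEADLK_ITERATIONS 10

struct waiter {
	int owner;			/* who is waiting */
	struct waiter *waits_for;	/* holder of the lock blocking it */
};

static int would_deadlock(const struct waiter *caller,
			  const struct waiter *block)
{
	int i = 0;

	while (block != NULL) {
		if (block->owner == caller->owner)
			return 1;	/* chain loops back: deadlock */
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;	/* give up, as the patch does */
		block = block->waits_for;
	}
	return 0;			/* chain ends: no cycle */
}

int main(void)
{
	struct waiter a = { 1, NULL }, b = { 2, NULL };

	a.waits_for = &b;
	b.waits_for = &a;	/* A waits on B, B waits on A */
	printf("deadlock: %d\n", would_deadlock(&a, &b));	/* prints 1 */
	return 0;
}

With the cycle A -> B -> A, the walk hits the caller's owner on the second step and reports a deadlock; a chain longer than the cap is optimistically allowed, which is exactly the trade-off the new comment documents.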
@@ -715,8 +727,7 @@ next_task:
 }
 
 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
- * at the head of the list, but that's secret knowledge known only to
- * flock_lock_file and posix_lock_file.
+ * after any leases, but before any posix locks.
  *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
@@ -733,6 +744,15 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	lock_kernel();
 	if (request->fl_flags & FL_ACCESS)
 		goto find_conflict;
+
+	if (request->fl_type != F_UNLCK) {
+		error = -ENOMEM;
+		new_fl = locks_alloc_lock();
+		if (new_fl == NULL)
+			goto out;
+		error = 0;
+	}
+
 	for_each_lock(inode, before) {
 		struct file_lock *fl = *before;
 		if (IS_POSIX(fl))
@@ -754,10 +774,6 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 		goto out;
 	}
 
-	error = -ENOMEM;
-	new_fl = locks_alloc_lock();
-	if (new_fl == NULL)
-		goto out;
 	/*
 	 * If a higher-priority process was blocked on the old file lock,
 	 * give it the opportunity to lock the file.
@@ -819,7 +835,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 	lock_kernel();
 	if (request->fl_type != F_UNLCK) {
 		for_each_lock(inode, before) {
-			struct file_lock *fl = *before;
+			fl = *before;
 			if (!IS_POSIX(fl))
 				continue;
 			if (!posix_locks_conflict(request, fl))
@@ -1113,7 +1129,7 @@ int locks_mandatory_area(int read_write, struct inode *inode,
 		 * If we've been sleeping someone might have
 		 * changed the permissions behind our back.
 		 */
-		if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+		if (__mandatory_lock(inode))
 			continue;
 	}
 
@@ -1337,6 +1353,7 @@ int fcntl_getlease(struct file *filp)
 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
 {
 	struct file_lock *fl, **before, **my_before = NULL, *lease;
+	struct file_lock *new_fl = NULL;
 	struct dentry *dentry = filp->f_path.dentry;
 	struct inode *inode = dentry->d_inode;
 	int error, rdlease_count = 0, wrlease_count = 0;
@@ -1363,6 +1380,11 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
 	    || (atomic_read(&inode->i_count) > 1)))
 		goto out;
 
+	error = -ENOMEM;
+	new_fl = locks_alloc_lock();
+	if (new_fl == NULL)
+		goto out;
+
 	/*
 	 * At this point, we know that if there is an exclusive
 	 * lease on this file, then we hold it on this filp
@@ -1405,18 +1427,15 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
 	if (!leases_enable)
 		goto out;
 
-	error = -ENOMEM;
-	fl = locks_alloc_lock();
-	if (fl == NULL)
-		goto out;
-
-	locks_copy_lock(fl, lease);
+	locks_copy_lock(new_fl, lease);
+	locks_insert_lock(before, new_fl);
 
-	locks_insert_lock(before, fl);
+	*flp = new_fl;
+	return 0;
 
-	*flp = fl;
-	error = 0;
 out:
+	if (new_fl != NULL)
+		locks_free_lock(new_fl);
 	return error;
 }
 EXPORT_SYMBOL(generic_setlease);
@@ -1752,9 +1771,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
 	/* Don't allow mandatory locks on files that may be memory mapped
 	 * and shared.
 	 */
-	if (IS_MANDLOCK(inode) &&
-	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
-	    mapping_writably_mapped(filp->f_mapping)) {
+	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
 		error = -EAGAIN;
 		goto out;
 	}
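A pattern shared by the flock_lock_file() and generic_setlease() hunks above: the replacement lock is now allocated before any existing lock is unlinked, so the only -ENOMEM exit happens while the lock list is still intact, and an unused allocation is simply freed on the out: path. Under the old ordering, a failed allocation could fire after state had already been torn down. A hedged userspace sketch of that all-or-nothing shape (struct node and insert_copy are illustrative, not kernel code):

/*
 * Allocate first, commit last: the only failure exit leaves *list
 * untouched, mirroring the reordering in the hunks above.
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct node {
	struct node *next;
	char data[32];
};

static int insert_copy(struct node **list, const char *data)
{
	struct node *new_node = malloc(sizeof(*new_node));

	if (new_node == NULL)
		return -ENOMEM;		/* list guaranteed unmodified */

	strncpy(new_node->data, data, sizeof(new_node->data) - 1);
	new_node->data[sizeof(new_node->data) - 1] = '\0';

	/* Commit point: nothing below this line can fail. */
	new_node->next = *list;
	*list = new_node;
	return 0;
}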
@@ -1878,9 +1895,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
 	/* Don't allow mandatory locks on files that may be memory mapped
 	 * and shared.
 	 */
-	if (IS_MANDLOCK(inode) &&
-	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
-	    mapping_writably_mapped(filp->f_mapping)) {
+	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
 		error = -EAGAIN;
 		goto out;
 	}
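Both fcntl_setlk() hunks above collapse the open-coded mode test into the new helpers: __mandatory_lock(inode) checks only the mode bits (setgid set while group execute is clear, the traditional System V marker for mandatory locking), while mandatory_lock(inode) additionally requires the filesystem to be mounted with -o mand. The same mode-bit predicate can be checked from userspace; an illustrative example, not part of the patch:

#include <stdio.h>
#include <sys/stat.h>

/* Mandatory-locking marker: setgid set, group-execute clear. */
static int mode_says_mandatory(mode_t mode)
{
	return (mode & (S_ISGID | S_IXGRP)) == S_ISGID;
}

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	if (stat(argv[1], &st) != 0) {
		perror("stat");
		return 1;
	}
	printf("%s: %s\n", argv[1],
	       mode_says_mandatory(st.st_mode) ? "mandatory candidate"
					       : "advisory");
	return 0;
}

A file is put into this mode with chmod g+s,g-x <file>; whether the kernel then actually enforces mandatory semantics still depends on the mand mount option, which is what mandatory_lock() checks on top of __mandatory_lock().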
"WRITE" : "READ "); } if (inode) { #ifdef WE_CAN_BREAK_LSLK_NOW - out += sprintf(out, "%d %s:%ld ", fl->fl_pid, + seq_printf(f, "%d %s:%ld ", fl->fl_pid, inode->i_sb->s_id, inode->i_ino); #else /* userspace relies on this representation of dev_t ;-( */ - out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid, + seq_printf(f, "%d %02x:%02x:%ld ", fl->fl_pid, MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), inode->i_ino); #endif } else { - out += sprintf(out, "%d :0 ", fl->fl_pid); + seq_printf(f, "%d :0 ", fl->fl_pid); } if (IS_POSIX(fl)) { if (fl->fl_end == OFFSET_MAX) - out += sprintf(out, "%Ld EOF\n", fl->fl_start); + seq_printf(f, "%Ld EOF\n", fl->fl_start); else - out += sprintf(out, "%Ld %Ld\n", fl->fl_start, - fl->fl_end); + seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end); } else { - out += sprintf(out, "0 EOF\n"); + seq_printf(f, "0 EOF\n"); } } -static void move_lock_status(char **p, off_t* pos, off_t offset) +static int locks_show(struct seq_file *f, void *v) { - int len; - len = strlen(*p); - if(*pos >= offset) { - /* the complete line is valid */ - *p += len; - *pos += len; - return; - } - if(*pos+len > offset) { - /* use the second part of the line */ - int i = offset-*pos; - memmove(*p,*p+i,len-i); - *p += len-i; - *pos += len; - return; - } - /* discard the complete line */ - *pos += len; + struct file_lock *fl, *bfl; + + fl = list_entry(v, struct file_lock, fl_link); + + lock_get_status(f, fl, (long)f->private, ""); + + list_for_each_entry(bfl, &fl->fl_block, fl_block) + lock_get_status(f, bfl, (long)f->private, " ->"); + + f->private++; + return 0; } -/** - * get_locks_status - reports lock usage in /proc/locks - * @buffer: address in userspace to write into - * @start: ? - * @offset: how far we are through the buffer - * @length: how much to read - */ +static void *locks_start(struct seq_file *f, loff_t *pos) +{ + lock_kernel(); + f->private = (void *)1; + return seq_list_start(&file_lock_list, *pos); +} -int get_locks_status(char *buffer, char **start, off_t offset, int length) +static void *locks_next(struct seq_file *f, void *v, loff_t *pos) { - struct list_head *tmp; - char *q = buffer; - off_t pos = 0; - int i = 0; + return seq_list_next(v, &file_lock_list, pos); +} - lock_kernel(); - list_for_each(tmp, &file_lock_list) { - struct list_head *btmp; - struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); - lock_get_status(q, fl, ++i, ""); - move_lock_status(&q, &pos, offset); - - if(pos >= offset+length) - goto done; - - list_for_each(btmp, &fl->fl_block) { - struct file_lock *bfl = list_entry(btmp, - struct file_lock, fl_block); - lock_get_status(q, bfl, i, " ->"); - move_lock_status(&q, &pos, offset); - - if(pos >= offset+length) - goto done; - } - } -done: +static void locks_stop(struct seq_file *f, void *v) +{ unlock_kernel(); - *start = buffer; - if(q-buffer < length) - return (q-buffer); - return length; } +struct seq_operations locks_seq_operations = { + .start = locks_start, + .next = locks_next, + .stop = locks_stop, + .show = locks_show, +}; +#endif + /** * lock_may_read - checks that the region is free of locks * @inode: the inode that is being read