X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=ipc%2Fsem.c;h=0b45a4d383c6ae662845f5e26c6bc19f25c10a22;hb=1baccff8a5823b51d7bf2740ef3ef17e06bfa7e2;hp=446c8f51804542be6424cd343cdd213b811f36ec;hpb=03f02c7657f7948ab980280c54c9366f962b1474;p=linux-2.6-omap-h63xx.git diff --git a/ipc/sem.c b/ipc/sem.c index 446c8f51804..0b45a4d383c 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -80,24 +80,21 @@ #include #include #include -#include +#include #include +#include #include #include "util.h" -#define sem_ids(ns) (*((ns)->ids[IPC_SEM_IDS])) +#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS]) #define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm) -#define sem_checkid(ns, sma, semid) \ - ipc_checkid(&sem_ids(ns),&sma->sem_perm,semid) -#define sem_buildid(ns, id, seq) \ - ipc_buildid(&sem_ids(ns), id, seq) - -static struct ipc_ids init_sem_ids; +#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid) +#define sem_buildid(id, seq) ipc_buildid(id, seq) static int newary(struct ipc_namespace *, struct ipc_params *); -static void freeary(struct ipc_namespace *, struct sem_array *); +static void freeary(struct ipc_namespace *, struct kern_ipc_perm *); #ifdef CONFIG_PROC_FS static int sysvipc_sem_proc_show(struct seq_file *s, void *it); #endif @@ -119,65 +116,57 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it); #define sc_semopm sem_ctls[2] #define sc_semmni sem_ctls[3] -static void __sem_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids) +void sem_init_ns(struct ipc_namespace *ns) { - ns->ids[IPC_SEM_IDS] = ids; ns->sc_semmsl = SEMMSL; ns->sc_semmns = SEMMNS; ns->sc_semopm = SEMOPM; ns->sc_semmni = SEMMNI; ns->used_sems = 0; - ipc_init_ids(ids); -} - -int sem_init_ns(struct ipc_namespace *ns) -{ - struct ipc_ids *ids; - - ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL); - if (ids == NULL) - return -ENOMEM; - - __sem_init_ns(ns, ids); - return 0; + ipc_init_ids(&ns->ids[IPC_SEM_IDS]); } +#ifdef CONFIG_IPC_NS void sem_exit_ns(struct ipc_namespace *ns) { - struct sem_array *sma; - int next_id; - int total, in_use; - - mutex_lock(&sem_ids(ns).mutex); - - in_use = sem_ids(ns).in_use; - - for (total = 0, next_id = 0; total < in_use; next_id++) { - sma = idr_find(&sem_ids(ns).ipcs_idr, next_id); - if (sma == NULL) - continue; - ipc_lock_by_ptr(&sma->sem_perm); - freeary(ns, sma); - total++; - } - mutex_unlock(&sem_ids(ns).mutex); - - kfree(ns->ids[IPC_SEM_IDS]); - ns->ids[IPC_SEM_IDS] = NULL; + free_ipcs(ns, &sem_ids(ns), freeary); } +#endif void __init sem_init (void) { - __sem_init_ns(&init_ipc_ns, &init_sem_ids); + sem_init_ns(&init_ipc_ns); ipc_init_proc_interface("sysvipc/sem", " key semid perms nsems uid gid cuid cgid otime ctime\n", IPC_SEM_IDS, sysvipc_sem_proc_show); } +/* + * This routine is called in the paths where the rw_mutex is held to protect + * access to the idr tree. + */ +static inline struct sem_array *sem_lock_check_down(struct ipc_namespace *ns, + int id) +{ + struct kern_ipc_perm *ipcp = ipc_lock_check_down(&sem_ids(ns), id); + + if (IS_ERR(ipcp)) + return (struct sem_array *)ipcp; + + return container_of(ipcp, struct sem_array, sem_perm); +} + +/* + * sem_lock_(check_) routines are called in the paths where the rw_mutex + * is not held. 
+ */ static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id); + if (IS_ERR(ipcp)) + return (struct sem_array *)ipcp; + return container_of(ipcp, struct sem_array, sem_perm); } @@ -186,6 +175,9 @@ static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns, { struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id); + if (IS_ERR(ipcp)) + return (struct sem_array *)ipcp; + return container_of(ipcp, struct sem_array, sem_perm); } @@ -228,6 +220,14 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) */ #define IN_WAKEUP 1 +/** + * newary - Create a new semaphore set + * @ns: namespace + * @params: ptr to the structure that contains key, semflg and nsems + * + * Called with sem_ids.rw_mutex held (as a writer) + */ + static int newary(struct ipc_namespace *ns, struct ipc_params *params) { int id; @@ -261,14 +261,14 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) } id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); - if(id == -1) { + if (id < 0) { security_sem_free(sma); ipc_rcu_putref(sma); - return -ENOSPC; + return id; } ns->used_sems += nsems; - sma->sem_perm.id = sem_buildid(ns, id, sma->sem_perm.seq); + sma->sem_perm.id = sem_buildid(id, sma->sem_perm.seq); sma->sem_base = (struct sem *) &sma[1]; /* sma->sem_pending = NULL; */ sma->sem_pending_last = &sma->sem_pending; @@ -281,6 +281,9 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) } +/* + * Called with sem_ids.rw_mutex and ipcp locked. + */ static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg) { struct sem_array *sma; @@ -289,6 +292,9 @@ static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg) return security_sem_associate(sma, semflg); } +/* + * Called with sem_ids.rw_mutex and ipcp locked. + */ static inline int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params) { @@ -514,14 +520,15 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum) return semzcnt; } -/* Free a semaphore set. freeary() is called with sem_ids.mutex locked and - * the spinlock for this semaphore set hold. sem_ids.mutex remains locked - * on exit. +/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked + * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex + * remains locked on exit. */ -static void freeary(struct ipc_namespace *ns, struct sem_array *sma) +static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct sem_undo *un; struct sem_queue *q; + struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); /* Invalidate the existing undo structures for this semaphore set. 
* (They will be freed without any further action in exit_sem() @@ -575,8 +582,8 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, } } -static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum, - int cmd, int version, union semun arg) +static int semctl_nolock(struct ipc_namespace *ns, int semid, + int cmd, int version, union semun arg) { int err = -EINVAL; struct sem_array *sma; @@ -601,7 +608,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum, seminfo.semmnu = SEMMNU; seminfo.semmap = SEMMAP; seminfo.semume = SEMUME; - mutex_lock(&sem_ids(ns).mutex); + down_read(&sem_ids(ns).rw_mutex); if (cmd == SEM_INFO) { seminfo.semusz = sem_ids(ns).in_use; seminfo.semaem = ns->used_sems; @@ -610,19 +617,28 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum, seminfo.semaem = SEMAEM; } max_id = ipc_get_maxid(&sem_ids(ns)); - mutex_unlock(&sem_ids(ns).mutex); + up_read(&sem_ids(ns).rw_mutex); if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) return -EFAULT; return (max_id < 0) ? 0: max_id; } + case IPC_STAT: case SEM_STAT: { struct semid64_ds tbuf; int id; - sma = sem_lock(ns, semid); - if (IS_ERR(sma)) - return PTR_ERR(sma); + if (cmd == SEM_STAT) { + sma = sem_lock(ns, semid); + if (IS_ERR(sma)) + return PTR_ERR(sma); + id = sma->sem_perm.id; + } else { + sma = sem_lock_check(ns, semid); + if (IS_ERR(sma)) + return PTR_ERR(sma); + id = 0; + } err = -EACCES; if (ipcperms (&sma->sem_perm, S_IRUGO)) @@ -632,8 +648,6 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum, if (err) goto out_unlock; - id = sma->sem_perm.id; - memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); @@ -768,19 +782,6 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, err = 0; goto out_unlock; } - case IPC_STAT: - { - struct semid64_ds tbuf; - memset(&tbuf,0,sizeof(tbuf)); - kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); - tbuf.sem_otime = sma->sem_otime; - tbuf.sem_ctime = sma->sem_ctime; - tbuf.sem_nsems = sma->sem_nsems; - sem_unlock(sma); - if (copy_semid_to_user (arg.buf, &tbuf, version)) - return -EFAULT; - return 0; - } /* GETVAL, GETPID, GETNCTN, GETZCNT, SETVAL: fall-through */ } err = -EINVAL; @@ -881,7 +882,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid, int semnum, if(copy_semid_from_user (&setbuf, arg.buf, version)) return -EFAULT; } - sma = sem_lock_check(ns, semid); + sma = sem_lock_check_down(ns, semid); if (IS_ERR(sma)) return PTR_ERR(sma); @@ -908,7 +909,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid, int semnum, switch(cmd){ case IPC_RMID: - freeary(ns, sma); + freeary(ns, ipcp); err = 0; break; case IPC_SET: @@ -947,60 +948,30 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg) switch(cmd) { case IPC_INFO: case SEM_INFO: + case IPC_STAT: case SEM_STAT: - err = semctl_nolock(ns,semid,semnum,cmd,version,arg); + err = semctl_nolock(ns, semid, cmd, version, arg); return err; case GETALL: case GETVAL: case GETPID: case GETNCNT: case GETZCNT: - case IPC_STAT: case SETVAL: case SETALL: err = semctl_main(ns,semid,semnum,cmd,version,arg); return err; case IPC_RMID: case IPC_SET: - mutex_lock(&sem_ids(ns).mutex); + down_write(&sem_ids(ns).rw_mutex); err = semctl_down(ns,semid,semnum,cmd,version,arg); - mutex_unlock(&sem_ids(ns).mutex); + up_write(&sem_ids(ns).rw_mutex); return err; default: return -EINVAL; } } -static inline void lock_semundo(void) -{ - struct 
sem_undo_list *undo_list; - - undo_list = current->sysvsem.undo_list; - if (undo_list) - spin_lock(&undo_list->lock); -} - -/* This code has an interaction with copy_semundo(). - * Consider; two tasks are sharing the undo_list. task1 - * acquires the undo_list lock in lock_semundo(). If task2 now - * exits before task1 releases the lock (by calling - * unlock_semundo()), then task1 will never call spin_unlock(). - * This leave the sem_undo_list in a locked state. If task1 now creats task3 - * and once again shares the sem_undo_list, the sem_undo_list will still be - * locked, and future SEM_UNDO operations will deadlock. This case is - * dealt with in copy_semundo() by having it reinitialize the spin lock when - * the refcnt goes from 1 to 2. - */ -static inline void unlock_semundo(void) -{ - struct sem_undo_list *undo_list; - - undo_list = current->sysvsem.undo_list; - if (undo_list) - spin_unlock(&undo_list->lock); -} - - /* If the task doesn't already have a undo_list, then allocate one * here. We guarantee there is only one thread using this undo list, * and current is THE ONE @@ -1061,9 +1032,9 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) if (error) return ERR_PTR(error); - lock_semundo(); + spin_lock(&ulp->lock); un = lookup_undo(ulp, semid); - unlock_semundo(); + spin_unlock(&ulp->lock); if (likely(un!=NULL)) goto out; @@ -1086,10 +1057,10 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) new->semadj = (short *) &new[1]; new->semid = semid; - lock_semundo(); + spin_lock(&ulp->lock); un = lookup_undo(ulp, semid); if (un) { - unlock_semundo(); + spin_unlock(&ulp->lock); kfree(new); ipc_lock_by_ptr(&sma->sem_perm); ipc_rcu_putref(sma); @@ -1100,7 +1071,7 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) ipc_rcu_putref(sma); if (sma->sem_perm.deleted) { sem_unlock(sma); - unlock_semundo(); + spin_unlock(&ulp->lock); kfree(new); un = ERR_PTR(-EIDRM); goto out; @@ -1111,7 +1082,7 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) sma->undo = new; sem_unlock(sma); un = new; - unlock_semundo(); + spin_unlock(&ulp->lock); out: return un; } @@ -1287,10 +1258,6 @@ asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsop /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between * parent and child tasks. - * - * See the notes above unlock_semundo() regarding the spin_lock_init() - * in this code. Initialize the undo_list->lock here instead of get_undo_list() - * because of the reasoning in the comment above unlock_semundo. */ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) @@ -1356,7 +1323,7 @@ void exit_sem(struct task_struct *tsk) if (u->semid == -1) goto next_entry; - BUG_ON(sem_checkid(ns,sma,u->semid)); + BUG_ON(sem_checkid(sma, u->semid)); /* remove u from the sma->undo list */ for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
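
The reworked lookup helpers above (sem_lock(), sem_lock_check(), sem_lock_check_down()) all follow the same pattern: ipc_lock()/ipc_lock_check() return a pointer to the embedded kern_ipc_perm, and the caller recovers the enclosing sem_array with container_of(). The following user-space sketch (not part of the patch, with simplified stand-in structures rather than the real kernel definitions) shows that recovery step in isolation:

#include <stddef.h>
#include <stdio.h>

/* Simplified variant of the kernel's container_of(): map a pointer to a
 * member back to the structure that embeds it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kern_ipc_perm {			/* stand-in, not the kernel definition */
	int id;
	int seq;
};

struct sem_array {			/* stand-in, not the kernel definition */
	struct kern_ipc_perm sem_perm;	/* embedded, as in ipc/sem.c */
	int sem_nsems;
};

int main(void)
{
	struct sem_array sma = { .sem_perm = { .id = 42, .seq = 1 }, .sem_nsems = 3 };

	/* What ipc_lock() would hand back: a pointer to the embedded member. */
	struct kern_ipc_perm *ipcp = &sma.sem_perm;

	/* What sem_lock() does with it after the patch. */
	struct sem_array *back = container_of(ipcp, struct sem_array, sem_perm);

	printf("id=%d nsems=%d recovered=%s\n",
	       back->sem_perm.id, back->sem_nsems, back == &sma ? "yes" : "no");
	return 0;
}

The same embedding is what lets freeary() take a struct kern_ipc_perm * directly: the generic free_ipcs() helper used by sem_exit_ns() can walk the id table and call freeary() without knowing anything about sem_array.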
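
The other visible theme is the switch from the per-namespace mutex to a reader/writer semaphore: semctl_nolock() now brackets the IPC_INFO/SEM_INFO path with down_read()/up_read() on sem_ids(ns).rw_mutex, while sys_semctl() takes down_write()/up_write() around semctl_down() for IPC_RMID and IPC_SET, which is also why semctl_down() switches to sem_lock_check_down(). A rough user-space analogy (pthread_rwlock_t standing in for the kernel rw_mutex; the function names below are illustrative, not from the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t ids_rw_mutex = PTHREAD_RWLOCK_INITIALIZER;
static int ids_in_use;				/* stands in for sem_ids(ns).in_use */

/* Read-only query, like the SEM_INFO branch of semctl_nolock(). */
static int info_path(void)
{
	int n;

	pthread_rwlock_rdlock(&ids_rw_mutex);	/* was mutex_lock(), now down_read() */
	n = ids_in_use;
	pthread_rwlock_unlock(&ids_rw_mutex);	/* was mutex_unlock(), now up_read() */
	return n;
}

/* Destructive update, like the IPC_RMID branch of sys_semctl(). */
static void rmid_path(void)
{
	pthread_rwlock_wrlock(&ids_rw_mutex);	/* now down_write() */
	ids_in_use--;				/* semctl_down()/freeary() run here */
	pthread_rwlock_unlock(&ids_rw_mutex);	/* now up_write() */
}

int main(void)
{
	ids_in_use = 1;
	printf("in use before RMID: %d\n", info_path());
	rmid_path();
	printf("in use after  RMID: %d\n", info_path());
	return 0;
}

Concurrent information queries can now run in parallel; only the paths that create, change, or remove a semaphore set take the writer side and serialize against everything else.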