Diffstat (limited to 'ipc')
-rw-r--r--   ipc/msg.c    |  28
-rw-r--r--   ipc/sem.c    | 155
-rw-r--r--   ipc/shm.c    |  26
-rw-r--r--   ipc/util.c   |  65
-rw-r--r--   ipc/util.h   |  23
5 files changed, 114 insertions, 183 deletions
diff --git a/ipc/msg.c b/ipc/msg.c
index 104926dc72be..5b25e0755656 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -97,11 +97,11 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
 
 static void msg_rcu_free(struct rcu_head *head)
 {
-	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
-	struct msg_queue *msq = ipc_rcu_to_struct(p);
+	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
+	struct msg_queue *msq = container_of(p, struct msg_queue, q_perm);
 
 	security_msg_queue_free(msq);
-	ipc_rcu_free(head);
+	kvfree(msq);
 }
 
 /**
@@ -114,12 +114,12 @@ static void msg_rcu_free(struct rcu_head *head)
 static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 {
 	struct msg_queue *msq;
-	int id, retval;
+	int retval;
 	key_t key = params->key;
 	int msgflg = params->flg;
 
-	msq = ipc_rcu_alloc(sizeof(*msq));
-	if (!msq)
+	msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
+	if (unlikely(!msq))
 		return -ENOMEM;
 
 	msq->q_perm.mode = msgflg & S_IRWXUGO;
@@ -128,7 +128,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 	msq->q_perm.security = NULL;
 	retval = security_msg_queue_alloc(msq);
 	if (retval) {
-		ipc_rcu_putref(msq, ipc_rcu_free);
+		kvfree(msq);
 		return retval;
 	}
 
@@ -142,10 +142,10 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 	INIT_LIST_HEAD(&msq->q_senders);
 
 	/* ipc_addid() locks msq upon success. */
-	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
-	if (id < 0) {
-		ipc_rcu_putref(msq, msg_rcu_free);
-		return id;
+	retval = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
+	if (retval < 0) {
+		call_rcu(&msq->q_perm.rcu, msg_rcu_free);
+		return retval;
 	}
 
 	ipc_unlock_object(&msq->q_perm);
@@ -249,7 +249,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 		free_msg(msg);
 	}
 	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
-	ipc_rcu_putref(msq, msg_rcu_free);
+	ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
 }
 
 /*
@@ -688,7 +688,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 		/* enqueue the sender and prepare to block */
 		ss_add(msq, &s, msgsz);
 
-		if (!ipc_rcu_getref(msq)) {
+		if (!ipc_rcu_getref(&msq->q_perm)) {
 			err = -EIDRM;
 			goto out_unlock0;
 		}
@@ -700,7 +700,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 		rcu_read_lock();
 		ipc_lock_object(&msq->q_perm);
 
-		ipc_rcu_putref(msq, msg_rcu_free);
+		ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
 		/* raced with RMID? */
 		if (!ipc_valid_object(&msq->q_perm)) {
 			err = -EIDRM;
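The msg.c hunks above carry the core idea of the series: the rcu_head and refcount that used to live in a separately prepended struct ipc_rcu now sit inside kern_ipc_perm itself (that structure is in include/linux/ipc.h, which this page's 'ipc' diffstat filter hides), so the RCU callback recovers the outer object with two container_of() steps instead of pointer arithmetic on a hidden header. A minimal standalone sketch of that recovery, written as userspace C with hypothetical names (the kernel's rcu_head and kvfree() are approximated by a plain struct and free()):

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void (*func)(struct rcu_head *); };

struct kern_ipc_perm {			/* stand-in for the kernel's */
	int id;
	struct rcu_head rcu;		/* embedded: no header allocation */
};

struct msg_queue {
	struct kern_ipc_perm q_perm;	/* first member, by convention */
	long q_cbytes;
};

/* What the new msg_rcu_free() does: rcu_head -> kern_ipc_perm -> msg_queue */
static void msg_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
	struct msg_queue *msq = container_of(p, struct msg_queue, q_perm);

	printf("freeing queue id=%d\n", msq->q_perm.id);
	free(msq);			/* the kernel uses kvfree() */
}

int main(void)
{
	struct msg_queue *msq = calloc(1, sizeof(*msq));

	if (!msq)
		return 1;
	msq->q_perm.id = 42;
	/* in the kernel, call_rcu(&msq->q_perm.rcu, msg_rcu_free) defers this */
	msg_rcu_free(&msq->q_perm.rcu);
	return 0;
}

Because container_of() subtracts a compile-time offset, the scheme would work for any member position; keeping kern_ipc_perm as the first member of each IPC structure is an existing convention, not a requirement of the pattern.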
diff --git a/ipc/sem.c b/ipc/sem.c
index 947dc2348271..9e70cd7a17da 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -87,24 +87,6 @@
 #include <linux/uaccess.h>
 #include "util.h"
 
-/* One semaphore structure for each semaphore in the system. */
-struct sem {
-	int	semval;		/* current value */
-	/*
-	 * PID of the process that last modified the semaphore. For
-	 * Linux, specifically these are:
-	 *  - semop
-	 *  - semctl, via SETVAL and SETALL.
-	 *  - at task exit when performing undo adjustments (see exit_sem).
-	 */
-	int	sempid;
-	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
-	struct list_head pending_alter; /* pending single-sop operations */
-					/* that alter the semaphore */
-	struct list_head pending_const; /* pending single-sop operations */
-					/* that do not alter the semaphore*/
-	time_t	sem_otime;	/* candidate for sem_otime */
-} ____cacheline_aligned_in_smp;
 
 /* One queue for each sleeping process in the system. */
 struct sem_queue {
@@ -175,7 +157,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
  *	sem_array.sem_undo
  *
  * b) global or semaphore sem_lock() for read/write:
- *	sem_array.sem_base[i].pending_{const,alter}:
+ *	sem_array.sems[i].pending_{const,alter}:
  *
  * c) special:
  *	sem_undo_list.list_proc:
@@ -250,7 +232,7 @@ static void unmerge_queues(struct sem_array *sma)
 	 */
 	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
 		struct sem *curr;
-		curr = &sma->sem_base[q->sops[0].sem_num];
+		curr = &sma->sems[q->sops[0].sem_num];
 
 		list_add_tail(&q->list, &curr->pending_alter);
 	}
@@ -270,7 +252,7 @@ static void merge_queues(struct sem_array *sma)
 {
 	int i;
 	for (i = 0; i < sma->sem_nsems; i++) {
-		struct sem *sem = sma->sem_base + i;
+		struct sem *sem = &sma->sems[i];
 
 		list_splice_init(&sem->pending_alter, &sma->pending_alter);
 	}
@@ -278,11 +260,11 @@ static void merge_queues(struct sem_array *sma)
 
 static void sem_rcu_free(struct rcu_head *head)
 {
-	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
-	struct sem_array *sma = ipc_rcu_to_struct(p);
+	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
+	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
 
 	security_sem_free(sma);
-	ipc_rcu_free(head);
+	kvfree(sma);
 }
 
 /*
@@ -306,7 +288,7 @@ static void complexmode_enter(struct sem_array *sma)
 	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
 
 	for (i = 0; i < sma->sem_nsems; i++) {
-		sem = sma->sem_base + i;
+		sem = &sma->sems[i];
 		spin_lock(&sem->lock);
 		spin_unlock(&sem->lock);
 	}
@@ -366,7 +348,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 		 *
 		 * Both facts are tracked by use_global_mode.
 		 */
-		sem = sma->sem_base + sops->sem_num;
+		sem = &sma->sems[sops->sem_num];
 
 		/*
 		 * Initial check for use_global_lock. Just an optimization,
@@ -421,7 +403,7 @@ static inline void sem_unlock(struct sem_array *sma, int locknum)
 		complexmode_tryleave(sma);
 		ipc_unlock_object(&sma->sem_perm);
 	} else {
-		struct sem *sem = sma->sem_base + locknum;
+		struct sem *sem = &sma->sems[locknum];
 		spin_unlock(&sem->lock);
 	}
 }
@@ -456,7 +438,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
 static inline void sem_lock_and_putref(struct sem_array *sma)
 {
 	sem_lock(sma, NULL, -1);
-	ipc_rcu_putref(sma, sem_rcu_free);
+	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 }
 
 static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -464,6 +446,24 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 	ipc_rmid(&sem_ids(ns), &s->sem_perm);
 }
 
+static struct sem_array *sem_alloc(size_t nsems)
+{
+	struct sem_array *sma;
+	size_t size;
+
+	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
+		return NULL;
+
+	size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
+	sma = kvmalloc(size, GFP_KERNEL);
+	if (unlikely(!sma))
+		return NULL;
+
+	memset(sma, 0, size);
+
+	return sma;
+}
+
 /**
  * newary - Create a new semaphore set
  * @ns: namespace
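The new sem_alloc() above also retires the old open-coded sizing (sizeof(*sma) + nsems * sizeof(struct sem)) in favour of a computation over the sems[] flexible array member (declared in include/linux/sem.h, outside this diffstat's 'ipc' filter), guarded so the multiplication cannot overflow. The same shape as a self-contained sketch with invented names; later kernels package this exact check as struct_size() in <linux/overflow.h>:

#include <limits.h>
#include <stdlib.h>
#include <string.h>

struct item { int val; };

struct array {			/* hypothetical stand-in for sem_array */
	size_t nitems;
	struct item items[];	/* flexible array member, like sems[] */
};

static struct array *array_alloc(size_t nitems)
{
	struct array *a;
	size_t size;

	/* Reject counts whose byte total would exceed INT_MAX before
	 * multiplying, mirroring sem_alloc()'s guard. */
	if (nitems > (INT_MAX - sizeof(*a)) / sizeof(a->items[0]))
		return NULL;

	size = sizeof(*a) + nitems * sizeof(a->items[0]);
	a = malloc(size);	/* the kernel uses kvmalloc(size, GFP_KERNEL) */
	if (!a)
		return NULL;

	memset(a, 0, size);	/* sem_alloc() zeroes the whole block too */
	a->nitems = nitems;
	return a;
}

int main(void)
{
	struct array *a = array_alloc(4);

	free(a);
	return 0;
}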
@@ -473,10 +473,8 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
  */
 static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 {
-	int id;
 	int retval;
 	struct sem_array *sma;
-	int size;
 	key_t key = params->key;
 	int nsems = params->u.nsems;
 	int semflg = params->flg;
@@ -487,29 +485,24 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 	if (ns->used_sems + nsems > ns->sc_semmns)
 		return -ENOSPC;
 
-	size = sizeof(*sma) + nsems * sizeof(struct sem);
-	sma = ipc_rcu_alloc(size);
+	sma = sem_alloc(nsems);
 	if (!sma)
 		return -ENOMEM;
 
-	memset(sma, 0, size);
-
 	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 	sma->sem_perm.key = key;
 
 	sma->sem_perm.security = NULL;
 	retval = security_sem_alloc(sma);
 	if (retval) {
-		ipc_rcu_putref(sma, ipc_rcu_free);
+		kvfree(sma);
 		return retval;
 	}
 
-	sma->sem_base = (struct sem *) &sma[1];
-
 	for (i = 0; i < nsems; i++) {
-		INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
-		INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
-		spin_lock_init(&sma->sem_base[i].lock);
+		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
+		INIT_LIST_HEAD(&sma->sems[i].pending_const);
+		spin_lock_init(&sma->sems[i].lock);
 	}
 
 	sma->complex_count = 0;
@@ -520,10 +513,10 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 	sma->sem_nsems = nsems;
 	sma->sem_ctime = get_seconds();
 
-	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
-	if (id < 0) {
-		ipc_rcu_putref(sma, sem_rcu_free);
-		return id;
+	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
+	if (retval < 0) {
+		call_rcu(&sma->sem_perm.rcu, sem_rcu_free);
+		return retval;
 	}
 	ns->used_sems += nsems;
 
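Note the asymmetry between newary()'s two failure paths. If security_sem_alloc() fails, nothing but the bare allocation exists yet, so a plain kvfree() suffices; if ipc_addid() fails, the security blob is already attached but the refcount is not yet initialized (ipc_addid() now does that, per the comment added to ipc/util.h further down), so ipc_rcu_putref() must not be used and the object goes straight to call_rcu() with the same sem_rcu_free callback. A compilable userspace sketch of that discipline, all names invented:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct perm { atomic_int refcount; };	/* like kern_ipc_perm */

struct sem_set {
	struct perm p;
	void *security;			/* like the LSM blob */
};

static void sem_set_rcu_free(struct sem_set *s)	/* like sem_rcu_free() */
{
	free(s->security);		/* like security_sem_free() */
	free(s);			/* like kvfree() */
}

static int new_sem_set(int fail_security, int fail_addid)
{
	struct sem_set *s = calloc(1, sizeof(*s));

	if (!s)
		return -1;

	if (fail_security) {		/* blob never attached: plain free */
		free(s);
		return -1;
	}
	s->security = malloc(16);

	if (fail_addid) {
		/* refcount still uninitialized: skip putref, run the
		 * RCU callback path directly, as call_rcu() would */
		sem_set_rcu_free(s);
		return -1;
	}

	atomic_init(&s->p.refcount, 1);	/* what ipc_addid() now does */
	printf("created\n");
	sem_set_rcu_free(s);		/* teardown just for the sketch */
	return 0;
}

int main(void)
{
	new_sem_set(1, 0);
	new_sem_set(0, 1);
	return new_sem_set(0, 0);
}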
@@ -612,7 +605,7 @@ static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 	un = q->undo;
 
 	for (sop = sops; sop < sops + nsops; sop++) {
-		curr = sma->sem_base + sop->sem_num;
+		curr = &sma->sems[sop->sem_num];
 		sem_op = sop->sem_op;
 		result = curr->semval;
 
@@ -639,7 +632,7 @@ static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 	sop--;
 	pid = q->pid;
 	while (sop >= sops) {
-		sma->sem_base[sop->sem_num].sempid = pid;
+		sma->sems[sop->sem_num].sempid = pid;
 		sop--;
 	}
 
@@ -661,7 +654,7 @@ undo:
 	sop--;
 	while (sop >= sops) {
 		sem_op = sop->sem_op;
-		sma->sem_base[sop->sem_num].semval -= sem_op;
+		sma->sems[sop->sem_num].semval -= sem_op;
 		if (sop->sem_flg & SEM_UNDO)
 			un->semadj[sop->sem_num] += sem_op;
 		sop--;
@@ -692,7 +685,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 	 * until the operations can go through.
 	 */
 	for (sop = sops; sop < sops + nsops; sop++) {
-		curr = sma->sem_base + sop->sem_num;
+		curr = &sma->sems[sop->sem_num];
 
 		sem_op = sop->sem_op;
 		result = curr->semval;
@@ -716,7 +709,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 	}
 
 	for (sop = sops; sop < sops + nsops; sop++) {
-		curr = sma->sem_base + sop->sem_num;
+		curr = &sma->sems[sop->sem_num];
 		sem_op = sop->sem_op;
 		result = curr->semval;
 
@@ -815,7 +808,7 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
 	if (semnum == -1)
 		pending_list = &sma->pending_const;
 	else
-		pending_list = &sma->sem_base[semnum].pending_const;
+		pending_list = &sma->sems[semnum].pending_const;
 
 	list_for_each_entry_safe(q, tmp, pending_list, list) {
 		int error = perform_atomic_semop(sma, q);
@@ -856,7 +849,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 		for (i = 0; i < nsops; i++) {
 			int num = sops[i].sem_num;
 
-			if (sma->sem_base[num].semval == 0) {
+			if (sma->sems[num].semval == 0) {
 				got_zero = 1;
 				semop_completed |= wake_const_ops(sma, num, wake_q);
 			}
@@ -867,7 +860,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 		 * Assume all were changed.
 		 */
 		for (i = 0; i < sma->sem_nsems; i++) {
-			if (sma->sem_base[i].semval == 0) {
+			if (sma->sems[i].semval == 0) {
 				got_zero = 1;
 				semop_completed |= wake_const_ops(sma, i, wake_q);
 			}
@@ -909,7 +902,7 @@ static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *w
 	if (semnum == -1)
 		pending_list = &sma->pending_alter;
 	else
-		pending_list = &sma->sem_base[semnum].pending_alter;
+		pending_list = &sma->sems[semnum].pending_alter;
 
 again:
 	list_for_each_entry_safe(q, tmp, pending_list, list) {
@@ -922,7 +915,7 @@ again:
 		 * be in the per semaphore pending queue, and decrements
 		 * cannot be successful if the value is already 0.
 		 */
-		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
+		if (semnum != -1 && sma->sems[semnum].semval == 0)
 			break;
 
 		error = perform_atomic_semop(sma, q);
@@ -959,9 +952,9 @@ again:
 static void set_semotime(struct sem_array *sma, struct sembuf *sops)
 {
 	if (sops == NULL) {
-		sma->sem_base[0].sem_otime = get_seconds();
+		sma->sems[0].sem_otime = get_seconds();
 	} else {
-		sma->sem_base[sops[0].sem_num].sem_otime =
+		sma->sems[sops[0].sem_num].sem_otime =
 						get_seconds();
 	}
 }
@@ -1067,9 +1060,9 @@ static int count_semcnt(struct sem_array *sma, ushort semnum,
 	semcnt = 0;
 	/* First: check the simple operations. They are easy to evaluate */
 	if (count_zero)
-		l = &sma->sem_base[semnum].pending_const;
+		l = &sma->sems[semnum].pending_const;
 	else
-		l = &sma->sem_base[semnum].pending_alter;
+		l = &sma->sems[semnum].pending_alter;
 
 	list_for_each_entry(q, l, list) {
 		/* all task on a per-semaphore list sleep on exactly
@@ -1124,7 +1117,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
 	}
 	for (i = 0; i < sma->sem_nsems; i++) {
-		struct sem *sem = sma->sem_base + i;
+		struct sem *sem = &sma->sems[i];
 		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
 			unlink_queue(sma, q);
 			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
@@ -1142,7 +1135,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 	wake_up_q(&wake_q);
 
 	ns->used_sems -= sma->sem_nsems;
-	ipc_rcu_putref(sma, sem_rcu_free);
+	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 }
 
 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -1174,9 +1167,9 @@ static time_t get_semotime(struct sem_array *sma)
 	int i;
 	time_t res;
 
-	res = sma->sem_base[0].sem_otime;
+	res = sma->sems[0].sem_otime;
 	for (i = 1; i < sma->sem_nsems; i++) {
-		time_t to = sma->sem_base[i].sem_otime;
+		time_t to = sma->sems[i].sem_otime;
 
 		if (to > res)
 			res = to;
@@ -1325,7 +1318,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 		return -EIDRM;
 	}
 
-	curr = &sma->sem_base[semnum];
+	curr = &sma->sems[semnum];
 
 	ipc_assert_locked_object(&sma->sem_perm);
 	list_for_each_entry(un, &sma->list_id, list_id)
@@ -1382,15 +1375,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 			goto out_unlock;
 		}
 		if (nsems > SEMMSL_FAST) {
-			if (!ipc_rcu_getref(sma)) {
+			if (!ipc_rcu_getref(&sma->sem_perm)) {
 				err = -EIDRM;
 				goto out_unlock;
 			}
 			sem_unlock(sma, -1);
 			rcu_read_unlock();
-			sem_io = ipc_alloc(sizeof(ushort)*nsems);
+			sem_io = kvmalloc_array(nsems, sizeof(ushort),
+						GFP_KERNEL);
 			if (sem_io == NULL) {
-				ipc_rcu_putref(sma, sem_rcu_free);
+				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 				return -ENOMEM;
 			}
 
@@ -1402,7 +1396,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 			}
 		}
 		for (i = 0; i < sma->sem_nsems; i++)
-			sem_io[i] = sma->sem_base[i].semval;
+			sem_io[i] = sma->sems[i].semval;
 		sem_unlock(sma, -1);
 		rcu_read_unlock();
 		err = 0;
@@ -1415,29 +1409,30 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		int i;
 		struct sem_undo *un;
 
-		if (!ipc_rcu_getref(sma)) {
+		if (!ipc_rcu_getref(&sma->sem_perm)) {
 			err = -EIDRM;
 			goto out_rcu_wakeup;
 		}
 		rcu_read_unlock();
 
 		if (nsems > SEMMSL_FAST) {
-			sem_io = ipc_alloc(sizeof(ushort)*nsems);
+			sem_io = kvmalloc_array(nsems, sizeof(ushort),
+						GFP_KERNEL);
 			if (sem_io == NULL) {
-				ipc_rcu_putref(sma, sem_rcu_free);
+				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 				return -ENOMEM;
 			}
 		}
 
 		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
-			ipc_rcu_putref(sma, sem_rcu_free);
+			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 			err = -EFAULT;
 			goto out_free;
 		}
 
 		for (i = 0; i < nsems; i++) {
 			if (sem_io[i] > SEMVMX) {
-				ipc_rcu_putref(sma, sem_rcu_free);
+				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 				err = -ERANGE;
 				goto out_free;
 			}
@@ -1450,8 +1445,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		}
 
 		for (i = 0; i < nsems; i++) {
-			sma->sem_base[i].semval = sem_io[i];
-			sma->sem_base[i].sempid = task_tgid_vnr(current);
+			sma->sems[i].semval = sem_io[i];
+			sma->sems[i].sempid = task_tgid_vnr(current);
 		}
 
 		ipc_assert_locked_object(&sma->sem_perm);
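The semctl_main() hunks above swap ipc_alloc()/ipc_free() for kvmalloc_array() and kvfree(). Besides removing a wrapper layer, kvmalloc_array() checks the element-count multiplication for overflow, which the old open-coded sizeof(ushort)*nsems never did. The essence of that guard in a short standalone sketch:

#include <stdint.h>
#include <stdlib.h>

/* Userspace sketch of the check kvmalloc_array() performs before
 * delegating to the allocator. */
static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;		/* n * size would wrap */
	return malloc(n * size);	/* kernel: kvmalloc(n * size, flags) */
}

int main(void)
{
	unsigned short *sem_io = alloc_array(128, sizeof(*sem_io));

	free(sem_io);			/* kernel: kvfree(sem_io) */
	return 0;
}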
@@ -1476,7 +1471,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		err = -EIDRM;
 		goto out_unlock;
 	}
-	curr = &sma->sem_base[semnum];
+	curr = &sma->sems[semnum];
 
 	switch (cmd) {
 	case GETVAL:
@@ -1500,7 +1495,7 @@ out_rcu_wakeup:
 	wake_up_q(&wake_q);
out_free:
 	if (sem_io != fast_sem_io)
-		ipc_free(sem_io);
+		kvfree(sem_io);
 	return err;
 }
 
@@ -1719,7 +1714,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	}
 
 	nsems = sma->sem_nsems;
-	if (!ipc_rcu_getref(sma)) {
+	if (!ipc_rcu_getref(&sma->sem_perm)) {
 		rcu_read_unlock();
 		un = ERR_PTR(-EIDRM);
 		goto out;
@@ -1729,7 +1724,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	/* step 2: allocate new undo structure */
 	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
 	if (!new) {
-		ipc_rcu_putref(sma, sem_rcu_free);
+		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1932,7 +1927,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	 */
 	if (nsops == 1) {
 		struct sem *curr;
-		curr = &sma->sem_base[sops->sem_num];
+		curr = &sma->sems[sops->sem_num];
 
 		if (alter) {
 			if (sma->complex_count) {
@@ -2146,7 +2141,7 @@ void exit_sem(struct task_struct *tsk)
 
 		/* perform adjustments registered in un */
 		for (i = 0; i < sma->sem_nsems; i++) {
-			struct sem *semaphore = &sma->sem_base[i];
+			struct sem *semaphore = &sma->sems[i];
 			if (un->semadj[i]) {
 				semaphore->semval += un->semadj[i];
 				/*
diff --git a/ipc/shm.c b/ipc/shm.c
index f45c7959b264..28a444861a8f 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -174,11 +174,12 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
 
 static void shm_rcu_free(struct rcu_head *head)
 {
-	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
-	struct shmid_kernel *shp = ipc_rcu_to_struct(p);
-
+	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
+							rcu);
+	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
+							shm_perm);
 	security_shm_free(shp);
-	ipc_rcu_free(head);
+	kvfree(shp);
 }
 
 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
@@ -241,7 +242,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 		user_shm_unlock(i_size_read(file_inode(shm_file)),
 				shp->mlock_user);
 	fput(shm_file);
-	ipc_rcu_putref(shp, shm_rcu_free);
+	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
 }
 
 /*
@@ -529,7 +530,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	struct file *file;
 	char name[13];
-	int id;
 	vm_flags_t acctflag = 0;
 
 	if (size < SHMMIN || size > ns->shm_ctlmax)
@@ -542,8 +542,8 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 			ns->shm_tot + numpages > ns->shm_ctlall)
 		return -ENOSPC;
 
-	shp = ipc_rcu_alloc(sizeof(*shp));
-	if (!shp)
+	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
+	if (unlikely(!shp))
 		return -ENOMEM;
 
 	shp->shm_perm.key = key;
@@ -553,7 +553,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 	shp->shm_perm.security = NULL;
 	error = security_shm_alloc(shp);
 	if (error) {
-		ipc_rcu_putref(shp, ipc_rcu_free);
+		kvfree(shp);
 		return error;
 	}
 
@@ -598,11 +598,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 	shp->shm_file = file;
 	shp->shm_creator = current;
 
-	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
-	if (id < 0) {
-		error = id;
+	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
+	if (error < 0)
 		goto no_id;
-	}
 
 	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
 
@@ -624,7 +622,7 @@ no_id:
 		user_shm_unlock(size, shp->mlock_user);
 	fput(file);
no_file:
-	ipc_rcu_putref(shp, shm_rcu_free);
+	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
 	return error;
 }
diff --git a/ipc/util.c b/ipc/util.c
index caec7b1bfaa3..1a2cb02467ab 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -232,6 +232,7 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
 
 	idr_preload(GFP_KERNEL);
 
+	atomic_set(&new->refcount, 1);
 	spin_lock_init(&new->lock);
 	new->deleted = false;
 	rcu_read_lock();
@@ -394,70 +395,18 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
 	ipcp->deleted = true;
 }
 
-/**
- * ipc_alloc - allocate ipc space
- * @size: size desired
- *
- * Allocate memory from the appropriate pools and return a pointer to it.
- * NULL is returned if the allocation fails
- */
-void *ipc_alloc(int size)
-{
-	return kvmalloc(size, GFP_KERNEL);
-}
-
-/**
- * ipc_free - free ipc space
- * @ptr: pointer returned by ipc_alloc
- *
- * Free a block created with ipc_alloc().
- */
-void ipc_free(void *ptr)
+int ipc_rcu_getref(struct kern_ipc_perm *ptr)
 {
-	kvfree(ptr);
+	return atomic_inc_not_zero(&ptr->refcount);
 }
 
-/**
- * ipc_rcu_alloc - allocate ipc and rcu space
- * @size: size desired
- *
- * Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object or NULL upon failure.
- */
-void *ipc_rcu_alloc(int size)
+void ipc_rcu_putref(struct kern_ipc_perm *ptr,
+			void (*func)(struct rcu_head *head))
 {
-	/*
-	 * We prepend the allocation with the rcu struct
-	 */
-	struct ipc_rcu *out = ipc_alloc(sizeof(struct ipc_rcu) + size);
-	if (unlikely(!out))
-		return NULL;
-	atomic_set(&out->refcount, 1);
-	return out + 1;
-}
-
-int ipc_rcu_getref(void *ptr)
-{
-	struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
-
-	return atomic_inc_not_zero(&p->refcount);
-}
-
-void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
-{
-	struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
-
-	if (!atomic_dec_and_test(&p->refcount))
+	if (!atomic_dec_and_test(&ptr->refcount))
 		return;
 
-	call_rcu(&p->rcu, func);
-}
-
-void ipc_rcu_free(struct rcu_head *head)
-{
-	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
-
-	kvfree(p);
+	call_rcu(&ptr->rcu, func);
 }
 
 /**
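With struct ipc_rcu gone, ipc_rcu_getref()/ipc_rcu_putref() now take the embedded kern_ipc_perm directly, but their semantics are unchanged: getref uses atomic_inc_not_zero(), so a racing reader can never resurrect an object whose count already hit zero, and the final putref schedules the caller-supplied RCU callback. A userspace approximation of that lifecycle with C11 atomics, call_rcu() replaced by an immediate call:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { atomic_int refcount; };

/* Mirrors atomic_inc_not_zero(): take a reference only if the object
 * is not already on its way to destruction. */
static int obj_getref(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return 1;	/* got a reference */
	}
	return 0;			/* count was zero: object is dying */
}

/* Mirrors ipc_rcu_putref(): the put that reaches zero destroys. The
 * kernel defers destruction via call_rcu(); here it runs immediately. */
static void obj_putref(struct obj *o, void (*free_fn)(struct obj *))
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free_fn(o);
}

static void obj_free(struct obj *o)
{
	printf("destroying object\n");
	free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcount, 1);	/* as ipc_addid() now does */
	if (obj_getref(o))
		obj_putref(o, obj_free);	/* drop the extra reference */
	obj_putref(o, obj_free);		/* final put frees */
	return 0;
}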
diff --git a/ipc/util.h b/ipc/util.h
index 60ddccca464d..c692010e6f0a 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -47,13 +47,6 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { }
 static inline void shm_exit_ns(struct ipc_namespace *ns) { }
 #endif
 
-struct ipc_rcu {
-	struct rcu_head rcu;
-	atomic_t refcount;
-} ____cacheline_aligned_in_smp;
-
-#define ipc_rcu_to_struct(p)  ((void *)(p+1))
-
 /*
  * Structure that holds the parameters needed by the ipc operations
  * (see after)
@@ -114,22 +107,18 @@ void ipc_rmid(struct ipc_ids *, struct kern_ipc_perm *);
 /* must be called with ipcp locked */
 int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg);
 
-/* for rare, potentially huge allocations.
- * both function can sleep
- */
-void *ipc_alloc(int size);
-void ipc_free(void *ptr);
-
 /*
  * For allocation that need to be freed by RCU.
  * Objects are reference counted, they start with reference count 1.
  * getref increases the refcount, the putref call that reduces the recount
 * to 0 schedules the rcu destruction. Caller must guarantee locking.
+ *
+ * refcount is initialized by ipc_addid(), before that point call_rcu()
+ * must be used.
 */
-void *ipc_rcu_alloc(int size);
-int ipc_rcu_getref(void *ptr);
-void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
-void ipc_rcu_free(struct rcu_head *head);
+int ipc_rcu_getref(struct kern_ipc_perm *ptr);
+void ipc_rcu_putref(struct kern_ipc_perm *ptr,
+			void (*func)(struct rcu_head *head));
 
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
 struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id);
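Taken together, the patch leaves every SysV IPC object with one uniform lifetime contract: kvmalloc() the type-specific structure with its embedded kern_ipc_perm; before ipc_addid() succeeds, tear down with kvfree() (no security blob yet) or with call_rcu() directly (blob attached, refcount still uninitialized); once ipc_addid() has set the refcount to 1, manage the object only through ipc_rcu_getref()/ipc_rcu_putref(), whose final put runs the type's *_rcu_free() callback and ends in kvfree().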