Diffstat (limited to 'ipc/sem.c')
-rw-r--r--  ipc/sem.c  171
1 file changed, 91 insertions(+), 80 deletions(-)
diff --git a/ipc/sem.c b/ipc/sem.c
index bee555417312..454f6c6020a8 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -47,8 +47,7 @@
* Thus: Perfect SMP scaling between independent semaphore arrays.
* If multiple semaphores in one array are used, then cache line
* trashing on the semaphore array spinlock will limit the scaling.
- * - semncnt and semzcnt are calculated on demand in count_semncnt() and
- * count_semzcnt()
+ * - semncnt and semzcnt are calculated on demand in count_semcnt()
* - the task that performs a successful semop() scans the list of all
* sleeping tasks and completes any pending operations that can be fulfilled.
* Semaphores are actively given to waiting tasks (necessary for FIFO).
@@ -87,7 +86,7 @@
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "util.h"
/* One semaphore structure for each semaphore in the system. */
@@ -110,6 +109,7 @@ struct sem_queue {
int pid; /* process id of requesting process */
int status; /* completion status of operation */
struct sembuf *sops; /* array of pending operations */
+ struct sembuf *blocking; /* the operation that blocked */
int nsops; /* number of operations */
int alter; /* does *sops alter the array? */
};
@@ -160,7 +160,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
* sem_array.pending{_alter,_cont},
* sem_array.sem_undo: global sem_lock() for read/write
* sem_undo.proc_next: only "current" is allowed to read/write that field.
- *
+ *
* sem_array.sem_base[i].pending_{const,alter}:
* global or semaphore sem_lock() for read/write
*/
@@ -564,7 +564,11 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
struct ipc_namespace *ns;
- struct ipc_ops sem_ops;
+ static const struct ipc_ops sem_ops = {
+ .getnew = newary,
+ .associate = sem_security,
+ .more_checks = sem_more_checks,
+ };
struct ipc_params sem_params;
ns = current->nsproxy->ipc_ns;
@@ -572,10 +576,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
if (nsems < 0 || nsems > ns->sc_semmsl)
return -EINVAL;
- sem_ops.getnew = newary;
- sem_ops.associate = sem_security;
- sem_ops.more_checks = sem_more_checks;
-
sem_params.key = key;
sem_params.flg = semflg;
sem_params.u.nsems = nsems;
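
The hunk above converts sem_ops from a structure built on the stack at every semget() call into a function-local static const table, assembled once at compile time and eligible for read-only memory. A minimal standalone sketch of the same C pattern (the names and types below are illustrative, not kernel code):

    #include <stdio.h>

    /* Hypothetical ops table, mirroring the static-const pattern above. */
    struct ops {
            int (*getnew)(int arg);
    };

    static int make_object(int arg)
    {
            return arg * 2;
    }

    static int do_get(const struct ops *ops, int arg)
    {
            /* callers only ever read the table, so const is safe */
            return ops->getnew(arg);
    }

    int main(void)
    {
            /* built once, placed in read-only data, never rebuilt per call */
            static const struct ops my_ops = {
                    .getnew = make_object,
            };
            printf("%d\n", do_get(&my_ops, 21));
            return 0;
    }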
@@ -586,21 +586,23 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
/**
* perform_atomic_semop - Perform (if possible) a semaphore operation
* @sma: semaphore array
- * @sops: array with operations that should be checked
- * @nsops: number of operations
- * @un: undo array
- * @pid: pid that did the change
+ * @q: struct sem_queue that describes the operation
*
* Returns 0 if the operation was possible.
* Returns 1 if the operation is impossible, the caller must sleep.
* Negative values are error codes.
*/
-static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
- int nsops, struct sem_undo *un, int pid)
+static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
- int result, sem_op;
+ int result, sem_op, nsops, pid;
struct sembuf *sop;
struct sem *curr;
+ struct sembuf *sops;
+ struct sem_undo *un;
+
+ sops = q->sops;
+ nsops = q->nsops;
+ un = q->undo;
for (sop = sops; sop < sops + nsops; sop++) {
curr = sma->sem_base + sop->sem_num;
@@ -628,6 +630,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
}
sop--;
+ pid = q->pid;
while (sop >= sops) {
sma->sem_base[sop->sem_num].sempid = pid;
sop--;
@@ -640,6 +643,8 @@ out_of_range:
goto undo;
would_block:
+ q->blocking = sop;
+
if (sop->sem_flg & IPC_NOWAIT)
result = -EAGAIN;
else
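
After this refactoring, perform_atomic_semop() reads the whole request from one struct sem_queue, and the would_block path records the sembuf that stopped the operation in q->blocking, which check_qop() consumes further down. A toy userspace model of that contract, heavily simplified (invented names, no undo or flag handling, no range checks):

    #include <stdio.h>
    #include <sys/sem.h>                /* struct sembuf */

    struct toy_queue {
            struct sembuf *sops;
            int nsops;
            struct sembuf *blocking;    /* set when an op cannot proceed */
    };

    /* Returns 0 on success, 1 if the caller would have to sleep. */
    static int toy_atomic_semop(int *semval, struct toy_queue *q)
    {
            int i;

            for (i = 0; i < q->nsops; i++) {
                    struct sembuf *sop = &q->sops[i];
                    int result = semval[sop->sem_num] + sop->sem_op;

                    if (result < 0 ||
                        (sop->sem_op == 0 && semval[sop->sem_num] != 0)) {
                            q->blocking = sop;      /* remember the blocker */
                            while (--i >= 0)        /* roll back applied ops */
                                    semval[q->sops[i].sem_num] -= q->sops[i].sem_op;
                            return 1;
                    }
                    semval[sop->sem_num] = result;
            }
            return 0;
    }

    int main(void)
    {
            int semval[2] = { 1, 0 };
            struct sembuf ops[2] = { { .sem_num = 0, .sem_op = -1 },
                                     { .sem_num = 1, .sem_op = -1 } };
            struct toy_queue q = { .sops = ops, .nsops = 2 };

            if (toy_atomic_semop(semval, &q))
                    printf("would block on sem %d\n", (int)q.blocking->sem_num);
            return 0;
    }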
@@ -780,8 +785,7 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
q = container_of(walk, struct sem_queue, list);
walk = walk->next;
- error = perform_atomic_semop(sma, q->sops, q->nsops,
- q->undo, q->pid);
+ error = perform_atomic_semop(sma, q);
if (error <= 0) {
/* operation completed, remove from queue & wakeup */
@@ -893,8 +897,7 @@ again:
if (semnum != -1 && sma->sem_base[semnum].semval == 0)
break;
- error = perform_atomic_semop(sma, q->sops, q->nsops,
- q->undo, q->pid);
+ error = perform_atomic_semop(sma, q);
/* Does q->sleeper still need to sleep? */
if (error > 0)
@@ -989,65 +992,74 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
set_semotime(sma, sops);
}
-/* The following counts are associated to each semaphore:
- * semncnt number of tasks waiting on semval being nonzero
- * semzcnt number of tasks waiting on semval being zero
- * This model assumes that a task waits on exactly one semaphore.
- * Since semaphore operations are to be performed atomically, tasks actually
- * wait on a whole sequence of semaphores simultaneously.
- * The counts we return here are a rough approximation, but still
- * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
+/*
+ * check_qop: Test if a queued operation sleeps on the semaphore semnum
*/
-static int count_semncnt(struct sem_array *sma, ushort semnum)
+static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
+ bool count_zero)
{
- int semncnt;
- struct sem_queue *q;
+ struct sembuf *sop = q->blocking;
- semncnt = 0;
- list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
- struct sembuf *sops = q->sops;
- BUG_ON(sops->sem_num != semnum);
- if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
- semncnt++;
- }
+ /*
+ * Linux has always (since 0.99.10) reported a task as sleeping on all
+ * semaphores. This violates SUS; it was therefore changed to the
+ * standard-compliant behavior.
+ * Give administrators a chance to notice that an application
+ * might misbehave because it relies on the old Linux behavior.
+ */
+ pr_info_once("semctl(GETNCNT/GETZCNT) is Single Unix Specification compliant since 3.16.\n"
+ "The task %s (%d) triggered the difference, watch for misbehavior.\n",
+ current->comm, task_pid_nr(current));
- list_for_each_entry(q, &sma->pending_alter, list) {
- struct sembuf *sops = q->sops;
- int nsops = q->nsops;
- int i;
- for (i = 0; i < nsops; i++)
- if (sops[i].sem_num == semnum
- && (sops[i].sem_op < 0)
- && !(sops[i].sem_flg & IPC_NOWAIT))
- semncnt++;
- }
- return semncnt;
+ if (sop->sem_num != semnum)
+ return 0;
+
+ if (count_zero && sop->sem_op == 0)
+ return 1;
+ if (!count_zero && sop->sem_op < 0)
+ return 1;
+
+ return 0;
}
-static int count_semzcnt(struct sem_array *sma, ushort semnum)
+/* The following counts are associated to each semaphore:
+ * semncnt number of tasks waiting on semval being nonzero
+ * semzcnt number of tasks waiting on semval being zero
+ *
+ * By definition, a task waits only on the semaphore of the first semop
+ * that cannot proceed, even if additional operations would block, too.
+ */
+static int count_semcnt(struct sem_array *sma, ushort semnum,
+ bool count_zero)
{
- int semzcnt;
+ struct list_head *l;
struct sem_queue *q;
+ int semcnt;
+
+ semcnt = 0;
+ /* First: check the simple operations. They are easy to evaluate */
+ if (count_zero)
+ l = &sma->sem_base[semnum].pending_const;
+ else
+ l = &sma->sem_base[semnum].pending_alter;
- semzcnt = 0;
- list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
- struct sembuf *sops = q->sops;
- BUG_ON(sops->sem_num != semnum);
- if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
- semzcnt++;
+ list_for_each_entry(q, l, list) {
+ /* all tasks on a per-semaphore list sleep on exactly
+ * that semaphore
+ */
+ semcnt++;
}
- list_for_each_entry(q, &sma->pending_const, list) {
- struct sembuf *sops = q->sops;
- int nsops = q->nsops;
- int i;
- for (i = 0; i < nsops; i++)
- if (sops[i].sem_num == semnum
- && (sops[i].sem_op == 0)
- && !(sops[i].sem_flg & IPC_NOWAIT))
- semzcnt++;
+ /* Then: check the complex operations. */
+ list_for_each_entry(q, &sma->pending_alter, list) {
+ semcnt += check_qop(sma, semnum, q, count_zero);
}
- return semzcnt;
+ if (count_zero) {
+ list_for_each_entry(q, &sma->pending_const, list) {
+ semcnt += check_qop(sma, semnum, q, count_zero);
+ }
+ }
+ return semcnt;
}
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
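
The visible effect of the rewrite: a task blocked in a multi-op semop() is now counted only against the first operation that could not proceed, as SUS requires, rather than against every semaphore its sops array touches. A sketch that should show this on a kernel with the change applied (Linux-specific assumption: new sets start at zero; error handling omitted):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ipc.h>
    #include <sys/sem.h>

    int main(void)
    {
            int id = semget(IPC_PRIVATE, 2, 0600); /* both semvals 0 on Linux */
            struct sembuf ops[2] = { { .sem_num = 0, .sem_op = -1 },
                                     { .sem_num = 1, .sem_op = -1 } };

            if (fork() == 0) {
                    semop(id, ops, 2);      /* child: blocks on sem 0 */
                    _exit(0);
            }
            sleep(1);                       /* give the child time to block */
            /* new behavior: 1 for sem 0 only; old kernels reported 1 for both */
            printf("GETNCNT sem 0: %d\n", semctl(id, 0, GETNCNT));
            printf("GETNCNT sem 1: %d\n", semctl(id, 1, GETNCNT));
            semctl(id, 0, IPC_RMID);        /* also wakes the child (EIDRM) */
            return 0;
    }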
@@ -1161,7 +1173,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
err = security_sem_semctl(NULL, cmd);
if (err)
return err;
-
+
memset(&seminfo, 0, sizeof(seminfo));
seminfo.semmni = ns->sc_semmni;
seminfo.semmns = ns->sc_semmns;
@@ -1181,7 +1193,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
}
max_id = ipc_get_maxid(&sem_ids(ns));
up_read(&sem_ids(ns).rwsem);
- if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
+ if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
return -EFAULT;
return (max_id < 0) ? 0 : max_id;
}
@@ -1449,10 +1461,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
err = curr->sempid;
goto out_unlock;
case GETNCNT:
- err = count_semncnt(sma, semnum);
+ err = count_semcnt(sma, semnum, 0);
goto out_unlock;
case GETZCNT:
- err = count_semzcnt(sma, semnum);
+ err = count_semcnt(sma, semnum, 1);
goto out_unlock;
}
@@ -1866,8 +1878,13 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
if (un && un->semid == -1)
goto out_unlock_free;
- error = perform_atomic_semop(sma, sops, nsops, un,
- task_tgid_vnr(current));
+ queue.sops = sops;
+ queue.nsops = nsops;
+ queue.undo = un;
+ queue.pid = task_tgid_vnr(current);
+ queue.alter = alter;
+
+ error = perform_atomic_semop(sma, &queue);
if (error == 0) {
/* If the operation was successful, then do
* the required updates.
@@ -1883,12 +1900,6 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
/* We need to sleep on this operation, so we put the current
* task into the pending queue and go to sleep.
*/
-
- queue.sops = sops;
- queue.nsops = nsops;
- queue.undo = un;
- queue.pid = task_tgid_vnr(current);
- queue.alter = alter;
if (nsops == 1) {
struct sem *curr;
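
For reference, a minimal userspace use of the syscall whose setup path changed above: semtimedop() blocks for up to one second on a semaphore stuck at zero, then fails with EAGAIN. A sketch assuming glibc, which exposes semtimedop() under _GNU_SOURCE:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <time.h>
    #include <sys/ipc.h>
    #include <sys/sem.h>

    int main(void)
    {
            int id = semget(IPC_PRIVATE, 1, 0600);
            struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
            struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };

            /* semval is 0, so the -1 op cannot proceed and we sleep */
            if (semtimedop(id, &op, 1, &timeout) == -1)
                    perror("semtimedop");   /* EAGAIN once the timeout expires */
            semctl(id, 0, IPC_RMID);
            return 0;
    }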
@@ -2016,7 +2027,7 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
return error;
atomic_inc(&undo_list->refcnt);
tsk->sysvsem.undo_list = undo_list;
- } else
+ } else
tsk->sysvsem.undo_list = NULL;
return 0;