| author | Manfred Spraul <manfred@colorfullife.com> | 2014-06-06 14:37:48 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-06 16:08:15 -0700 |
| commit | 2f2ed41dcaec34f2d6f224aa84efcc5a9dd8d5c3 (patch) | |
| tree | 4122b855351d0747a49e47c4d675f0f83df7eafa /ipc/sem.c | |
| parent | 1994862dc9c16f360a9169a4d27200d15ba29713 (diff) | |
| download | linux-2f2ed41dcaec34f2d6f224aa84efcc5a9dd8d5c3.tar.bz2 | |
ipc/sem.c: remove code duplication
count_semzcnt() and count_semncnt() are more or less identical. The patch
replaces both with a single function that counts either the tasks waiting
for the semaphore to become zero or the tasks waiting because of a decrease
operation.
Compared to the initial version, the BUG_ONs were removed.
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'ipc/sem.c')
-rw-r--r-- | ipc/sem.c | 107 |
1 file changed, 51 insertions(+), 56 deletions(-)
diff --git a/ipc/sem.c b/ipc/sem.c
index 4321fa420fe1..d73b3eff80f0 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -47,8 +47,7 @@
  * Thus: Perfect SMP scaling between independent semaphore arrays.
  * If multiple semaphores in one array are used, then cache line
  * trashing on the semaphore array spinlock will limit the scaling.
- * - semncnt and semzcnt are calculated on demand in count_semncnt() and
- *   count_semzcnt()
+ * - semncnt and semzcnt are calculated on demand in count_semcnt()
  * - the task that performs a successful semop() scans the list of all
  *   sleeping tasks and completes any pending operations that can be fulfilled.
  *   Semaphores are actively given to waiting tasks (necessary for FIFO).
@@ -989,6 +988,31 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
 		set_semotime(sma, sops);
 }
 
+/*
+ * check_qop: Test how often a queued operation sleeps on the semaphore semnum
+ */
+static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
+			bool count_zero)
+{
+	struct sembuf *sops = q->sops;
+	int nsops = q->nsops;
+	int i, semcnt;
+
+	semcnt = 0;
+
+	for (i = 0; i < nsops; i++) {
+		if (sops[i].sem_num != semnum)
+			continue;
+		if (sops[i].sem_flg & IPC_NOWAIT)
+			continue;
+		if (count_zero && sops[i].sem_op == 0)
+			semcnt++;
+		if (!count_zero && sops[i].sem_op < 0)
+			semcnt++;
+	}
+	return semcnt;
+}
+
 /* The following counts are associated to each semaphore:
  *   semncnt        number of tasks waiting on semval being nonzero
  *   semzcnt        number of tasks waiting on semval being zero
@@ -998,66 +1022,37 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
  * The counts we return here are a rough approximation, but still
  * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
  */
-static int count_semncnt(struct sem_array *sma, ushort semnum)
+static int count_semcnt(struct sem_array *sma, ushort semnum,
+			bool count_zero)
 {
-	int semncnt;
+	struct list_head *l;
 	struct sem_queue *q;
+	int semcnt;
 
-	semncnt = 0;
-	list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
-		struct sembuf *sops = q->sops;
-		BUG_ON(sops->sem_num != semnum);
-		if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
-			semncnt++;
-	}
-
-	list_for_each_entry(q, &sma->pending_alter, list) {
-		struct sembuf *sops = q->sops;
-		int nsops = q->nsops;
-		int i;
-		for (i = 0; i < nsops; i++)
-			if (sops[i].sem_num == semnum
-			    && (sops[i].sem_op < 0)
-			    && !(sops[i].sem_flg & IPC_NOWAIT))
-				semncnt++;
-	}
-	return semncnt;
-}
-
-static int count_semzcnt(struct sem_array *sma, ushort semnum)
-{
-	int semzcnt;
-	struct sem_queue *q;
+	semcnt = 0;
+	/* First: check the simple operations. They are easy to evaluate */
+	if (count_zero)
+		l = &sma->sem_base[semnum].pending_const;
+	else
+		l = &sma->sem_base[semnum].pending_alter;
 
-	semzcnt = 0;
-	list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
-		struct sembuf *sops = q->sops;
-		BUG_ON(sops->sem_num != semnum);
-		if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
-			semzcnt++;
+	list_for_each_entry(q, l, list) {
+		/* all task on a per-semaphore list sleep on exactly
+		 * that semaphore
+		 */
+		semcnt++;
 	}
 
-	list_for_each_entry(q, &sma->pending_const, list) {
-		struct sembuf *sops = q->sops;
-		int nsops = q->nsops;
-		int i;
-		for (i = 0; i < nsops; i++)
-			if (sops[i].sem_num == semnum
-			    && (sops[i].sem_op == 0)
-			    && !(sops[i].sem_flg & IPC_NOWAIT))
-				semzcnt++;
-	}
+	/* Then: check the complex operations.
+	 */
 	list_for_each_entry(q, &sma->pending_alter, list) {
-		struct sembuf *sops = q->sops;
-		int nsops = q->nsops;
-		int i;
-		for (i = 0; i < nsops; i++)
-			if (sops[i].sem_num == semnum
-			    && (sops[i].sem_op == 0)
-			    && !(sops[i].sem_flg & IPC_NOWAIT))
-				semzcnt++;
+		semcnt += check_qop(sma, semnum, q, count_zero);
+	}
+	if (count_zero) {
+		list_for_each_entry(q, &sma->pending_const, list) {
+			semcnt += check_qop(sma, semnum, q, count_zero);
+		}
 	}
-	return semzcnt;
+	return semcnt;
 }
 
 /* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
@@ -1459,10 +1454,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		err = curr->sempid;
 		goto out_unlock;
 	case GETNCNT:
-		err = count_semncnt(sma, semnum);
+		err = count_semcnt(sma, semnum, 0);
 		goto out_unlock;
 	case GETZCNT:
-		err = count_semzcnt(sma, semnum);
+		err = count_semcnt(sma, semnum, 1);
 		goto out_unlock;
 	}
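For context only (not part of the commit): a minimal userspace sketch that creates one waiter of each kind count_semcnt() has to count, then reads the counts back through semctl(GETNCNT)/semctl(GETZCNT), the two callers changed in the last hunk. The helper name waiter(), the operation values -2/0 and the sleep(1) "synchronization" are illustrative assumptions, not anything from the patch, and error handling is deliberately minimal.

/*
 * Illustrative sketch: one child blocks because its decrease cannot
 * proceed (shows up in GETNCNT), another blocks waiting for the
 * semaphore value to become zero (shows up in GETZCNT).
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>

union semun {			/* the caller must define this, per semctl(2) */
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

static void waiter(int semid, short op)
{
	struct sembuf sop = { .sem_num = 0, .sem_op = op, .sem_flg = 0 };

	semop(semid, &sop, 1);	/* blocks until the op can complete (or EIDRM) */
	_exit(0);
}

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (semid < 0) {
		perror("semget");
		return 1;
	}
	semctl(semid, 0, SETVAL, (union semun){ .val = 1 });	/* semval = 1 */

	if (fork() == 0)
		waiter(semid, -2);	/* cannot take 2 from semval 1 -> counted by GETNCNT */
	if (fork() == 0)
		waiter(semid, 0);	/* semval != 0 -> counted by GETZCNT */
	sleep(1);			/* crude: give both children time to block */

	printf("GETNCNT = %d\n", semctl(semid, 0, GETNCNT));	/* expected: 1 */
	printf("GETZCNT = %d\n", semctl(semid, 0, GETZCNT));	/* expected: 1 */

	semctl(semid, 0, IPC_RMID);	/* destroy the set; both waiters get EIDRM */
	wait(NULL);
	wait(NULL);
	return 0;
}

Both children use single-semaphore operations, so they land on the per-semaphore pending_alter/pending_const lists and are counted by the "simple operations" loop of count_semcnt(); a multi-sembuf semop() would instead be walked by check_qop() in the "complex operations" part.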