From 749d811f10a410b64cf4c674c498ec04316ec373 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 3 Jun 2010 20:19:28 +1000 Subject: padata: add parenthesis in MAX_SEQ_NR macro MAX_SEQ_NR is used in padata_alloc_pd() like this: pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1; It needs parenthesis or the divide by num_cpus takes precedence over the subtraction. Signed-off-by: Dan Carpenter Acked-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/padata.c') diff --git a/kernel/padata.c b/kernel/padata.c index b1c9857f8402..ff8de1b71e4e 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -28,7 +28,7 @@ #include #include -#define MAX_SEQ_NR INT_MAX - NR_CPUS +#define MAX_SEQ_NR (INT_MAX - NR_CPUS) #define MAX_OBJ_NUM 1000 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) -- cgit v1.2.3 From 4c879170296174bde05cd1c643dac16594edee77 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 7 Jul 2010 15:30:10 +0200 Subject: padata: Check for valid padata instance on start This patch introduces the PADATA_INVALID flag which is checked on padata start. This will be used to mark a padata instance as invalid, if the padata cpumask does not intersect with the active cpumask. we change padata_start to return an error if the PADATA_INVALID is set. Also we adapt the only padata user, pcrypt to this change. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 19 ++++++++++++++----- include/linux/padata.h | 3 ++- kernel/padata.c | 18 ++++++++++++++++-- 3 files changed, 32 insertions(+), 8 deletions(-) (limited to 'kernel/padata.c') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 247178cb98ec..71ae2b2ae33b 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -385,6 +385,7 @@ static struct crypto_template pcrypt_tmpl = { static int __init pcrypt_init(void) { + int err = -ENOMEM; encwq = create_workqueue("pencrypt"); if (!encwq) goto err; @@ -400,14 +401,22 @@ static int __init pcrypt_init(void) pcrypt_dec_padata = padata_alloc(cpu_possible_mask, decwq); if (!pcrypt_dec_padata) - goto err_free_padata; + goto err_free_enc_padata; - padata_start(pcrypt_enc_padata); - padata_start(pcrypt_dec_padata); + err = padata_start(pcrypt_enc_padata); + if (err) + goto err_free_dec_padata; + + err = padata_start(pcrypt_dec_padata); + if (err) + goto err_free_dec_padata; return crypto_register_template(&pcrypt_tmpl); -err_free_padata: +err_free_dec_padata: + padata_free(pcrypt_dec_padata); + +err_free_enc_padata: padata_free(pcrypt_enc_padata); err_destroy_decwq: @@ -417,7 +426,7 @@ err_destroy_encwq: destroy_workqueue(encwq); err: - return -ENOMEM; + return err; } static void __exit pcrypt_exit(void) diff --git a/include/linux/padata.h b/include/linux/padata.h index 8d8406246eef..e4c17f9b7c9e 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -126,6 +126,7 @@ struct padata_instance { u8 flags; #define PADATA_INIT 1 #define PADATA_RESET 2 +#define PADATA_INVALID 4 }; extern struct padata_instance *padata_alloc(const struct cpumask *cpumask, @@ -138,6 +139,6 @@ extern int padata_set_cpumask(struct padata_instance *pinst, cpumask_var_t cpumask); extern int padata_add_cpu(struct padata_instance *pinst, int cpu); extern int padata_remove_cpu(struct padata_instance *pinst, int cpu); -extern void padata_start(struct padata_instance *pinst); +extern int padata_start(struct padata_instance *pinst); extern void padata_stop(struct padata_instance *pinst); 
#endif diff --git a/kernel/padata.c b/kernel/padata.c index ff8de1b71e4e..e7d723a3e31d 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -485,6 +485,11 @@ static void padata_flush_queues(struct parallel_data *pd) BUG_ON(atomic_read(&pd->refcnt) != 0); } +static void __padata_start(struct padata_instance *pinst) +{ + pinst->flags |= PADATA_INIT; +} + /* Replace the internal control stucture with a new one. */ static void padata_replace(struct padata_instance *pinst, struct parallel_data *pd_new) @@ -619,11 +624,20 @@ EXPORT_SYMBOL(padata_remove_cpu); * * @pinst: padata instance to start */ -void padata_start(struct padata_instance *pinst) +int padata_start(struct padata_instance *pinst) { + int err = 0; + mutex_lock(&pinst->lock); - pinst->flags |= PADATA_INIT; + + if (pinst->flags & PADATA_INVALID) + err =-EINVAL; + + __padata_start(pinst); + mutex_unlock(&pinst->lock); + + return err; } EXPORT_SYMBOL(padata_start); -- cgit v1.2.3 From ee836555120140f770005b8ce6673c913d1b9a98 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 7 Jul 2010 15:30:47 +0200 Subject: padata: Block until the instance is unused on stop This patch makes padata_stop to block until the padata instance is unused. Also we split padata_stop to a locked and a unlocked version. This is in preparation to be able to change the cpumask after a call to patata stop. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'kernel/padata.c') diff --git a/kernel/padata.c b/kernel/padata.c index e7d723a3e31d..9e18dfa372a9 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -490,6 +490,20 @@ static void __padata_start(struct padata_instance *pinst) pinst->flags |= PADATA_INIT; } +static void __padata_stop(struct padata_instance *pinst) +{ + if (!(pinst->flags & PADATA_INIT)) + return; + + pinst->flags &= ~PADATA_INIT; + + synchronize_rcu(); + + get_online_cpus(); + padata_flush_queues(pinst->pd); + put_online_cpus(); +} + /* Replace the internal control stucture with a new one. */ static void padata_replace(struct padata_instance *pinst, struct parallel_data *pd_new) @@ -649,7 +663,7 @@ EXPORT_SYMBOL(padata_start); void padata_stop(struct padata_instance *pinst) { mutex_lock(&pinst->lock); - pinst->flags &= ~PADATA_INIT; + __padata_stop(pinst); mutex_unlock(&pinst->lock); } EXPORT_SYMBOL(padata_stop); @@ -770,17 +784,11 @@ EXPORT_SYMBOL(padata_alloc); */ void padata_free(struct padata_instance *pinst) { - padata_stop(pinst); - - synchronize_rcu(); - #ifdef CONFIG_HOTPLUG_CPU unregister_hotcpu_notifier(&pinst->cpu_notifier); #endif - get_online_cpus(); - padata_flush_queues(pinst->pd); - put_online_cpus(); + padata_stop(pinst); padata_free_pd(pinst->pd); free_cpumask_var(pinst->cpumask); kfree(pinst); -- cgit v1.2.3 From 33e54450683c5e970ac007489d7921ba792d093c Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 7 Jul 2010 15:31:26 +0200 Subject: padata: Handle empty padata cpumasks This patch fixes a bug when the padata cpumask does not intersect with the active cpumask. In this case we get a division by zero in padata_alloc_pd and we end up with a useless padata instance. Padata can end up with an empty cpumask for two reasons: 1. A user removed the last cpu that belongs to the padata cpumask and the active cpumask. 2. The last cpu that belongs to the padata cpumask and the active cpumask goes offline. 
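To make the failure mode described above concrete: the division by zero comes from the max_seq_nr calculation in padata_alloc_pd(), the same line quoted in the first patch of this series. Below is a minimal userspace sketch of that failure and of the kind of intersection check this patch introduces. Only the MAX_SEQ_NR definition and the max_seq_nr formula are taken from the kernel source; the NR_CPUS value, the bitmask constants and the __builtin_popcountl() stand-in for the kernel's cpumask helpers are illustrative assumptions, not kernel code.

    #include <limits.h>
    #include <stdio.h>

    #define NR_CPUS    8                    /* illustrative value */
    #define MAX_SEQ_NR (INT_MAX - NR_CPUS)  /* as defined in kernel/padata.c */

    int main(void)
    {
            unsigned long cpu_active_mask = 0x0fUL; /* CPUs 0-3 are active      */
            unsigned long padata_cpumask  = 0xf0UL; /* instance asks for CPUs 4-7 */

            /* Stand-in for cpumask_and()/cpumask_weight(): the effective mask
             * is empty because the two masks do not intersect. */
            unsigned long effective = cpu_active_mask & padata_cpumask;
            int num_cpus = __builtin_popcountl(effective);

            if (num_cpus == 0) {
                    /* Without a check of this kind, the formula below would
                     * divide by zero. */
                    fprintf(stderr, "padata cpumask contains no active cpu\n");
                    return 1;
            }

            int max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;
            printf("max_seq_nr = %d\n", max_seq_nr);
            return 0;
    }

With the masks shown, num_cpus is zero and the sketch bails out instead of evaluating the formula, which is essentially what the padata_validate_cpumask() check added by this patch does for the real instance.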
We introduce a function padata_validate_cpumask to check if the padata cpumask does intersect with the active cpumask. If the cpumasks do not intersect we mark the instance as invalid, so it can't be used. We do not allocate the cpumask dependend recources in this case. This fixes the division by zero and keeps the padate instance in a consistent state. It's not possible to trigger this bug by now because the only padata user, pcrypt uses always the possible cpumask. Reported-by: Dan Kruchinin Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 50 insertions(+), 11 deletions(-) (limited to 'kernel/padata.c') diff --git a/kernel/padata.c b/kernel/padata.c index 9e18dfa372a9..57ec4eb5f2e3 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -516,12 +516,27 @@ static void padata_replace(struct padata_instance *pinst, synchronize_rcu(); - padata_flush_queues(pd_old); - padata_free_pd(pd_old); + if (pd_old) { + padata_flush_queues(pd_old); + padata_free_pd(pd_old); + } pinst->flags &= ~PADATA_RESET; } +/* If cpumask contains no active cpu, we mark the instance as invalid. */ +static bool padata_validate_cpumask(struct padata_instance *pinst, + const struct cpumask *cpumask) +{ + if (!cpumask_intersects(cpumask, cpu_active_mask)) { + pinst->flags |= PADATA_INVALID; + return false; + } + + pinst->flags &= ~PADATA_INVALID; + return true; +} + /** * padata_set_cpumask - set the cpumask that padata should use * @@ -531,11 +546,18 @@ static void padata_replace(struct padata_instance *pinst, int padata_set_cpumask(struct padata_instance *pinst, cpumask_var_t cpumask) { - struct parallel_data *pd; + int valid; int err = 0; + struct parallel_data *pd = NULL; mutex_lock(&pinst->lock); + valid = padata_validate_cpumask(pinst, cpumask); + if (!valid) { + __padata_stop(pinst); + goto out_replace; + } + get_online_cpus(); pd = padata_alloc_pd(pinst, cpumask); @@ -544,10 +566,14 @@ int padata_set_cpumask(struct padata_instance *pinst, goto out; } +out_replace: cpumask_copy(pinst->cpumask, cpumask); padata_replace(pinst, pd); + if (valid) + __padata_start(pinst); + out: put_online_cpus(); @@ -567,6 +593,9 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu) return -ENOMEM; padata_replace(pinst, pd); + + if (padata_validate_cpumask(pinst, pinst->cpumask)) + __padata_start(pinst); } return 0; @@ -597,9 +626,16 @@ EXPORT_SYMBOL(padata_add_cpu); static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) { - struct parallel_data *pd; + struct parallel_data *pd = NULL; if (cpumask_test_cpu(cpu, cpu_online_mask)) { + + if (!padata_validate_cpumask(pinst, pinst->cpumask)) { + __padata_stop(pinst); + padata_replace(pinst, pd); + goto out; + } + pd = padata_alloc_pd(pinst, pinst->cpumask); if (!pd) return -ENOMEM; @@ -607,6 +643,7 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) padata_replace(pinst, pd); } +out: return 0; } @@ -732,7 +769,7 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, struct workqueue_struct *wq) { struct padata_instance *pinst; - struct parallel_data *pd; + struct parallel_data *pd = NULL; pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL); if (!pinst) @@ -740,12 +777,14 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, get_online_cpus(); - pd = padata_alloc_pd(pinst, cpumask); - if (!pd) + if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL)) goto err_free_inst; - if 
(!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL)) - goto err_free_pd; + if (padata_validate_cpumask(pinst, cpumask)) { + pd = padata_alloc_pd(pinst, cpumask); + if (!pd) + goto err_free_mask; + } rcu_assign_pointer(pinst->pd, pd); @@ -767,8 +806,8 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, return pinst; -err_free_pd: - padata_free_pd(pd); +err_free_mask: + free_cpumask_var(pinst->cpumask); err_free_inst: kfree(pinst); put_online_cpus(); -- cgit v1.2.3 From 83f619f3c8abb82cac9158cf23c656ec5c184607 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 7 Jul 2010 15:32:02 +0200 Subject: padata: make padata_do_parallel to return zero on success To return -EINPROGRESS on success in padata_do_parallel was considered to be odd. This patch changes this to return zero on success. Also the only user of padata, pcrypt is adapted to convert a return of zero to -EINPROGRESS within the crypto layer. This also removes the pcrypt fallback if padata_do_parallel was called on a not running padata instance as we can't handle it anymore. This fallback was unused, so it's save to remove it. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 18 ++++++------------ kernel/padata.c | 11 +++++------ 2 files changed, 11 insertions(+), 18 deletions(-) (limited to 'kernel/padata.c') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 71ae2b2ae33b..6036b6de9079 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -143,10 +143,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req) aead_request_set_assoc(creq, req->assoc, req->assoclen); err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata); - if (err) - return err; - else - err = crypto_aead_encrypt(creq); + if (!err) + return -EINPROGRESS; return err; } @@ -187,10 +185,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req) aead_request_set_assoc(creq, req->assoc, req->assoclen); err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_dec_padata); - if (err) - return err; - else - err = crypto_aead_decrypt(creq); + if (!err) + return -EINPROGRESS; return err; } @@ -233,10 +229,8 @@ static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req) aead_givcrypt_set_giv(creq, req->giv, req->seq); err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata); - if (err) - return err; - else - err = crypto_aead_givencrypt(creq); + if (!err) + return -EINPROGRESS; return err; } diff --git a/kernel/padata.c b/kernel/padata.c index 57ec4eb5f2e3..ae8defcf0622 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -111,10 +111,13 @@ int padata_do_parallel(struct padata_instance *pinst, pd = rcu_dereference(pinst->pd); - err = 0; + err = -EINVAL; if (!(pinst->flags & PADATA_INIT)) goto out; + if (!cpumask_test_cpu(cb_cpu, pd->cpumask)) + goto out; + err = -EBUSY; if ((pinst->flags & PADATA_RESET)) goto out; @@ -122,11 +125,7 @@ int padata_do_parallel(struct padata_instance *pinst, if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM) goto out; - err = -EINVAL; - if (!cpumask_test_cpu(cb_cpu, pd->cpumask)) - goto out; - - err = -EINPROGRESS; + err = 0; atomic_inc(&pd->refcnt); padata->pd = pd; padata->cb_cpu = cb_cpu; -- cgit v1.2.3 From 5f1a8c1bc724498ff32acbd59ed5263275676b9d Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 7 Jul 2010 15:32:39 +0200 Subject: padata: simplify serialization mechanism We count the number of processed objects on a percpu basis, so we need to go through all the percpu reorder queues to calculate the sequence number of the next object that needs 
serialization. This patch changes this to count the number of processed objects global. So we can calculate the sequence number and the percpu reorder queue of the next object that needs serialization without searching through the percpu reorder queues. This avoids some accesses to memory of foreign cpus. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- include/linux/padata.h | 6 ++--- kernel/padata.c | 71 ++++++++++++++------------------------------------ 2 files changed, 22 insertions(+), 55 deletions(-) (limited to 'kernel/padata.c') diff --git a/include/linux/padata.h b/include/linux/padata.h index e4c17f9b7c9e..8844b851191e 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -67,7 +67,6 @@ struct padata_list { * @pwork: work struct for parallelization. * @swork: work struct for serialization. * @pd: Backpointer to the internal control structure. - * @num_obj: Number of objects that are processed by this cpu. * @cpu_index: Index of the cpu. */ struct padata_queue { @@ -77,7 +76,6 @@ struct padata_queue { struct work_struct pwork; struct work_struct swork; struct parallel_data *pd; - atomic_t num_obj; int cpu_index; }; @@ -93,6 +91,7 @@ struct padata_queue { * @max_seq_nr: Maximal used sequence number. * @cpumask: cpumask in use. * @lock: Reorder lock. + * @processed: Number of already processed objects. * @timer: Reorder timer. */ struct parallel_data { @@ -103,7 +102,8 @@ struct parallel_data { atomic_t refcnt; unsigned int max_seq_nr; cpumask_var_t cpumask; - spinlock_t lock; + spinlock_t lock ____cacheline_aligned; + unsigned int processed; struct timer_list timer; }; diff --git a/kernel/padata.c b/kernel/padata.c index ae8defcf0622..450d67d394b0 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -170,79 +170,47 @@ EXPORT_SYMBOL(padata_do_parallel); */ static struct padata_priv *padata_get_next(struct parallel_data *pd) { - int cpu, num_cpus, empty, calc_seq_nr; - int seq_nr, next_nr, overrun, next_overrun; + int cpu, num_cpus; + int next_nr, next_index; struct padata_queue *queue, *next_queue; struct padata_priv *padata; struct padata_list *reorder; - empty = 0; - next_nr = -1; - next_overrun = 0; - next_queue = NULL; - num_cpus = cpumask_weight(pd->cpumask); - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); - reorder = &queue->reorder; - - /* - * Calculate the seq_nr of the object that should be - * next in this reorder queue. - */ - overrun = 0; - calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus) - + queue->cpu_index; - - if (unlikely(calc_seq_nr > pd->max_seq_nr)) { - calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1; - overrun = 1; - } - - if (!list_empty(&reorder->list)) { - padata = list_entry(reorder->list.next, - struct padata_priv, list); - - seq_nr = padata->seq_nr; - BUG_ON(calc_seq_nr != seq_nr); - } else { - seq_nr = calc_seq_nr; - empty++; - } - - if (next_nr < 0 || seq_nr < next_nr - || (next_overrun && !overrun)) { - next_nr = seq_nr; - next_overrun = overrun; - next_queue = queue; - } + /* + * Calculate the percpu reorder queue and the sequence + * number of the next object. 
+ */ + next_nr = pd->processed; + next_index = next_nr % num_cpus; + cpu = padata_index_to_cpu(pd, next_index); + next_queue = per_cpu_ptr(pd->queue, cpu); + + if (unlikely(next_nr > pd->max_seq_nr)) { + next_nr = next_nr - pd->max_seq_nr - 1; + next_index = next_nr % num_cpus; + cpu = padata_index_to_cpu(pd, next_index); + next_queue = per_cpu_ptr(pd->queue, cpu); + pd->processed = 0; } padata = NULL; - if (empty == num_cpus) - goto out; - reorder = &next_queue->reorder; if (!list_empty(&reorder->list)) { padata = list_entry(reorder->list.next, struct padata_priv, list); - if (unlikely(next_overrun)) { - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); - atomic_set(&queue->num_obj, 0); - } - } + BUG_ON(next_nr != padata->seq_nr); spin_lock(&reorder->lock); list_del_init(&padata->list); atomic_dec(&pd->reorder_objects); spin_unlock(&reorder->lock); - atomic_inc(&next_queue->num_obj); + pd->processed++; goto out; } @@ -430,7 +398,6 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, INIT_WORK(&queue->pwork, padata_parallel_worker); INIT_WORK(&queue->swork, padata_serial_worker); - atomic_set(&queue->num_obj, 0); } num_cpus = cpumask_weight(pd->cpumask); -- cgit v1.2.3 From e15bacbebb9dcc95f148f28dfc83a6d5e48b60b8 Mon Sep 17 00:00:00 2001 From: Dan Kruchinin Date: Wed, 14 Jul 2010 14:31:57 +0400 Subject: padata: Make two separate cpumasks The aim of this patch is to make two separate cpumasks for padata parallel and serial workers respectively. It allows user to make more thin and sophisticated configurations of padata framework. For example user may bind parallel and serial workers to non-intersecting CPU groups to gain better performance. Also each padata instance has notifiers chain for its cpumasks now. If either parallel or serial or both masks were changed all interested subsystems will get notification about that. It's especially useful if padata user uses algorithm for callback CPU selection according to serial cpumask. Signed-off-by: Dan Kruchinin Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 191 ++++++++++++++------ include/linux/padata.h | 116 ++++++++---- kernel/padata.c | 471 ++++++++++++++++++++++++++++++++++++------------- 3 files changed, 564 insertions(+), 214 deletions(-) (limited to 'kernel/padata.c') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 6036b6de9079..c9662e25595e 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -24,12 +24,38 @@ #include #include #include +#include #include -static struct padata_instance *pcrypt_enc_padata; -static struct padata_instance *pcrypt_dec_padata; -static struct workqueue_struct *encwq; -static struct workqueue_struct *decwq; +struct pcrypt_instance { + struct padata_instance *pinst; + struct workqueue_struct *wq; + + /* + * Cpumask for callback CPUs. It should be + * equal to serial cpumask of corresponding padata instance, + * so it is updated when padata notifies us about serial + * cpumask change. + * + * cb_cpumask is protected by RCU. This fact prevents us from + * using cpumask_var_t directly because the actual type of + * cpumsak_var_t depends on kernel configuration(particularly on + * CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration + * cpumask_var_t may be either a pointer to the struct cpumask + * or a variable allocated on the stack. Thus we can not safely use + * cpumask_var_t with RCU operations such as rcu_assign_pointer or + * rcu_dereference. So cpumask_var_t is wrapped with struct + * pcrypt_cpumask which makes possible to use it with RCU. 
+ */ + struct pcrypt_cpumask { + cpumask_var_t mask; + } *cb_cpumask; + struct notifier_block nblock; +}; + +static struct pcrypt_instance pencrypt; +static struct pcrypt_instance pdecrypt; + struct pcrypt_instance_ctx { struct crypto_spawn spawn; @@ -42,25 +68,29 @@ struct pcrypt_aead_ctx { }; static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu, - struct padata_instance *pinst) + struct pcrypt_instance *pcrypt) { unsigned int cpu_index, cpu, i; + struct pcrypt_cpumask *cpumask; cpu = *cb_cpu; - if (cpumask_test_cpu(cpu, cpu_active_mask)) + rcu_read_lock_bh(); + cpumask = rcu_dereference(pcrypt->cb_cpumask); + if (cpumask_test_cpu(cpu, cpumask->mask)) goto out; - cpu_index = cpu % cpumask_weight(cpu_active_mask); + cpu_index = cpu % cpumask_weight(cpumask->mask); - cpu = cpumask_first(cpu_active_mask); + cpu = cpumask_first(cpumask->mask); for (i = 0; i < cpu_index; i++) - cpu = cpumask_next(cpu, cpu_active_mask); + cpu = cpumask_next(cpu, cpumask->mask); *cb_cpu = cpu; out: - return padata_do_parallel(pinst, padata, cpu); + rcu_read_unlock_bh(); + return padata_do_parallel(pcrypt->pinst, padata, cpu); } static int pcrypt_aead_setkey(struct crypto_aead *parent, @@ -142,7 +172,7 @@ static int pcrypt_aead_encrypt(struct aead_request *req) req->cryptlen, req->iv); aead_request_set_assoc(creq, req->assoc, req->assoclen); - err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata); + err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); if (!err) return -EINPROGRESS; @@ -184,7 +214,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req) req->cryptlen, req->iv); aead_request_set_assoc(creq, req->assoc, req->assoclen); - err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_dec_padata); + err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); if (!err) return -EINPROGRESS; @@ -228,7 +258,7 @@ static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req) aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen); aead_givcrypt_set_giv(creq, req->giv, req->seq); - err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata); + err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); if (!err) return -EINPROGRESS; @@ -370,6 +400,88 @@ static void pcrypt_free(struct crypto_instance *inst) kfree(inst); } +static int pcrypt_cpumask_change_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct pcrypt_instance *pcrypt; + struct pcrypt_cpumask *new_mask, *old_mask; + + if (!(val & PADATA_CPU_SERIAL)) + return 0; + + pcrypt = container_of(self, struct pcrypt_instance, nblock); + new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL); + if (!new_mask) + return -ENOMEM; + if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) { + kfree(new_mask); + return -ENOMEM; + } + + old_mask = pcrypt->cb_cpumask; + + padata_get_cpumask(pcrypt->pinst, PADATA_CPU_SERIAL, new_mask->mask); + rcu_assign_pointer(pcrypt->cb_cpumask, new_mask); + synchronize_rcu_bh(); + + free_cpumask_var(old_mask->mask); + kfree(old_mask); + return 0; +} + +static int __pcrypt_init_instance(struct pcrypt_instance *pcrypt, + const char *name) +{ + int ret = -ENOMEM; + struct pcrypt_cpumask *mask; + + pcrypt->wq = create_workqueue(name); + if (!pcrypt->wq) + goto err; + + pcrypt->pinst = padata_alloc(pcrypt->wq); + if (!pcrypt->pinst) + goto err_destroy_workqueue; + + mask = kmalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) + goto err_free_padata; + if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) { + kfree(mask); + goto err_free_padata; + } + + 
padata_get_cpumask(pcrypt->pinst, PADATA_CPU_SERIAL, mask->mask); + rcu_assign_pointer(pcrypt->cb_cpumask, mask); + + pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify; + ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); + if (ret) + goto err_free_cpumask; + + return ret; +err_free_cpumask: + free_cpumask_var(mask->mask); + kfree(mask); +err_free_padata: + padata_free(pcrypt->pinst); +err_destroy_workqueue: + destroy_workqueue(pcrypt->wq); +err: + return ret; +} + +static void __pcrypt_deinit_instance(struct pcrypt_instance *pcrypt) +{ + free_cpumask_var(pcrypt->cb_cpumask->mask); + kfree(pcrypt->cb_cpumask); + + padata_stop(pcrypt->pinst); + padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); + destroy_workqueue(pcrypt->wq); + padata_free(pcrypt->pinst); +} + static struct crypto_template pcrypt_tmpl = { .name = "pcrypt", .alloc = pcrypt_alloc, @@ -379,60 +491,31 @@ static struct crypto_template pcrypt_tmpl = { static int __init pcrypt_init(void) { - int err = -ENOMEM; - encwq = create_workqueue("pencrypt"); - if (!encwq) - goto err; - - decwq = create_workqueue("pdecrypt"); - if (!decwq) - goto err_destroy_encwq; - - - pcrypt_enc_padata = padata_alloc(cpu_possible_mask, encwq); - if (!pcrypt_enc_padata) - goto err_destroy_decwq; - - pcrypt_dec_padata = padata_alloc(cpu_possible_mask, decwq); - if (!pcrypt_dec_padata) - goto err_free_enc_padata; + int err; - err = padata_start(pcrypt_enc_padata); + err = __pcrypt_init_instance(&pencrypt, "pencrypt"); if (err) - goto err_free_dec_padata; + goto err; - err = padata_start(pcrypt_dec_padata); + err = __pcrypt_init_instance(&pdecrypt, "pdecrypt"); if (err) - goto err_free_dec_padata; - - return crypto_register_template(&pcrypt_tmpl); - -err_free_dec_padata: - padata_free(pcrypt_dec_padata); + goto err_deinit_pencrypt; -err_free_enc_padata: - padata_free(pcrypt_enc_padata); + padata_start(pencrypt.pinst); + padata_start(pdecrypt.pinst); -err_destroy_decwq: - destroy_workqueue(decwq); - -err_destroy_encwq: - destroy_workqueue(encwq); + return crypto_register_template(&pcrypt_tmpl); +err_deinit_pencrypt: + __pcrypt_deinit_instance(&pencrypt); err: return err; } static void __exit pcrypt_exit(void) { - padata_stop(pcrypt_enc_padata); - padata_stop(pcrypt_dec_padata); - - destroy_workqueue(encwq); - destroy_workqueue(decwq); - - padata_free(pcrypt_enc_padata); - padata_free(pcrypt_dec_padata); + __pcrypt_deinit_instance(&pencrypt); + __pcrypt_deinit_instance(&pdecrypt); crypto_unregister_template(&pcrypt_tmpl); } diff --git a/include/linux/padata.h b/include/linux/padata.h index 8844b851191e..621e7736690c 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -25,6 +25,10 @@ #include #include #include +#include + +#define PADATA_CPU_SERIAL 0x01 +#define PADATA_CPU_PARALLEL 0x02 /** * struct padata_priv - Embedded to the users data structure. @@ -59,7 +63,20 @@ struct padata_list { }; /** - * struct padata_queue - The percpu padata queues. +* struct padata_serial_queue - The percpu padata serial queue +* +* @serial: List to wait for serialization after reordering. +* @work: work struct for serialization. +* @pd: Backpointer to the internal control structure. +*/ +struct padata_serial_queue { + struct padata_list serial; + struct work_struct work; + struct parallel_data *pd; +}; + +/** + * struct padata_parallel_queue - The percpu padata parallel queue * * @parallel: List to wait for parallelization. * @reorder: List to wait for reordering after parallel processing. 
@@ -67,44 +84,52 @@ struct padata_list { * @pwork: work struct for parallelization. * @swork: work struct for serialization. * @pd: Backpointer to the internal control structure. + * @work: work struct for parallelization. + * @num_obj: Number of objects that are processed by this cpu. * @cpu_index: Index of the cpu. */ -struct padata_queue { - struct padata_list parallel; - struct padata_list reorder; - struct padata_list serial; - struct work_struct pwork; - struct work_struct swork; - struct parallel_data *pd; - int cpu_index; +struct padata_parallel_queue { + struct padata_list parallel; + struct padata_list reorder; + struct parallel_data *pd; + struct work_struct work; + atomic_t num_obj; + int cpu_index; }; + /** * struct parallel_data - Internal control structure, covers everything * that depends on the cpumask in use. * * @pinst: padata instance. - * @queue: percpu padata queues. + * @pqueue: percpu padata queues used for parallelization. + * @squeue: percpu padata queues used for serialuzation. * @seq_nr: The sequence number that will be attached to the next object. * @reorder_objects: Number of objects waiting in the reorder queues. * @refcnt: Number of objects holding a reference on this parallel_data. * @max_seq_nr: Maximal used sequence number. - * @cpumask: cpumask in use. + * @cpumask: Contains two cpumasks: pcpu and cbcpu for + * parallel and serial workers respectively. * @lock: Reorder lock. * @processed: Number of already processed objects. * @timer: Reorder timer. */ struct parallel_data { - struct padata_instance *pinst; - struct padata_queue *queue; - atomic_t seq_nr; - atomic_t reorder_objects; - atomic_t refcnt; - unsigned int max_seq_nr; - cpumask_var_t cpumask; - spinlock_t lock ____cacheline_aligned; - unsigned int processed; - struct timer_list timer; + struct padata_instance *pinst; + struct padata_parallel_queue *pqueue; + struct padata_serial_queue *squeue; + atomic_t seq_nr; + atomic_t reorder_objects; + atomic_t refcnt; + unsigned int max_seq_nr; + struct { + cpumask_var_t pcpu; + cpumask_var_t cbcpu; + } cpumask; + spinlock_t lock ____cacheline_aligned; + unsigned int processed; + struct timer_list timer; }; /** @@ -113,32 +138,51 @@ struct parallel_data { * @cpu_notifier: cpu hotplug notifier. * @wq: The workqueue in use. * @pd: The internal control structure. - * @cpumask: User supplied cpumask. + * @cpumask: User supplied cpumask. Contains two cpumasks: pcpu and + * cbcpu for parallel and serial works respectivly. + * @cpumask_change_notifier: Notifiers chain for user-defined notify + * callbacks that will be called when either @pcpu or @cbcpu + * or both cpumasks change. * @lock: padata instance lock. * @flags: padata flags. 
*/ struct padata_instance { - struct notifier_block cpu_notifier; - struct workqueue_struct *wq; - struct parallel_data *pd; - cpumask_var_t cpumask; - struct mutex lock; - u8 flags; -#define PADATA_INIT 1 -#define PADATA_RESET 2 -#define PADATA_INVALID 4 + struct notifier_block cpu_notifier; + struct workqueue_struct *wq; + struct parallel_data *pd; + struct { + cpumask_var_t pcpu; + cpumask_var_t cbcpu; + } cpumask; + struct blocking_notifier_head cpumask_change_notifier; + struct mutex lock; + u8 flags; +#define PADATA_INIT 1 +#define PADATA_RESET 2 +#define PADATA_INVALID 4 }; -extern struct padata_instance *padata_alloc(const struct cpumask *cpumask, - struct workqueue_struct *wq); +extern struct padata_instance *padata_alloc(struct workqueue_struct *wq); +extern struct padata_instance *__padata_alloc(struct workqueue_struct *wq, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask); extern void padata_free(struct padata_instance *pinst); extern int padata_do_parallel(struct padata_instance *pinst, struct padata_priv *padata, int cb_cpu); extern void padata_do_serial(struct padata_priv *padata); -extern int padata_set_cpumask(struct padata_instance *pinst, +extern int padata_get_cpumask(struct padata_instance *pinst, + int cpumask_type, struct cpumask *out_mask); +extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern int padata_add_cpu(struct padata_instance *pinst, int cpu); -extern int padata_remove_cpu(struct padata_instance *pinst, int cpu); +extern int __padata_set_cpumasks(struct padata_instance *pinst, + cpumask_var_t pcpumask, + cpumask_var_t cbcpumask); +extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask); +extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask); extern int padata_start(struct padata_instance *pinst); extern void padata_stop(struct padata_instance *pinst); +extern int padata_register_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock); +extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock); #endif diff --git a/kernel/padata.c b/kernel/padata.c index 450d67d394b0..84d0ca9dac9c 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -35,9 +35,9 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) { int cpu, target_cpu; - target_cpu = cpumask_first(pd->cpumask); + target_cpu = cpumask_first(pd->cpumask.pcpu); for (cpu = 0; cpu < cpu_index; cpu++) - target_cpu = cpumask_next(target_cpu, pd->cpumask); + target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); return target_cpu; } @@ -53,26 +53,27 @@ static int padata_cpu_hash(struct padata_priv *padata) * Hash the sequence numbers to the cpus by taking * seq_nr mod. number of cpus in use. 
*/ - cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask); + cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask.pcpu); return padata_index_to_cpu(pd, cpu_index); } -static void padata_parallel_worker(struct work_struct *work) +static void padata_parallel_worker(struct work_struct *parallel_work) { - struct padata_queue *queue; + struct padata_parallel_queue *pqueue; struct parallel_data *pd; struct padata_instance *pinst; LIST_HEAD(local_list); local_bh_disable(); - queue = container_of(work, struct padata_queue, pwork); - pd = queue->pd; + pqueue = container_of(parallel_work, + struct padata_parallel_queue, work); + pd = pqueue->pd; pinst = pd->pinst; - spin_lock(&queue->parallel.lock); - list_replace_init(&queue->parallel.list, &local_list); - spin_unlock(&queue->parallel.lock); + spin_lock(&pqueue->parallel.lock); + list_replace_init(&pqueue->parallel.list, &local_list); + spin_unlock(&pqueue->parallel.lock); while (!list_empty(&local_list)) { struct padata_priv *padata; @@ -94,7 +95,7 @@ static void padata_parallel_worker(struct work_struct *work) * @pinst: padata instance * @padata: object to be parallelized * @cb_cpu: cpu the serialization callback function will run on, - * must be in the cpumask of padata. + * must be in the serial cpumask of padata(i.e. cpumask.cbcpu). * * The parallelization callback function will run with BHs off. * Note: Every object which is parallelized by padata_do_parallel @@ -104,7 +105,7 @@ int padata_do_parallel(struct padata_instance *pinst, struct padata_priv *padata, int cb_cpu) { int target_cpu, err; - struct padata_queue *queue; + struct padata_parallel_queue *queue; struct parallel_data *pd; rcu_read_lock_bh(); @@ -115,7 +116,7 @@ int padata_do_parallel(struct padata_instance *pinst, if (!(pinst->flags & PADATA_INIT)) goto out; - if (!cpumask_test_cpu(cb_cpu, pd->cpumask)) + if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) goto out; err = -EBUSY; @@ -136,13 +137,13 @@ int padata_do_parallel(struct padata_instance *pinst, padata->seq_nr = atomic_inc_return(&pd->seq_nr); target_cpu = padata_cpu_hash(padata); - queue = per_cpu_ptr(pd->queue, target_cpu); + queue = per_cpu_ptr(pd->pqueue, target_cpu); spin_lock(&queue->parallel.lock); list_add_tail(&padata->list, &queue->parallel.list); spin_unlock(&queue->parallel.lock); - queue_work_on(target_cpu, pinst->wq, &queue->pwork); + queue_work_on(target_cpu, pinst->wq, &queue->work); out: rcu_read_unlock_bh(); @@ -172,11 +173,11 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) { int cpu, num_cpus; int next_nr, next_index; - struct padata_queue *queue, *next_queue; + struct padata_parallel_queue *queue, *next_queue; struct padata_priv *padata; struct padata_list *reorder; - num_cpus = cpumask_weight(pd->cpumask); + num_cpus = cpumask_weight(pd->cpumask.pcpu); /* * Calculate the percpu reorder queue and the sequence @@ -185,13 +186,13 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) next_nr = pd->processed; next_index = next_nr % num_cpus; cpu = padata_index_to_cpu(pd, next_index); - next_queue = per_cpu_ptr(pd->queue, cpu); + next_queue = per_cpu_ptr(pd->pqueue, cpu); if (unlikely(next_nr > pd->max_seq_nr)) { next_nr = next_nr - pd->max_seq_nr - 1; next_index = next_nr % num_cpus; cpu = padata_index_to_cpu(pd, next_index); - next_queue = per_cpu_ptr(pd->queue, cpu); + next_queue = per_cpu_ptr(pd->pqueue, cpu); pd->processed = 0; } @@ -215,7 +216,7 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) goto out; } - queue = 
per_cpu_ptr(pd->queue, smp_processor_id()); + queue = per_cpu_ptr(pd->pqueue, smp_processor_id()); if (queue->cpu_index == next_queue->cpu_index) { padata = ERR_PTR(-ENODATA); goto out; @@ -229,7 +230,7 @@ out: static void padata_reorder(struct parallel_data *pd) { struct padata_priv *padata; - struct padata_queue *queue; + struct padata_serial_queue *squeue; struct padata_instance *pinst = pd->pinst; /* @@ -268,13 +269,13 @@ static void padata_reorder(struct parallel_data *pd) return; } - queue = per_cpu_ptr(pd->queue, padata->cb_cpu); + squeue = per_cpu_ptr(pd->squeue, padata->cb_cpu); - spin_lock(&queue->serial.lock); - list_add_tail(&padata->list, &queue->serial.list); - spin_unlock(&queue->serial.lock); + spin_lock(&squeue->serial.lock); + list_add_tail(&padata->list, &squeue->serial.list); + spin_unlock(&squeue->serial.lock); - queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork); + queue_work_on(padata->cb_cpu, pinst->wq, &squeue->work); } spin_unlock_bh(&pd->lock); @@ -300,19 +301,19 @@ static void padata_reorder_timer(unsigned long arg) padata_reorder(pd); } -static void padata_serial_worker(struct work_struct *work) +static void padata_serial_worker(struct work_struct *serial_work) { - struct padata_queue *queue; + struct padata_serial_queue *squeue; struct parallel_data *pd; LIST_HEAD(local_list); local_bh_disable(); - queue = container_of(work, struct padata_queue, swork); - pd = queue->pd; + squeue = container_of(serial_work, struct padata_serial_queue, work); + pd = squeue->pd; - spin_lock(&queue->serial.lock); - list_replace_init(&queue->serial.list, &local_list); - spin_unlock(&queue->serial.lock); + spin_lock(&squeue->serial.lock); + list_replace_init(&squeue->serial.list, &local_list); + spin_unlock(&squeue->serial.lock); while (!list_empty(&local_list)) { struct padata_priv *padata; @@ -339,18 +340,18 @@ static void padata_serial_worker(struct work_struct *work) void padata_do_serial(struct padata_priv *padata) { int cpu; - struct padata_queue *queue; + struct padata_parallel_queue *pqueue; struct parallel_data *pd; pd = padata->pd; cpu = get_cpu(); - queue = per_cpu_ptr(pd->queue, cpu); + pqueue = per_cpu_ptr(pd->pqueue, cpu); - spin_lock(&queue->reorder.lock); + spin_lock(&pqueue->reorder.lock); atomic_inc(&pd->reorder_objects); - list_add_tail(&padata->list, &queue->reorder.list); - spin_unlock(&queue->reorder.lock); + list_add_tail(&padata->list, &pqueue->reorder.list); + spin_unlock(&pqueue->reorder.lock); put_cpu(); @@ -358,51 +359,88 @@ void padata_do_serial(struct padata_priv *padata) } EXPORT_SYMBOL(padata_do_serial); -/* Allocate and initialize the internal cpumask dependend resources. 
*/ -static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, - const struct cpumask *cpumask) +static int padata_setup_cpumasks(struct parallel_data *pd, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask) { - int cpu, cpu_index, num_cpus; - struct padata_queue *queue; - struct parallel_data *pd; + if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) + return -ENOMEM; - cpu_index = 0; + cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask); + if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { + free_cpumask_var(pd->cpumask.cbcpu); + return -ENOMEM; + } - pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); - if (!pd) - goto err; + cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask); + return 0; +} - pd->queue = alloc_percpu(struct padata_queue); - if (!pd->queue) - goto err_free_pd; +static void __padata_list_init(struct padata_list *pd_list) +{ + INIT_LIST_HEAD(&pd_list->list); + spin_lock_init(&pd_list->lock); +} - if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL)) - goto err_free_queue; +/* Initialize all percpu queues used by serial workers */ +static void padata_init_squeues(struct parallel_data *pd) +{ + int cpu; + struct padata_serial_queue *squeue; - cpumask_and(pd->cpumask, cpumask, cpu_active_mask); + for_each_cpu(cpu, pd->cpumask.cbcpu) { + squeue = per_cpu_ptr(pd->squeue, cpu); + squeue->pd = pd; + __padata_list_init(&squeue->serial); + INIT_WORK(&squeue->work, padata_serial_worker); + } +} - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); +/* Initialize all percpu queues used by parallel workers */ +static void padata_init_pqueues(struct parallel_data *pd) +{ + int cpu_index, num_cpus, cpu; + struct padata_parallel_queue *pqueue; - queue->pd = pd; + cpu_index = 0; + for_each_cpu(cpu, pd->cpumask.pcpu) { + pqueue = per_cpu_ptr(pd->pqueue, cpu); + pqueue->pd = pd; + pqueue->cpu_index = cpu_index; + + __padata_list_init(&pqueue->reorder); + __padata_list_init(&pqueue->parallel); + INIT_WORK(&pqueue->work, padata_parallel_worker); + atomic_set(&pqueue->num_obj, 0); + } - queue->cpu_index = cpu_index; - cpu_index++; + num_cpus = cpumask_weight(pd->cpumask.pcpu); + pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1; +} - INIT_LIST_HEAD(&queue->reorder.list); - INIT_LIST_HEAD(&queue->parallel.list); - INIT_LIST_HEAD(&queue->serial.list); - spin_lock_init(&queue->reorder.lock); - spin_lock_init(&queue->parallel.lock); - spin_lock_init(&queue->serial.lock); +/* Allocate and initialize the internal cpumask dependend resources. 
*/ +static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask) +{ + struct parallel_data *pd; - INIT_WORK(&queue->pwork, padata_parallel_worker); - INIT_WORK(&queue->swork, padata_serial_worker); - } + pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); + if (!pd) + goto err; - num_cpus = cpumask_weight(pd->cpumask); - pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1; + pd->pqueue = alloc_percpu(struct padata_parallel_queue); + if (!pd->pqueue) + goto err_free_pd; + + pd->squeue = alloc_percpu(struct padata_serial_queue); + if (!pd->squeue) + goto err_free_pqueue; + if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0) + goto err_free_squeue; + padata_init_pqueues(pd); + padata_init_squeues(pd); setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); atomic_set(&pd->seq_nr, -1); atomic_set(&pd->reorder_objects, 0); @@ -412,8 +450,10 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, return pd; -err_free_queue: - free_percpu(pd->queue); +err_free_squeue: + free_percpu(pd->squeue); +err_free_pqueue: + free_percpu(pd->pqueue); err_free_pd: kfree(pd); err: @@ -422,8 +462,10 @@ err: static void padata_free_pd(struct parallel_data *pd) { - free_cpumask_var(pd->cpumask); - free_percpu(pd->queue); + free_cpumask_var(pd->cpumask.pcpu); + free_cpumask_var(pd->cpumask.cbcpu); + free_percpu(pd->pqueue); + free_percpu(pd->squeue); kfree(pd); } @@ -431,11 +473,12 @@ static void padata_free_pd(struct parallel_data *pd) static void padata_flush_queues(struct parallel_data *pd) { int cpu; - struct padata_queue *queue; + struct padata_parallel_queue *pqueue; + struct padata_serial_queue *squeue; - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); - flush_work(&queue->pwork); + for_each_cpu(cpu, pd->cpumask.pcpu) { + pqueue = per_cpu_ptr(pd->pqueue, cpu); + flush_work(&pqueue->work); } del_timer_sync(&pd->timer); @@ -443,9 +486,9 @@ static void padata_flush_queues(struct parallel_data *pd) if (atomic_read(&pd->reorder_objects)) padata_reorder(pd); - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); - flush_work(&queue->swork); + for_each_cpu(cpu, pd->cpumask.cbcpu) { + squeue = per_cpu_ptr(pd->squeue, cpu); + flush_work(&squeue->work); } BUG_ON(atomic_read(&pd->refcnt) != 0); @@ -475,21 +518,63 @@ static void padata_replace(struct padata_instance *pinst, struct parallel_data *pd_new) { struct parallel_data *pd_old = pinst->pd; + int notification_mask = 0; pinst->flags |= PADATA_RESET; rcu_assign_pointer(pinst->pd, pd_new); synchronize_rcu(); + if (!pd_old) + goto out; - if (pd_old) { - padata_flush_queues(pd_old); - padata_free_pd(pd_old); - } + padata_flush_queues(pd_old); + if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu)) + notification_mask |= PADATA_CPU_PARALLEL; + if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu)) + notification_mask |= PADATA_CPU_SERIAL; + + padata_free_pd(pd_old); + if (notification_mask) + blocking_notifier_call_chain(&pinst->cpumask_change_notifier, + notification_mask, pinst); +out: pinst->flags &= ~PADATA_RESET; } +/** + * padata_register_cpumask_notifier - Registers a notifier that will be called + * if either pcpu or cbcpu or both cpumasks change. + * + * @pinst: A poineter to padata instance + * @nblock: A pointer to notifier block. 
+ */ +int padata_register_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock) +{ + return blocking_notifier_chain_register(&pinst->cpumask_change_notifier, + nblock); +} +EXPORT_SYMBOL(padata_register_cpumask_notifier); + +/** + * padata_unregister_cpumask_notifier - Unregisters cpumask notifier + * registered earlier using padata_register_cpumask_notifier + * + * @pinst: A pointer to data instance. + * @nlock: A pointer to notifier block. + */ +int padata_unregister_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock) +{ + return blocking_notifier_chain_unregister( + &pinst->cpumask_change_notifier, + nblock); +} +EXPORT_SYMBOL(padata_unregister_cpumask_notifier); + + /* If cpumask contains no active cpu, we mark the instance as invalid. */ static bool padata_validate_cpumask(struct padata_instance *pinst, const struct cpumask *cpumask) @@ -504,13 +589,82 @@ static bool padata_validate_cpumask(struct padata_instance *pinst, } /** - * padata_set_cpumask - set the cpumask that padata should use + * padata_get_cpumask: Fetch serial or parallel cpumask from the + * given padata instance and copy it to @out_mask + * + * @pinst: A pointer to padata instance + * @cpumask_type: Specifies which cpumask will be copied. + * Possible values are PADATA_CPU_SERIAL *or* PADATA_CPU_PARALLEL + * corresponding to serial and parallel cpumask respectively. + * @out_mask: A pointer to cpumask structure where selected + * cpumask will be copied. + */ +int padata_get_cpumask(struct padata_instance *pinst, + int cpumask_type, struct cpumask *out_mask) +{ + struct parallel_data *pd; + int ret = 0; + + rcu_read_lock_bh(); + pd = rcu_dereference(pinst->pd); + switch (cpumask_type) { + case PADATA_CPU_SERIAL: + cpumask_copy(out_mask, pd->cpumask.cbcpu); + break; + case PADATA_CPU_PARALLEL: + cpumask_copy(out_mask, pd->cpumask.pcpu); + break; + default: + ret = -EINVAL; + } + + rcu_read_unlock_bh(); + return ret; +} +EXPORT_SYMBOL(padata_get_cpumask); + +/** + * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value + * equivalent to @cpumask. * * @pinst: padata instance + * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding + * to parallel and serial cpumasks respectively. * @cpumask: the cpumask to use */ -int padata_set_cpumask(struct padata_instance *pinst, - cpumask_var_t cpumask) +int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, + cpumask_var_t cpumask) +{ + struct cpumask *serial_mask, *parallel_mask; + + switch (cpumask_type) { + case PADATA_CPU_PARALLEL: + serial_mask = pinst->cpumask.cbcpu; + parallel_mask = cpumask; + break; + case PADATA_CPU_SERIAL: + parallel_mask = pinst->cpumask.pcpu; + serial_mask = cpumask; + break; + default: + return -EINVAL; + } + + return __padata_set_cpumasks(pinst, parallel_mask, serial_mask); +} +EXPORT_SYMBOL(padata_set_cpumask); + +/** + * __padata_set_cpumasks - Set both parallel and serial cpumasks. The first + * one is used by parallel workers and the second one + * by the wokers doing serialization. 
+ * + * @pinst: padata instance + * @pcpumask: the cpumask to use for parallel workers + * @cbcpumask: the cpumsak to use for serial workers + */ +int __padata_set_cpumasks(struct padata_instance *pinst, + cpumask_var_t pcpumask, cpumask_var_t cbcpumask) { int valid; int err = 0; @@ -518,7 +672,13 @@ int padata_set_cpumask(struct padata_instance *pinst, mutex_lock(&pinst->lock); - valid = padata_validate_cpumask(pinst, cpumask); + valid = padata_validate_cpumask(pinst, pcpumask); + if (!valid) { + __padata_stop(pinst); + goto out_replace; + } + + valid = padata_validate_cpumask(pinst, cbcpumask); if (!valid) { __padata_stop(pinst); goto out_replace; @@ -526,14 +686,15 @@ int padata_set_cpumask(struct padata_instance *pinst, get_online_cpus(); - pd = padata_alloc_pd(pinst, cpumask); + pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); if (!pd) { err = -ENOMEM; goto out; } out_replace: - cpumask_copy(pinst->cpumask, cpumask); + cpumask_copy(pinst->cpumask.pcpu, pcpumask); + cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); padata_replace(pinst, pd); @@ -546,41 +707,57 @@ out: mutex_unlock(&pinst->lock); return err; + } -EXPORT_SYMBOL(padata_set_cpumask); +EXPORT_SYMBOL(__padata_set_cpumasks); static int __padata_add_cpu(struct padata_instance *pinst, int cpu) { struct parallel_data *pd; if (cpumask_test_cpu(cpu, cpu_active_mask)) { - pd = padata_alloc_pd(pinst, pinst->cpumask); + pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, + pinst->cpumask.cbcpu); if (!pd) return -ENOMEM; padata_replace(pinst, pd); - if (padata_validate_cpumask(pinst, pinst->cpumask)) + if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) && + padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) __padata_start(pinst); } return 0; } -/** - * padata_add_cpu - add a cpu to the padata cpumask + /** + * padata_add_cpu - add a cpu to one or both(parallel and serial) + * padata cpumasks. * * @pinst: padata instance * @cpu: cpu to add + * @mask: bitmask of flags specifying to which cpumask @cpu shuld be added. + * The @mask may be any combination of the following flags: + * PADATA_CPU_SERIAL - serial cpumask + * PADATA_CPU_PARALLEL - parallel cpumask */ -int padata_add_cpu(struct padata_instance *pinst, int cpu) + +int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask) { int err; + if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) + return -EINVAL; + mutex_lock(&pinst->lock); get_online_cpus(); - cpumask_set_cpu(cpu, pinst->cpumask); + if (mask & PADATA_CPU_SERIAL) + cpumask_set_cpu(cpu, pinst->cpumask.cbcpu); + if (mask & PADATA_CPU_PARALLEL) + cpumask_set_cpu(cpu, pinst->cpumask.pcpu); + err = __padata_add_cpu(pinst, cpu); put_online_cpus(); @@ -596,13 +773,15 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) if (cpumask_test_cpu(cpu, cpu_online_mask)) { - if (!padata_validate_cpumask(pinst, pinst->cpumask)) { + if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || + !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) { __padata_stop(pinst); padata_replace(pinst, pd); goto out; } - pd = padata_alloc_pd(pinst, pinst->cpumask); + pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, + pinst->cpumask.cbcpu); if (!pd) return -ENOMEM; @@ -613,20 +792,32 @@ out: return 0; } -/** - * padata_remove_cpu - remove a cpu from the padata cpumask + /** + * padata_remove_cpu - remove a cpu from the one or both(serial and paralell) + * padata cpumasks. 
* * @pinst: padata instance * @cpu: cpu to remove + * @mask: bitmask specifying from which cpumask @cpu should be removed + * The @mask may be any combination of the following flags: + * PADATA_CPU_SERIAL - serial cpumask + * PADATA_CPU_PARALLEL - parallel cpumask */ -int padata_remove_cpu(struct padata_instance *pinst, int cpu) +int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask) { int err; + if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) + return -EINVAL; + mutex_lock(&pinst->lock); get_online_cpus(); - cpumask_clear_cpu(cpu, pinst->cpumask); + if (mask & PADATA_CPU_SERIAL) + cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu); + if (mask & PADATA_CPU_PARALLEL) + cpumask_clear_cpu(cpu, pinst->cpumask.pcpu); + err = __padata_remove_cpu(pinst, cpu); put_online_cpus(); @@ -672,6 +863,14 @@ void padata_stop(struct padata_instance *pinst) EXPORT_SYMBOL(padata_stop); #ifdef CONFIG_HOTPLUG_CPU + +static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu) +{ + return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) || + cpumask_test_cpu(cpu, pinst->cpumask.cbcpu); +} + + static int padata_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -684,7 +883,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: - if (!cpumask_test_cpu(cpu, pinst->cpumask)) + if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); err = __padata_add_cpu(pinst, cpu); @@ -695,7 +894,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: - if (!cpumask_test_cpu(cpu, pinst->cpumask)) + if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); err = __padata_remove_cpu(pinst, cpu); @@ -706,7 +905,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: - if (!cpumask_test_cpu(cpu, pinst->cpumask)) + if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); __padata_remove_cpu(pinst, cpu); @@ -714,7 +913,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: - if (!cpumask_test_cpu(cpu, pinst->cpumask)) + if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); __padata_add_cpu(pinst, cpu); @@ -726,13 +925,29 @@ static int padata_cpu_callback(struct notifier_block *nfb, #endif /** - * padata_alloc - allocate and initialize a padata instance + * padata_alloc - Allocate and initialize padata instance. + * Use default cpumask(cpu_possible_mask) + * for serial and parallel workes. + * + * @wq: workqueue to use for the allocated padata instance + */ +struct padata_instance *padata_alloc(struct workqueue_struct *wq) +{ + return __padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); +} +EXPORT_SYMBOL(padata_alloc); + +/** + * __padata_alloc - allocate and initialize a padata instance + * and specify cpumasks for serial and parallel workers. 
* - * @cpumask: cpumask that padata uses for parallelization * @wq: workqueue to use for the allocated padata instance + * @pcpumask: cpumask that will be used for padata parallelization + * @cbcpumask: cpumask that will be used for padata serialization */ -struct padata_instance *padata_alloc(const struct cpumask *cpumask, - struct workqueue_struct *wq) +struct padata_instance *__padata_alloc(struct workqueue_struct *wq, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask) { struct padata_instance *pinst; struct parallel_data *pd = NULL; @@ -742,21 +957,26 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, goto err; get_online_cpus(); - - if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL)) + if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL)) + goto err_free_inst; + if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) { + free_cpumask_var(pinst->cpumask.pcpu); goto err_free_inst; - - if (padata_validate_cpumask(pinst, cpumask)) { - pd = padata_alloc_pd(pinst, cpumask); - if (!pd) - goto err_free_mask; } + if (!padata_validate_cpumask(pinst, pcpumask) || + !padata_validate_cpumask(pinst, cbcpumask)) + goto err_free_masks; + + pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); + if (!pd) + goto err_free_masks; rcu_assign_pointer(pinst->pd, pd); pinst->wq = wq; - cpumask_copy(pinst->cpumask, cpumask); + cpumask_copy(pinst->cpumask.pcpu, pcpumask); + cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); pinst->flags = 0; @@ -768,19 +988,21 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, put_online_cpus(); + BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); mutex_init(&pinst->lock); return pinst; -err_free_mask: - free_cpumask_var(pinst->cpumask); +err_free_masks: + free_cpumask_var(pinst->cpumask.pcpu); + free_cpumask_var(pinst->cpumask.cbcpu); err_free_inst: kfree(pinst); put_online_cpus(); err: return NULL; } -EXPORT_SYMBOL(padata_alloc); +EXPORT_SYMBOL(__padata_alloc); /** * padata_free - free a padata instance @@ -795,7 +1017,8 @@ void padata_free(struct padata_instance *pinst) padata_stop(pinst); padata_free_pd(pinst->pd); - free_cpumask_var(pinst->cpumask); + free_cpumask_var(pinst->cpumask.pcpu); + free_cpumask_var(pinst->cpumask.cbcpu); kfree(pinst); } EXPORT_SYMBOL(padata_free); -- cgit v1.2.3 From 5e017dc3f8bc9e4a28983666e6bc00114a2018bb Mon Sep 17 00:00:00 2001 From: Dan Kruchinin Date: Wed, 14 Jul 2010 14:33:08 +0400 Subject: padata: Added sysfs primitives to padata subsystem Added sysfs primitives to padata subsystem. Now API user may embedded kobject each padata instance contains into any sysfs hierarchy. For now padata sysfs interface provides only two objects: serial_cpumask [RW] - cpumask for serial workers parallel_cpumask [RW] - cpumask for parallel workers Signed-off-by: Dan Kruchinin Signed-off-by: Herbert Xu --- include/linux/padata.h | 5 +- kernel/padata.c | 155 ++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 150 insertions(+), 10 deletions(-) (limited to 'kernel/padata.c') diff --git a/include/linux/padata.h b/include/linux/padata.h index 621e7736690c..293ad46ffced 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -26,6 +26,7 @@ #include #include #include +#include #define PADATA_CPU_SERIAL 0x01 #define PADATA_CPU_PARALLEL 0x02 @@ -142,7 +143,8 @@ struct parallel_data { * cbcpu for parallel and serial works respectivly. 
* @cpumask_change_notifier: Notifiers chain for user-defined notify * callbacks that will be called when either @pcpu or @cbcpu - * or both cpumasks change. + * or both cpumasks change. + * @kobj: padata instance kernel object. * @lock: padata instance lock. * @flags: padata flags. */ @@ -155,6 +157,7 @@ struct padata_instance { cpumask_var_t cbcpu; } cpumask; struct blocking_notifier_head cpumask_change_notifier; + struct kobject kobj; struct mutex lock; u8 flags; #define PADATA_INIT 1 diff --git a/kernel/padata.c b/kernel/padata.c index 84d0ca9dac9c..526f9ea2fcc8 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #define MAX_SEQ_NR (INT_MAX - NR_CPUS) @@ -924,6 +925,149 @@ static int padata_cpu_callback(struct notifier_block *nfb, } #endif +static void __padata_free(struct padata_instance *pinst) +{ +#ifdef CONFIG_HOTPLUG_CPU + unregister_hotcpu_notifier(&pinst->cpu_notifier); +#endif + + padata_stop(pinst); + padata_free_pd(pinst->pd); + free_cpumask_var(pinst->cpumask.pcpu); + free_cpumask_var(pinst->cpumask.cbcpu); + kfree(pinst); +} + +#define kobj2pinst(_kobj) \ + container_of(_kobj, struct padata_instance, kobj) +#define attr2pentry(_attr) \ + container_of(_attr, struct padata_sysfs_entry, attr) + +static void padata_sysfs_release(struct kobject *kobj) +{ + struct padata_instance *pinst = kobj2pinst(kobj); + __padata_free(pinst); +} + +struct padata_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct padata_instance *, struct attribute *, char *); + ssize_t (*store)(struct padata_instance *, struct attribute *, + const char *, size_t); +}; + +static ssize_t show_cpumask(struct padata_instance *pinst, + struct attribute *attr, char *buf) +{ + struct cpumask *cpumask; + ssize_t len; + + mutex_lock(&pinst->lock); + if (!strcmp(attr->name, "serial_cpumask")) + cpumask = pinst->cpumask.cbcpu; + else + cpumask = pinst->cpumask.pcpu; + + len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask), + nr_cpu_ids); + if (PAGE_SIZE - len < 2) + len = -EINVAL; + else + len += sprintf(buf + len, "\n"); + + mutex_unlock(&pinst->lock); + return len; +} + +static ssize_t store_cpumask(struct padata_instance *pinst, + struct attribute *attr, + const char *buf, size_t count) +{ + cpumask_var_t new_cpumask; + ssize_t ret; + int mask_type; + + if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL)) + return -ENOMEM; + + ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask), + nr_cpumask_bits); + if (ret < 0) + goto out; + + mask_type = !strcmp(attr->name, "serial_cpumask") ? 
+ PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL; + ret = padata_set_cpumask(pinst, mask_type, new_cpumask); + if (!ret) + ret = count; + +out: + free_cpumask_var(new_cpumask); + return ret; +} + +#define PADATA_ATTR_RW(_name, _show_name, _store_name) \ + static struct padata_sysfs_entry _name##_attr = \ + __ATTR(_name, 0644, _show_name, _store_name) +#define PADATA_ATTR_RO(_name, _show_name) \ + static struct padata_sysfs_entry _name##_attr = \ + __ATTR(_name, 0400, _show_name, NULL) + +PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask); +PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask); + +/* + * Padata sysfs provides the following objects: + * serial_cpumask [RW] - cpumask for serial workers + * parallel_cpumask [RW] - cpumask for parallel workers + */ +static struct attribute *padata_default_attrs[] = { + &serial_cpumask_attr.attr, + ¶llel_cpumask_attr.attr, + NULL, +}; + +static ssize_t padata_sysfs_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct padata_instance *pinst; + struct padata_sysfs_entry *pentry; + ssize_t ret = -EIO; + + pinst = kobj2pinst(kobj); + pentry = attr2pentry(attr); + if (pentry->show) + ret = pentry->show(pinst, attr, buf); + + return ret; +} + +static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct padata_instance *pinst; + struct padata_sysfs_entry *pentry; + ssize_t ret = -EIO; + + pinst = kobj2pinst(kobj); + pentry = attr2pentry(attr); + if (pentry->show) + ret = pentry->store(pinst, attr, buf, count); + + return ret; +} + +static const struct sysfs_ops padata_sysfs_ops = { + .show = padata_sysfs_show, + .store = padata_sysfs_store, +}; + +static struct kobj_type padata_attr_type = { + .sysfs_ops = &padata_sysfs_ops, + .default_attrs = padata_default_attrs, + .release = padata_sysfs_release, +}; + /** * padata_alloc - Allocate and initialize padata instance. * Use default cpumask(cpu_possible_mask) @@ -989,6 +1133,7 @@ struct padata_instance *__padata_alloc(struct workqueue_struct *wq, put_online_cpus(); BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); + kobject_init(&pinst->kobj, &padata_attr_type); mutex_init(&pinst->lock); return pinst; @@ -1011,14 +1156,6 @@ EXPORT_SYMBOL(__padata_alloc); */ void padata_free(struct padata_instance *pinst) { -#ifdef CONFIG_HOTPLUG_CPU - unregister_hotcpu_notifier(&pinst->cpu_notifier); -#endif - - padata_stop(pinst); - padata_free_pd(pinst->pd); - free_cpumask_var(pinst->cpumask.pcpu); - free_cpumask_var(pinst->cpumask.cbcpu); - kfree(pinst); + kobject_put(&pinst->kobj); } EXPORT_SYMBOL(padata_free); -- cgit v1.2.3 From fad3a906d324c02b3c25ef51f702384154089846 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 20 Jul 2010 08:48:34 +0200 Subject: padata: Fix cpu index counting The counting of the cpu index got lost with a recent commit. This patch restores it. This fixes a hang of the parallel worker threads on cpu hotplug. 
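For illustration, a simplified sketch of padata_init_pqueues() with the counting restored (reconstructed from the surrounding hunks; the per-queue work item and object counter setup of the real function is omitted):

static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index = 0, cpu;
	struct padata_parallel_queue *pqueue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		pqueue->pd = pd;
		/* Each parallel queue gets a distinct, consecutive index;
		 * without the increment every queue ended up with index 0,
		 * which is what hung the parallel workers on cpu hotplug. */
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
	}
}
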
Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel/padata.c') diff --git a/kernel/padata.c b/kernel/padata.c index 526f9ea2fcc8..4287868bbe37 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -408,6 +408,7 @@ static void padata_init_pqueues(struct parallel_data *pd) pqueue = per_cpu_ptr(pd->pqueue, cpu); pqueue->pd = pd; pqueue->cpu_index = cpu_index; + cpu_index++; __padata_list_init(&pqueue->reorder); __padata_list_init(&pqueue->parallel); -- cgit v1.2.3 From b89661dff525a46edb7ee8a4423b5212068c05c0 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 20 Jul 2010 08:49:20 +0200 Subject: padata: Allocate cpumask dependend recources in any case The cpumask separation work assumes the cpumask dependend recources present regardless of valid or invalid cpumasks. With this patch we allocate the cpumask dependend recources in any case. This fixes two NULL pointer dereference crashes in padata_replace and in padata_get_cpumask. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) (limited to 'kernel/padata.c') diff --git a/kernel/padata.c b/kernel/padata.c index 4287868bbe37..6a519454a5bd 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -417,7 +417,7 @@ static void padata_init_pqueues(struct parallel_data *pd) } num_cpus = cpumask_weight(pd->cpumask.pcpu); - pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1; + pd->max_seq_nr = num_cpus ? (MAX_SEQ_NR / num_cpus) * num_cpus - 1 : 0; } /* Allocate and initialize the internal cpumask dependend resources. */ @@ -527,21 +527,19 @@ static void padata_replace(struct padata_instance *pinst, rcu_assign_pointer(pinst->pd, pd_new); synchronize_rcu(); - if (!pd_old) - goto out; - padata_flush_queues(pd_old); if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu)) notification_mask |= PADATA_CPU_PARALLEL; if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu)) notification_mask |= PADATA_CPU_SERIAL; + padata_flush_queues(pd_old); padata_free_pd(pd_old); + if (notification_mask) blocking_notifier_call_chain(&pinst->cpumask_change_notifier, notification_mask, pinst); -out: pinst->flags &= ~PADATA_RESET; } @@ -673,6 +671,7 @@ int __padata_set_cpumasks(struct padata_instance *pinst, struct parallel_data *pd = NULL; mutex_lock(&pinst->lock); + get_online_cpus(); valid = padata_validate_cpumask(pinst, pcpumask); if (!valid) { @@ -681,20 +680,16 @@ int __padata_set_cpumasks(struct padata_instance *pinst, } valid = padata_validate_cpumask(pinst, cbcpumask); - if (!valid) { + if (!valid) __padata_stop(pinst); - goto out_replace; - } - - get_online_cpus(); +out_replace: pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); if (!pd) { err = -ENOMEM; goto out; } -out_replace: cpumask_copy(pinst->cpumask.pcpu, pcpumask); cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); @@ -705,7 +700,6 @@ out_replace: out: put_online_cpus(); - mutex_unlock(&pinst->lock); return err; @@ -776,11 +770,8 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) if (cpumask_test_cpu(cpu, cpu_online_mask)) { if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || - !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) { + !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) __padata_stop(pinst); - padata_replace(pinst, pd); - goto out; - } pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, pinst->cpumask.cbcpu); @@ -790,7 +781,6 @@ static int 
__padata_remove_cpu(struct padata_instance *pinst, int cpu) padata_replace(pinst, pd); } -out: return 0; } -- cgit v1.2.3 From 7424713b83587006da72da84d7922471e366faba Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 20 Jul 2010 08:51:25 +0200 Subject: padata: Check for valid cpumasks Now that we allow to change the cpumasks from userspace, we have to check for valid cpumasks in padata_do_parallel. This patch adds the necessary check. This fixes a division by zero crash if the parallel cpumask contains no active cpu. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/padata.c') diff --git a/kernel/padata.c b/kernel/padata.c index 6a519454a5bd..7f895e2b4efb 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -114,7 +114,7 @@ int padata_do_parallel(struct padata_instance *pinst, pd = rcu_dereference(pinst->pd); err = -EINVAL; - if (!(pinst->flags & PADATA_INIT)) + if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID) goto out; if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) -- cgit v1.2.3 From e6cc11707661770ca2bd4db4b0256d28f48e7541 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 27 Jul 2010 07:14:28 +0200 Subject: padata: Rename padata_alloc functions We rename padata_alloc to padata_alloc_possible because this function allocates a padata_instance and uses the cpu_possible mask for parallel and serial workers. Also we rename __padata_alloc to padata_alloc to avoid to export underlined functions. Underlined functions are considered to be private to padata. Users are updated accordingly. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 2 +- include/linux/padata.h | 9 +++++---- kernel/padata.c | 24 ++++++++++++------------ 3 files changed, 18 insertions(+), 17 deletions(-) (limited to 'kernel/padata.c') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 794c172b99f7..55460839624e 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -457,7 +457,7 @@ static int __pcrypt_init_instance(struct pcrypt_instance *pcrypt, if (!pcrypt->wq) goto err; - pcrypt->pinst = padata_alloc(pcrypt->wq); + pcrypt->pinst = padata_alloc_possible(pcrypt->wq); if (!pcrypt->pinst) goto err_destroy_workqueue; diff --git a/include/linux/padata.h b/include/linux/padata.h index 293ad46ffced..71dfc9d1f856 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -165,10 +165,11 @@ struct padata_instance { #define PADATA_INVALID 4 }; -extern struct padata_instance *padata_alloc(struct workqueue_struct *wq); -extern struct padata_instance *__padata_alloc(struct workqueue_struct *wq, - const struct cpumask *pcpumask, - const struct cpumask *cbcpumask); +extern struct padata_instance *padata_alloc_possible( + struct workqueue_struct *wq); +extern struct padata_instance *padata_alloc(struct workqueue_struct *wq, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask); extern void padata_free(struct padata_instance *pinst); extern int padata_do_parallel(struct padata_instance *pinst, struct padata_priv *padata, int cb_cpu); diff --git a/kernel/padata.c b/kernel/padata.c index 7f895e2b4efb..12860bce6b78 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -1060,29 +1060,29 @@ static struct kobj_type padata_attr_type = { }; /** - * padata_alloc - Allocate and initialize padata instance. - * Use default cpumask(cpu_possible_mask) - * for serial and parallel workes. + * padata_alloc_possible - Allocate and initialize padata instance. 
+ * Use the cpu_possible_mask for serial and + * parallel workers. * * @wq: workqueue to use for the allocated padata instance */ -struct padata_instance *padata_alloc(struct workqueue_struct *wq) +struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq) { - return __padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); + return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); } -EXPORT_SYMBOL(padata_alloc); +EXPORT_SYMBOL(padata_alloc_possible); /** - * __padata_alloc - allocate and initialize a padata instance - * and specify cpumasks for serial and parallel workers. + * padata_alloc - allocate and initialize a padata instance and specify + * cpumasks for serial and parallel workers. * * @wq: workqueue to use for the allocated padata instance * @pcpumask: cpumask that will be used for padata parallelization * @cbcpumask: cpumask that will be used for padata serialization */ -struct padata_instance *__padata_alloc(struct workqueue_struct *wq, - const struct cpumask *pcpumask, - const struct cpumask *cbcpumask) +struct padata_instance *padata_alloc(struct workqueue_struct *wq, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask) { struct padata_instance *pinst; struct parallel_data *pd = NULL; @@ -1138,7 +1138,7 @@ err_free_inst: err: return NULL; } -EXPORT_SYMBOL(__padata_alloc); +EXPORT_SYMBOL(padata_alloc); /** * padata_free - free a padata instance -- cgit v1.2.3 From 65ff577e6b6e482ee9de3569e058edebdc02f069 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 27 Jul 2010 07:15:06 +0200 Subject: padata: Rearrange set_cpumask functions padata_set_cpumask needs to be protected by a lock. We make __padata_set_cpumasks unlocked and static. So this function can be used by the exported and locked padata_set_cpumask and padata_set_cpumasks functions. 
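From a caller's point of view the entry points are now the locked padata_set_cpumask() and padata_set_cpumasks(). A usage sketch under those assumptions (example_update_masks() and the masks it picks are invented for illustration):

/* Sketch: point an existing instance at explicit parallel/serial cpumasks.
 * pinst is assumed to come from an earlier padata_alloc_possible() call. */
static int example_update_masks(struct padata_instance *pinst)
{
	cpumask_var_t par, ser;
	int err;

	if (!alloc_cpumask_var(&par, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&ser, GFP_KERNEL)) {
		free_cpumask_var(par);
		return -ENOMEM;
	}

	cpumask_copy(par, cpu_online_mask);
	cpumask_copy(ser, cpu_online_mask);

	/* Replace both masks in one locked call ... */
	err = padata_set_cpumasks(pinst, par, ser);

	/* ... or update a single mask, selected by type. */
	if (!err)
		err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, par);

	free_cpumask_var(par);
	free_cpumask_var(ser);
	return err;
}

Both calls take pinst->lock and get_online_cpus() internally, so a caller does not need to provide any locking of its own.
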
Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- include/linux/padata.h | 6 +-- kernel/padata.c | 117 ++++++++++++++++++++++++++++--------------------- 2 files changed, 70 insertions(+), 53 deletions(-) (limited to 'kernel/padata.c') diff --git a/include/linux/padata.h b/include/linux/padata.h index 71dfc9d1f856..bb0fc5dd0bbb 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -178,9 +178,9 @@ extern int padata_get_cpumask(struct padata_instance *pinst, int cpumask_type, struct cpumask *out_mask); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern int __padata_set_cpumasks(struct padata_instance *pinst, - cpumask_var_t pcpumask, - cpumask_var_t cbcpumask); +extern int padata_set_cpumasks(struct padata_instance *pinst, + cpumask_var_t pcpumask, + cpumask_var_t cbcpumask); extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask); extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask); extern int padata_start(struct padata_instance *pinst); diff --git a/kernel/padata.c b/kernel/padata.c index 12860bce6b78..4987203770bc 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -623,6 +623,66 @@ int padata_get_cpumask(struct padata_instance *pinst, } EXPORT_SYMBOL(padata_get_cpumask); +static int __padata_set_cpumasks(struct padata_instance *pinst, + cpumask_var_t pcpumask, + cpumask_var_t cbcpumask) +{ + int valid; + struct parallel_data *pd; + + valid = padata_validate_cpumask(pinst, pcpumask); + if (!valid) { + __padata_stop(pinst); + goto out_replace; + } + + valid = padata_validate_cpumask(pinst, cbcpumask); + if (!valid) + __padata_stop(pinst); + +out_replace: + pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); + if (!pd) + return -ENOMEM; + + cpumask_copy(pinst->cpumask.pcpu, pcpumask); + cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); + + padata_replace(pinst, pd); + + if (valid) + __padata_start(pinst); + + return 0; +} + +/** + * padata_set_cpumasks - Set both parallel and serial cpumasks. The first + * one is used by parallel workers and the second one + * by the wokers doing serialization. + * + * @pinst: padata instance + * @pcpumask: the cpumask to use for parallel workers + * @cbcpumask: the cpumsak to use for serial workers + */ +int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask, + cpumask_var_t cbcpumask) +{ + int err; + + mutex_lock(&pinst->lock); + get_online_cpus(); + + err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask); + + put_online_cpus(); + mutex_unlock(&pinst->lock); + + return err; + +} +EXPORT_SYMBOL(padata_set_cpumasks); + /** * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value * equivalent to @cpumask. @@ -636,6 +696,10 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask) { struct cpumask *serial_mask, *parallel_mask; + int err = -EINVAL; + + mutex_lock(&pinst->lock); + get_online_cpus(); switch (cpumask_type) { case PADATA_CPU_PARALLEL: @@ -647,65 +711,18 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, serial_mask = cpumask; break; default: - return -EINVAL; + goto out; } - return __padata_set_cpumasks(pinst, parallel_mask, serial_mask); -} -EXPORT_SYMBOL(padata_set_cpumask); - -/** - * __padata_set_cpumasks - Set both parallel and serial cpumasks. The first - * one is used by parallel workers and the second one - * by the wokers doing serialization. 
- * - * @pinst: padata instance - * @pcpumask: the cpumask to use for parallel workers - * @cbcpumask: the cpumsak to use for serial workers - */ -int __padata_set_cpumasks(struct padata_instance *pinst, - cpumask_var_t pcpumask, cpumask_var_t cbcpumask) -{ - int valid; - int err = 0; - struct parallel_data *pd = NULL; - - mutex_lock(&pinst->lock); - get_online_cpus(); - - valid = padata_validate_cpumask(pinst, pcpumask); - if (!valid) { - __padata_stop(pinst); - goto out_replace; - } - - valid = padata_validate_cpumask(pinst, cbcpumask); - if (!valid) - __padata_stop(pinst); - -out_replace: - pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); - if (!pd) { - err = -ENOMEM; - goto out; - } - - cpumask_copy(pinst->cpumask.pcpu, pcpumask); - cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); - - padata_replace(pinst, pd); - - if (valid) - __padata_start(pinst); + err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask); out: put_online_cpus(); mutex_unlock(&pinst->lock); return err; - } -EXPORT_SYMBOL(__padata_set_cpumasks); +EXPORT_SYMBOL(padata_set_cpumask); static int __padata_add_cpu(struct padata_instance *pinst, int cpu) { -- cgit v1.2.3 From c635696c7c0fbc720698dbec34bb83e53df6a967 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 27 Jul 2010 07:15:50 +0200 Subject: padata: Pass the padata cpumasks to the cpumask_change_notifier chain We pass a pointer to the new padata cpumasks to the cpumask_change_notifier chain. So users can access the cpumasks without the need of an extra padata_get_cpumask function. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- include/linux/padata.h | 40 +++++++++++++++++++++------------------- kernel/padata.c | 3 ++- 2 files changed, 23 insertions(+), 20 deletions(-) (limited to 'kernel/padata.c') diff --git a/include/linux/padata.h b/include/linux/padata.h index bb0fc5dd0bbb..43db792f44dd 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -98,6 +98,16 @@ struct padata_parallel_queue { int cpu_index; }; +/** + * struct padata_cpumask - The cpumasks for the parallel/serial workers + * + * @pcpu: cpumask for the parallel workers. + * @cbcpu: cpumask for the serial (callback) workers. + */ +struct padata_cpumask { + cpumask_var_t pcpu; + cpumask_var_t cbcpu; +}; /** * struct parallel_data - Internal control structure, covers everything @@ -110,8 +120,7 @@ struct padata_parallel_queue { * @reorder_objects: Number of objects waiting in the reorder queues. * @refcnt: Number of objects holding a reference on this parallel_data. * @max_seq_nr: Maximal used sequence number. - * @cpumask: Contains two cpumasks: pcpu and cbcpu for - * parallel and serial workers respectively. + * @cpumask: The cpumasks in use for parallel and serial workers. * @lock: Reorder lock. * @processed: Number of already processed objects. * @timer: Reorder timer. 
@@ -120,17 +129,14 @@ struct parallel_data { struct padata_instance *pinst; struct padata_parallel_queue *pqueue; struct padata_serial_queue *squeue; - atomic_t seq_nr; - atomic_t reorder_objects; - atomic_t refcnt; - unsigned int max_seq_nr; - struct { - cpumask_var_t pcpu; - cpumask_var_t cbcpu; - } cpumask; - spinlock_t lock ____cacheline_aligned; - unsigned int processed; - struct timer_list timer; + atomic_t seq_nr; + atomic_t reorder_objects; + atomic_t refcnt; + unsigned int max_seq_nr; + struct padata_cpumask cpumask; + spinlock_t lock ____cacheline_aligned; + unsigned int processed; + struct timer_list timer; }; /** @@ -139,8 +145,7 @@ struct parallel_data { * @cpu_notifier: cpu hotplug notifier. * @wq: The workqueue in use. * @pd: The internal control structure. - * @cpumask: User supplied cpumask. Contains two cpumasks: pcpu and - * cbcpu for parallel and serial works respectivly. + * @cpumask: User supplied cpumasks for parallel and serial works. * @cpumask_change_notifier: Notifiers chain for user-defined notify * callbacks that will be called when either @pcpu or @cbcpu * or both cpumasks change. @@ -152,10 +157,7 @@ struct padata_instance { struct notifier_block cpu_notifier; struct workqueue_struct *wq; struct parallel_data *pd; - struct { - cpumask_var_t pcpu; - cpumask_var_t cbcpu; - } cpumask; + struct padata_cpumask cpumask; struct blocking_notifier_head cpumask_change_notifier; struct kobject kobj; struct mutex lock; diff --git a/kernel/padata.c b/kernel/padata.c index 4987203770bc..1c8c1d1d301d 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -538,7 +538,8 @@ static void padata_replace(struct padata_instance *pinst, if (notification_mask) blocking_notifier_call_chain(&pinst->cpumask_change_notifier, - notification_mask, pinst); + notification_mask, + &pd_new->cpumask); pinst->flags &= ~PADATA_RESET; } -- cgit v1.2.3 From 0500e9b3f11ce84fc6ee48a3e29909145e58ba48 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 27 Jul 2010 07:19:27 +0200 Subject: padata: Remove padata_get_cpumask A function that copies the padata cpumasks to a user buffer is a bit error prone. The cpumask can change any time so we can't be sure to have the right cpumask when using this function. A user who is interested in the padata cpumasks should register to the padata cpumask notifier chain instead. Users of padata_get_cpumask are already updated, so we can remove it. 
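The replacement pattern is the cpumask change notifier: since the previous patch the callback is handed a pointer to the new struct padata_cpumask directly, so there is nothing left to copy out on request. A sketch of such a callback, assuming the registration helper exported with the cpumask split (padata_register_cpumask_notifier(), not visible in the hunks quoted here):

static int example_cpumask_change(struct notifier_block *nb,
				  unsigned long mask_type, void *data)
{
	struct padata_cpumask *new_masks = data;

	/* mask_type is PADATA_CPU_PARALLEL, PADATA_CPU_SERIAL or both. */
	if (mask_type & PADATA_CPU_PARALLEL)
		pr_info("example: %u parallel cpus in use\n",
			cpumask_weight(new_masks->pcpu));
	if (mask_type & PADATA_CPU_SERIAL)
		pr_info("example: %u serial callback cpus in use\n",
			cpumask_weight(new_masks->cbcpu));

	return NOTIFY_OK;
}

static struct notifier_block example_cpumask_nb = {
	.notifier_call	= example_cpumask_change,
};

/* registered once after allocating the instance:
 *	padata_register_cpumask_notifier(pinst, &example_cpumask_nb);
 */

Unlike a one-shot copy, the notifier always reflects the cpumasks that are actually in effect, including changes made through the sysfs files.
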
Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- include/linux/padata.h | 2 -- kernel/padata.c | 35 ----------------------------------- 2 files changed, 37 deletions(-) (limited to 'kernel/padata.c') diff --git a/include/linux/padata.h b/include/linux/padata.h index 43db792f44dd..bdcd1e9eacea 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -176,8 +176,6 @@ extern void padata_free(struct padata_instance *pinst); extern int padata_do_parallel(struct padata_instance *pinst, struct padata_priv *padata, int cb_cpu); extern void padata_do_serial(struct padata_priv *padata); -extern int padata_get_cpumask(struct padata_instance *pinst, - int cpumask_type, struct cpumask *out_mask); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); extern int padata_set_cpumasks(struct padata_instance *pinst, diff --git a/kernel/padata.c b/kernel/padata.c index 1c8c1d1d301d..fd4679266ede 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -589,41 +589,6 @@ static bool padata_validate_cpumask(struct padata_instance *pinst, return true; } -/** - * padata_get_cpumask: Fetch serial or parallel cpumask from the - * given padata instance and copy it to @out_mask - * - * @pinst: A pointer to padata instance - * @cpumask_type: Specifies which cpumask will be copied. - * Possible values are PADATA_CPU_SERIAL *or* PADATA_CPU_PARALLEL - * corresponding to serial and parallel cpumask respectively. - * @out_mask: A pointer to cpumask structure where selected - * cpumask will be copied. - */ -int padata_get_cpumask(struct padata_instance *pinst, - int cpumask_type, struct cpumask *out_mask) -{ - struct parallel_data *pd; - int ret = 0; - - rcu_read_lock_bh(); - pd = rcu_dereference(pinst->pd); - switch (cpumask_type) { - case PADATA_CPU_SERIAL: - cpumask_copy(out_mask, pd->cpumask.cbcpu); - break; - case PADATA_CPU_PARALLEL: - cpumask_copy(out_mask, pd->cpumask.pcpu); - break; - default: - ret = -EINVAL; - } - - rcu_read_unlock_bh(); - return ret; -} -EXPORT_SYMBOL(padata_get_cpumask); - static int __padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask, cpumask_var_t cbcpumask) -- cgit v1.2.3
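Taken together, the series above leaves padata with the following user-visible flow. A condensed sketch of a hypothetical module (the example_* names are invented and error unwinding is trimmed to the essentials):

static struct padata_instance *example_pinst;
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	int err = -ENOMEM;

	example_wq = create_workqueue("example");
	if (!example_wq)
		return err;

	/* Default cpumasks (cpu_possible_mask for both worker types) ... */
	example_pinst = padata_alloc_possible(example_wq);
	/* ... or explicit ones:
	 *	example_pinst = padata_alloc(example_wq, pcpumask, cbcpumask);
	 */
	if (!example_pinst)
		goto err_destroy_wq;

	/* padata_start() fails if the instance has been marked
	 * PADATA_INVALID (no usable cpumask), so check its return value. */
	err = padata_start(example_pinst);
	if (err)
		goto err_free_padata;

	return 0;

err_free_padata:
	padata_free(example_pinst);
err_destroy_wq:
	destroy_workqueue(example_wq);
	return err;
}

static void __exit example_exit(void)
{
	padata_stop(example_pinst);
	padata_free(example_pinst);	/* final put on the embedded kobject */
	destroy_workqueue(example_wq);
}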