From 07bfd9bdf568a38d9440c607b72342036011f727 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Tue, 19 Nov 2019 17:41:31 +0800
Subject: crypto: pcrypt - Fix use-after-free on module unload

On module unload of pcrypt we must unregister the crypto algorithms
first and then tear down the padata structure. Otherwise the crypto
algorithms are still alive and can be used while the padata structure
is being freed.

Fixes: 5068c7a883d1 ("crypto: pcrypt - Add pcrypt crypto parallelization wrapper")
Cc: <stable@vger.kernel.org>
Signed-off-by: Herbert Xu
---
 crypto/pcrypt.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'crypto')

diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 543792e0ebf0..81bbea7f2ba6 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -362,11 +362,12 @@ err:
 
 static void __exit pcrypt_exit(void)
 {
+	crypto_unregister_template(&pcrypt_tmpl);
+
 	pcrypt_fini_padata(pencrypt);
 	pcrypt_fini_padata(pdecrypt);
 
 	kset_unregister(pcrypt_kset);
-	crypto_unregister_template(&pcrypt_tmpl);
 }
 
 subsys_initcall(pcrypt_init);
--
cgit v1.2.3
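The rule being applied generalizes beyond pcrypt: on unload, unpublish
the externally visible algorithms first, and only then free the
infrastructure backing them. A minimal sketch of the corrected
ordering, with hypothetical foo_* names standing in for a module's own
symbols:

	static void __exit foo_exit(void)
	{
		/* Unpublish first: no new users can reach the algorithms. */
		crypto_unregister_template(&foo_tmpl);

		/* Only now is it safe to free what those algorithms used. */
		foo_fini_queues();
	}
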
From bbefa1dd6a6d53537c11624752219e39959d04fb Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Tue, 26 Nov 2019 15:58:45 +0800
Subject: crypto: pcrypt - Avoid deadlock by using per-instance padata queues

If the pcrypt template is used multiple times in an algorithm, then a
deadlock occurs because all pcrypt instances share the same
padata_instance, which completes requests in the order submitted. That
is, the inner pcrypt request waits for the outer pcrypt request while
the outer request is already waiting for the inner.

This patch fixes this by allocating a set of queues for each pcrypt
instance instead of using two global queues. In order to maintain the
existing user-space interface, the pinst structure remains global so
any sysfs modifications will apply to every pcrypt instance.

Note that when an update occurs we have to allocate memory for every
pcrypt instance. Should one of the allocations fail we will abort the
update without rolling back changes already made.

The new per-instance data structure is called padata_shell and is
essentially a wrapper around parallel_data.
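As a rough sketch of the resulting usage pattern (hypothetical foo_*
names; based only on the padata_alloc_shell/padata_do_parallel API
introduced below), each user now attaches its own shell to the shared
instance and submits through that shell:

	static struct padata_instance *foo_pinst;	/* still one shared instance */
	static struct padata_shell *foo_ps;

	static int foo_init(void)
	{
		foo_ps = padata_alloc_shell(foo_pinst);
		return foo_ps ? 0 : -ENOMEM;
	}

	static int foo_submit(struct padata_priv *padata, int *cb_cpu)
	{
		/* Completion order is tracked per shell, so two nested
		 * users no longer wait on each other's queue. */
		return padata_do_parallel(foo_ps, padata, cb_cpu);
	}
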
Reproducer:

	#include <linux/if_alg.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main()
	{
		struct sockaddr_alg addr = {
			.salg_type = "aead",
			.salg_name = "pcrypt(pcrypt(rfc4106-gcm-aesni))"
		};
		int algfd, reqfd;
		char buf[32] = { 0 };

		algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(algfd, (void *)&addr, sizeof(addr));
		setsockopt(algfd, SOL_ALG, ALG_SET_KEY, buf, 20);
		reqfd = accept(algfd, 0, 0);
		write(reqfd, buf, 32);
		read(reqfd, buf, 16);
	}

Reported-by: syzbot+56c7151cad94eec37c521f0e47d2eee53f9361c4@syzkaller.appspotmail.com
Fixes: 5068c7a883d1 ("crypto: pcrypt - Add pcrypt crypto parallelization wrapper")
Signed-off-by: Herbert Xu
Tested-by: Eric Biggers
Signed-off-by: Herbert Xu
---
 crypto/pcrypt.c        |  36 +++++++-
 include/linux/padata.h |  34 +++++--
 kernel/padata.c        | 236 ++++++++++++++++++++++++++++++++++---------------
 3 files changed, 227 insertions(+), 79 deletions(-)
(limited to 'crypto')

diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 81bbea7f2ba6..3e026e7a7e75 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -24,6 +24,8 @@ static struct kset *pcrypt_kset;
 
 struct pcrypt_instance_ctx {
 	struct crypto_aead_spawn spawn;
+	struct padata_shell *psenc;
+	struct padata_shell *psdec;
 	atomic_t tfm_count;
 };
 
@@ -32,6 +34,12 @@ struct pcrypt_aead_ctx {
 	unsigned int cb_cpu;
 };
 
+static inline struct pcrypt_instance_ctx *pcrypt_tfm_ictx(
+	struct crypto_aead *tfm)
+{
+	return aead_instance_ctx(aead_alg_instance(tfm));
+}
+
 static int pcrypt_aead_setkey(struct crypto_aead *parent,
 			      const u8 *key, unsigned int keylen)
 {
@@ -90,6 +98,9 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
 	u32 flags = aead_request_flags(req);
+	struct pcrypt_instance_ctx *ictx;
+
+	ictx = pcrypt_tfm_ictx(aead);
 
 	memset(padata, 0, sizeof(struct padata_priv));
 
@@ -103,7 +114,7 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
 			       req->cryptlen, req->iv);
 	aead_request_set_ad(creq, req->assoclen);
 
-	err = padata_do_parallel(pencrypt, padata, &ctx->cb_cpu);
+	err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
 	if (!err)
 		return -EINPROGRESS;
 
@@ -132,6 +143,9 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
 	u32 flags = aead_request_flags(req);
+	struct pcrypt_instance_ctx *ictx;
+
+	ictx = pcrypt_tfm_ictx(aead);
 
 	memset(padata, 0, sizeof(struct padata_priv));
 
@@ -145,7 +159,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
 			       req->cryptlen, req->iv);
 	aead_request_set_ad(creq, req->assoclen);
 
-	err = padata_do_parallel(pdecrypt, padata, &ctx->cb_cpu);
+	err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
 	if (!err)
 		return -EINPROGRESS;
 
@@ -192,6 +206,8 @@ static void pcrypt_free(struct aead_instance *inst)
 	struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
 
 	crypto_drop_aead(&ctx->spawn);
+	padata_free_shell(ctx->psdec);
+	padata_free_shell(ctx->psenc);
 	kfree(inst);
 }
 
@@ -233,12 +249,22 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
 	if (!inst)
 		return -ENOMEM;
 
+	err = -ENOMEM;
+
 	ctx = aead_instance_ctx(inst);
+	ctx->psenc = padata_alloc_shell(pencrypt);
+	if (!ctx->psenc)
+		goto out_free_inst;
+
+	ctx->psdec = padata_alloc_shell(pdecrypt);
+	if (!ctx->psdec)
+		goto out_free_psenc;
+
 	crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));
 
 	err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
 	if (err)
-		goto out_free_inst;
+		goto out_free_psdec;
 
 	alg = crypto_spawn_aead_alg(&ctx->spawn);
 	err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
@@ -271,6 +297,10 @@ out:
 
 out_drop_aead:
 	crypto_drop_aead(&ctx->spawn);
+out_free_psdec:
+	padata_free_shell(ctx->psdec);
+out_free_psenc:
+	padata_free_shell(ctx->psenc);
 out_free_inst:
 	kfree(inst);
 	goto out;
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 23717eeaad23..cccab7a59787 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -9,6 +9,7 @@
 #ifndef PADATA_H
 #define PADATA_H
 
+#include
 #include
 #include
 #include
@@ -98,7 +99,7 @@ struct padata_cpumask {
  * struct parallel_data - Internal control structure, covers everything
  * that depends on the cpumask in use.
  *
- * @pinst: padata instance.
+ * @ps: padata_shell object.
  * @pqueue: percpu padata queues used for parallelization.
  * @squeue: percpu padata queues used for serialization.
  * @reorder_objects: Number of objects waiting in the reorder queues.
@@ -111,7 +112,7 @@ struct padata_cpumask {
  * @lock: Reorder lock.
  */
 struct parallel_data {
-	struct padata_instance		*pinst;
+	struct padata_shell		*ps;
 	struct padata_parallel_queue	__percpu *pqueue;
 	struct padata_serial_queue	__percpu *squeue;
 	atomic_t			reorder_objects;
@@ -124,14 +125,33 @@ struct parallel_data {
 	spinlock_t			lock ____cacheline_aligned;
 };
 
+/**
+ * struct padata_shell - Wrapper around struct parallel_data, its
+ * purpose is to allow the underlying control structure to be replaced
+ * on the fly using RCU.
+ *
+ * @pinst: padata instance.
+ * @pd: Actual parallel_data structure which may be substituted on the fly.
+ * @opd: Pointer to old pd to be freed by padata_replace.
+ * @list: List entry in padata_instance list.
+ */
+struct padata_shell {
+	struct padata_instance		*pinst;
+	struct parallel_data __rcu	*pd;
+	struct parallel_data		*opd;
+	struct list_head		list;
+};
+
 /**
  * struct padata_instance - The overall control structure.
  *
  * @cpu_notifier: cpu hotplug notifier.
  * @parallel_wq: The workqueue used for parallel work.
  * @serial_wq: The workqueue used for serial work.
- * @pd: The internal control structure.
+ * @pslist: List of padata_shell objects attached to this instance.
  * @cpumask: User supplied cpumasks for parallel and serial works.
+ * @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask.
+ * @omask: Temporary storage used to compute the notification mask.
  * @cpumask_change_notifier: Notifiers chain for user-defined notify
  *                           callbacks that will be called when either @pcpu or
  *                           @cbcpu or both cpumasks change.
@@ -143,8 +163,10 @@ struct padata_instance {
 	struct hlist_node		node;
 	struct workqueue_struct		*parallel_wq;
 	struct workqueue_struct		*serial_wq;
-	struct parallel_data		*pd;
+	struct list_head		pslist;
 	struct padata_cpumask		cpumask;
+	struct padata_cpumask		rcpumask;
+	cpumask_var_t			omask;
 	struct blocking_notifier_head	cpumask_change_notifier;
 	struct kobject			kobj;
 	struct mutex			lock;
@@ -156,7 +178,9 @@ struct padata_instance {
 
 extern struct padata_instance *padata_alloc_possible(const char *name);
 extern void padata_free(struct padata_instance *pinst);
-extern int padata_do_parallel(struct padata_instance *pinst,
+extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
+extern void padata_free_shell(struct padata_shell *ps);
+extern int padata_do_parallel(struct padata_shell *ps,
 			      struct padata_priv *padata, int *cb_cpu);
 extern void padata_do_serial(struct padata_priv *padata);
 extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
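Condensed from the kernel/padata.c changes that follow, the discipline
around the replaceable ps->pd pointer is the standard RCU
publish/grace-period pattern:

	/* Reader side (padata_do_parallel): pd may be swapped at any time. */
	rcu_read_lock_bh();
	pd = rcu_dereference_bh(ps->pd);
	/* ... queue the request against this pd ... */
	rcu_read_unlock_bh();

	/* Writer side (padata_replace_one/padata_replace): publish the new
	 * pd, wait a grace period, then drop the reference on the old one. */
	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);
	synchronize_rcu();
	if (atomic_dec_and_test(&ps->opd->refcnt))
		padata_free_pd(ps->opd);
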
diff --git a/kernel/padata.c b/kernel/padata.c
index fc00f7e64133..8c8755f170ca 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -89,7 +89,7 @@ static void padata_parallel_worker(struct work_struct *parallel_work)
 /**
  * padata_do_parallel - padata parallelization function
  *
- * @pinst: padata instance
+ * @ps: padata shell
  * @padata: object to be parallelized
  * @cb_cpu: pointer to the CPU that the serialization callback function should
  *          run on.  If it's not in the serial cpumask of @pinst
@@ -100,16 +100,17 @@ static void padata_parallel_worker(struct work_struct *parallel_work)
  * Note: Every object which is parallelized by padata_do_parallel
  * must be seen by padata_do_serial.
  */
-int padata_do_parallel(struct padata_instance *pinst,
+int padata_do_parallel(struct padata_shell *ps,
 		       struct padata_priv *padata, int *cb_cpu)
 {
+	struct padata_instance *pinst = ps->pinst;
 	int i, cpu, cpu_index, target_cpu, err;
 	struct padata_parallel_queue *queue;
 	struct parallel_data *pd;
 
 	rcu_read_lock_bh();
 
-	pd = rcu_dereference_bh(pinst->pd);
+	pd = rcu_dereference_bh(ps->pd);
 
 	err = -EINVAL;
 	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
@@ -212,10 +213,10 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd,
 
 static void padata_reorder(struct parallel_data *pd)
 {
+	struct padata_instance *pinst = pd->ps->pinst;
 	int cb_cpu;
 	struct padata_priv *padata;
 	struct padata_serial_queue *squeue;
-	struct padata_instance *pinst = pd->pinst;
 	struct padata_parallel_queue *next_queue;
 
 	/*
@@ -349,36 +350,39 @@ void padata_do_serial(struct padata_priv *padata)
 }
 EXPORT_SYMBOL(padata_do_serial);
 
-static int padata_setup_cpumasks(struct parallel_data *pd,
-				 const struct cpumask *pcpumask,
-				 const struct cpumask *cbcpumask)
+static int padata_setup_cpumasks(struct padata_instance *pinst)
 {
 	struct workqueue_attrs *attrs;
+	int err;
+
+	attrs = alloc_workqueue_attrs();
+	if (!attrs)
+		return -ENOMEM;
+
+	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
+	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
+	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
+	free_workqueue_attrs(attrs);
+
+	return err;
+}
+
+static int pd_setup_cpumasks(struct parallel_data *pd,
+			     const struct cpumask *pcpumask,
+			     const struct cpumask *cbcpumask)
+{
 	int err = -ENOMEM;
 
 	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
 		goto out;
-	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
-
 	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
 		goto free_pcpu_mask;
-	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
 
-	attrs = alloc_workqueue_attrs();
-	if (!attrs)
-		goto free_cbcpu_mask;
-
-	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
-	cpumask_copy(attrs->cpumask, pd->cpumask.pcpu);
-	err = apply_workqueue_attrs(pd->pinst->parallel_wq, attrs);
-	free_workqueue_attrs(attrs);
-	if (err < 0)
-		goto free_cbcpu_mask;
+	cpumask_copy(pd->cpumask.pcpu, pcpumask);
+	cpumask_copy(pd->cpumask.cbcpu, cbcpumask);
 
 	return 0;
 
-free_cbcpu_mask:
-	free_cpumask_var(pd->cpumask.cbcpu);
 free_pcpu_mask:
 	free_cpumask_var(pd->cpumask.pcpu);
 out:
@@ -422,12 +426,16 @@ static void padata_init_pqueues(struct parallel_data *pd)
 }
 
 /* Allocate and initialize the internal cpumask dependent resources. */
-static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
-					     const struct cpumask *pcpumask,
-					     const struct cpumask *cbcpumask)
+static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
 {
+	struct padata_instance *pinst = ps->pinst;
+	const struct cpumask *cbcpumask;
+	const struct cpumask *pcpumask;
 	struct parallel_data *pd;
 
+	cbcpumask = pinst->rcpumask.cbcpu;
+	pcpumask = pinst->rcpumask.pcpu;
+
 	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
 	if (!pd)
 		goto err;
@@ -440,8 +448,8 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
 	if (!pd->squeue)
 		goto err_free_pqueue;
 
-	pd->pinst = pinst;
-	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
+	pd->ps = ps;
+	if (pd_setup_cpumasks(pd, pcpumask, cbcpumask))
 		goto err_free_squeue;
 
 	padata_init_pqueues(pd);
@@ -490,32 +498,64 @@ static void __padata_stop(struct padata_instance *pinst)
 }
 
 /* Replace the internal control structure with a new one. */
-static void padata_replace(struct padata_instance *pinst,
-			   struct parallel_data *pd_new)
+static int padata_replace_one(struct padata_shell *ps)
 {
-	struct parallel_data *pd_old = pinst->pd;
-	int notification_mask = 0;
+	struct parallel_data *pd_new;
 
-	pinst->flags |= PADATA_RESET;
+	pd_new = padata_alloc_pd(ps);
+	if (!pd_new)
+		return -ENOMEM;
 
-	rcu_assign_pointer(pinst->pd, pd_new);
+	ps->opd = rcu_dereference_protected(ps->pd, 1);
+	rcu_assign_pointer(ps->pd, pd_new);
 
-	synchronize_rcu();
+	return 0;
+}
+
+static int padata_replace(struct padata_instance *pinst, int cpu)
+{
+	int notification_mask = 0;
+	struct padata_shell *ps;
+	int err;
+
+	pinst->flags |= PADATA_RESET;
 
-	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
+	cpumask_copy(pinst->omask, pinst->rcpumask.pcpu);
+	cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu,
+		    cpu_online_mask);
+	if (cpu >= 0)
+		cpumask_clear_cpu(cpu, pinst->rcpumask.pcpu);
+	if (!cpumask_equal(pinst->omask, pinst->rcpumask.pcpu))
 		notification_mask |= PADATA_CPU_PARALLEL;
-	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
+
+	cpumask_copy(pinst->omask, pinst->rcpumask.cbcpu);
+	cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu,
+		    cpu_online_mask);
+	if (cpu >= 0)
+		cpumask_clear_cpu(cpu, pinst->rcpumask.cbcpu);
+	if (!cpumask_equal(pinst->omask, pinst->rcpumask.cbcpu))
 		notification_mask |= PADATA_CPU_SERIAL;
 
-	if (atomic_dec_and_test(&pd_old->refcnt))
-		padata_free_pd(pd_old);
+	list_for_each_entry(ps, &pinst->pslist, list) {
+		err = padata_replace_one(ps);
+		if (err)
+			break;
+	}
+
+	synchronize_rcu();
+
+	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
+		if (atomic_dec_and_test(&ps->opd->refcnt))
+			padata_free_pd(ps->opd);
 
 	if (notification_mask)
 		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
 					     notification_mask,
-					     &pd_new->cpumask);
+					     &pinst->cpumask);
 
 	pinst->flags &= ~PADATA_RESET;
+
+	return err;
 }
 
 /**
@@ -568,7 +608,7 @@ static int __padata_set_cpumasks(struct padata_instance *pinst,
 				 cpumask_var_t cbcpumask)
 {
 	int valid;
-	struct parallel_data *pd;
+	int err;
 
 	valid = padata_validate_cpumask(pinst, pcpumask);
 	if (!valid) {
@@ -581,19 +621,15 @@ static int __padata_set_cpumasks(struct padata_instance *pinst,
 		__padata_stop(pinst);
 
 out_replace:
-	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
-	if (!pd)
-		return -ENOMEM;
-
 	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
 	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
 
-	padata_replace(pinst, pd);
+	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst, -1);
 
 	if (valid)
 		__padata_start(pinst);
 
-	return 0;
+	return err;
 }
 
 /**
@@ -676,46 +712,32 @@ EXPORT_SYMBOL(padata_stop);
 
 static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
 {
-	struct parallel_data *pd;
+	int err = 0;
 
 	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
-		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
-				     pinst->cpumask.cbcpu);
-		if (!pd)
-			return -ENOMEM;
-
-		padata_replace(pinst, pd);
+		err = padata_replace(pinst, -1);
 
 		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
 		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
 			__padata_start(pinst);
 	}
 
-	return 0;
+	return err;
 }
 
 static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
 {
-	struct parallel_data *pd = NULL;
+	int err = 0;
 
 	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
-
 		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
 		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
 			__padata_stop(pinst);
 
-		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
-				     pinst->cpumask.cbcpu);
-		if (!pd)
-			return -ENOMEM;
-
-		padata_replace(pinst, pd);
-
-		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
-		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
+		err = padata_replace(pinst, cpu);
 	}
 
-	return 0;
+	return err;
 }
 
 static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
@@ -763,8 +785,12 @@ static void __padata_free(struct padata_instance *pinst)
 	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
 #endif
 
+	WARN_ON(!list_empty(&pinst->pslist));
+
 	padata_stop(pinst);
-	padata_free_pd(pinst->pd);
+	free_cpumask_var(pinst->omask);
+	free_cpumask_var(pinst->rcpumask.cbcpu);
+	free_cpumask_var(pinst->rcpumask.pcpu);
 	free_cpumask_var(pinst->cpumask.pcpu);
 	free_cpumask_var(pinst->cpumask.cbcpu);
 	destroy_workqueue(pinst->serial_wq);
@@ -911,7 +937,6 @@ static struct padata_instance *padata_alloc(const char *name,
 					    const struct cpumask *cbcpumask)
 {
 	struct padata_instance *pinst;
-	struct parallel_data *pd = NULL;
 
 	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
 	if (!pinst)
@@ -939,14 +964,22 @@ static struct padata_instance *padata_alloc(const char *name,
 	    !padata_validate_cpumask(pinst, cbcpumask))
 		goto err_free_masks;
 
-	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
-	if (!pd)
+	if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL))
 		goto err_free_masks;
+	if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL))
+		goto err_free_rcpumask_pcpu;
+	if (!alloc_cpumask_var(&pinst->omask, GFP_KERNEL))
+		goto err_free_rcpumask_cbcpu;
 
-	rcu_assign_pointer(pinst->pd, pd);
+	INIT_LIST_HEAD(&pinst->pslist);
 
 	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
 	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
+	cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask);
+	cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask);
+
+	if (padata_setup_cpumasks(pinst))
+		goto err_free_omask;
 
 	pinst->flags = 0;
@@ -962,6 +995,12 @@ static struct padata_instance *padata_alloc(const char *name,
 
 	return pinst;
 
+err_free_omask:
+	free_cpumask_var(pinst->omask);
+err_free_rcpumask_cbcpu:
+	free_cpumask_var(pinst->rcpumask.cbcpu);
+err_free_rcpumask_pcpu:
+	free_cpumask_var(pinst->rcpumask.pcpu);
 err_free_masks:
 	free_cpumask_var(pinst->cpumask.pcpu);
 	free_cpumask_var(pinst->cpumask.cbcpu);
@@ -1000,6 +1039,61 @@ void padata_free(struct padata_instance *pinst)
 }
 EXPORT_SYMBOL(padata_free);
 
+/**
+ * padata_alloc_shell - Allocate and initialize padata shell.
+ *
+ * @pinst: Parent padata_instance object.
+ */
+struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
+{
+	struct parallel_data *pd;
+	struct padata_shell *ps;
+
+	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
+	if (!ps)
+		goto out;
+
+	ps->pinst = pinst;
+
+	get_online_cpus();
+	pd = padata_alloc_pd(ps);
+	put_online_cpus();
+
+	if (!pd)
+		goto out_free_ps;
+
+	mutex_lock(&pinst->lock);
+	RCU_INIT_POINTER(ps->pd, pd);
+	list_add(&ps->list, &pinst->pslist);
+	mutex_unlock(&pinst->lock);
+
+	return ps;
+
+out_free_ps:
+	kfree(ps);
+out:
+	return NULL;
+}
+EXPORT_SYMBOL(padata_alloc_shell);
+
+/**
+ * padata_free_shell - free a padata shell
+ *
+ * @ps: padata shell to free
+ */
+void padata_free_shell(struct padata_shell *ps)
+{
+	struct padata_instance *pinst = ps->pinst;
+
+	mutex_lock(&pinst->lock);
+	list_del(&ps->list);
+	padata_free_pd(rcu_dereference_protected(ps->pd, 1));
+	mutex_unlock(&pinst->lock);
+
+	kfree(ps);
+}
+EXPORT_SYMBOL(padata_free_shell);
+
 #ifdef CONFIG_HOTPLUG_CPU
 
 static __init int padata_driver_init(void)
--
cgit v1.2.3

From 9c1e8836edbbaf3656bc07437b59c04be034ac4e Mon Sep 17 00:00:00 2001
From: Kees Cook
Date: Tue, 26 Nov 2019 22:08:02 -0800
Subject: crypto: x86 - Regularize glue function prototypes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The crypto glue performed function prototype casting via macros to make
indirect calls to assembly routines. Instead of performing casts at the
call sites (which trips Control Flow Integrity prototype checking),
switch each prototype to a common standard set of arguments which allows
the removal of the existing macros. In order to keep pointer math
unchanged, internal casting between u128 pointers and u8 pointers is
added.

Co-developed-by: João Moreira
Signed-off-by: João Moreira
Signed-off-by: Kees Cook
Reviewed-by: Eric Biggers
Signed-off-by: Herbert Xu
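To make the mechanism concrete: with CFI, an indirect call verifies
that the callee's actual prototype matches the function-pointer type,
so a cast can no longer paper over a mismatch. A hedged before/after
sketch (the macro body is representative of the glue_helper.h pattern
being removed, not a verbatim quote):

	typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);

	/* Old: a specifically typed asm routine forced into the generic
	 * pointer type at the call site; faults under CFI checking. */
	asmlinkage void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
					 const u8 *src);
	#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))

	/* New: the routine itself carries the common prototype, and any
	 * width-specific types (u128 vs u8) are recovered by casts inside
	 * the callee, keeping the pointer math unchanged. */
	asmlinkage void camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src);
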
---
 arch/x86/crypto/aesni-intel_asm.S          |  8 ++--
 arch/x86/crypto/aesni-intel_glue.c         | 45 +++++++-----------
 arch/x86/crypto/camellia_aesni_avx2_glue.c | 74 ++++++++++++++---------------
 arch/x86/crypto/camellia_aesni_avx_glue.c  | 72 +++++++++++++---------------
 arch/x86/crypto/camellia_glue.c            | 45 +++++++++---------
 arch/x86/crypto/cast6_avx_glue.c           | 68 ++++++++++++---------------
 arch/x86/crypto/glue_helper.c              | 23 +++++----
 arch/x86/crypto/serpent_avx2_glue.c        | 65 ++++++++++++--------------
 arch/x86/crypto/serpent_avx_glue.c         | 63 ++++++++++++-------------
 arch/x86/crypto/serpent_sse2_glue.c        | 30 +++++++-----
 arch/x86/crypto/twofish_avx_glue.c         | 75 +++++++++++++-----------------
 arch/x86/crypto/twofish_glue_3way.c        | 37 ++++++++-------
 arch/x86/include/asm/crypto/camellia.h     | 63 ++++++++++++-------------
 arch/x86/include/asm/crypto/glue_helper.h  | 18 +++----
 arch/x86/include/asm/crypto/serpent-avx.h  | 20 ++++----
 arch/x86/include/asm/crypto/serpent-sse2.h | 28 +++++------
 arch/x86/include/asm/crypto/twofish.h      | 19 ++++----
 crypto/cast6_generic.c                     | 18 +++----
 crypto/serpent_generic.c                   |  6 ++-
 include/crypto/cast6.h                     |  4 +-
 include/crypto/serpent.h                   |  4 +-
 include/crypto/xts.h                       |  2 -
 22 files changed, 374 insertions(+), 413 deletions(-)
(limited to 'crypto')

diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index d28503f99f58..cad6e1bfa7d5 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1942,7 +1942,7 @@ SYM_FUNC_START(aesni_set_key)
 SYM_FUNC_END(aesni_set_key)
 
 /*
- * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ * void aesni_enc(const void *ctx, u8 *dst, const u8 *src)
  */
 SYM_FUNC_START(aesni_enc)
 	FRAME_BEGIN
@@ -2131,7 +2131,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc4)
 SYM_FUNC_END(_aesni_enc4)
 
 /*
- * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ * void aesni_dec (const void *ctx, u8 *dst, const u8 *src)
  */
 SYM_FUNC_START(aesni_dec)
 	FRAME_BEGIN
@@ -2716,8 +2716,8 @@ SYM_FUNC_END(aesni_ctr_enc)
 	pxor CTR, IV;
 
 /*
- * void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
- *			 bool enc, u8 *iv)
+ * void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *dst,
+ *			 const u8 *src, bool enc, le128 *iv)
 */
 SYM_FUNC_START(aesni_xts_crypt8)
 	FRAME_BEGIN
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 3e707e81afdb..670f8fcf2544 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -83,10 +83,8 @@ struct gcm_context_data {
 
 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
 			     unsigned int key_len);
-asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
-			  const u8 *in);
-asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
-			  const u8 *in);
+asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
+asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len);
 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
@@ -106,8 +104,8 @@ static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
-asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
-				 const u8 *in, bool enc, u8 *iv);
+asmlinkage void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *out,
+				 const u8 *in, bool enc, le128 *iv);
 
 /* asmlinkage void aesni_gcm_enc()
  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
@@ -550,29 +548,24 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
 }
 
-static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
+static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	aesni_enc(ctx, out, in);
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc);
 }
 
-static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec);
 }
 
-static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_enc8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
+	aesni_xts_crypt8(ctx, dst, src, true, iv);
 }
 
-static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_dec8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
-}
-
-static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
-{
-	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
+	aesni_xts_crypt8(ctx, dst, src, false, iv);
 }
 
 static const struct common_glue_ctx aesni_enc_xts = {
@@ -581,10 +574,10 @@ static const struct common_glue_ctx aesni_enc_xts = {
 
 	.funcs = { {
 		.num_blocks = 8,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
+		.fn_u = { .xts = aesni_xts_enc8 }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
+		.fn_u = { .xts = aesni_xts_enc }
 	} }
 };
 
@@ -594,10 +587,10 @@ static const struct common_glue_ctx aesni_dec_xts = {
 
 	.funcs = { {
 		.num_blocks = 8,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
+		.fn_u = { .xts = aesni_xts_dec8 }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
+		.fn_u = { .xts = aesni_xts_dec }
 	} }
 };
 
@@ -606,8 +599,7 @@ static int xts_encrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&aesni_enc_xts, req,
-				   XTS_TWEAK_CAST(aesni_xts_tweak),
+	return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc,
 				   aes_ctx(ctx->raw_tweak_ctx),
 				   aes_ctx(ctx->raw_crypt_ctx),
 				   false);
@@ -618,8 +610,7 @@ static int xts_decrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&aesni_dec_xts, req,
-				   XTS_TWEAK_CAST(aesni_xts_tweak),
+	return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc,
 				   aes_ctx(ctx->raw_tweak_ctx),
 				   aes_ctx(ctx->raw_crypt_ctx),
 				   true);
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index a4f00128ea55..a8cc2c83fe1b 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -19,20 +19,17 @@
 #define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32
 
 /* 32-way AVX2/AES-NI parallel cipher functions */
-asmlinkage void camellia_ecb_enc_32way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src);
-asmlinkage void camellia_ecb_dec_32way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src);
+asmlinkage void camellia_ecb_enc_32way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void camellia_ecb_dec_32way(const void *ctx, u8 *dst, const u8 *src);
 
-asmlinkage void camellia_cbc_dec_32way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src);
-asmlinkage void camellia_ctr_32way(struct camellia_ctx *ctx, u8 *dst,
-				   const u8 *src, le128 *iv);
+asmlinkage void camellia_cbc_dec_32way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void camellia_ctr_32way(const void *ctx, u8 *dst, const u8 *src,
+				   le128 *iv);
 
-asmlinkage void camellia_xts_enc_32way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src, le128 *iv);
-asmlinkage void camellia_xts_dec_32way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_enc_32way(const void *ctx, u8 *dst, const u8 *src,
+				       le128 *iv);
+asmlinkage void camellia_xts_dec_32way(const void *ctx, u8 *dst, const u8 *src,
+				       le128 *iv);
 
 static const struct common_glue_ctx camellia_enc = {
 	.num_funcs = 4,
@@ -40,16 +37,16 @@ static const struct common_glue_ctx camellia_enc = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_32way) }
+		.fn_u = { .ecb = camellia_ecb_enc_32way }
 	}, {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) }
+		.fn_u = { .ecb = camellia_ecb_enc_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) }
+		.fn_u = { .ecb = camellia_enc_blk_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) }
+		.fn_u = { .ecb = camellia_enc_blk }
 	} }
 };
 
@@ -59,16 +56,16 @@ static const struct common_glue_ctx camellia_ctr = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_32way) }
+		.fn_u = { .ctr = camellia_ctr_32way }
 	}, {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) }
+		.fn_u = { .ctr = camellia_ctr_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) }
+		.fn_u = { .ctr = camellia_crypt_ctr_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) }
+		.fn_u = { .ctr = camellia_crypt_ctr }
 	} }
 };
 
@@ -78,13 +75,13 @@ static const struct common_glue_ctx camellia_enc_xts = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_32way) }
+		.fn_u = { .xts = camellia_xts_enc_32way }
 	}, {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) }
+		.fn_u = { .xts = camellia_xts_enc_16way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) }
+		.fn_u = { .xts = camellia_xts_enc }
 	} }
 };
 
@@ -94,16 +91,16 @@ static const struct common_glue_ctx camellia_dec = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_32way) }
+		.fn_u = { .ecb = camellia_ecb_dec_32way }
 	}, {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) }
+		.fn_u = { .ecb = camellia_ecb_dec_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) }
+		.fn_u = { .ecb = camellia_dec_blk_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) }
+		.fn_u = { .ecb = camellia_dec_blk }
 	} }
 };
 
@@ -113,16 +110,16 @@ static const struct common_glue_ctx camellia_dec_cbc = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_32way) }
+		.fn_u = { .cbc = camellia_cbc_dec_32way }
 	}, {
		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) }
+		.fn_u = { .cbc = camellia_cbc_dec_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) }
+		.fn_u = { .cbc = camellia_decrypt_cbc_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) }
+		.fn_u = { .cbc = camellia_dec_blk }
 	} }
 };
 
@@ -132,13 +129,13 @@ static const struct common_glue_ctx camellia_dec_xts = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_32way) }
+		.fn_u = { .xts = camellia_xts_dec_32way }
 	}, {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) }
+		.fn_u = { .xts = camellia_xts_dec_16way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) }
+		.fn_u = { .xts = camellia_xts_dec }
 	} }
 };
 
@@ -161,8 +158,7 @@ static int ecb_decrypt(struct skcipher_request *req)
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-	return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
-					   req);
+	return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -180,8 +176,7 @@ static int xts_encrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&camellia_enc_xts, req,
-				   XTS_TWEAK_CAST(camellia_enc_blk),
+	return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk,
 				   &ctx->tweak_ctx, &ctx->crypt_ctx, false);
 }
 
@@ -190,8 +185,7 @@ static int xts_decrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&camellia_dec_xts, req,
-				   XTS_TWEAK_CAST(camellia_enc_blk),
+	return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk,
 				   &ctx->tweak_ctx, &ctx->crypt_ctx, true);
 }
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index f28d282779b8..31a82a79f4ac 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -18,41 +18,36 @@
 #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
 
 /* 16-way parallel cipher functions (avx/aes-ni) */
-asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src);
+asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_ecb_enc_16way);
 
-asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src);
+asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way);
 
-asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src);
+asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way);
 
-asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
-				   const u8 *src, le128 *iv);
+asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
+				   le128 *iv);
 EXPORT_SYMBOL_GPL(camellia_ctr_16way);
 
-asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
+				       le128 *iv);
 EXPORT_SYMBOL_GPL(camellia_xts_enc_16way);
 
-asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
-				       const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
+				       le128 *iv);
 EXPORT_SYMBOL_GPL(camellia_xts_dec_16way);
 
-void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-				  GLUE_FUNC_CAST(camellia_enc_blk));
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_enc_blk);
 }
 EXPORT_SYMBOL_GPL(camellia_xts_enc);
 
-void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-				  GLUE_FUNC_CAST(camellia_dec_blk));
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_dec_blk);
 }
 EXPORT_SYMBOL_GPL(camellia_xts_dec);
 
@@ -62,13 +57,13 @@ static const struct common_glue_ctx camellia_enc = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) }
+		.fn_u = { .ecb = camellia_ecb_enc_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) }
+		.fn_u = { .ecb = camellia_enc_blk_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) }
+		.fn_u = { .ecb = camellia_enc_blk }
 	} }
 };
 
@@ -78,13 +73,13 @@ static const struct common_glue_ctx camellia_ctr = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) }
+		.fn_u = { .ctr = camellia_ctr_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) }
+		.fn_u = { .ctr = camellia_crypt_ctr_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) }
+		.fn_u = { .ctr = camellia_crypt_ctr }
 	} }
 };
 
@@ -94,10 +89,10 @@ static const struct common_glue_ctx camellia_enc_xts = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) }
+		.fn_u = { .xts = camellia_xts_enc_16way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) }
+		.fn_u = { .xts = camellia_xts_enc }
 	} }
 };
 
@@ -107,13 +102,13 @@ static const struct common_glue_ctx camellia_dec = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) }
+		.fn_u = { .ecb = camellia_ecb_dec_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) }
+		.fn_u = { .ecb = camellia_dec_blk_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) }
+		.fn_u = { .ecb = camellia_dec_blk }
 	} }
 };
 
@@ -123,13 +118,13 @@ static const struct common_glue_ctx camellia_dec_cbc = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) }
+		.fn_u = { .cbc = camellia_cbc_dec_16way }
 	}, {
 		.num_blocks = 2,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) }
+		.fn_u = { .cbc = camellia_decrypt_cbc_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) }
+		.fn_u = { .cbc = camellia_dec_blk }
 	} }
 };
 
@@ -139,10 +134,10 @@ static const struct common_glue_ctx camellia_dec_xts = {
 
 	.funcs = { {
 		.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) }
+		.fn_u = { .xts = camellia_xts_dec_16way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) }
+		.fn_u = { .xts = camellia_xts_dec }
 	} }
 };
 
@@ -165,8 +160,7 @@ static int ecb_decrypt(struct skcipher_request *req)
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-	return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
-					   req);
+	return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -206,8 +200,7 @@ static int xts_encrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&camellia_enc_xts, req,
-				   XTS_TWEAK_CAST(camellia_enc_blk),
+	return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk,
 				   &ctx->tweak_ctx, &ctx->crypt_ctx, false);
 }
 
@@ -216,8 +209,7 @@ static int xts_decrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&camellia_dec_xts, req,
-				   XTS_TWEAK_CAST(camellia_enc_blk),
+	return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk,
 				   &ctx->tweak_ctx, &ctx->crypt_ctx, true);
 }
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index 7c62db56ffe1..5f3ed5af68d7 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -18,19 +18,17 @@
 #include
 
 /* regular block cipher functions */
-asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
-				   const u8 *src, bool xor);
+asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src,
+				   bool xor);
 EXPORT_SYMBOL_GPL(__camellia_enc_blk);
 
-asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
-				 const u8 *src);
+asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_dec_blk);
 
 /* 2-way parallel cipher functions */
-asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
-					const u8 *src, bool xor);
+asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src,
+					bool xor);
 EXPORT_SYMBOL_GPL(__camellia_enc_blk_2way);
 
-asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
-				      const u8 *src);
+asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_dec_blk_2way);
 
 static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -1267,8 +1265,10 @@ static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
 	return camellia_setkey(&tfm->base, key, key_len);
 }
 
-void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
+void camellia_decrypt_cbc_2way(const void *ctx, u8 *d, const u8 *s)
 {
+	u128 *dst = (u128 *)d;
+	const u128 *src = (const u128 *)s;
 	u128 iv = *src;
 
 	camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src);
@@ -1277,9 +1277,11 @@ void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
 }
 EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way);
 
-void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
 	be128 ctrblk;
+	u128 *dst = (u128 *)d;
+	const u128 *src = (const u128 *)s;
 
 	if (dst != src)
 		*dst = *src;
@@ -1291,9 +1293,11 @@ void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 }
 EXPORT_SYMBOL_GPL(camellia_crypt_ctr);
 
-void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_crypt_ctr_2way(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
 	be128 ctrblks[2];
+	u128 *dst = (u128 *)d;
+	const u128 *src = (const u128 *)s;
 
 	if (dst != src) {
 		dst[0] = src[0];
@@ -1315,10 +1319,10 @@ static const struct common_glue_ctx camellia_enc = {
 
 	.funcs = { {
 		.num_blocks = 2,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) }
+		.fn_u = { .ecb = camellia_enc_blk_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) }
+		.fn_u = { .ecb = camellia_enc_blk }
 	} }
 };
 
@@ -1328,10 +1332,10 @@ static const struct common_glue_ctx camellia_ctr = {
 
 	.funcs = { {
 		.num_blocks = 2,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) }
+		.fn_u = { .ctr = camellia_crypt_ctr_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) }
+		.fn_u = { .ctr = camellia_crypt_ctr }
 	} }
 };
 
@@ -1341,10 +1345,10 @@ static const struct common_glue_ctx camellia_dec = {
 
 	.funcs = { {
 		.num_blocks = 2,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) }
+		.fn_u = { .ecb = camellia_dec_blk_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) }
+		.fn_u = { .ecb = camellia_dec_blk }
 	} }
 };
 
@@ -1354,10 +1358,10 @@ static const struct common_glue_ctx camellia_dec_cbc = {
 
 	.funcs = { {
 		.num_blocks = 2,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) }
+		.fn_u = { .cbc = camellia_decrypt_cbc_2way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) }
+		.fn_u = { .cbc = camellia_dec_blk }
 	} }
 };
 
@@ -1373,8 +1377,7 @@ static int ecb_decrypt(struct skcipher_request *req)
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-	return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
-					   req);
+	return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index a8a38fffb4a9..da5297475f9e 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -20,20 +20,17 @@
 #define CAST6_PARALLEL_BLOCKS 8
 
-asmlinkage void cast6_ecb_enc_8way(struct cast6_ctx *ctx, u8 *dst,
-				   const u8 *src);
-asmlinkage void cast6_ecb_dec_8way(struct cast6_ctx *ctx, u8 *dst,
-				   const u8 *src);
-
-asmlinkage void cast6_cbc_dec_8way(struct cast6_ctx *ctx, u8 *dst,
-				   const u8 *src);
-asmlinkage void cast6_ctr_8way(struct cast6_ctx *ctx, u8 *dst, const u8 *src,
+asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void cast6_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src);
+
+asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void cast6_ctr_8way(const void *ctx, u8 *dst, const u8 *src,
 			       le128 *iv);
 
-asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst,
-				   const u8 *src, le128 *iv);
-asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst,
-				   const u8 *src, le128 *iv);
+asmlinkage void cast6_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src,
+				   le128 *iv);
+asmlinkage void cast6_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src,
+				   le128 *iv);
 
 static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
 				 const u8 *key, unsigned int keylen)
@@ -41,21 +38,21 @@ static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
 	return cast6_setkey(&tfm->base, key, keylen);
 }
 
-static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void cast6_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-				  GLUE_FUNC_CAST(__cast6_encrypt));
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_encrypt);
 }
 
-static void cast6_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void cast6_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-				  GLUE_FUNC_CAST(__cast6_decrypt));
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_decrypt);
 }
 
-static void cast6_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void cast6_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
 	be128 ctrblk;
+	u128 *dst = (u128 *)d;
+	const u128 *src = (const u128 *)s;
 
 	le128_to_be128(&ctrblk, iv);
 	le128_inc(iv);
@@ -70,10 +67,10 @@ static const struct common_glue_ctx cast6_enc = {
 
 	.funcs = { {
 		.num_blocks = CAST6_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_enc_8way) }
+		.fn_u = { .ecb = cast6_ecb_enc_8way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_encrypt) }
+		.fn_u = { .ecb = __cast6_encrypt }
 	} }
 };
 
@@ -83,10 +80,10 @@ static const struct common_glue_ctx cast6_ctr = {
 
 	.funcs = { {
 		.num_blocks = CAST6_PARALLEL_BLOCKS,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_ctr_8way) }
+		.fn_u = { .ctr = cast6_ctr_8way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_crypt_ctr) }
+		.fn_u = { .ctr = cast6_crypt_ctr }
 	} }
 };
 
@@ -96,10 +93,10 @@ static const struct common_glue_ctx cast6_enc_xts = {
 
 	.funcs = { {
 		.num_blocks = CAST6_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc_8way) }
+		.fn_u = { .xts = cast6_xts_enc_8way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc) }
+		.fn_u = { .xts = cast6_xts_enc }
 	} }
 };
 
@@ -109,10 +106,10 @@ static const struct common_glue_ctx cast6_dec = {
 
 	.funcs = { {
 		.num_blocks = CAST6_PARALLEL_BLOCKS,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_dec_8way) }
+		.fn_u = { .ecb = cast6_ecb_dec_8way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_decrypt) }
+		.fn_u = { .ecb = __cast6_decrypt }
 	} }
 };
 
@@ -122,10 +119,10 @@ static const struct common_glue_ctx cast6_dec_cbc = {
 
 	.funcs = { {
 		.num_blocks = CAST6_PARALLEL_BLOCKS,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(cast6_cbc_dec_8way) }
+		.fn_u = { .cbc = cast6_cbc_dec_8way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__cast6_decrypt) }
+		.fn_u = { .cbc = __cast6_decrypt }
 	} }
 };
 
@@ -135,10 +132,10 @@ static const struct common_glue_ctx cast6_dec_xts = {
 
 	.funcs = { {
 		.num_blocks = CAST6_PARALLEL_BLOCKS,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec_8way) }
+		.fn_u = { .xts = cast6_xts_dec_8way }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec) }
+		.fn_u = { .xts = cast6_xts_dec }
 	} }
 };
 
@@ -154,8 +151,7 @@ static int ecb_decrypt(struct skcipher_request *req)
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-	return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__cast6_encrypt),
-					   req);
+	return glue_cbc_encrypt_req_128bit(__cast6_encrypt, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -199,8 +195,7 @@ static int xts_encrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&cast6_enc_xts, req,
-				   XTS_TWEAK_CAST(__cast6_encrypt),
+	return glue_xts_req_128bit(&cast6_enc_xts, req, __cast6_encrypt,
 				   &ctx->tweak_ctx, &ctx->crypt_ctx, false);
 }
 
@@ -209,8 +204,7 @@ static int xts_decrypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&cast6_dec_xts, req,
-				   XTS_TWEAK_CAST(__cast6_encrypt),
+	return glue_xts_req_128bit(&cast6_dec_xts, req, __cast6_encrypt,
 				   &ctx->tweak_ctx, &ctx->crypt_ctx, true);
 }
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index d15b99397480..d3d91a0abf88 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -134,7 +134,8 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
 				src -= num_blocks - 1;
 				dst -= num_blocks - 1;
 
-				gctx->funcs[i].fn_u.cbc(ctx, dst, src);
+				gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst,
+							(const u8 *)src);
 
 				nbytes -= func_bytes;
 				if (nbytes < bsize)
@@ -188,7 +189,9 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
 
 			/* Process multi-block batch */
 			do {
-				gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
+				gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst,
+							(const u8 *)src,
+							&ctrblk);
 				src += num_blocks;
 				dst += num_blocks;
 				nbytes -= func_bytes;
@@ -210,7 +213,8 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
 		be128_to_le128(&ctrblk, (be128 *)walk.iv);
 		memcpy(&tmp, walk.src.virt.addr, nbytes);
-		gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp,
+		gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp,
+							  (const u8 *)&tmp,
 							  &ctrblk);
 		memcpy(walk.dst.virt.addr, &tmp, nbytes);
 		le128_to_be128((be128 *)walk.iv, &ctrblk);
@@ -240,7 +244,8 @@ static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
 
 		if (nbytes >= func_bytes) {
 			do {
-				gctx->funcs[i].fn_u.xts(ctx, dst, src,
+				gctx->funcs[i].fn_u.xts(ctx, (u8 *)dst,
+							(const u8 *)src,
 							walk->iv);
 
 				src += num_blocks;
@@ -354,8 +359,8 @@ out:
 }
 EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
 
-void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
-			       common_glue_func_t fn)
+void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, const u8 *src,
+			       le128 *iv, common_glue_func_t fn)
 {
 	le128 ivblk = *iv;
 
@@ -363,13 +368,13 @@ void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
 	gf128mul_x_ble(iv, &ivblk);
 
 	/* CC <- T xor C */
-	u128_xor(dst, src, (u128 *)&ivblk);
+	u128_xor((u128 *)dst, (const u128 *)src, (u128 *)&ivblk);
 
 	/* PP <- D(Key2,CC) */
-	fn(ctx, (u8 *)dst, (u8 *)dst);
+	fn(ctx, dst, dst);
 
 	/* P <- T xor PP */
-	u128_xor(dst, dst, (u128 *)&ivblk);
+	u128_xor((u128 *)dst, (u128 *)dst, (u128 *)&ivblk);
 }
 EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);
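For reference, the single-block XTS operation that
glue_xts_crypt_128bit_one implements, written out from the comments in
the function above (decryption shown; encryption substitutes E for D,
and gf128mul_x_ble computes the next tweak):

	CC = C xor T		/* mask the input block with the tweak */
	PP = D(Key2, CC)	/* raw block decryption */
	P  = PP xor T		/* unmask to recover the plaintext */
	T' = T * x		/* advance the tweak in GF(2^128) */
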
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 13fd8d3d2da0..f973ace44ad3 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -19,18 +19,16 @@
 #define SERPENT_AVX2_PARALLEL_BLOCKS 16
 
 /* 16-way AVX2 parallel cipher functions */
-asmlinkage void serpent_ecb_enc_16way(struct serpent_ctx *ctx, u8 *dst,
-				      const u8 *src);
-asmlinkage void serpent_ecb_dec_16way(struct serpent_ctx *ctx, u8 *dst,
-				      const u8 *src);
-asmlinkage void serpent_cbc_dec_16way(void *ctx, u128 *dst, const u128 *src);
+asmlinkage void serpent_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void serpent_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void serpent_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
 
-asmlinkage void serpent_ctr_16way(void *ctx, u128 *dst, const u128 *src,
+asmlinkage void serpent_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
 				  le128 *iv);
-asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst,
-				      const u8 *src, le128 *iv);
-asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst,
-				      const u8 *src, le128 *iv);
+asmlinkage void serpent_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
+				      le128 *iv);
+asmlinkage void serpent_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
+				      le128 *iv);
 
 static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
 				   const u8 *key, unsigned int keylen)
@@ -44,13 +42,13 @@ static const struct common_glue_ctx serpent_enc = {
 
 	.funcs = { {
 		.num_blocks = 16,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_16way) }
+		.fn_u = { .ecb = serpent_ecb_enc_16way }
 	}, {
 		.num_blocks = 8,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) }
+		.fn_u = { .ecb = serpent_ecb_enc_8way_avx }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
+		.fn_u = { .ecb = __serpent_encrypt }
 	} }
 };
 
@@ -60,13 +58,13 @@ static const struct common_glue_ctx serpent_ctr = {
 
 	.funcs = { {
 		.num_blocks = 16,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_16way) }
+		.fn_u = { .ctr = serpent_ctr_16way }
 	}, {
 		.num_blocks = 8,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) }
+		.fn_u = { .ctr = serpent_ctr_8way_avx }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) }
+		.fn_u = { .ctr = __serpent_crypt_ctr }
 	} }
 };
 
@@ -76,13 +74,13 @@ static const struct common_glue_ctx serpent_enc_xts = {
 
 	.funcs = { {
 		.num_blocks = 16,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_16way) }
+		.fn_u = { .xts = serpent_xts_enc_16way }
 	}, {
 		.num_blocks = 8,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) }
+		.fn_u = { .xts = serpent_xts_enc_8way_avx }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) }
+		.fn_u = { .xts = serpent_xts_enc }
 	} }
 };
 
@@ -92,13 +90,13 @@ static const struct common_glue_ctx serpent_dec = {
 
 	.funcs = { {
 		.num_blocks = 16,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_16way) }
+		.fn_u = { .ecb = serpent_ecb_dec_16way }
 	}, {
 		.num_blocks = 8,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) }
+		.fn_u = { .ecb = serpent_ecb_dec_8way_avx }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
+		.fn_u = { .ecb = __serpent_decrypt }
 	} }
 };
 
@@ -108,13 +106,13 @@ static const struct common_glue_ctx serpent_dec_cbc = {
 
 	.funcs = { {
 		.num_blocks = 16,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_16way) }
+		.fn_u = { .cbc = serpent_cbc_dec_16way }
 	}, {
 		.num_blocks = 8,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) }
+		.fn_u = { .cbc = serpent_cbc_dec_8way_avx }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
+		.fn_u = { .cbc = __serpent_decrypt }
 	} }
 };
 
@@ -124,13 +122,13 @@ static const struct common_glue_ctx serpent_dec_xts = {
 
 	.funcs = { {
 		.num_blocks = 16,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_16way) }
+		.fn_u = { .xts = serpent_xts_dec_16way }
 	}, {
 		.num_blocks = 8,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) }
+		.fn_u = { .xts = serpent_xts_dec_8way_avx }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) }
+		.fn_u = { .xts = serpent_xts_dec }
 	} }
 };
 
@@ -146,8 +144,7 @@ static int ecb_decrypt(struct skcipher_request *req)
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-	return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
-					   req);
+	return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -166,8 +163,8 @@ static int xts_encrypt(struct skcipher_request *req)
 	struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	return glue_xts_req_128bit(&serpent_enc_xts, req,
-				   XTS_TWEAK_CAST(__serpent_encrypt),
-				   &ctx->tweak_ctx, &ctx->crypt_ctx, false);
+				   __serpent_encrypt, &ctx->tweak_ctx,
+				   &ctx->crypt_ctx, false);
 }
 
 static int xts_decrypt(struct skcipher_request *req)
@@ -176,8 +173,8 @@ static int xts_decrypt(struct skcipher_request *req)
 	struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	return glue_xts_req_128bit(&serpent_dec_xts, req,
-				   XTS_TWEAK_CAST(__serpent_encrypt),
-				   &ctx->tweak_ctx, &ctx->crypt_ctx, true);
+				   __serpent_encrypt, &ctx->tweak_ctx,
+				   &ctx->crypt_ctx, true);
 }
 
 static struct skcipher_alg serpent_algs[] = {
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 7d3dca38a5a2..7806d1cbe854 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -20,33 +20,35 @@
 #include
 
 /* 8-way parallel cipher functions */
-asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
 					 const u8 *src);
 EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx);
 
-asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst,
 					 const u8 *src);
 EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx);
 
-asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst,
 					 const u8 *src);
 EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx);
 
-asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
-				     const u8 *src, le128 *iv);
+asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src,
+				     le128 *iv);
 EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx);
 
-asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst,
 					 const u8 *src, le128 *iv);
 EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx);
 
-asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst,
 					 const u8 *src, le128 *iv);
 EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx);
 
-void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void __serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
 	be128 ctrblk;
+	u128 *dst = (u128 *)d;
+	const u128 *src = (const u128 *)s;
 
 	le128_to_be128(&ctrblk, iv);
 	le128_inc(iv);
@@ -56,17 +58,15 @@ void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 }
 EXPORT_SYMBOL_GPL(__serpent_crypt_ctr);
 
-void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-				  GLUE_FUNC_CAST(__serpent_encrypt));
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_encrypt);
 }
 EXPORT_SYMBOL_GPL(serpent_xts_enc);
 
-void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-				  GLUE_FUNC_CAST(__serpent_decrypt));
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_decrypt);
 }
 EXPORT_SYMBOL_GPL(serpent_xts_dec);
 
@@ -102,10 +102,10 @@ static const struct common_glue_ctx serpent_enc = {
 
 	.funcs = { {
 		.num_blocks = SERPENT_PARALLEL_BLOCKS,
GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) } + .fn_u = { .ecb = serpent_ecb_enc_8way_avx } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } + .fn_u = { .ecb = __serpent_encrypt } } } }; @@ -115,10 +115,10 @@ static const struct common_glue_ctx serpent_ctr = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) } + .fn_u = { .ctr = serpent_ctr_8way_avx } }, { .num_blocks = 1, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) } + .fn_u = { .ctr = __serpent_crypt_ctr } } } }; @@ -128,10 +128,10 @@ static const struct common_glue_ctx serpent_enc_xts = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) } + .fn_u = { .xts = serpent_xts_enc_8way_avx } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) } + .fn_u = { .xts = serpent_xts_enc } } } }; @@ -141,10 +141,10 @@ static const struct common_glue_ctx serpent_dec = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) } + .fn_u = { .ecb = serpent_ecb_dec_8way_avx } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } + .fn_u = { .ecb = __serpent_decrypt } } } }; @@ -154,10 +154,10 @@ static const struct common_glue_ctx serpent_dec_cbc = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) } + .fn_u = { .cbc = serpent_cbc_dec_8way_avx } }, { .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } + .fn_u = { .cbc = __serpent_decrypt } } } }; @@ -167,10 +167,10 @@ static const struct common_glue_ctx serpent_dec_xts = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) } + .fn_u = { .xts = serpent_xts_dec_8way_avx } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) } + .fn_u = { .xts = serpent_xts_dec } } } }; @@ -186,8 +186,7 @@ static int ecb_decrypt(struct skcipher_request *req) static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt), - req); + return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); } static int cbc_decrypt(struct skcipher_request *req) @@ -206,8 +205,8 @@ static int xts_encrypt(struct skcipher_request *req) struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); return glue_xts_req_128bit(&serpent_enc_xts, req, - XTS_TWEAK_CAST(__serpent_encrypt), - &ctx->tweak_ctx, &ctx->crypt_ctx, false); + __serpent_encrypt, &ctx->tweak_ctx, + &ctx->crypt_ctx, false); } static int xts_decrypt(struct skcipher_request *req) @@ -216,8 +215,8 @@ static int xts_decrypt(struct skcipher_request *req) struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); return glue_xts_req_128bit(&serpent_dec_xts, req, - XTS_TWEAK_CAST(__serpent_encrypt), - &ctx->tweak_ctx, &ctx->crypt_ctx, true); + __serpent_encrypt, &ctx->tweak_ctx, + &ctx->crypt_ctx, true); } static struct skcipher_alg serpent_algs[] = { diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c index 5fdf1931d069..4fed8d26b91a 100644 --- a/arch/x86/crypto/serpent_sse2_glue.c +++ b/arch/x86/crypto/serpent_sse2_glue.c @@ -31,9 +31,11 @@ static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen); } -static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) +static void 
serpent_decrypt_cbc_xway(const void *ctx, u8 *d, const u8 *s) { u128 ivs[SERPENT_PARALLEL_BLOCKS - 1]; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; unsigned int j; for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) @@ -45,9 +47,11 @@ static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) u128_xor(dst + (j + 1), dst + (j + 1), ivs + j); } -static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblk; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; le128_to_be128(&ctrblk, iv); le128_inc(iv); @@ -56,10 +60,12 @@ static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) u128_xor(dst, src, (u128 *)&ctrblk); } -static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src, +static void serpent_crypt_ctr_xway(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblks[SERPENT_PARALLEL_BLOCKS]; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; unsigned int i; for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) { @@ -79,10 +85,10 @@ static const struct common_glue_ctx serpent_enc = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) } + .fn_u = { .ecb = serpent_enc_blk_xway } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } + .fn_u = { .ecb = __serpent_encrypt } } } }; @@ -92,10 +98,10 @@ static const struct common_glue_ctx serpent_ctr = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) } + .fn_u = { .ctr = serpent_crypt_ctr_xway } }, { .num_blocks = 1, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) } + .fn_u = { .ctr = serpent_crypt_ctr } } } }; @@ -105,10 +111,10 @@ static const struct common_glue_ctx serpent_dec = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) } + .fn_u = { .ecb = serpent_dec_blk_xway } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } + .fn_u = { .ecb = __serpent_decrypt } } } }; @@ -118,10 +124,10 @@ static const struct common_glue_ctx serpent_dec_cbc = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) } + .fn_u = { .cbc = serpent_decrypt_cbc_xway } }, { .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } + .fn_u = { .cbc = __serpent_decrypt } } } }; @@ -137,7 +143,7 @@ static int ecb_decrypt(struct skcipher_request *req) static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt), + return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); } diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index d561c821788b..3b36e97ec7ab 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c @@ -22,20 +22,17 @@ #define TWOFISH_PARALLEL_BLOCKS 8 /* 8-way parallel cipher functions */ -asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void twofish_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void twofish_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage 
void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void twofish_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void twofish_ctr_8way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); -asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void twofish_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); +asmlinkage void twofish_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) @@ -43,22 +40,19 @@ static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, return twofish_setkey(&tfm->base, key, keylen); } -static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src) +static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) { __twofish_enc_blk_3way(ctx, dst, src, false); } -static void twofish_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void twofish_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - glue_xts_crypt_128bit_one(ctx, dst, src, iv, - GLUE_FUNC_CAST(twofish_enc_blk)); + glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_enc_blk); } -static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void twofish_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - glue_xts_crypt_128bit_one(ctx, dst, src, iv, - GLUE_FUNC_CAST(twofish_dec_blk)); + glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_dec_blk); } struct twofish_xts_ctx { @@ -93,13 +87,13 @@ static const struct common_glue_ctx twofish_enc = { .funcs = { { .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_enc_8way) } + .fn_u = { .ecb = twofish_ecb_enc_8way } }, { .num_blocks = 3, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) } + .fn_u = { .ecb = twofish_enc_blk_3way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) } + .fn_u = { .ecb = twofish_enc_blk } } } }; @@ -109,13 +103,13 @@ static const struct common_glue_ctx twofish_ctr = { .funcs = { { .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_ctr_8way) } + .fn_u = { .ctr = twofish_ctr_8way } }, { .num_blocks = 3, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) } + .fn_u = { .ctr = twofish_enc_blk_ctr_3way } }, { .num_blocks = 1, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) } + .fn_u = { .ctr = twofish_enc_blk_ctr } } } }; @@ -125,10 +119,10 @@ static const struct common_glue_ctx twofish_enc_xts = { .funcs = { { .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc_8way) } + .fn_u = { .xts = twofish_xts_enc_8way } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc) } + .fn_u = { .xts = twofish_xts_enc } } } }; @@ -138,13 +132,13 @@ static const struct common_glue_ctx twofish_dec = { .funcs = { { .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_dec_8way) } + .fn_u = { .ecb = twofish_ecb_dec_8way } }, { .num_blocks = 3, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) } + .fn_u = { .ecb = twofish_dec_blk_3way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) } + .fn_u = { .ecb = twofish_dec_blk } } } }; @@ -154,13 +148,13 @@ static 
const struct common_glue_ctx twofish_dec_cbc = { .funcs = { { .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_cbc_dec_8way) } + .fn_u = { .cbc = twofish_cbc_dec_8way } }, { .num_blocks = 3, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) } + .fn_u = { .cbc = twofish_dec_blk_cbc_3way } }, { .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) } + .fn_u = { .cbc = twofish_dec_blk } } } }; @@ -170,10 +164,10 @@ static const struct common_glue_ctx twofish_dec_xts = { .funcs = { { .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec_8way) } + .fn_u = { .xts = twofish_xts_dec_8way } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec) } + .fn_u = { .xts = twofish_xts_dec } } } }; @@ -189,8 +183,7 @@ static int ecb_decrypt(struct skcipher_request *req) static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk), - req); + return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req); } static int cbc_decrypt(struct skcipher_request *req) @@ -208,8 +201,7 @@ static int xts_encrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - return glue_xts_req_128bit(&twofish_enc_xts, req, - XTS_TWEAK_CAST(twofish_enc_blk), + return glue_xts_req_128bit(&twofish_enc_xts, req, twofish_enc_blk, &ctx->tweak_ctx, &ctx->crypt_ctx, false); } @@ -218,8 +210,7 @@ static int xts_decrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - return glue_xts_req_128bit(&twofish_dec_xts, req, - XTS_TWEAK_CAST(twofish_enc_blk), + return glue_xts_req_128bit(&twofish_dec_xts, req, twofish_enc_blk, &ctx->tweak_ctx, &ctx->crypt_ctx, true); } diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c index 1dc9e29f221e..768af6075479 100644 --- a/arch/x86/crypto/twofish_glue_3way.c +++ b/arch/x86/crypto/twofish_glue_3way.c @@ -25,21 +25,22 @@ static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, return twofish_setkey(&tfm->base, key, keylen); } -static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src) +static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) { __twofish_enc_blk_3way(ctx, dst, src, false); } -static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst, +static inline void twofish_enc_blk_xor_3way(const void *ctx, u8 *dst, const u8 *src) { __twofish_enc_blk_3way(ctx, dst, src, true); } -void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src) +void twofish_dec_blk_cbc_3way(const void *ctx, u8 *d, const u8 *s) { u128 ivs[2]; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; ivs[0] = src[0]; ivs[1] = src[1]; @@ -51,9 +52,11 @@ void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src) } EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way); -void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) +void twofish_enc_blk_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblk; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; if (dst != src) *dst = *src; @@ -66,10 +69,11 @@ void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) } EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr); -void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src, - le128 
*iv) +void twofish_enc_blk_ctr_3way(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblks[3]; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; if (dst != src) { dst[0] = src[0]; @@ -94,10 +98,10 @@ static const struct common_glue_ctx twofish_enc = { .funcs = { { .num_blocks = 3, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) } + .fn_u = { .ecb = twofish_enc_blk_3way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) } + .fn_u = { .ecb = twofish_enc_blk } } } }; @@ -107,10 +111,10 @@ static const struct common_glue_ctx twofish_ctr = { .funcs = { { .num_blocks = 3, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr_3way) } + .fn_u = { .ctr = twofish_enc_blk_ctr_3way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr) } + .fn_u = { .ctr = twofish_enc_blk_ctr } } } }; @@ -120,10 +124,10 @@ static const struct common_glue_ctx twofish_dec = { .funcs = { { .num_blocks = 3, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) } + .fn_u = { .ecb = twofish_dec_blk_3way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) } + .fn_u = { .ecb = twofish_dec_blk } } } }; @@ -133,10 +137,10 @@ static const struct common_glue_ctx twofish_dec_cbc = { .funcs = { { .num_blocks = 3, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) } + .fn_u = { .cbc = twofish_dec_blk_cbc_3way } }, { .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) } + .fn_u = { .cbc = twofish_dec_blk } } } }; @@ -152,8 +156,7 @@ static int ecb_decrypt(struct skcipher_request *req) static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk), - req); + return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req); } static int cbc_decrypt(struct skcipher_request *req) diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h index a5d86fc0593f..f1592619dd65 100644 --- a/arch/x86/include/asm/crypto/camellia.h +++ b/arch/x86/include/asm/crypto/camellia.h @@ -32,65 +32,60 @@ extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); /* regular block cipher functions */ -asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, bool xor); -asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src, + bool xor); +asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src); /* 2-way parallel cipher functions */ -asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, bool xor); -asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src, + bool xor); +asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src); /* 16-way parallel cipher functions (avx/aes-ni) */ -asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); - -asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); - -asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void 
camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); - -static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, - const u8 *src) +asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); + +asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); + +asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); +asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); + +static inline void camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src) { __camellia_enc_blk(ctx, dst, src, false); } -static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst, - const u8 *src) +static inline void camellia_enc_blk_xor(const void *ctx, u8 *dst, const u8 *src) { __camellia_enc_blk(ctx, dst, src, true); } -static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, +static inline void camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src) { __camellia_enc_blk_2way(ctx, dst, src, false); } -static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst, +static inline void camellia_enc_blk_xor_2way(const void *ctx, u8 *dst, const u8 *src) { __camellia_enc_blk_2way(ctx, dst, src, true); } /* glue helpers */ -extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src); -extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, +extern void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src); +extern void camellia_crypt_ctr(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, +extern void camellia_crypt_ctr_2way(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -extern void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv); -extern void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv); +extern void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); +extern void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); #endif /* ASM_X86_CAMELLIA_H */ diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h index 8d4a8e1226ee..777c0f63418c 100644 --- a/arch/x86/include/asm/crypto/glue_helper.h +++ b/arch/x86/include/asm/crypto/glue_helper.h @@ -11,18 +11,13 @@ #include #include -typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src); -typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src); -typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src, +typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src); +typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src); +typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src, +typedef void (*common_glue_xts_func_t)(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn)) -#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn)) -#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn)) -#define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn)) - struct common_glue_func_entry 
{ unsigned int num_blocks; /* number of blocks that @fn will process */ union { @@ -116,7 +111,8 @@ extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx, common_glue_func_t tweak_fn, void *tweak_ctx, void *crypt_ctx, bool decrypt); -extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, - le128 *iv, common_glue_func_t fn); +extern void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, + const u8 *src, le128 *iv, + common_glue_func_t fn); #endif /* _CRYPTO_GLUE_HELPER_H */ diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h index db7c9cc32234..251c2c89d7cf 100644 --- a/arch/x86/include/asm/crypto/serpent-avx.h +++ b/arch/x86/include/asm/crypto/serpent-avx.h @@ -15,26 +15,26 @@ struct serpent_xts_ctx { struct serpent_ctx crypt_ctx; }; -asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); -asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, +extern void __serpent_crypt_ctr(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv); -extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv); +extern void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv); +extern void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv); extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); diff --git a/arch/x86/include/asm/crypto/serpent-sse2.h b/arch/x86/include/asm/crypto/serpent-sse2.h index 1a345e8a7496..860ca248914b 100644 --- a/arch/x86/include/asm/crypto/serpent-sse2.h +++ b/arch/x86/include/asm/crypto/serpent-sse2.h @@ -9,25 +9,23 @@ #define SERPENT_PARALLEL_BLOCKS 4 -asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void __serpent_enc_blk_4way(const struct serpent_ctx *ctx, u8 *dst, const u8 *src, bool xor); -asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_dec_blk_4way(const struct serpent_ctx *ctx, u8 *dst, const u8 *src); -static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, - const u8 *src) +static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src) { __serpent_enc_blk_4way(ctx, dst, src, false); } -static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, - const u8 *src) +static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx, + u8 *dst, const u8 *src) { __serpent_enc_blk_4way(ctx, dst, src, true); } -static inline void 
serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, - const u8 *src) +static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src) { serpent_dec_blk_4way(ctx, dst, src); } @@ -36,25 +34,23 @@ static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, #define SERPENT_PARALLEL_BLOCKS 8 -asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void __serpent_enc_blk_8way(const struct serpent_ctx *ctx, u8 *dst, const u8 *src, bool xor); -asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_dec_blk_8way(const struct serpent_ctx *ctx, u8 *dst, const u8 *src); -static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, - const u8 *src) +static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src) { __serpent_enc_blk_8way(ctx, dst, src, false); } -static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, - const u8 *src) +static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx, + u8 *dst, const u8 *src) { __serpent_enc_blk_8way(ctx, dst, src, true); } -static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, - const u8 *src) +static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src) { serpent_dec_blk_8way(ctx, dst, src); } diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h index f618bf272b90..2c377a8042e1 100644 --- a/arch/x86/include/asm/crypto/twofish.h +++ b/arch/x86/include/asm/crypto/twofish.h @@ -7,22 +7,19 @@ #include /* regular block cipher functions from twofish_x86_64 module */ -asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void twofish_enc_blk(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void twofish_dec_blk(const void *ctx, u8 *dst, const u8 *src); /* 3-way parallel cipher functions */ -asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, bool xor); -asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void __twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src, + bool xor); +asmlinkage void twofish_dec_blk_3way(const void *ctx, u8 *dst, const u8 *src); /* helpers from twofish_x86_64-3way module */ -extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src); -extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, +extern void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src); +extern void twofish_enc_blk_ctr(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src, +extern void twofish_enc_blk_ctr_3way(const void *ctx, u8 *dst, const u8 *src, le128 *iv); #endif /* ASM_X86_TWOFISH_H */ diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c index a8248f8e2777..85328522c5ca 100644 --- a/crypto/cast6_generic.c +++ b/crypto/cast6_generic.c @@ -154,7 +154,7 @@ int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) EXPORT_SYMBOL_GPL(cast6_setkey); /*forward quad round*/ -static inline void Q(u32 *block, u8 *Kr, u32 *Km) +static inline void Q(u32 *block, const u8 *Kr, const u32 *Km) { u32 I; block[2] ^= F1(block[3], Kr[0], Km[0]); @@ -164,7 +164,7 @@ static inline void Q(u32 *block, u8 *Kr, u32 *Km) } /*reverse quad round*/ -static inline void QBAR(u32 
*block, u8 *Kr, u32 *Km) +static inline void QBAR(u32 *block, const u8 *Kr, const u32 *Km) { u32 I; block[3] ^= F1(block[0], Kr[3], Km[3]); @@ -173,13 +173,14 @@ static inline void QBAR(u32 *block, u8 *Kr, u32 *Km) block[2] ^= F1(block[3], Kr[0], Km[0]); } -void __cast6_encrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) +void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) { + const struct cast6_ctx *c = ctx; const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 block[4]; - u32 *Km; - u8 *Kr; + const u32 *Km; + const u8 *Kr; block[0] = be32_to_cpu(src[0]); block[1] = be32_to_cpu(src[1]); @@ -211,13 +212,14 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) __cast6_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf); } -void __cast6_decrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) +void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) { + const struct cast6_ctx *c = ctx; const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 block[4]; - u32 *Km; - u8 *Kr; + const u32 *Km; + const u8 *Kr; block[0] = be32_to_cpu(src[0]); block[1] = be32_to_cpu(src[1]); diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index 56fa665a4f01..492c1d0bfe06 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -449,8 +449,9 @@ int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) } EXPORT_SYMBOL_GPL(serpent_setkey); -void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src) +void __serpent_encrypt(const void *c, u8 *dst, const u8 *src) { + const struct serpent_ctx *ctx = c; const u32 *k = ctx->expkey; const __le32 *s = (const __le32 *)src; __le32 *d = (__le32 *)dst; @@ -514,8 +515,9 @@ static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) __serpent_encrypt(ctx, dst, src); } -void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src) +void __serpent_decrypt(const void *c, u8 *dst, const u8 *src) { + const struct serpent_ctx *ctx = c; const u32 *k = ctx->expkey; const __le32 *s = (const __le32 *)src; __le32 *d = (__le32 *)dst; diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h index c71f6ef47f0f..4c8d0c72f78d 100644 --- a/include/crypto/cast6.h +++ b/include/crypto/cast6.h @@ -19,7 +19,7 @@ int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, unsigned int keylen, u32 *flags); int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); -void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src); -void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src); +void __cast6_encrypt(const void *ctx, u8 *dst, const u8 *src); +void __cast6_decrypt(const void *ctx, u8 *dst, const u8 *src); #endif diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h index 7dd780c5d058..75c7eaa20853 100644 --- a/include/crypto/serpent.h +++ b/include/crypto/serpent.h @@ -22,7 +22,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, unsigned int keylen); int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); -void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src); -void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src); +void __serpent_encrypt(const void *ctx, u8 *dst, const u8 *src); +void __serpent_decrypt(const void *ctx, u8 *dst, const u8 *src); #endif diff --git a/include/crypto/xts.h b/include/crypto/xts.h index 75fd96ff976b..15ae7fdc0478 100644 --- a/include/crypto/xts.h +++ 
b/include/crypto/xts.h @@ -8,8 +8,6 @@ #define XTS_BLOCK_SIZE 16 -#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x)) - static inline int xts_check_key(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { -- cgit v1.2.3 From e8d998264bffade3cfe0536559f712ab9058d654 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 29 Nov 2019 16:40:24 +0800 Subject: crypto: pcrypt - Do not clear MAY_SLEEP flag in original request We should not be modifying the original request's MAY_SLEEP flag upon completion. It makes no sense to do so anyway. Reported-by: Eric Biggers Fixes: 5068c7a883d1 ("crypto: pcrypt - Add pcrypt crypto...") Signed-off-by: Herbert Xu Tested-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 1 - 1 file changed, 1 deletion(-) (limited to 'crypto') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 3e026e7a7e75..a4f3b3f342c8 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -71,7 +71,6 @@ static void pcrypt_aead_done(struct crypto_async_request *areq, int err) struct padata_priv *padata = pcrypt_request_padata(preq); padata->info = err; - req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; padata_do_serial(padata); } -- cgit v1.2.3 From 0a940d4e27658a545884351c46a70b132272a38d Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Nov 2019 10:16:48 -0800 Subject: crypto: api - remove another reference to blkcipher Update a comment to refer to crypto_alloc_skcipher() rather than crypto_alloc_blkcipher() (the latter having been removed). Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/api.c b/crypto/api.c index 55bca28df92d..4d3d13872fac 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -516,7 +516,7 @@ EXPORT_SYMBOL_GPL(crypto_find_alg); * * The returned transform is of a non-determinate type. Most people * should use one of the more specific allocation functions such as - * crypto_alloc_blkcipher. + * crypto_alloc_skcipher(). * * In case of error the return value is an error pointer. */ -- cgit v1.2.3 From 140734d3711335836cc1b9706d73953750fa4c8d Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Nov 2019 10:23:03 -0800 Subject: crypto: skcipher - remove crypto_skcipher::ivsize Due to the removal of the blkcipher and ablkcipher algorithm types, crypto_skcipher::ivsize is now redundant since it always equals crypto_skcipher_alg(tfm)->ivsize. Remove it and update crypto_skcipher_ivsize() accordingly. 
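For illustration (not part of the patch; the helper below is hypothetical), callers that already go through the accessor are unaffected by dropping the cached field:

	#include <crypto/skcipher.h>

	/* Hypothetical caller: after this patch, crypto_skcipher_ivsize()
	 * reads crypto_skcipher_alg(tfm)->ivsize instead of a per-tfm copy,
	 * so call sites like this compile and behave exactly as before. */
	static unsigned int example_iv_bytes(struct crypto_skcipher *tfm)
	{
		return crypto_skcipher_ivsize(tfm);
	}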
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/skcipher.c | 1 - include/crypto/skcipher.h | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'crypto') diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 13da43c84b64..7d2e722e82af 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -686,7 +686,6 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) skcipher->setkey = skcipher_setkey; skcipher->encrypt = alg->encrypt; skcipher->decrypt = alg->decrypt; - skcipher->ivsize = alg->ivsize; skcipher->keysize = alg->max_keysize; skcipher_set_needkey(skcipher); diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index b4655d91661f..bf656a97cb65 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -40,7 +40,6 @@ struct crypto_skcipher { int (*encrypt)(struct skcipher_request *req); int (*decrypt)(struct skcipher_request *req); - unsigned int ivsize; unsigned int reqsize; unsigned int keysize; @@ -255,7 +254,7 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) */ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) { - return tfm->ivsize; + return crypto_skcipher_alg(tfm)->ivsize; } static inline unsigned int crypto_sync_skcipher_ivsize( -- cgit v1.2.3 From 9ac0d136938ad8b8dd309f833abe5304dd2f0b08 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Nov 2019 10:23:04 -0800 Subject: crypto: skcipher - remove crypto_skcipher::keysize Due to the removal of the blkcipher and ablkcipher algorithm types, crypto_skcipher::keysize is now redundant since it always equals crypto_skcipher_alg(tfm)->max_keysize. Remove it and update crypto_skcipher_default_keysize() accordingly. Also rename crypto_skcipher_default_keysize() to crypto_skcipher_max_keysize() to clarify that it specifically returns the maximum key size, not some unspecified "default". 
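A short sketch of the renamed accessor in use (the helper is hypothetical; it mirrors the eCryptfs hunks in this patch, which fall back to the maximum key size when none was specified):

	#include <crypto/skcipher.h>

	/* Hypothetical helper: use the algorithm's maximum key size as the
	 * default, matching the eCryptfs call sites updated below. */
	static unsigned int example_default_key_bytes(struct crypto_skcipher *tfm,
						      unsigned int requested)
	{
		return requested ? requested : crypto_skcipher_max_keysize(tfm);
	}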
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/skcipher.c | 3 +-- crypto/testmgr.c | 10 ++++++---- fs/ecryptfs/crypto.c | 2 +- fs/ecryptfs/keystore.c | 4 ++-- include/crypto/skcipher.h | 5 ++--- 5 files changed, 12 insertions(+), 12 deletions(-) (limited to 'crypto') diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 7d2e722e82af..6cfafd80c7e6 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -585,7 +585,7 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) static void skcipher_set_needkey(struct crypto_skcipher *tfm) { - if (tfm->keysize) + if (crypto_skcipher_max_keysize(tfm) != 0) crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY); } @@ -686,7 +686,6 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) skcipher->setkey = skcipher_setkey; skcipher->encrypt = alg->encrypt; skcipher->decrypt = alg->decrypt; - skcipher->keysize = alg->max_keysize; skcipher_set_needkey(skcipher); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 82513b6b0abd..85d720a57bb0 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -2647,7 +2647,7 @@ static void generate_random_cipher_testvec(struct skcipher_request *req, char *name, size_t max_namelen) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const unsigned int maxkeysize = tfm->keysize; + const unsigned int maxkeysize = crypto_skcipher_max_keysize(tfm); const unsigned int ivsize = crypto_skcipher_ivsize(tfm); struct scatterlist src, dst; u8 iv[MAX_IVLEN]; @@ -2693,6 +2693,7 @@ static int test_skcipher_vs_generic_impl(const char *driver, struct cipher_test_sglists *tsgls) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const unsigned int maxkeysize = crypto_skcipher_max_keysize(tfm); const unsigned int ivsize = crypto_skcipher_ivsize(tfm); const unsigned int blocksize = crypto_skcipher_blocksize(tfm); const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN; @@ -2751,9 +2752,10 @@ static int test_skcipher_vs_generic_impl(const char *driver, /* Check the algorithm properties for consistency. */ - if (tfm->keysize != generic_tfm->keysize) { + if (maxkeysize != crypto_skcipher_max_keysize(generic_tfm)) { pr_err("alg: skcipher: max keysize for %s (%u) doesn't match generic impl (%u)\n", - driver, tfm->keysize, generic_tfm->keysize); + driver, maxkeysize, + crypto_skcipher_max_keysize(generic_tfm)); err = -EINVAL; goto out; } @@ -2778,7 +2780,7 @@ static int test_skcipher_vs_generic_impl(const char *driver, * the other implementation against them. 
*/ - vec.key = kmalloc(tfm->keysize, GFP_KERNEL); + vec.key = kmalloc(maxkeysize, GFP_KERNEL); vec.iv = kmalloc(ivsize, GFP_KERNEL); vec.ptext = kmalloc(maxdatasize, GFP_KERNEL); vec.ctext = kmalloc(maxdatasize, GFP_KERNEL); diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index f91db24bbf3b..db1ef144c63a 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -1586,7 +1586,7 @@ ecryptfs_process_key_cipher(struct crypto_skcipher **key_tfm, } crypto_skcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); if (*key_size == 0) - *key_size = crypto_skcipher_default_keysize(*key_tfm); + *key_size = crypto_skcipher_max_keysize(*key_tfm); get_random_bytes(dummy_key, *key_size); rc = crypto_skcipher_setkey(*key_tfm, dummy_key, *key_size); if (rc) { diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 216fbe6a4837..7d326aa0308e 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -2204,9 +2204,9 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes, if (mount_crypt_stat->global_default_cipher_key_size == 0) { printk(KERN_WARNING "No key size specified at mount; " "defaulting to [%d]\n", - crypto_skcipher_default_keysize(tfm)); + crypto_skcipher_max_keysize(tfm)); mount_crypt_stat->global_default_cipher_key_size = - crypto_skcipher_default_keysize(tfm); + crypto_skcipher_max_keysize(tfm); } if (crypt_stat->key_size == 0) crypt_stat->key_size = diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index bf656a97cb65..d8c28c8186a4 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -41,7 +41,6 @@ struct crypto_skcipher { int (*decrypt)(struct skcipher_request *req); unsigned int reqsize; - unsigned int keysize; struct crypto_tfm base; }; @@ -377,10 +376,10 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm, return crypto_skcipher_setkey(&tfm->base, key, keylen); } -static inline unsigned int crypto_skcipher_default_keysize( +static inline unsigned int crypto_skcipher_max_keysize( struct crypto_skcipher *tfm) { - return tfm->keysize; + return crypto_skcipher_alg(tfm)->max_keysize; } /** -- cgit v1.2.3 From 15252d942739813c8d0eac4c1ee6d4c4eb6f101e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Nov 2019 10:23:05 -0800 Subject: crypto: skcipher - remove crypto_skcipher::setkey Due to the removal of the blkcipher and ablkcipher algorithm types, crypto_skcipher::setkey now always points to skcipher_setkey(). Simplify by removing this function pointer and instead just making skcipher_setkey() be crypto_skcipher_setkey() directly. 
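The pattern applied here (and in the encrypt/decrypt patches that follow) is plain devirtualization; a self-contained sketch with illustrative names, not kernel API:

	typedef unsigned char u8;

	struct alg_desc {
		int (*setkey)(void *tfm, const u8 *key, unsigned int keylen);
	};

	struct tfm {
		const struct alg_desc *alg;	/* shared, immutable descriptor */
		/* the per-tfm setkey pointer is gone */
	};

	/* One out-of-line entry point replaces the cached per-object pointer
	 * by fetching the operation from the shared algorithm descriptor. */
	static int tfm_setkey(struct tfm *t, const u8 *key, unsigned int keylen)
	{
		return t->alg->setkey(t, key, keylen);
	}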
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/skcipher.c | 4 ++-- include/crypto/skcipher.h | 9 ++------- 2 files changed, 4 insertions(+), 9 deletions(-) (limited to 'crypto') diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 6cfafd80c7e6..4197b5ed57c4 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -610,7 +610,7 @@ static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm, return ret; } -static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, +int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct skcipher_alg *cipher = crypto_skcipher_alg(tfm); @@ -635,6 +635,7 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; } +EXPORT_SYMBOL_GPL(crypto_skcipher_setkey); int crypto_skcipher_encrypt(struct skcipher_request *req) { @@ -683,7 +684,6 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); - skcipher->setkey = skcipher_setkey; skcipher->encrypt = alg->encrypt; skcipher->decrypt = alg->decrypt; diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index d8c28c8186a4..ea94cc422b94 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -35,8 +35,6 @@ struct skcipher_request { }; struct crypto_skcipher { - int (*setkey)(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen); int (*encrypt)(struct skcipher_request *req); int (*decrypt)(struct skcipher_request *req); @@ -364,11 +362,8 @@ static inline void crypto_sync_skcipher_clear_flags( * * Return: 0 if the setting of the key was successful; < 0 if an error occurred */ -static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm, - const u8 *key, unsigned int keylen) -{ - return tfm->setkey(tfm, key, keylen); -} +int crypto_skcipher_setkey(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keylen); static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm, const u8 *key, unsigned int keylen) -- cgit v1.2.3 From 848755e315b61b0f939f6021e119373ff4a407ab Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Nov 2019 10:23:06 -0800 Subject: crypto: skcipher - remove crypto_skcipher::encrypt Due to the removal of the blkcipher and ablkcipher algorithm types, crypto_skcipher::encrypt is now redundant since it always equals crypto_skcipher_alg(tfm)->encrypt. Remove it and update crypto_skcipher_encrypt() accordingly. 
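Simplified control flow after the change (a sketch condensed from the hunk below; the statistics bookkeeping of the real function is omitted):

	#include <linux/errno.h>
	#include <crypto/skcipher.h>

	static int encrypt_flow_sketch(struct skcipher_request *req)
	{
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

		/* a keyed algorithm with no key set still fails fast ... */
		if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
			return -ENOKEY;
		/* ... otherwise dispatch through the algorithm descriptor */
		return crypto_skcipher_alg(tfm)->encrypt(req);
	}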
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/skcipher.c | 3 +-- include/crypto/skcipher.h | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'crypto') diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 4197b5ed57c4..926295ce1b07 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -648,7 +648,7 @@ int crypto_skcipher_encrypt(struct skcipher_request *req) if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; else - ret = tfm->encrypt(req); + ret = crypto_skcipher_alg(tfm)->encrypt(req); crypto_stats_skcipher_encrypt(cryptlen, ret, alg); return ret; } @@ -684,7 +684,6 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); - skcipher->encrypt = alg->encrypt; skcipher->decrypt = alg->decrypt; skcipher_set_needkey(skcipher); diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index ea94cc422b94..694215a59719 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -35,7 +35,6 @@ struct skcipher_request { }; struct crypto_skcipher { - int (*encrypt)(struct skcipher_request *req); int (*decrypt)(struct skcipher_request *req); unsigned int reqsize; -- cgit v1.2.3 From 7e1c10991822de1c7a2d64647da56d96f430606c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Nov 2019 10:23:07 -0800 Subject: crypto: skcipher - remove crypto_skcipher::decrypt Due to the removal of the blkcipher and ablkcipher algorithm types, crypto_skcipher::decrypt is now redundant since it always equals crypto_skcipher_alg(tfm)->decrypt. Remove it and update crypto_skcipher_decrypt() accordingly. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/skcipher.c | 4 +--- include/crypto/skcipher.h | 2 -- 2 files changed, 1 insertion(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 926295ce1b07..e4e4a445dc66 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -665,7 +665,7 @@ int crypto_skcipher_decrypt(struct skcipher_request *req) if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; else - ret = tfm->decrypt(req); + ret = crypto_skcipher_alg(tfm)->decrypt(req); crypto_stats_skcipher_decrypt(cryptlen, ret, alg); return ret; } @@ -684,8 +684,6 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); - skcipher->decrypt = alg->decrypt; - skcipher_set_needkey(skcipher); if (alg->exit) diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 694215a59719..8ebf4167632b 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -35,8 +35,6 @@ struct skcipher_request { }; struct crypto_skcipher { - int (*decrypt)(struct skcipher_request *req); - unsigned int reqsize; struct crypto_tfm base; -- cgit v1.2.3 From 89873b4411348325c5e513ea8086f0193eda5163 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Nov 2019 10:23:08 -0800 Subject: crypto: skcipher - remove crypto_skcipher_extsize() Due to the removal of the blkcipher and ablkcipher algorithm types, crypto_skcipher_extsize() now simply calls crypto_alg_extsize(). So remove it and just use crypto_alg_extsize(). 
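In miniature, the change is just dropping a pure forwarding wrapper from a function-pointer table; a self-contained sketch with illustrative names:

	struct ops_sketch {
		unsigned int (*extsize)(int arg);
	};

	static unsigned int real_extsize(int arg)
	{
		return (unsigned int)arg;
	}

	/* A wrapper like this adds nothing... */
	static unsigned int wrapper_extsize(int arg)
	{
		return real_extsize(arg);
	}

	/* ...so the table slot can name the target directly: */
	static const struct ops_sketch ops = { .extsize = real_extsize };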
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/skcipher.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/skcipher.c b/crypto/skcipher.c index e4e4a445dc66..39a718d99220 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -578,11 +578,6 @@ int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, } EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt); -static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) -{ - return crypto_alg_extsize(alg); -} - static void skcipher_set_needkey(struct crypto_skcipher *tfm) { if (crypto_skcipher_max_keysize(tfm) != 0) @@ -749,7 +744,7 @@ static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) #endif static const struct crypto_type crypto_skcipher_type = { - .extsize = crypto_skcipher_extsize, + .extsize = crypto_alg_extsize, .init_tfm = crypto_skcipher_init_tfm, .free = crypto_skcipher_free_instance, #ifdef CONFIG_PROC_FS -- cgit v1.2.3 From c28817895464797a8299b24e35ead1085b3e40fb Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Nov 2019 11:35:22 -0800 Subject: crypto: shash - allow essiv and hmac to use OPTIONAL_KEY algorithms The essiv and hmac templates refuse to use any hash algorithm that has a ->setkey() function, which includes not just algorithms that always need a key, but also algorithms that optionally take a key. Previously the only optionally-keyed hash algorithms in the crypto API were non-cryptographic algorithms like crc32, so this didn't really matter. But that changed when BLAKE2 support was added. BLAKE2 should work with essiv and hmac, just like any other cryptographic hash. Fix this by allowing the use of both algorithms that lack a ->setkey() function and algorithms that have the OPTIONAL_KEY flag set.
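The distinction the fix relies on, in predicate form (crypto_shash_alg_needs_key() is added by the hunks below; the wrapper here is hypothetical):

	#include <linux/types.h>
	#include <crypto/internal/hash.h>

	/* Hypothetical template check: an algorithm is usable without a key
	 * if it either has no ->setkey() at all, or has one but is flagged
	 * CRYPTO_ALG_OPTIONAL_KEY (e.g. BLAKE2 used unkeyed). */
	static bool hash_usable_without_key(struct shash_alg *alg)
	{
		return !crypto_shash_alg_needs_key(alg);
	}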
Signed-off-by: Eric Biggers Acked-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/essiv.c | 2 +- crypto/hmac.c | 4 ++-- crypto/shash.c | 3 +-- include/crypto/internal/hash.h | 6 ++++++ 4 files changed, 10 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/essiv.c b/crypto/essiv.c index 808f2b362106..e4b32c2ea7ec 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -442,7 +442,7 @@ static bool essiv_supported_algorithms(const char *essiv_cipher_name, if (ivsize != alg->cra_blocksize) goto out; - if (crypto_shash_alg_has_setkey(hash_alg)) + if (crypto_shash_alg_needs_key(hash_alg)) goto out; ret = true; diff --git a/crypto/hmac.c b/crypto/hmac.c index 8b2a212eb0ad..377f07733e2f 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -185,9 +185,9 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) return PTR_ERR(salg); alg = &salg->base; - /* The underlying hash algorithm must be unkeyed */ + /* The underlying hash algorithm must not require a key */ err = -EINVAL; - if (crypto_shash_alg_has_setkey(salg)) + if (crypto_shash_alg_needs_key(salg)) goto out_put_alg; ds = salg->digestsize; diff --git a/crypto/shash.c b/crypto/shash.c index e83c5124f6eb..7989258a46b4 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -50,8 +50,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg) { - if (crypto_shash_alg_has_setkey(alg) && - !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + if (crypto_shash_alg_needs_key(alg)) crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); } diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index bfc9db7b100d..f68dab38f160 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -85,6 +85,12 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) return alg->setkey != shash_no_setkey; } +static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg) +{ + return crypto_shash_alg_has_setkey(alg) && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY); +} + bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, -- cgit v1.2.3 From eb455dbd02cb1074b37872ffca30a81cb2a18eaa Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 1 Dec 2019 13:53:26 -0800 Subject: crypto: testmgr - don't try to decrypt uninitialized buffers Currently if the comparison fuzz tests encounter an encryption error when generating an skcipher or AEAD test vector, they will still test the decryption side (passing it the uninitialized ciphertext buffer) and expect it to fail with the same error. This is sort of broken because it's not well-defined usage of the API to pass an uninitialized buffer, and furthermore in the AEAD case it's acceptable for the decryption error to be EBADMSG (meaning "inauthentic input") even if the encryption error was something else like EINVAL. Fix this for skcipher by explicitly initializing the ciphertext buffer on error, and for AEAD by skipping the decryption test on error. 
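As a standalone model of the skcipher half of the fix (names hypothetical; the real change lives in generate_random_cipher_testvec(), shown below):

	#include <string.h>

	/* If encryption failed, the ciphertext buffer was never written;
	 * zero it so the follow-up (expected-to-fail) decryption test reads
	 * defined bytes rather than uninitialized memory. */
	static void sanitize_ciphertext(unsigned char *ctext, size_t len,
					int crypt_error)
	{
		if (crypt_error != 0)
			memset(ctext, 0, len);
	}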
Reported-by: Pascal Van Leeuwen Fixes: d435e10e67be ("crypto: testmgr - fuzz skciphers against their generic implementation") Fixes: 40153b10d91c ("crypto: testmgr - fuzz AEADs against their generic implementation") Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/testmgr.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 85d720a57bb0..a8940415512f 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -2102,6 +2102,7 @@ static void generate_random_aead_testvec(struct aead_request *req, * If the key or authentication tag size couldn't be set, no need to * continue to encrypt. */ + vec->crypt_error = 0; if (vec->setkey_error || vec->setauthsize_error) goto done; @@ -2245,10 +2246,12 @@ static int test_aead_vs_generic_impl(const char *driver, req, tsgls); if (err) goto out; - err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, cfg, - req, tsgls); - if (err) - goto out; + if (vec.crypt_error == 0) { + err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, + cfg, req, tsgls); + if (err) + goto out; + } cond_resched(); } err = 0; @@ -2678,6 +2681,15 @@ static void generate_random_cipher_testvec(struct skcipher_request *req, skcipher_request_set_callback(req, 0, crypto_req_done, &wait); skcipher_request_set_crypt(req, &src, &dst, vec->len, iv); vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); + if (vec->crypt_error != 0) { + /* + * The only acceptable error here is for an invalid length, so + * skcipher decryption should fail with the same error too. + * We'll test for this. But to keep the API usage well-defined, + * explicitly initialize the ciphertext buffer too. + */ + memset((u8 *)vec->ctext, 0, vec->len); + } done: snprintf(name, max_namelen, "\"random: len=%u klen=%u\"", vec->len, vec->klen); -- cgit v1.2.3 From fd60f727876467a89ac42c873e20b38d9a408062 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 1 Dec 2019 13:53:27 -0800 Subject: crypto: testmgr - check skcipher min_keysize When checking two implementations of the same skcipher algorithm for consistency, require that the minimum key size be the same, not just the maximum key size. There's no good reason to allow different minimum key sizes. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/testmgr.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index a8940415512f..3d7c1c1529cf 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -2764,6 +2764,15 @@ static int test_skcipher_vs_generic_impl(const char *driver, /* Check the algorithm properties for consistency. 
*/ + if (crypto_skcipher_min_keysize(tfm) != + crypto_skcipher_min_keysize(generic_tfm)) { + pr_err("alg: skcipher: min keysize for %s (%u) doesn't match generic impl (%u)\n", + driver, crypto_skcipher_min_keysize(tfm), + crypto_skcipher_min_keysize(generic_tfm)); + err = -EINVAL; + goto out; + } + if (maxkeysize != crypto_skcipher_max_keysize(generic_tfm)) { pr_err("alg: skcipher: max keysize for %s (%u) doesn't match generic impl (%u)\n", driver, maxkeysize, -- cgit v1.2.3 From fd8c37c72d60c7c8f5c4d0702a0b30499cf9d422 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 1 Dec 2019 13:53:28 -0800 Subject: crypto: testmgr - test setting misaligned keys The alignment bug in ghash_setkey() fixed by commit 5c6bc4dfa515 ("crypto: ghash - fix unaligned memory access in ghash_setkey()") wasn't reliably detected by the crypto self-tests on ARM because the tests only set the keys directly from the test vectors. To improve test coverage, update the tests to sometimes pass misaligned keys to setkey(). This applies to shash, ahash, skcipher, and aead. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/testmgr.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 69 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 3d7c1c1529cf..d1ffa8f73948 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -259,6 +259,9 @@ struct test_sg_division { * where 0 is aligned to a 2*(MAX_ALGAPI_ALIGNMASK+1) byte boundary * @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to * the @iv_offset + * @key_offset: misalignment of the key, where 0 is default alignment + * @key_offset_relative_to_alignmask: if true, add the algorithm's alignmask to + * the @key_offset * @finalization_type: what finalization function to use for hashes * @nosimd: execute with SIMD disabled? Requires !CRYPTO_TFM_REQ_MAY_SLEEP. 
*/ @@ -269,7 +272,9 @@ struct testvec_config { struct test_sg_division src_divs[XBUFSIZE]; struct test_sg_division dst_divs[XBUFSIZE]; unsigned int iv_offset; + unsigned int key_offset; bool iv_offset_relative_to_alignmask; + bool key_offset_relative_to_alignmask; enum finalization_type finalization_type; bool nosimd; }; @@ -297,6 +302,7 @@ static const struct testvec_config default_cipher_testvec_configs[] = { .name = "unaligned buffer, offset=1", .src_divs = { { .proportion_of_total = 10000, .offset = 1 } }, .iv_offset = 1, + .key_offset = 1, }, { .name = "buffer aligned only to alignmask", .src_divs = { @@ -308,6 +314,8 @@ static const struct testvec_config default_cipher_testvec_configs[] = { }, .iv_offset = 1, .iv_offset_relative_to_alignmask = true, + .key_offset = 1, + .key_offset_relative_to_alignmask = true, }, { .name = "two even aligned splits", .src_divs = { @@ -323,6 +331,7 @@ static const struct testvec_config default_cipher_testvec_configs[] = { { .proportion_of_total = 4800, .offset = 18 }, }, .iv_offset = 3, + .key_offset = 3, }, { .name = "misaligned splits crossing pages, inplace", .inplace = true, @@ -355,6 +364,7 @@ static const struct testvec_config default_hash_testvec_configs[] = { .name = "init+update+final misaligned buffer", .src_divs = { { .proportion_of_total = 10000, .offset = 1 } }, .finalization_type = FINALIZATION_TYPE_FINAL, + .key_offset = 1, }, { .name = "digest buffer aligned only to alignmask", .src_divs = { @@ -365,6 +375,8 @@ static const struct testvec_config default_hash_testvec_configs[] = { }, }, .finalization_type = FINALIZATION_TYPE_DIGEST, + .key_offset = 1, + .key_offset_relative_to_alignmask = true, }, { .name = "init+update+update+final two even splits", .src_divs = { @@ -740,6 +752,49 @@ static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls, alignmask, dst_total_len, NULL, NULL); } +/* + * Support for testing passing a misaligned key to setkey(): + * + * If cfg->key_offset is set, copy the key into a new buffer at that offset, + * optionally adding alignmask. Else, just use the key directly. 
+ */ +static int prepare_keybuf(const u8 *key, unsigned int ksize, + const struct testvec_config *cfg, + unsigned int alignmask, + const u8 **keybuf_ret, const u8 **keyptr_ret) +{ + unsigned int key_offset = cfg->key_offset; + u8 *keybuf = NULL, *keyptr = (u8 *)key; + + if (key_offset != 0) { + if (cfg->key_offset_relative_to_alignmask) + key_offset += alignmask; + keybuf = kmalloc(key_offset + ksize, GFP_KERNEL); + if (!keybuf) + return -ENOMEM; + keyptr = keybuf + key_offset; + memcpy(keyptr, key, ksize); + } + *keybuf_ret = keybuf; + *keyptr_ret = keyptr; + return 0; +} + +/* Like setkey_f(tfm, key, ksize), but sometimes misalign the key */ +#define do_setkey(setkey_f, tfm, key, ksize, cfg, alignmask) \ +({ \ + const u8 *keybuf, *keyptr; \ + int err; \ + \ + err = prepare_keybuf((key), (ksize), (cfg), (alignmask), \ + &keybuf, &keyptr); \ + if (err == 0) { \ + err = setkey_f((tfm), keyptr, (ksize)); \ + kfree(keybuf); \ + } \ + err; \ +}) + #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS /* Generate a random length in range [0, max_len], but prefer smaller values */ @@ -966,6 +1021,11 @@ static void generate_random_testvec_config(struct testvec_config *cfg, p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset); } + if (prandom_u32() % 2 == 0) { + cfg->key_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK); + p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset); + } + WARN_ON_ONCE(!valid_testvec_config(cfg)); } @@ -1103,7 +1163,8 @@ static int test_shash_vec_cfg(const char *driver, /* Set the key, if specified */ if (vec->ksize) { - err = crypto_shash_setkey(tfm, vec->key, vec->ksize); + err = do_setkey(crypto_shash_setkey, tfm, vec->key, vec->ksize, + cfg, alignmask); if (err) { if (err == vec->setkey_error) return 0; @@ -1290,7 +1351,8 @@ static int test_ahash_vec_cfg(const char *driver, /* Set the key, if specified */ if (vec->ksize) { - err = crypto_ahash_setkey(tfm, vec->key, vec->ksize); + err = do_setkey(crypto_ahash_setkey, tfm, vec->key, vec->ksize, + cfg, alignmask); if (err) { if (err == vec->setkey_error) return 0; @@ -1861,7 +1923,9 @@ static int test_aead_vec_cfg(const char *driver, int enc, crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); else crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); - err = crypto_aead_setkey(tfm, vec->key, vec->klen); + + err = do_setkey(crypto_aead_setkey, tfm, vec->key, vec->klen, + cfg, alignmask); if (err && err != vec->setkey_error) { pr_err("alg: aead: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n", driver, vec_name, vec->setkey_error, err, @@ -2460,7 +2524,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc, else crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); - err = crypto_skcipher_setkey(tfm, vec->key, vec->klen); + err = do_setkey(crypto_skcipher_setkey, tfm, vec->key, vec->klen, + cfg, alignmask); if (err) { if (err == vec->setkey_error) return 0; -- cgit v1.2.3 From 2ea915054cf2dc1ccc145d7c75d3dad8dde15be3 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 1 Dec 2019 13:53:29 -0800 Subject: crypto: testmgr - create struct aead_extra_tests_ctx In preparation for adding inauthentic input fuzz tests, which don't require that a generic implementation of the algorithm be available, refactor test_aead_vs_generic_impl() so that instead there's a higher-level function test_aead_extra() which initializes a struct aead_extra_tests_ctx and then calls test_aead_vs_generic_impl() with a pointer to that struct. 
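The refactoring pattern is easy to see in isolation. Below is a minimal, compilable userspace sketch of it; every name in it (extra_tests_ctx, big_vec, and so on) is an illustrative stand-in rather than the actual testmgr code. The large objects that used to be separate stack locals move into a single heap-allocated context that the worker function receives by pointer:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the big per-run objects (test vector, config, names). */
struct big_vec { unsigned char key[64]; unsigned char data[4096]; };
struct big_cfg { char name[128]; };

/* One heap-allocated context replaces several large stack locals. */
struct extra_tests_ctx {
	struct big_vec vec;
	struct big_cfg cfg;
	char vec_name[64];
};

static int run_one_iteration(struct extra_tests_ctx *ctx)
{
	/* All scratch state lives in *ctx; this stack frame stays small. */
	snprintf(ctx->vec_name, sizeof(ctx->vec_name),
		 "random: len=%zu", sizeof(ctx->vec.data));
	return 0;
}

int main(void)
{
	/* The kernel code uses kzalloc(..., GFP_KERNEL); calloc() here. */
	struct extra_tests_ctx *ctx = calloc(1, sizeof(*ctx));
	int err;

	if (!ctx)
		return 1;
	err = run_one_iteration(ctx);
	if (!err)
		puts(ctx->vec_name);
	free(ctx);
	return err;
}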
As a bonus, this reduces stack usage. Also switch from crypto_aead_alg(tfm)->maxauthsize to crypto_aead_maxauthsize(), now that the latter is available in <crypto/aead.h>. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/testmgr.c | 170 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 99 insertions(+), 71 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index d1ffa8f73948..4fe210845e78 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -2111,6 +2111,22 @@ static int test_aead_vec(const char *driver, int enc, } #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS + +struct aead_extra_tests_ctx { + struct aead_request *req; + struct crypto_aead *tfm; + const char *driver; + const struct alg_test_desc *test_desc; + struct cipher_test_sglists *tsgls; + unsigned int maxdatasize; + unsigned int maxkeysize; + + struct aead_testvec vec; + char vec_name[64]; + char cfgname[TESTVEC_CONFIG_NAMELEN]; + struct testvec_config cfg; +}; + /* * Generate an AEAD test vector from the given implementation. * Assumes the buffers in 'vec' were already allocated. @@ -2123,7 +2139,7 @@ static void generate_random_aead_testvec(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); const unsigned int ivsize = crypto_aead_ivsize(tfm); - unsigned int maxauthsize = crypto_aead_alg(tfm)->maxauthsize; + const unsigned int maxauthsize = crypto_aead_maxauthsize(tfm); unsigned int authsize; unsigned int total_len; int i; @@ -2192,35 +2208,21 @@ done: } /* - * Test the AEAD algorithm represented by @req against the corresponding generic - * implementation, if one is available. + * Test the AEAD algorithm against the corresponding generic implementation, if + * one is available. */ -static int test_aead_vs_generic_impl(const char *driver, - const struct alg_test_desc *test_desc, - struct aead_request *req, - struct cipher_test_sglists *tsgls) +static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx) { - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - const unsigned int ivsize = crypto_aead_ivsize(tfm); - const unsigned int maxauthsize = crypto_aead_alg(tfm)->maxauthsize; - const unsigned int blocksize = crypto_aead_blocksize(tfm); - const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN; + struct crypto_aead *tfm = ctx->tfm; const char *algname = crypto_aead_alg(tfm)->base.cra_name; - const char *generic_driver = test_desc->generic_driver; + const char *driver = ctx->driver; + const char *generic_driver = ctx->test_desc->generic_driver; char _generic_driver[CRYPTO_MAX_ALG_NAME]; struct crypto_aead *generic_tfm = NULL; struct aead_request *generic_req = NULL; - unsigned int maxkeysize; unsigned int i; - struct aead_testvec vec = { 0 }; - char vec_name[64]; - struct testvec_config *cfg; - char cfgname[TESTVEC_CONFIG_NAMELEN]; int err; - if (noextratests) - return 0; - if (!generic_driver) { /* Use default naming convention? */ err = build_generic_driver_name(algname, _generic_driver); if (err) @@ -2244,12 +2246,6 @@ static int test_aead_vs_generic_impl(const char *driver, return err; } - cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); - if (!cfg) { - err = -ENOMEM; - goto out; - } - generic_req = aead_request_alloc(generic_tfm, GFP_KERNEL); if (!generic_req) { err = -ENOMEM; goto out; } /* Check the algorithm properties for consistency.
*/ - if (maxauthsize != crypto_aead_alg(generic_tfm)->maxauthsize) { + if (crypto_aead_maxauthsize(tfm) != + crypto_aead_maxauthsize(generic_tfm)) { pr_err("alg: aead: maxauthsize for %s (%u) doesn't match generic impl (%u)\n", - driver, maxauthsize, - crypto_aead_alg(generic_tfm)->maxauthsize); + driver, crypto_aead_maxauthsize(tfm), + crypto_aead_maxauthsize(generic_tfm)); err = -EINVAL; goto out; } - if (ivsize != crypto_aead_ivsize(generic_tfm)) { + if (crypto_aead_ivsize(tfm) != crypto_aead_ivsize(generic_tfm)) { pr_err("alg: aead: ivsize for %s (%u) doesn't match generic impl (%u)\n", - driver, ivsize, crypto_aead_ivsize(generic_tfm)); + driver, crypto_aead_ivsize(tfm), + crypto_aead_ivsize(generic_tfm)); err = -EINVAL; goto out; } - if (blocksize != crypto_aead_blocksize(generic_tfm)) { + if (crypto_aead_blocksize(tfm) != crypto_aead_blocksize(generic_tfm)) { pr_err("alg: aead: blocksize for %s (%u) doesn't match generic impl (%u)\n", - driver, blocksize, crypto_aead_blocksize(generic_tfm)); + driver, crypto_aead_blocksize(tfm), + crypto_aead_blocksize(generic_tfm)); err = -EINVAL; goto out; } @@ -2284,35 +2283,22 @@ static int test_aead_vs_generic_impl(const char *driver, * Now generate test vectors using the generic implementation, and test * the other implementation against them. */ - - maxkeysize = 0; - for (i = 0; i < test_desc->suite.aead.count; i++) - maxkeysize = max_t(unsigned int, maxkeysize, - test_desc->suite.aead.vecs[i].klen); - - vec.key = kmalloc(maxkeysize, GFP_KERNEL); - vec.iv = kmalloc(ivsize, GFP_KERNEL); - vec.assoc = kmalloc(maxdatasize, GFP_KERNEL); - vec.ptext = kmalloc(maxdatasize, GFP_KERNEL); - vec.ctext = kmalloc(maxdatasize, GFP_KERNEL); - if (!vec.key || !vec.iv || !vec.assoc || !vec.ptext || !vec.ctext) { - err = -ENOMEM; - goto out; - } - for (i = 0; i < fuzz_iterations * 8; i++) { - generate_random_aead_testvec(generic_req, &vec, - maxkeysize, maxdatasize, - vec_name, sizeof(vec_name)); - generate_random_testvec_config(cfg, cfgname, sizeof(cfgname)); - - err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, cfg, - req, tsgls); + generate_random_aead_testvec(generic_req, &ctx->vec, + ctx->maxkeysize, ctx->maxdatasize, + ctx->vec_name, + sizeof(ctx->vec_name)); + generate_random_testvec_config(&ctx->cfg, ctx->cfgname, + sizeof(ctx->cfgname)); + err = test_aead_vec_cfg(driver, ENCRYPT, &ctx->vec, + ctx->vec_name, &ctx->cfg, + ctx->req, ctx->tsgls); if (err) goto out; - if (vec.crypt_error == 0) { - err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, - cfg, req, tsgls); + if (ctx->vec.crypt_error == 0) { + err = test_aead_vec_cfg(driver, DECRYPT, &ctx->vec, + ctx->vec_name, &ctx->cfg, + ctx->req, ctx->tsgls); if (err) goto out; } @@ -2320,21 +2306,63 @@ static int test_aead_vs_generic_impl(const char *driver, } err = 0; out: - kfree(cfg); - kfree(vec.key); - kfree(vec.iv); - kfree(vec.assoc); - kfree(vec.ptext); - kfree(vec.ctext); crypto_free_aead(generic_tfm); aead_request_free(generic_req); return err; } + +static int test_aead_extra(const char *driver, + const struct alg_test_desc *test_desc, + struct aead_request *req, + struct cipher_test_sglists *tsgls) +{ + struct aead_extra_tests_ctx *ctx; + unsigned int i; + int err; + + if (noextratests) + return 0; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + ctx->req = req; + ctx->tfm = crypto_aead_reqtfm(req); + ctx->driver = driver; + ctx->test_desc = test_desc; + ctx->tsgls = tsgls; + ctx->maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN; + ctx->maxkeysize = 0; + 
for (i = 0; i < test_desc->suite.aead.count; i++) + ctx->maxkeysize = max_t(unsigned int, ctx->maxkeysize, + test_desc->suite.aead.vecs[i].klen); + + ctx->vec.key = kmalloc(ctx->maxkeysize, GFP_KERNEL); + ctx->vec.iv = kmalloc(crypto_aead_ivsize(ctx->tfm), GFP_KERNEL); + ctx->vec.assoc = kmalloc(ctx->maxdatasize, GFP_KERNEL); + ctx->vec.ptext = kmalloc(ctx->maxdatasize, GFP_KERNEL); + ctx->vec.ctext = kmalloc(ctx->maxdatasize, GFP_KERNEL); + if (!ctx->vec.key || !ctx->vec.iv || !ctx->vec.assoc || + !ctx->vec.ptext || !ctx->vec.ctext) { + err = -ENOMEM; + goto out; + } + + err = test_aead_vs_generic_impl(ctx); +out: + kfree(ctx->vec.key); + kfree(ctx->vec.iv); + kfree(ctx->vec.assoc); + kfree(ctx->vec.ptext); + kfree(ctx->vec.ctext); + kfree(ctx); + return err; +} #else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ -static int test_aead_vs_generic_impl(const char *driver, - const struct alg_test_desc *test_desc, - struct aead_request *req, - struct cipher_test_sglists *tsgls) +static int test_aead_extra(const char *driver, + const struct alg_test_desc *test_desc, + struct aead_request *req, + struct cipher_test_sglists *tsgls) { return 0; } @@ -2403,7 +2431,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, if (err) goto out; - err = test_aead_vs_generic_impl(driver, desc, req, tsgls); + err = test_aead_extra(driver, desc, req, tsgls); out: free_cipher_test_sglists(tsgls); aead_request_free(req); -- cgit v1.2.3 From 49763fc6b1af422e742e58fd04e078ab011edd96 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 1 Dec 2019 13:53:30 -0800 Subject: crypto: testmgr - generate inauthentic AEAD test vectors The whole point of using an AEAD over length-preserving encryption is that the data is authenticated. However currently the fuzz tests don't test any inauthentic inputs to verify that the data is actually being authenticated. And only two algorithms ("rfc4543(gcm(aes))" and "ccm(aes)") even have any inauthentic test vectors at all. Therefore, update the AEAD fuzz tests to sometimes generate inauthentic test vectors, either by generating a (ciphertext, AAD) pair without using the key, or by mutating an authentic pair that was generated. To avoid flakiness, only assume this works reliably if the auth tag is at least 8 bytes. Also account for the rfc4106, rfc4309, and rfc7539esp algorithms intentionally ignoring the last 8 AAD bytes, and for some algorithms doing extra checks that result in EINVAL rather than EBADMSG. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/testmgr.c | 320 +++++++++++++++++++++++++++++++++++++++++++------------ crypto/testmgr.h | 14 ++- 2 files changed, 261 insertions(+), 73 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 4fe210845e78..88f33c0efb23 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -82,6 +82,19 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) struct aead_test_suite { const struct aead_testvec *vecs; unsigned int count; + + /* + * Set if trying to decrypt an inauthentic ciphertext with this + * algorithm might result in EINVAL rather than EBADMSG, due to other + * validation the algorithm does on the inputs such as length checks. + */ + unsigned int einval_allowed : 1; + + /* + * Set if the algorithm intentionally ignores the last 8 bytes of the + * AAD buffer during decryption. 
+ */ + unsigned int esp_aad : 1; }; struct cipher_test_suite { @@ -814,27 +827,39 @@ static unsigned int generate_random_length(unsigned int max_len) } } -/* Sometimes make some random changes to the given data buffer */ -static void mutate_buffer(u8 *buf, size_t count) +/* Flip a random bit in the given nonempty data buffer */ +static void flip_random_bit(u8 *buf, size_t size) +{ + size_t bitpos; + + bitpos = prandom_u32() % (size * 8); + buf[bitpos / 8] ^= 1 << (bitpos % 8); +} + +/* Flip a random byte in the given nonempty data buffer */ +static void flip_random_byte(u8 *buf, size_t size) +{ + buf[prandom_u32() % size] ^= 0xff; +} + +/* Sometimes make some random changes to the given nonempty data buffer */ +static void mutate_buffer(u8 *buf, size_t size) { size_t num_flips; size_t i; - size_t pos; /* Sometimes flip some bits */ if (prandom_u32() % 4 == 0) { - num_flips = min_t(size_t, 1 << (prandom_u32() % 8), count * 8); - for (i = 0; i < num_flips; i++) { - pos = prandom_u32() % (count * 8); - buf[pos / 8] ^= 1 << (pos % 8); - } + num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size * 8); + for (i = 0; i < num_flips; i++) + flip_random_bit(buf, size); } /* Sometimes flip some bytes */ if (prandom_u32() % 4 == 0) { - num_flips = min_t(size_t, 1 << (prandom_u32() % 8), count); + num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size); for (i = 0; i < num_flips; i++) - buf[prandom_u32() % count] ^= 0xff; + flip_random_byte(buf, size); } } @@ -1915,7 +1940,6 @@ static int test_aead_vec_cfg(const char *driver, int enc, cfg->iv_offset + (cfg->iv_offset_relative_to_alignmask ? alignmask : 0); struct kvec input[2]; - int expected_error; int err; /* Set the key */ @@ -2036,20 +2060,31 @@ static int test_aead_vec_cfg(const char *driver, int enc, return -EINVAL; } - /* Check for success or failure */ - expected_error = vec->novrfy ? -EBADMSG : vec->crypt_error; - if (err) { - if (err == expected_error) - return 0; - pr_err("alg: aead: %s %s failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n", - driver, op, vec_name, expected_error, err, cfg->name); - return err; - } - if (expected_error) { - pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n", + /* Check for unexpected success or failure, or wrong error code */ + if ((err == 0 && vec->novrfy) || + (err != vec->crypt_error && !(err == -EBADMSG && vec->novrfy))) { + char expected_error[32]; + + if (vec->novrfy && + vec->crypt_error != 0 && vec->crypt_error != -EBADMSG) + sprintf(expected_error, "-EBADMSG or %d", + vec->crypt_error); + else if (vec->novrfy) + sprintf(expected_error, "-EBADMSG"); + else + sprintf(expected_error, "%d", vec->crypt_error); + if (err) { + pr_err("alg: aead: %s %s failed on test vector %s; expected_error=%s, actual_error=%d, cfg=\"%s\"\n", + driver, op, vec_name, expected_error, err, + cfg->name); + return err; + } + pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %s; expected_error=%s, cfg=\"%s\"\n", driver, op, vec_name, expected_error, cfg->name); return -EINVAL; } + if (err) /* Expectedly failed. */ + return 0; /* Check for the correct output (ciphertext or plaintext) */ err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext, @@ -2128,24 +2163,112 @@ struct aead_extra_tests_ctx { }; /* - * Generate an AEAD test vector from the given implementation. - * Assumes the buffers in 'vec' were already allocated. + * Make at least one random change to a (ciphertext, AAD) pair. 
"Ciphertext" + * here means the full ciphertext including the authentication tag. The + * authentication tag (and hence also the ciphertext) is assumed to be nonempty. + */ +static void mutate_aead_message(struct aead_testvec *vec, bool esp_aad) +{ + const unsigned int aad_tail_size = esp_aad ? 8 : 0; + const unsigned int authsize = vec->clen - vec->plen; + + if (prandom_u32() % 2 == 0 && vec->alen > aad_tail_size) { + /* Mutate the AAD */ + flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size); + if (prandom_u32() % 2 == 0) + return; + } + if (prandom_u32() % 2 == 0) { + /* Mutate auth tag (assuming it's at the end of ciphertext) */ + flip_random_bit((u8 *)vec->ctext + vec->plen, authsize); + } else { + /* Mutate any part of the ciphertext */ + flip_random_bit((u8 *)vec->ctext, vec->clen); + } +} + +/* + * Minimum authentication tag size in bytes at which we assume that we can + * reliably generate inauthentic messages, i.e. not generate an authentic + * message by chance. + */ +#define MIN_COLLISION_FREE_AUTHSIZE 8 + +static void generate_aead_message(struct aead_request *req, + const struct aead_test_suite *suite, + struct aead_testvec *vec, + bool prefer_inauthentic) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + const unsigned int ivsize = crypto_aead_ivsize(tfm); + const unsigned int authsize = vec->clen - vec->plen; + const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) && + (prefer_inauthentic || prandom_u32() % 4 == 0); + + /* Generate the AAD. */ + generate_random_bytes((u8 *)vec->assoc, vec->alen); + + if (inauthentic && prandom_u32() % 2 == 0) { + /* Generate a random ciphertext. */ + generate_random_bytes((u8 *)vec->ctext, vec->clen); + } else { + int i = 0; + struct scatterlist src[2], dst; + u8 iv[MAX_IVLEN]; + DECLARE_CRYPTO_WAIT(wait); + + /* Generate a random plaintext and encrypt it. */ + sg_init_table(src, 2); + if (vec->alen) + sg_set_buf(&src[i++], vec->assoc, vec->alen); + if (vec->plen) { + generate_random_bytes((u8 *)vec->ptext, vec->plen); + sg_set_buf(&src[i++], vec->ptext, vec->plen); + } + sg_init_one(&dst, vec->ctext, vec->alen + vec->clen); + memcpy(iv, vec->iv, ivsize); + aead_request_set_callback(req, 0, crypto_req_done, &wait); + aead_request_set_crypt(req, src, &dst, vec->plen, iv); + aead_request_set_ad(req, vec->alen); + vec->crypt_error = crypto_wait_req(crypto_aead_encrypt(req), + &wait); + /* If encryption failed, we're done. */ + if (vec->crypt_error != 0) + return; + memmove((u8 *)vec->ctext, vec->ctext + vec->alen, vec->clen); + if (!inauthentic) + return; + /* + * Mutate the authentic (ciphertext, AAD) pair to get an + * inauthentic one. + */ + mutate_aead_message(vec, suite->esp_aad); + } + vec->novrfy = 1; + if (suite->einval_allowed) + vec->crypt_error = -EINVAL; +} + +/* + * Generate an AEAD test vector 'vec' using the implementation specified by + * 'req'. The buffers in 'vec' must already be allocated. + * + * If 'prefer_inauthentic' is true, then this function will generate inauthentic + * test vectors (i.e. vectors with 'vec->novrfy=1') more often. 
*/ static void generate_random_aead_testvec(struct aead_request *req, struct aead_testvec *vec, + const struct aead_test_suite *suite, unsigned int maxkeysize, unsigned int maxdatasize, - char *name, size_t max_namelen) + char *name, size_t max_namelen, + bool prefer_inauthentic) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); const unsigned int ivsize = crypto_aead_ivsize(tfm); const unsigned int maxauthsize = crypto_aead_maxauthsize(tfm); unsigned int authsize; unsigned int total_len; - int i; - struct scatterlist src[2], dst; - u8 iv[MAX_IVLEN]; - DECLARE_CRYPTO_WAIT(wait); /* Key: length in [0, maxkeysize], but usually choose maxkeysize */ vec->klen = maxkeysize; @@ -2161,50 +2284,83 @@ static void generate_random_aead_testvec(struct aead_request *req, authsize = maxauthsize; if (prandom_u32() % 4 == 0) authsize = prandom_u32() % (maxauthsize + 1); + if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE) + authsize = MIN_COLLISION_FREE_AUTHSIZE; if (WARN_ON(authsize > maxdatasize)) authsize = maxdatasize; maxdatasize -= authsize; vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize); - /* Plaintext and associated data */ + /* AAD, plaintext, and ciphertext lengths */ total_len = generate_random_length(maxdatasize); if (prandom_u32() % 4 == 0) vec->alen = 0; else vec->alen = generate_random_length(total_len); vec->plen = total_len - vec->alen; - generate_random_bytes((u8 *)vec->assoc, vec->alen); - generate_random_bytes((u8 *)vec->ptext, vec->plen); - vec->clen = vec->plen + authsize; /* - * If the key or authentication tag size couldn't be set, no need to - * continue to encrypt. + * Generate the AAD, plaintext, and ciphertext. Not applicable if the + * key or the authentication tag size couldn't be set. */ + vec->novrfy = 0; vec->crypt_error = 0; - if (vec->setkey_error || vec->setauthsize_error) - goto done; - - /* Ciphertext */ - sg_init_table(src, 2); - i = 0; - if (vec->alen) - sg_set_buf(&src[i++], vec->assoc, vec->alen); - if (vec->plen) - sg_set_buf(&src[i++], vec->ptext, vec->plen); - sg_init_one(&dst, vec->ctext, vec->alen + vec->clen); - memcpy(iv, vec->iv, ivsize); - aead_request_set_callback(req, 0, crypto_req_done, &wait); - aead_request_set_crypt(req, src, &dst, vec->plen, iv); - aead_request_set_ad(req, vec->alen); - vec->crypt_error = crypto_wait_req(crypto_aead_encrypt(req), &wait); - if (vec->crypt_error == 0) - memmove((u8 *)vec->ctext, vec->ctext + vec->alen, vec->clen); -done: + if (vec->setkey_error == 0 && vec->setauthsize_error == 0) + generate_aead_message(req, suite, vec, prefer_inauthentic); snprintf(name, max_namelen, - "\"random: alen=%u plen=%u authsize=%u klen=%u\"", - vec->alen, vec->plen, authsize, vec->klen); + "\"random: alen=%u plen=%u authsize=%u klen=%u novrfy=%d\"", + vec->alen, vec->plen, authsize, vec->klen, vec->novrfy); +} + +static void try_to_generate_inauthentic_testvec( + struct aead_extra_tests_ctx *ctx) +{ + int i; + + for (i = 0; i < 10; i++) { + generate_random_aead_testvec(ctx->req, &ctx->vec, + &ctx->test_desc->suite.aead, + ctx->maxkeysize, ctx->maxdatasize, + ctx->vec_name, + sizeof(ctx->vec_name), true); + if (ctx->vec.novrfy) + return; + } +} + +/* + * Generate inauthentic test vectors (i.e. ciphertext, AAD pairs that aren't the + * result of an encryption with the key) and verify that decryption fails. 
+ */ +static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx) +{ + unsigned int i; + int err; + + for (i = 0; i < fuzz_iterations * 8; i++) { + /* + * Since this part of the tests isn't comparing the + * implementation to another, there's no point in testing any + * test vectors other than inauthentic ones (vec.novrfy=1) here. + * + * If we're having trouble generating such a test vector, e.g. + * if the algorithm keeps rejecting the generated keys, don't + * retry forever; just continue on. + */ + try_to_generate_inauthentic_testvec(ctx); + if (ctx->vec.novrfy) { + generate_random_testvec_config(&ctx->cfg, ctx->cfgname, + sizeof(ctx->cfgname)); + err = test_aead_vec_cfg(ctx->driver, DECRYPT, &ctx->vec, + ctx->vec_name, &ctx->cfg, + ctx->req, ctx->tsgls); + if (err) + return err; + } + cond_resched(); + } + return 0; } /* @@ -2285,17 +2441,20 @@ static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx) */ for (i = 0; i < fuzz_iterations * 8; i++) { generate_random_aead_testvec(generic_req, &ctx->vec, + &ctx->test_desc->suite.aead, ctx->maxkeysize, ctx->maxdatasize, ctx->vec_name, - sizeof(ctx->vec_name)); + sizeof(ctx->vec_name), false); generate_random_testvec_config(&ctx->cfg, ctx->cfgname, sizeof(ctx->cfgname)); - err = test_aead_vec_cfg(driver, ENCRYPT, &ctx->vec, - ctx->vec_name, &ctx->cfg, - ctx->req, ctx->tsgls); - if (err) - goto out; - if (ctx->vec.crypt_error == 0) { + if (!ctx->vec.novrfy) { + err = test_aead_vec_cfg(driver, ENCRYPT, &ctx->vec, + ctx->vec_name, &ctx->cfg, + ctx->req, ctx->tsgls); + if (err) + goto out; + } + if (ctx->vec.crypt_error == 0 || ctx->vec.novrfy) { err = test_aead_vec_cfg(driver, DECRYPT, &ctx->vec, ctx->vec_name, &ctx->cfg, ctx->req, ctx->tsgls); @@ -2348,6 +2507,10 @@ static int test_aead_extra(const char *driver, goto out; } + err = test_aead_inauthentic_inputs(ctx); + if (err) + goto out; + err = test_aead_vs_generic_impl(ctx); out: kfree(ctx->vec.key); @@ -3978,7 +4141,8 @@ static int alg_test_null(const struct alg_test_desc *desc, return 0; } -#define __VECS(tv) { .vecs = tv, .count = ARRAY_SIZE(tv) } +#define ____VECS(tv) .vecs = tv, .count = ARRAY_SIZE(tv) +#define __VECS(tv) { ____VECS(tv) } /* Please keep this list sorted by algorithm name. 
*/ static const struct alg_test_desc alg_test_descs[] = { @@ -4284,7 +4448,10 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = __VECS(aes_ccm_tv_template) + .aead = { + ____VECS(aes_ccm_tv_template), + .einval_allowed = 1, + } } }, { .alg = "cfb(aes)", @@ -5032,7 +5199,11 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = __VECS(aes_gcm_rfc4106_tv_template) + .aead = { + ____VECS(aes_gcm_rfc4106_tv_template), + .einval_allowed = 1, + .esp_aad = 1, + } } }, { .alg = "rfc4309(ccm(aes))", @@ -5040,14 +5211,21 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = __VECS(aes_ccm_rfc4309_tv_template) + .aead = { + ____VECS(aes_ccm_rfc4309_tv_template), + .einval_allowed = 1, + .esp_aad = 1, + } } }, { .alg = "rfc4543(gcm(aes))", .generic_driver = "rfc4543(gcm_base(ctr(aes-generic),ghash-generic))", .test = alg_test_aead, .suite = { - .aead = __VECS(aes_gcm_rfc4543_tv_template) + .aead = { + ____VECS(aes_gcm_rfc4543_tv_template), + .einval_allowed = 1, + } } }, { .alg = "rfc7539(chacha20,poly1305)", @@ -5059,7 +5237,11 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "rfc7539esp(chacha20,poly1305)", .test = alg_test_aead, .suite = { - .aead = __VECS(rfc7539esp_tv_template) + .aead = { + ____VECS(rfc7539esp_tv_template), + .einval_allowed = 1, + .esp_aad = 1, + } } }, { .alg = "rmd128", diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 48da646651cb..d29983908c38 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -85,16 +85,22 @@ struct cipher_testvec { * @ctext: Pointer to the full authenticated ciphertext. For AEADs that * produce a separate "ciphertext" and "authentication tag", these * two parts are concatenated: ciphertext || tag. - * @novrfy: Decryption verification failure expected? + * @novrfy: If set, this is an inauthentic input test: only decryption is + * tested, and it is expected to fail with either -EBADMSG or + * @crypt_error if it is nonzero. * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS? * (e.g. setkey() needs to fail due to a weak key) * @klen: Length of @key in bytes * @plen: Length of @ptext in bytes * @alen: Length of @assoc in bytes * @clen: Length of @ctext in bytes - * @setkey_error: Expected error from setkey() - * @setauthsize_error: Expected error from setauthsize() - * @crypt_error: Expected error from encrypt() and decrypt() + * @setkey_error: Expected error from setkey(). If set, neither encryption nor + * decryption is tested. + * @setauthsize_error: Expected error from setauthsize(). If set, neither + * encryption nor decryption is tested. + * @crypt_error: When @novrfy=0, the expected error from encrypt(). When + * @novrfy=1, an optional alternate error code that is acceptable + * for decrypt() to return besides -EBADMSG. */ struct aead_testvec { const char *key; -- cgit v1.2.3 From c441a909c68618ff64aa70394d0b270b0665a229 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 2 Dec 2019 13:42:29 -0800 Subject: crypto: compress - remove crt_u.compress (struct compress_tfm) crt_u.compress (struct compress_tfm) is pointless because its two fields, ->cot_compress() and ->cot_decompress(), always point to crypto_compress() and crypto_decompress(). Remove this pointless indirection, and just make crypto_comp_compress() and crypto_comp_decompress() be direct calls to what used to be crypto_compress() and crypto_decompress(). 
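The change reduces to removing one layer of function-pointer indirection. A compilable sketch with toy names (toy_alg and toy_tfm are illustrative; memcpy stands in for a real compressor) shows the resulting shape: the exported entry point dereferences the algorithm's own callback directly, with no per-tfm ops table in between.

#include <string.h>

/* Toy stand-in for the algorithm's ->coa_compress() callback. */
struct toy_alg {
	int (*coa_compress)(const unsigned char *src, unsigned int slen,
			    unsigned char *dst, unsigned int *dlen);
};

struct toy_tfm {
	const struct toy_alg *alg;
};

/* A "compressor" that just copies, enough to exercise the call path. */
static int toy_copy(const unsigned char *src, unsigned int slen,
		    unsigned char *dst, unsigned int *dlen)
{
	memcpy(dst, src, slen);
	*dlen = slen;
	return 0;
}

/*
 * After the change exactly one indirection remains: the algorithm callback
 * itself.  The ops struct that used to sit in between is gone.
 */
int toy_comp_compress(struct toy_tfm *tfm,
		      const unsigned char *src, unsigned int slen,
		      unsigned char *dst, unsigned int *dlen)
{
	return tfm->alg->coa_compress(src, slen, dst, dlen);
}

int main(void)
{
	static const struct toy_alg toy = { .coa_compress = toy_copy };
	struct toy_tfm tfm = { .alg = &toy };
	unsigned char in[4] = "abc", out[4];
	unsigned int outlen;

	return toy_comp_compress(&tfm, in, sizeof(in), out, &outlen);
}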
Also remove the unused function crypto_comp_cast(). Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/api.c | 2 +- crypto/compress.c | 31 ++++++++++++------------------- crypto/internal.h | 1 - include/linux/crypto.h | 43 ++++++------------------------------------- 4 files changed, 19 insertions(+), 58 deletions(-) (limited to 'crypto') diff --git a/crypto/api.c b/crypto/api.c index 4d3d13872fac..268129979bc2 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -301,7 +301,7 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) return crypto_init_cipher_ops(tfm); case CRYPTO_ALG_TYPE_COMPRESS: - return crypto_init_compress_ops(tfm); + return 0; default: break; diff --git a/crypto/compress.c b/crypto/compress.c index e9edf8524787..9048fe390c46 100644 --- a/crypto/compress.c +++ b/crypto/compress.c @@ -6,34 +6,27 @@ * * Copyright (c) 2002 James Morris */ -#include #include -#include -#include #include "internal.h" -static int crypto_compress(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) +int crypto_comp_compress(struct crypto_comp *comp, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) { + struct crypto_tfm *tfm = crypto_comp_tfm(comp); + return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst, dlen); } +EXPORT_SYMBOL_GPL(crypto_comp_compress); -static int crypto_decompress(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) +int crypto_comp_decompress(struct crypto_comp *comp, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) { + struct crypto_tfm *tfm = crypto_comp_tfm(comp); + return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst, dlen); } - -int crypto_init_compress_ops(struct crypto_tfm *tfm) -{ - struct compress_tfm *ops = &tfm->crt_compress; - - ops->cot_compress = crypto_compress; - ops->cot_decompress = crypto_decompress; - - return 0; -} +EXPORT_SYMBOL_GPL(crypto_comp_decompress); diff --git a/crypto/internal.h b/crypto/internal.h index 93df7bec844a..a58a2af4b669 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -59,7 +59,6 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); int crypto_init_cipher_ops(struct crypto_tfm *tfm); -int crypto_init_compress_ops(struct crypto_tfm *tfm); struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); void crypto_larval_kill(struct crypto_alg *alg); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 23365a9d062e..8f708564b98b 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -606,17 +606,7 @@ struct cipher_tfm { void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); }; -struct compress_tfm { - int (*cot_compress)(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); - int (*cot_decompress)(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); -}; - #define crt_cipher crt_u.cipher -#define crt_compress crt_u.compress struct crypto_tfm { @@ -624,7 +614,6 @@ struct crypto_tfm { union { struct cipher_tfm cipher; - struct compress_tfm compress; } crt_u; void (*exit)(struct crypto_tfm *tfm); @@ -928,13 +917,6 @@ static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) return (struct crypto_comp *)tfm; } -static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm) -{ - 
BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) & - CRYPTO_ALG_TYPE_MASK); - return __crypto_comp_cast(tfm); -} - static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name, u32 type, u32 mask) { @@ -969,26 +951,13 @@ static inline const char *crypto_comp_name(struct crypto_comp *tfm) return crypto_tfm_alg_name(crypto_comp_tfm(tfm)); } -static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm) -{ - return &crypto_comp_tfm(tfm)->crt_compress; -} - -static inline int crypto_comp_compress(struct crypto_comp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm), - src, slen, dst, dlen); -} +int crypto_comp_compress(struct crypto_comp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); -static inline int crypto_comp_decompress(struct crypto_comp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm), - src, slen, dst, dlen); -} +int crypto_comp_decompress(struct crypto_comp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); #endif /* _LINUX_CRYPTO_H */ -- cgit v1.2.3 From e8cfed5e4e2b5929371955f476a52a4c3398ead3 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 2 Dec 2019 13:42:30 -0800 Subject: crypto: cipher - remove crt_u.cipher (struct cipher_tfm) Of the three fields in crt_u.cipher (struct cipher_tfm), ->cit_setkey() is pointless because it always points to setkey() in crypto/cipher.c. ->cit_decrypt_one() and ->cit_encrypt_one() are slightly less pointless, since if the algorithm doesn't have an alignmask, they are set directly to ->cia_encrypt() and ->cia_decrypt(). However, this "optimization" isn't worthwhile because: - The "cipher" algorithm type is the only algorithm still using crt_u, so it's bloating every struct crypto_tfm for every algorithm type. - If the algorithm has an alignmask, this "optimization" actually makes things slower, as it causes 2 indirect calls per block rather than 1. - It adds extra code complexity. - Some templates already call ->cia_encrypt()/->cia_decrypt() directly instead of going through ->cit_encrypt_one()/->cit_decrypt_one(). - The "cipher" algorithm type never gives optimal performance anyway. For that, a higher-level type such as skcipher needs to be used. Therefore, just remove the extra indirection, and make crypto_cipher_setkey(), crypto_cipher_encrypt_one(), and crypto_cipher_decrypt_one() be direct calls into crypto/cipher.c. Also remove the unused function crypto_cipher_cast(). 
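For context, here is a hedged sketch of how a kernel-side caller uses this single-block API, built only from the functions named above ("aes" and the 16-byte key are arbitrary choices for illustration; error handling is abbreviated). The calling convention is unchanged by this patch; only the dispatch underneath moves from an indirect call through crt_cipher to a plain exported function.

#include <linux/crypto.h>
#include <linux/err.h>

static int demo_one_block(void)
{
	struct crypto_cipher *tfm;
	u8 key[16] = { 0 };
	u8 block[16] = { 0 };
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (!err) {
		/* Both of these are now direct calls into crypto/cipher.c. */
		crypto_cipher_encrypt_one(tfm, block, block);
		crypto_cipher_decrypt_one(tfm, block, block);
	}

	crypto_free_cipher(tfm);
	return err;
}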
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/api.c | 15 +------- crypto/cipher.c | 92 ++++++++++++++++++++------------------------------ crypto/internal.h | 2 -- include/linux/crypto.h | 48 ++++---------------------- 4 files changed, 43 insertions(+), 114 deletions(-) (limited to 'crypto') diff --git a/crypto/api.c b/crypto/api.c index 268129979bc2..ef96142ceca7 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -295,20 +295,7 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) if (type_obj) return type_obj->init(tfm, type, mask); - - switch (crypto_tfm_alg_type(tfm)) { - case CRYPTO_ALG_TYPE_CIPHER: - return crypto_init_cipher_ops(tfm); - - case CRYPTO_ALG_TYPE_COMPRESS: - return 0; - - default: - break; - } - - BUG(); - return -EINVAL; + return 0; } static void crypto_exit_ops(struct crypto_tfm *tfm) diff --git a/crypto/cipher.c b/crypto/cipher.c index 108427026e7c..aadd51cb7250 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -2,7 +2,7 @@ /* * Cryptographic API. * - * Cipher operations. + * Single-block cipher operations. * * Copyright (c) 2002 James Morris * Copyright (c) 2005 Herbert Xu @@ -16,11 +16,11 @@ #include #include "internal.h" -static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, +static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key, unsigned int keylen) { - struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher; - unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); + struct cipher_alg *cia = crypto_cipher_alg(tfm); + unsigned long alignmask = crypto_cipher_alignmask(tfm); int ret; u8 *buffer, *alignbuffer; unsigned long absize; @@ -32,83 +32,63 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); - ret = cia->cia_setkey(tfm, alignbuffer, keylen); + ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen); memset(alignbuffer, 0, keylen); kfree(buffer); return ret; } -static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) +int crypto_cipher_setkey(struct crypto_cipher *tfm, + const u8 *key, unsigned int keylen) { - struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher; - unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); + struct cipher_alg *cia = crypto_cipher_alg(tfm); + unsigned long alignmask = crypto_cipher_alignmask(tfm); - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; + crypto_cipher_clear_flags(tfm, CRYPTO_TFM_RES_MASK); if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) { - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + crypto_cipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } if ((unsigned long)key & alignmask) return setkey_unaligned(tfm, key, keylen); - return cia->cia_setkey(tfm, key, keylen); + return cia->cia_setkey(crypto_cipher_tfm(tfm), key, keylen); } +EXPORT_SYMBOL_GPL(crypto_cipher_setkey); -static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *, - const u8 *), - struct crypto_tfm *tfm, - u8 *dst, const u8 *src) +static inline void cipher_crypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src, bool enc) { - unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); - unsigned int size = crypto_tfm_alg_blocksize(tfm); - u8 buffer[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; - u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); - - memcpy(tmp, src, size); - fn(tfm, tmp, tmp); - memcpy(dst, tmp, size); -} - -static void cipher_encrypt_unaligned(struct crypto_tfm *tfm, - 
u8 *dst, const u8 *src) -{ - unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; + unsigned long alignmask = crypto_cipher_alignmask(tfm); + struct cipher_alg *cia = crypto_cipher_alg(tfm); + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = + enc ? cia->cia_encrypt : cia->cia_decrypt; if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) { - cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src); - return; + unsigned int bs = crypto_cipher_blocksize(tfm); + u8 buffer[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; + u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); + + memcpy(tmp, src, bs); + fn(crypto_cipher_tfm(tfm), tmp, tmp); + memcpy(dst, tmp, bs); + } else { + fn(crypto_cipher_tfm(tfm), dst, src); } - - cipher->cia_encrypt(tfm, dst, src); } -static void cipher_decrypt_unaligned(struct crypto_tfm *tfm, - u8 *dst, const u8 *src) +void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src) { - unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; - - if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) { - cipher_crypt_unaligned(cipher->cia_decrypt, tfm, dst, src); - return; - } - - cipher->cia_decrypt(tfm, dst, src); + cipher_crypt_one(tfm, dst, src, true); } +EXPORT_SYMBOL_GPL(crypto_cipher_encrypt_one); -int crypto_init_cipher_ops(struct crypto_tfm *tfm) +void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src) { - struct cipher_tfm *ops = &tfm->crt_cipher; - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; - - ops->cit_setkey = setkey; - ops->cit_encrypt_one = crypto_tfm_alg_alignmask(tfm) ? - cipher_encrypt_unaligned : cipher->cia_encrypt; - ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ? - cipher_decrypt_unaligned : cipher->cia_decrypt; - - return 0; + cipher_crypt_one(tfm, dst, src, false); } +EXPORT_SYMBOL_GPL(crypto_cipher_decrypt_one); diff --git a/crypto/internal.h b/crypto/internal.h index a58a2af4b669..ff06a3bd1ca1 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -58,8 +58,6 @@ static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg) struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); -int crypto_init_cipher_ops(struct crypto_tfm *tfm); - struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); void crypto_larval_kill(struct crypto_alg *alg); void crypto_alg_tested(const char *name, int err); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 8f708564b98b..c23f1eed7970 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -599,23 +599,10 @@ int crypto_has_alg(const char *name, u32 type, u32 mask); * crypto_free_*(), as well as the various helpers below. 
*/ -struct cipher_tfm { - int (*cit_setkey)(struct crypto_tfm *tfm, - const u8 *key, unsigned int keylen); - void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); - void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); -}; - -#define crt_cipher crt_u.cipher - struct crypto_tfm { u32 crt_flags; - union { - struct cipher_tfm cipher; - } crt_u; - void (*exit)(struct crypto_tfm *tfm); struct crypto_alg *__crt_alg; @@ -752,12 +739,6 @@ static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) return (struct crypto_cipher *)tfm; } -static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm) -{ - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); - return __crypto_cipher_cast(tfm); -} - /** * crypto_alloc_cipher() - allocate single block cipher handle * @alg_name: is the cra_name / name or cra_driver_name / driver name of the @@ -815,11 +796,6 @@ static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) return crypto_has_alg(alg_name, type, mask); } -static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm) -{ - return &crypto_cipher_tfm(tfm)->crt_cipher; -} - /** * crypto_cipher_blocksize() - obtain block size for cipher * @tfm: cipher handle @@ -873,12 +849,8 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, * * Return: 0 if the setting of the key was successful; < 0 if an error occurred */ -static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, - const u8 *key, unsigned int keylen) -{ - return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm), - key, keylen); -} +int crypto_cipher_setkey(struct crypto_cipher *tfm, + const u8 *key, unsigned int keylen); /** * crypto_cipher_encrypt_one() - encrypt one block of plaintext @@ -889,12 +861,8 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, * Invoke the encryption operation of one block. The caller must ensure that * the plaintext and ciphertext buffers are at least one block in size. */ -static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, - u8 *dst, const u8 *src) -{ - crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm), - dst, src); -} +void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src); /** * crypto_cipher_decrypt_one() - decrypt one block of ciphertext @@ -905,12 +873,8 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, * Invoke the decryption operation of one block. The caller must ensure that * the plaintext and ciphertext buffers are at least one block in size. */ -static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, - u8 *dst, const u8 *src) -{ - crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm), - dst, src); -} +void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src); static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) { -- cgit v1.2.3 From 91a71d612128f84f725022d7b7c5d5a741f6fdc7 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 3 Dec 2019 14:31:12 -0500 Subject: padata: remove cpumask change notifier Since commit 63d3578892dc ("crypto: pcrypt - remove padata cpumask notifier") this feature is unused, so get rid of it. 
Signed-off-by: Daniel Jordan Cc: Eric Biggers Cc: Herbert Xu Cc: Jonathan Corbet Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-doc@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- Documentation/padata.txt | 24 ---------------------- crypto/pcrypt.c | 1 - include/linux/padata.h | 11 ---------- kernel/padata.c | 52 +----------------------------------------------- 4 files changed, 1 insertion(+), 87 deletions(-) (limited to 'crypto') diff --git a/Documentation/padata.txt b/Documentation/padata.txt index b37ba1eaace3..b45df9c6547b 100644 --- a/Documentation/padata.txt +++ b/Documentation/padata.txt @@ -75,30 +75,6 @@ To simply add or remove one CPU from a certain cpumask the functions padata_add_cpu/padata_remove_cpu are used. cpu specifies the CPU to add or remove and mask is one of PADATA_CPU_SERIAL, PADATA_CPU_PARALLEL. -If a user is interested in padata cpumask changes, he can register to -the padata cpumask change notifier:: - - int padata_register_cpumask_notifier(struct padata_instance *pinst, - struct notifier_block *nblock); - -To unregister from that notifier:: - - int padata_unregister_cpumask_notifier(struct padata_instance *pinst, - struct notifier_block *nblock); - -The padata cpumask change notifier notifies about changes of the usable -cpumasks, i.e. the subset of active CPUs in the user supplied cpumask. - -Padata calls the notifier chain with:: - - blocking_notifier_call_chain(&pinst->cpumask_change_notifier, - notification_mask, - &pd_new->cpumask); - -Here cpumask_change_notifier is registered notifier, notification_mask -is one of PADATA_CPU_SERIAL, PADATA_CPU_PARALLEL and cpumask is a pointer -to a struct padata_cpumask that contains the new cpumask information. - Actually submitting work to the padata instance requires the creation of a padata_priv structure:: diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index a4f3b3f342c8..d6696e217128 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/include/linux/padata.h b/include/linux/padata.h index cccab7a59787..178d5cc6b494 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -13,7 +13,6 @@ #include #include #include -#include #include #define PADATA_CPU_SERIAL 0x01 @@ -151,10 +150,6 @@ struct padata_shell { * @pslist: List of padata_shell objects attached to this instance. * @cpumask: User supplied cpumasks for parallel and serial works. * @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask. - * @omask: Temporary storage used to compute the notification mask. - * @cpumask_change_notifier: Notifiers chain for user-defined notify - * callbacks that will be called when either @pcpu or @cbcpu - * or both cpumasks change. * @kobj: padata instance kernel object. * @lock: padata instance lock. * @flags: padata flags. 
@@ -166,8 +161,6 @@ struct padata_instance { struct list_head pslist; struct padata_cpumask cpumask; struct padata_cpumask rcpumask; - cpumask_var_t omask; - struct blocking_notifier_head cpumask_change_notifier; struct kobject kobj; struct mutex lock; u8 flags; @@ -187,8 +180,4 @@ extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); extern int padata_start(struct padata_instance *pinst); extern void padata_stop(struct padata_instance *pinst); -extern int padata_register_cpumask_notifier(struct padata_instance *pinst, - struct notifier_block *nblock); -extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst, - struct notifier_block *nblock); #endif diff --git a/kernel/padata.c b/kernel/padata.c index f5964f015139..bc594c00b26e 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -514,23 +514,16 @@ static int padata_replace_one(struct padata_shell *ps) static int padata_replace(struct padata_instance *pinst) { - int notification_mask = 0; struct padata_shell *ps; int err; pinst->flags |= PADATA_RESET; - cpumask_copy(pinst->omask, pinst->rcpumask.pcpu); cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask); - if (!cpumask_equal(pinst->omask, pinst->rcpumask.pcpu)) - notification_mask |= PADATA_CPU_PARALLEL; - cpumask_copy(pinst->omask, pinst->rcpumask.cbcpu); cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask); - if (!cpumask_equal(pinst->omask, pinst->rcpumask.cbcpu)) - notification_mask |= PADATA_CPU_SERIAL; list_for_each_entry(ps, &pinst->pslist, list) { err = padata_replace_one(ps); @@ -544,48 +537,11 @@ static int padata_replace(struct padata_instance *pinst) if (atomic_dec_and_test(&ps->opd->refcnt)) padata_free_pd(ps->opd); - if (notification_mask) - blocking_notifier_call_chain(&pinst->cpumask_change_notifier, - notification_mask, - &pinst->cpumask); - pinst->flags &= ~PADATA_RESET; return err; } -/** - * padata_register_cpumask_notifier - Registers a notifier that will be called - * if either pcpu or cbcpu or both cpumasks change. - * - * @pinst: A poineter to padata instance - * @nblock: A pointer to notifier block. - */ -int padata_register_cpumask_notifier(struct padata_instance *pinst, - struct notifier_block *nblock) -{ - return blocking_notifier_chain_register(&pinst->cpumask_change_notifier, - nblock); -} -EXPORT_SYMBOL(padata_register_cpumask_notifier); - -/** - * padata_unregister_cpumask_notifier - Unregisters cpumask notifier - * registered earlier using padata_register_cpumask_notifier - * - * @pinst: A pointer to data instance. - * @nlock: A pointer to notifier block. - */ -int padata_unregister_cpumask_notifier(struct padata_instance *pinst, - struct notifier_block *nblock) -{ - return blocking_notifier_chain_unregister( - &pinst->cpumask_change_notifier, - nblock); -} -EXPORT_SYMBOL(padata_unregister_cpumask_notifier); - - /* If cpumask contains no active cpu, we mark the instance as invalid. 
*/ static bool padata_validate_cpumask(struct padata_instance *pinst, const struct cpumask *cpumask) @@ -785,7 +741,6 @@ static void __padata_free(struct padata_instance *pinst) WARN_ON(!list_empty(&pinst->pslist)); padata_stop(pinst); - free_cpumask_var(pinst->omask); free_cpumask_var(pinst->rcpumask.cbcpu); free_cpumask_var(pinst->rcpumask.pcpu); free_cpumask_var(pinst->cpumask.pcpu); @@ -965,8 +920,6 @@ static struct padata_instance *padata_alloc(const char *name, goto err_free_masks; if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL)) goto err_free_rcpumask_pcpu; - if (!alloc_cpumask_var(&pinst->omask, GFP_KERNEL)) - goto err_free_rcpumask_cbcpu; INIT_LIST_HEAD(&pinst->pslist); @@ -976,11 +929,10 @@ static struct padata_instance *padata_alloc(const char *name, cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask); if (padata_setup_cpumasks(pinst)) - goto err_free_omask; + goto err_free_rcpumask_cbcpu; pinst->flags = 0; - BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); kobject_init(&pinst->kobj, &padata_attr_type); mutex_init(&pinst->lock); @@ -994,8 +946,6 @@ static struct padata_instance *padata_alloc(const char *name, return pinst; -err_free_omask: - free_cpumask_var(pinst->omask); err_free_rcpumask_cbcpu: free_cpumask_var(pinst->rcpumask.cbcpu); err_free_rcpumask_pcpu: -- cgit v1.2.3 From 37f96694cf73ba116993a9d2d99ad6a75fa7fdb0 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 5 Dec 2019 13:45:05 +0800 Subject: crypto: af_alg - Use bh_lock_sock in sk_destruct As af_alg_release_parent may be called from BH context (most notably due to an async request that only completes after socket closure, or as reported here because of an RCU-delayed sk_destruct call), we must use bh_lock_sock instead of lock_sock. Reported-by: syzbot+c2f1558d49e25cc36e5e@syzkaller.appspotmail.com Reported-by: Eric Dumazet Fixes: c840ac6af3f8 ("crypto: af_alg - Disallow bind/setkey/...") Cc: Signed-off-by: Herbert Xu --- crypto/af_alg.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 0dceaabc6321..3d8e53010cda 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -134,11 +134,13 @@ void af_alg_release_parent(struct sock *sk) sk = ask->parent; ask = alg_sk(sk); - lock_sock(sk); + local_bh_disable(); + bh_lock_sock(sk); ask->nokey_refcnt -= nokey; if (!last) last = !--ask->refcnt; - release_sock(sk); + bh_unlock_sock(sk); + local_bh_enable(); if (last) sock_put(sk); -- cgit v1.2.3 From 7db3b61b6bba4310f454588c2ca6faf2958ad79f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 6 Dec 2019 13:55:17 +0800 Subject: crypto: api - Check spawn->alg under lock in crypto_drop_spawn We need to check whether spawn->alg is NULL under lock as otherwise the algorithm could be removed from under us after we have checked it and found it to be non-NULL. This could cause us to remove the spawn from a non-existent list. 
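The bug and the fix reduce to the classic check-under-lock pattern. Here is a compilable pthread sketch of it (all names are illustrative; the kernel code uses crypto_alg_sem, a rw_semaphore, rather than a pthread rwlock):

#include <pthread.h>

struct list_node { struct list_node *prev, *next; };

struct spawn {
	void *alg;              /* cleared when the algorithm goes away */
	struct list_node list;
};

static pthread_rwlock_t alg_sem = PTHREAD_RWLOCK_INITIALIZER;

static void unlink_node(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/*
 * Testing spawn->alg before taking alg_sem would let another thread clear
 * it, and unlink the spawn, in the window between the check and the list
 * removal; the unlink below would then touch a list the spawn is no longer
 * on.  Performing the check inside the write-locked section closes that
 * window.
 */
void drop_spawn(struct spawn *spawn)
{
	pthread_rwlock_wrlock(&alg_sem);
	if (spawn->alg)
		unlink_node(&spawn->list);
	pthread_rwlock_unlock(&alg_sem);
}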
Fixes: 7ede5a5ba55a ("crypto: api - Fix crypto_drop_spawn crash...") Cc: Signed-off-by: Herbert Xu --- crypto/algapi.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index b052f38edba6..9ecb4a57b342 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -669,11 +669,9 @@ EXPORT_SYMBOL_GPL(crypto_grab_spawn); void crypto_drop_spawn(struct crypto_spawn *spawn) { - if (!spawn->alg) - return; - down_write(&crypto_alg_sem); - list_del(&spawn->list); + if (spawn->alg) + list_del(&spawn->list); up_write(&crypto_alg_sem); } EXPORT_SYMBOL_GPL(crypto_drop_spawn); -- cgit v1.2.3 From 73669cc556462f4e50376538d77ee312142e8a8a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 7 Dec 2019 22:15:15 +0800 Subject: crypto: api - Fix race condition in crypto_spawn_alg The function crypto_spawn_alg is racy because it drops the lock before shooting the dying algorithm. The algorithm could disappear altogether before we shoot it. This patch fixes it by moving the shooting into the locked section. Fixes: 6bfd48096ff8 ("[CRYPTO] api: Added spawns") Signed-off-by: Herbert Xu --- crypto/algapi.c | 16 +++++----------- crypto/api.c | 3 +-- crypto/internal.h | 1 - 3 files changed, 6 insertions(+), 14 deletions(-) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index 9ecb4a57b342..54e844ad9364 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -679,22 +679,16 @@ EXPORT_SYMBOL_GPL(crypto_drop_spawn); static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn) { struct crypto_alg *alg; - struct crypto_alg *alg2; down_read(&crypto_alg_sem); alg = spawn->alg; - alg2 = alg; - if (alg2) - alg2 = crypto_mod_get(alg2); - up_read(&crypto_alg_sem); - - if (!alg2) { - if (alg) - crypto_shoot_alg(alg); - return ERR_PTR(-EAGAIN); + if (alg && !crypto_mod_get(alg)) { + alg->cra_flags |= CRYPTO_ALG_DYING; + alg = NULL; } + up_read(&crypto_alg_sem); - return alg; + return alg ?: ERR_PTR(-EAGAIN); } struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, diff --git a/crypto/api.c b/crypto/api.c index ef96142ceca7..676d54ffada8 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -331,13 +331,12 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) return len; } -void crypto_shoot_alg(struct crypto_alg *alg) +static void crypto_shoot_alg(struct crypto_alg *alg) { down_write(&crypto_alg_sem); alg->cra_flags |= CRYPTO_ALG_DYING; up_write(&crypto_alg_sem); } -EXPORT_SYMBOL_GPL(crypto_shoot_alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask) diff --git a/crypto/internal.h b/crypto/internal.h index ff06a3bd1ca1..d5ebc60c5143 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -65,7 +65,6 @@ void crypto_alg_tested(const char *name, int err); void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, struct crypto_alg *nalg); void crypto_remove_final(struct list_head *list); -void crypto_shoot_alg(struct crypto_alg *alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); void *crypto_create_tfm(struct crypto_alg *alg, -- cgit v1.2.3 From 4f87ee118d16b4b2116a477229573ed5003b0d78 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 7 Dec 2019 22:15:17 +0800 Subject: crypto: api - Do not zap spawn->alg Currently when a spawn is removed we will zap its alg field. This is racy because the spawn could belong to an unregistered instance which may dereference the spawn->alg field. 
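The fix described in the next paragraph is essentially a tombstone flag. A minimal sketch of the idea (the field names follow the patch; everything else is illustrative):

#include <stdbool.h>
#include <stddef.h>

struct crypto_alg;

struct spawn {
	struct crypto_alg *alg; /* set once and never cleared */
	bool dead;              /* set under the lock when alg goes away */
};

/*
 * Lookups consult the flag rather than testing the pointer for NULL, so
 * code that still holds a reference to the spawn, such as an unregistered
 * instance, can keep dereferencing spawn->alg safely.
 */
static inline struct crypto_alg *spawn_alg_or_null(struct spawn *spawn)
{
	return spawn->dead ? NULL : spawn->alg;
}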
This patch fixes this by keeping spawn->alg constant and instead adding a new spawn->dead field to indicate that a spawn is going away. Signed-off-by: Herbert Xu --- crypto/algapi.c | 22 ++++++++++++---------- include/crypto/algapi.h | 1 + 2 files changed, 13 insertions(+), 10 deletions(-) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index 54e844ad9364..e858946adeed 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -93,15 +93,17 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg, if (!spawn) return NULL; - n = list_next_entry(spawn, list); + n = list_prev_entry(spawn, list); + list_move(&spawn->list, secondary_spawns); - if (spawn->alg && &n->list != stack && !n->alg) - n->alg = (n->list.next == stack) ? alg : - &list_next_entry(n, list)->inst->alg; + if (list_is_last(&n->list, stack)) + return top; - list_move(&spawn->list, secondary_spawns); + n = list_next_entry(n, list); + if (!spawn->dead) + n->dead = false; - return &n->list == stack ? top : &n->inst->alg.cra_users; + return &n->inst->alg.cra_users; } static void crypto_remove_instance(struct crypto_instance *inst, @@ -160,7 +162,7 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, if (&inst->alg == nalg) break; - spawn->alg = NULL; + spawn->dead = true; spawns = &inst->alg.cra_users; /* @@ -179,7 +181,7 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, &secondary_spawns))); list_for_each_entry_safe(spawn, n, &secondary_spawns, list) { - if (spawn->alg) + if (!spawn->dead) list_move(&spawn->list, &spawn->alg->cra_users); else crypto_remove_instance(spawn->inst, list); @@ -670,7 +672,7 @@ EXPORT_SYMBOL_GPL(crypto_grab_spawn); void crypto_drop_spawn(struct crypto_spawn *spawn) { down_write(&crypto_alg_sem); - if (spawn->alg) + if (!spawn->dead) list_del(&spawn->list); up_write(&crypto_alg_sem); } @@ -682,7 +684,7 @@ static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn) down_read(&crypto_alg_sem); alg = spawn->alg; - if (alg && !crypto_mod_get(alg)) { + if (!spawn->dead && !crypto_mod_get(alg)) { alg->cra_flags |= CRYPTO_ALG_DYING; alg = NULL; } diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 5cd846defdd6..771a295ac755 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -70,6 +70,7 @@ struct crypto_spawn { struct crypto_instance *inst; const struct crypto_type *frontend; u32 mask; + bool dead; }; struct crypto_queue { -- cgit v1.2.3 From 02244ba44bc366c38e188a3a90afc63dffae9897 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 7 Dec 2019 22:33:51 +0800 Subject: crypto: api - Add more comments to crypto_remove_spawns This patch explains the logic behind crypto_remove_spawns and its underlying crypto_more_spawns. Signed-off-by: Herbert Xu --- crypto/algapi.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index e858946adeed..cd643e294664 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -82,6 +82,15 @@ static void crypto_destroy_instance(struct crypto_alg *alg) crypto_tmpl_put(tmpl); } +/* + * This function adds a spawn to the list secondary_spawns which + * will be used at the end of crypto_remove_spawns to unregister + * instances, unless the spawn happens to be one that is depended + * on by the new algorithm (nalg in crypto_remove_spawns). + * + * This function is also responsible for resurrecting any algorithms + * in the dependency chain of nalg by unsetting n->dead. 
+ */ static struct list_head *crypto_more_spawns(struct crypto_alg *alg, struct list_head *stack, struct list_head *top, @@ -128,6 +137,12 @@ static void crypto_remove_instance(struct crypto_instance *inst, BUG_ON(!list_empty(&inst->alg.cra_users)); } +/* + * Given an algorithm alg, remove all algorithms that depend on it + * through spawns. If nalg is not null, then exempt any algorithms + * that is depended on by nalg. This is useful when nalg itself + * depends on alg. + */ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, struct crypto_alg *nalg) { @@ -146,6 +161,11 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, list_move(&spawn->list, &top); } + /* + * Perform a depth-first walk starting from alg through + * the cra_users tree. The list stack records the path + * from alg to the current spawn. + */ spawns = ⊤ do { while (!list_empty(spawns)) { @@ -180,6 +200,11 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, } while ((spawns = crypto_more_spawns(alg, &stack, &top, &secondary_spawns))); + /* + * Remove all instances that are marked as dead. Also + * complete the resurrection of the others by moving them + * back to the cra_users list. + */ list_for_each_entry_safe(spawn, n, &secondary_spawns, list) { if (!spawn->dead) list_move(&spawn->list, &spawn->alg->cra_users); -- cgit v1.2.3 From fbce6be5aef5bad46f3af1650f7a62ec1b34318e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 8 Dec 2019 13:42:51 +0800 Subject: crypto: shash - Add init_tfm/exit_tfm and verify descsize The shash interface supports a dynamic descsize field because of the presence of fallbacks (it's just padlock-sha actually, perhaps we can remove it one day). As it is the API does not verify the setting of descsize at all. It is up to the individual algorithms to ensure that descsize does not exceed the specified maximum value of HASH_MAX_DESCSIZE (going above would cause stack corruption). In order to allow the API to impose this limit directly, this patch adds init_tfm/exit_tfm hooks to the shash_alg structure. We can then verify the descsize setting in the API directly. Signed-off-by: Herbert Xu Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/shash.c | 26 ++++++++++++++++++++++++++ include/crypto/hash.h | 13 +++++++++++++ 2 files changed, 39 insertions(+) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 7989258a46b4..8042bb0df9c0 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -385,15 +385,41 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) return 0; } +static void crypto_shash_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_shash *hash = __crypto_shash_cast(tfm); + struct shash_alg *alg = crypto_shash_alg(hash); + + alg->exit_tfm(hash); +} + static int crypto_shash_init_tfm(struct crypto_tfm *tfm) { struct crypto_shash *hash = __crypto_shash_cast(tfm); struct shash_alg *alg = crypto_shash_alg(hash); + int err; hash->descsize = alg->descsize; shash_set_needkey(hash, alg); + if (alg->exit_tfm) + tfm->exit = crypto_shash_exit_tfm; + + if (!alg->init_tfm) + return 0; + + err = alg->init_tfm(hash); + if (err) + return err; + + /* ->init_tfm() may have increased the descsize. 
*/ + if (WARN_ON_ONCE(hash->descsize > HASH_MAX_DESCSIZE)) { + if (alg->exit_tfm) + alg->exit_tfm(hash); + return -EINVAL; + } + return 0; } diff --git a/include/crypto/hash.h b/include/crypto/hash.h index fe7f73bad1e2..cee446c59497 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -169,6 +169,17 @@ struct shash_desc { * @export: see struct ahash_alg * @import: see struct ahash_alg * @setkey: see struct ahash_alg + * @init_tfm: Initialize the cryptographic transformation object. + * This function is called only once at the instantiation + * time, right after the transformation context was + * allocated. In case the cryptographic hardware has + * some special requirements which need to be handled + * by software, this function shall check for the precise + * requirement of the transformation and put any software + * fallbacks in place. + * @exit_tfm: Deinitialize the cryptographic transformation object. + * This is a counterpart to @init_tfm, used to remove + * various changes set in @init_tfm. * @digestsize: see struct ahash_alg * @statesize: see struct ahash_alg * @descsize: Size of the operational state for the message digest. This state @@ -189,6 +200,8 @@ struct shash_alg { int (*import)(struct shash_desc *desc, const void *in); int (*setkey)(struct crypto_shash *tfm, const u8 *key, unsigned int keylen); + int (*init_tfm)(struct crypto_shash *tfm); + void (*exit_tfm)(struct crypto_shash *tfm); unsigned int descsize; -- cgit v1.2.3 From d9e1670b8005999998b557c0e301bb68192bb1c8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 8 Dec 2019 13:42:53 +0800 Subject: crypto: hmac - Use init_tfm/exit_tfm interface This patch switches hmac over to the new init_tfm/exit_tfm interface as opposed to cra_init/cra_exit. This way the shash API can make sure that descsize does not exceed the maximum. This patch also adds the API helper shash_alg_instance. 
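Other shash implementations with fallbacks can be converted along the same lines. A minimal sketch under the new interface (the example_* names are hypothetical, and "sha256" merely stands in for a fallback algorithm):

	struct example_ctx {
		struct crypto_shash *fallback;
	};

	static int example_init_tfm(struct crypto_shash *tfm)
	{
		struct example_ctx *ctx = crypto_shash_ctx(tfm);

		ctx->fallback = crypto_alloc_shash("sha256", 0, 0);
		if (IS_ERR(ctx->fallback))
			return PTR_ERR(ctx->fallback);

		/* descsize may grow here; the API now checks the result
		 * against HASH_MAX_DESCSIZE on our behalf.
		 */
		tfm->descsize = sizeof(struct shash_desc) +
				crypto_shash_descsize(ctx->fallback);
		return 0;
	}

	static void example_exit_tfm(struct crypto_shash *tfm)
	{
		struct example_ctx *ctx = crypto_shash_ctx(tfm);

		crypto_free_shash(ctx->fallback);
	}

The two functions are then wired up through .init_tfm and .exit_tfm in struct shash_alg rather than through cra_init/cra_exit in the base algorithm.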
Signed-off-by: Herbert Xu Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/hmac.c | 20 +++++++------------- include/crypto/internal/hash.h | 6 ++++++ 2 files changed, 13 insertions(+), 13 deletions(-) (limited to 'crypto') diff --git a/crypto/hmac.c b/crypto/hmac.c index 377f07733e2f..685e49953605 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -138,12 +138,11 @@ static int hmac_finup(struct shash_desc *pdesc, const u8 *data, crypto_shash_finup(desc, out, ds, out); } -static int hmac_init_tfm(struct crypto_tfm *tfm) +static int hmac_init_tfm(struct crypto_shash *parent) { - struct crypto_shash *parent = __crypto_shash_cast(tfm); struct crypto_shash *hash; - struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst); + struct shash_instance *inst = shash_alg_instance(parent); + struct crypto_shash_spawn *spawn = shash_instance_ctx(inst); struct hmac_ctx *ctx = hmac_ctx(parent); hash = crypto_spawn_shash(spawn); @@ -152,18 +151,14 @@ static int hmac_init_tfm(struct crypto_tfm *tfm) parent->descsize = sizeof(struct shash_desc) + crypto_shash_descsize(hash); - if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE)) { - crypto_free_shash(hash); - return -EINVAL; - } ctx->hash = hash; return 0; } -static void hmac_exit_tfm(struct crypto_tfm *tfm) +static void hmac_exit_tfm(struct crypto_shash *parent) { - struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm)); + struct hmac_ctx *ctx = hmac_ctx(parent); crypto_free_shash(ctx->hash); } @@ -217,9 +212,6 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + ALIGN(ss * 2, crypto_tfm_ctx_alignment()); - inst->alg.base.cra_init = hmac_init_tfm; - inst->alg.base.cra_exit = hmac_exit_tfm; - inst->alg.init = hmac_init; inst->alg.update = hmac_update; inst->alg.final = hmac_final; @@ -227,6 +219,8 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.export = hmac_export; inst->alg.import = hmac_import; inst->alg.setkey = hmac_setkey; + inst->alg.init_tfm = hmac_init_tfm; + inst->alg.exit_tfm = hmac_exit_tfm; err = shash_register_instance(tmpl, inst); if (err) { diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index f68dab38f160..cf8d7f99c93d 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -220,6 +220,12 @@ static inline struct shash_instance *shash_instance( struct shash_instance, alg); } +static inline struct shash_instance *shash_alg_instance( + struct crypto_shash *shash) +{ + return shash_instance(crypto_tfm_alg_instance(&shash->base)); +} + static inline void *shash_instance_ctx(struct shash_instance *inst) { return crypto_instance_ctx(shash_crypto_instance(inst)); -- cgit v1.2.3 From 2bbb3375d967155bccc86a5887d4a6e29c56b683 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 11 Dec 2019 10:50:11 +0800 Subject: crypto: api - fix unexpectedly getting generic implementation When CONFIG_CRYPTO_MANAGER_EXTRA_TESTS=y, the first lookup of an algorithm that needs to be instantiated using a template will always get the generic implementation, even when an accelerated one is available. This happens because the extra self-tests for the accelerated implementation allocate the generic implementation for comparison purposes, and then crypto_alg_tested() for the generic implementation "fulfills" the original request (i.e. sets crypto_larval::adult). 
This patch fixes this by only fulfilling the original request if we are currently the best outstanding larval as judged by the priority. If we're not the best then we will ask all waiters on that larval request to retry the lookup. Note that this patch introduces a behaviour change when the module providing the new algorithm is unregistered during the process. Previously we would have failed with ENOENT, after the patch we will instead redo the lookup. Fixes: 9a8a6b3f0950 ("crypto: testmgr - fuzz hashes against...") Fixes: d435e10e67be ("crypto: testmgr - fuzz skciphers against...") Fixes: 40153b10d91c ("crypto: testmgr - fuzz AEADs against...") Reported-by: Eric Biggers Signed-off-by: Herbert Xu Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/algapi.c | 24 +++++++++++++++++++++--- crypto/api.c | 4 +++- 2 files changed, 24 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index cd643e294664..9589b3f0041b 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -284,6 +284,7 @@ void crypto_alg_tested(const char *name, int err) struct crypto_alg *alg; struct crypto_alg *q; LIST_HEAD(list); + bool best; down_write(&crypto_alg_sem); list_for_each_entry(q, &crypto_alg_list, cra_list) { @@ -307,6 +308,21 @@ found: alg->cra_flags |= CRYPTO_ALG_TESTED; + /* Only satisfy larval waiters if we are the best. */ + best = true; + list_for_each_entry(q, &crypto_alg_list, cra_list) { + if (crypto_is_moribund(q) || !crypto_is_larval(q)) + continue; + + if (strcmp(alg->cra_name, q->cra_name)) + continue; + + if (q->cra_priority > alg->cra_priority) { + best = false; + break; + } + } + list_for_each_entry(q, &crypto_alg_list, cra_list) { if (q == alg) continue; @@ -330,10 +346,12 @@ found: continue; if ((q->cra_flags ^ alg->cra_flags) & larval->mask) continue; - if (!crypto_mod_get(alg)) - continue; - larval->adult = alg; + if (best && crypto_mod_get(alg)) + larval->adult = alg; + else + larval->adult = ERR_PTR(-EAGAIN); + continue; } diff --git a/crypto/api.c b/crypto/api.c index 676d54ffada8..7d71a9b10e5f 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -97,7 +97,7 @@ static void crypto_larval_destroy(struct crypto_alg *alg) struct crypto_larval *larval = (void *)alg; BUG_ON(!crypto_is_larval(alg)); - if (larval->adult) + if (!IS_ERR_OR_NULL(larval->adult)) crypto_mod_put(larval->adult); kfree(larval); } @@ -178,6 +178,8 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) alg = ERR_PTR(-ETIMEDOUT); else if (!alg) alg = ERR_PTR(-ENOENT); + else if (IS_ERR(alg)) + ; else if (crypto_is_test_larval(larval) && !(alg->cra_flags & CRYPTO_ALG_TESTED)) alg = ERR_PTR(-EAGAIN); -- cgit v1.2.3 From c6d633a927499f35a06455a960ad6b5a59c87c2c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 15 Dec 2019 15:51:19 -0800 Subject: crypto: algapi - make unregistration functions return void Some of the algorithm unregistration functions return -ENOENT when asked to unregister a non-registered algorithm, while others always return 0 or always return void. But no users check the return value, except for two of the bulk unregistration functions which print a message on error but still always return 0 to their caller, and crypto_del_alg() which calls crypto_unregister_instance() which always returns 0. 
Since unregistering a non-registered algorithm is always a kernel bug but there isn't anything callers should do to handle this situation at runtime, let's simplify things by making all the unregistration functions return void, and moving the error message into crypto_unregister_alg() and upgrading it to a WARN(). Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- Documentation/crypto/devel-algos.rst | 34 ++++++++++++---------------------- crypto/acompress.c | 4 ++-- crypto/ahash.c | 4 ++-- crypto/algapi.c | 25 ++++++++----------------- crypto/crypto_user_base.c | 3 ++- crypto/scompress.c | 4 ++-- crypto/shash.c | 19 ++++++------------- include/crypto/algapi.h | 2 +- include/crypto/internal/acompress.h | 4 +--- include/crypto/internal/hash.h | 6 +++--- include/crypto/internal/scompress.h | 4 +--- include/linux/crypto.h | 4 ++-- 12 files changed, 42 insertions(+), 71 deletions(-) (limited to 'crypto') diff --git a/Documentation/crypto/devel-algos.rst b/Documentation/crypto/devel-algos.rst index fb6b7979a1de..f225a953ab4b 100644 --- a/Documentation/crypto/devel-algos.rst +++ b/Documentation/crypto/devel-algos.rst @@ -31,28 +31,18 @@ The counterparts to those functions are listed below. :: - int crypto_unregister_alg(struct crypto_alg *alg); - int crypto_unregister_algs(struct crypto_alg *algs, int count); + void crypto_unregister_alg(struct crypto_alg *alg); + void crypto_unregister_algs(struct crypto_alg *algs, int count); -Notice that both registration and unregistration functions do return a -value, so make sure to handle errors. A return code of zero implies -success. Any return code < 0 implies an error. +The registration functions return 0 on success, or a negative errno +value on failure. crypto_register_algs() succeeds only if it +successfully registered all the given algorithms; if it fails partway +through, then any changes are rolled back. -The bulk registration/unregistration functions register/unregister each -transformation in the given array of length count. They handle errors as -follows: - -- crypto_register_algs() succeeds if and only if it successfully - registers all the given transformations. If an error occurs partway - through, then it rolls back successful registrations before returning - the error code. Note that if a driver needs to handle registration - errors for individual transformations, then it will need to use the - non-bulk function crypto_register_alg() instead. - -- crypto_unregister_algs() tries to unregister all the given - transformations, continuing on error. It logs errors and always - returns zero. +The unregistration functions always succeed, so they don't have a +return value. Don't try to unregister algorithms that aren't +currently registered. 
Single-Block Symmetric Ciphers [CIPHER] --------------------------------------- @@ -169,10 +159,10 @@ are as follows: :: - int crypto_unregister_ahash(struct ahash_alg *alg); + void crypto_unregister_ahash(struct ahash_alg *alg); - int crypto_unregister_shash(struct shash_alg *alg); - int crypto_unregister_shashes(struct shash_alg *algs, int count); + void crypto_unregister_shash(struct shash_alg *alg); + void crypto_unregister_shashes(struct shash_alg *algs, int count); Cipher Definition With struct shash_alg and ahash_alg diff --git a/crypto/acompress.c b/crypto/acompress.c index abadcb035a41..84a76723e851 100644 --- a/crypto/acompress.c +++ b/crypto/acompress.c @@ -151,9 +151,9 @@ int crypto_register_acomp(struct acomp_alg *alg) } EXPORT_SYMBOL_GPL(crypto_register_acomp); -int crypto_unregister_acomp(struct acomp_alg *alg) +void crypto_unregister_acomp(struct acomp_alg *alg) { - return crypto_unregister_alg(&alg->base); + crypto_unregister_alg(&alg->base); } EXPORT_SYMBOL_GPL(crypto_unregister_acomp); diff --git a/crypto/ahash.c b/crypto/ahash.c index 3815b363a693..181bd851b429 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -598,9 +598,9 @@ int crypto_register_ahash(struct ahash_alg *alg) } EXPORT_SYMBOL_GPL(crypto_register_ahash); -int crypto_unregister_ahash(struct ahash_alg *alg) +void crypto_unregister_ahash(struct ahash_alg *alg) { - return crypto_unregister_alg(&alg->halg.base); + crypto_unregister_alg(&alg->halg.base); } EXPORT_SYMBOL_GPL(crypto_unregister_ahash); diff --git a/crypto/algapi.c b/crypto/algapi.c index 9589b3f0041b..fe57b4f696ac 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -442,7 +442,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list) return 0; } -int crypto_unregister_alg(struct crypto_alg *alg) +void crypto_unregister_alg(struct crypto_alg *alg) { int ret; LIST_HEAD(list); @@ -451,15 +451,14 @@ int crypto_unregister_alg(struct crypto_alg *alg) ret = crypto_remove_alg(alg, &list); up_write(&crypto_alg_sem); - if (ret) - return ret; + if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name)) + return; BUG_ON(refcount_read(&alg->cra_refcnt) != 1); if (alg->cra_destroy) alg->cra_destroy(alg); crypto_remove_final(&list); - return 0; } EXPORT_SYMBOL_GPL(crypto_unregister_alg); @@ -483,18 +482,12 @@ err: } EXPORT_SYMBOL_GPL(crypto_register_algs); -int crypto_unregister_algs(struct crypto_alg *algs, int count) +void crypto_unregister_algs(struct crypto_alg *algs, int count) { - int i, ret; - - for (i = 0; i < count; i++) { - ret = crypto_unregister_alg(&algs[i]); - if (ret) - pr_err("Failed to unregister %s %s: %d\n", - algs[i].cra_driver_name, algs[i].cra_name, ret); - } + int i; - return 0; + for (i = 0; i < count; i++) + crypto_unregister_alg(&algs[i]); } EXPORT_SYMBOL_GPL(crypto_unregister_algs); @@ -639,7 +632,7 @@ err: } EXPORT_SYMBOL_GPL(crypto_register_instance); -int crypto_unregister_instance(struct crypto_instance *inst) +void crypto_unregister_instance(struct crypto_instance *inst) { LIST_HEAD(list); @@ -651,8 +644,6 @@ int crypto_unregister_instance(struct crypto_instance *inst) up_write(&crypto_alg_sem); crypto_remove_final(&list); - - return 0; } EXPORT_SYMBOL_GPL(crypto_unregister_instance); diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c index b785c476de67..3fa20f12989f 100644 --- a/crypto/crypto_user_base.c +++ b/crypto/crypto_user_base.c @@ -323,7 +323,8 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh, if (refcount_read(&alg->cra_refcnt) > 2) goto 
drop_alg; - err = crypto_unregister_instance((struct crypto_instance *)alg); + crypto_unregister_instance((struct crypto_instance *)alg); + err = 0; drop_alg: crypto_mod_put(alg); diff --git a/crypto/scompress.c b/crypto/scompress.c index 4d50750d01c6..738f4f8f0f41 100644 --- a/crypto/scompress.c +++ b/crypto/scompress.c @@ -266,9 +266,9 @@ int crypto_register_scomp(struct scomp_alg *alg) } EXPORT_SYMBOL_GPL(crypto_register_scomp); -int crypto_unregister_scomp(struct scomp_alg *alg) +void crypto_unregister_scomp(struct scomp_alg *alg) { - return crypto_unregister_alg(&alg->base); + crypto_unregister_alg(&alg->base); } EXPORT_SYMBOL_GPL(crypto_unregister_scomp); diff --git a/crypto/shash.c b/crypto/shash.c index 8042bb0df9c0..7243f60dab87 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -520,9 +520,9 @@ int crypto_register_shash(struct shash_alg *alg) } EXPORT_SYMBOL_GPL(crypto_register_shash); -int crypto_unregister_shash(struct shash_alg *alg) +void crypto_unregister_shash(struct shash_alg *alg) { - return crypto_unregister_alg(&alg->base); + crypto_unregister_alg(&alg->base); } EXPORT_SYMBOL_GPL(crypto_unregister_shash); @@ -546,19 +546,12 @@ err: } EXPORT_SYMBOL_GPL(crypto_register_shashes); -int crypto_unregister_shashes(struct shash_alg *algs, int count) +void crypto_unregister_shashes(struct shash_alg *algs, int count) { - int i, ret; - - for (i = count - 1; i >= 0; --i) { - ret = crypto_unregister_shash(&algs[i]); - if (ret) - pr_err("Failed to unregister %s %s: %d\n", - algs[i].base.cra_driver_name, - algs[i].base.cra_name, ret); - } + int i; - return 0; + for (i = count - 1; i >= 0; --i) + crypto_unregister_shash(&algs[i]); } EXPORT_SYMBOL_GPL(crypto_unregister_shashes); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 771a295ac755..25661b4650ec 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -96,7 +96,7 @@ struct crypto_template *crypto_lookup_template(const char *name); int crypto_register_instance(struct crypto_template *tmpl, struct crypto_instance *inst); -int crypto_unregister_instance(struct crypto_instance *inst); +void crypto_unregister_instance(struct crypto_instance *inst); int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, struct crypto_instance *inst, u32 mask); diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index 9de57367afbb..cf478681b53e 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -68,10 +68,8 @@ int crypto_register_acomp(struct acomp_alg *alg); * compression algorithm * * @alg: algorithm definition - * - * Return: zero on success; error code in case of error */ -int crypto_unregister_acomp(struct acomp_alg *alg); +void crypto_unregister_acomp(struct acomp_alg *alg); int crypto_register_acomps(struct acomp_alg *algs, int count); void crypto_unregister_acomps(struct acomp_alg *algs, int count); diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index cf8d7f99c93d..d4b1be519590 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -70,7 +70,7 @@ static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk) } int crypto_register_ahash(struct ahash_alg *alg); -int crypto_unregister_ahash(struct ahash_alg *alg); +void crypto_unregister_ahash(struct ahash_alg *alg); int crypto_register_ahashes(struct ahash_alg *algs, int count); void crypto_unregister_ahashes(struct ahash_alg *algs, int count); int ahash_register_instance(struct crypto_template *tmpl, 
@@ -105,9 +105,9 @@ static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn) struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask); int crypto_register_shash(struct shash_alg *alg); -int crypto_unregister_shash(struct shash_alg *alg); +void crypto_unregister_shash(struct shash_alg *alg); int crypto_register_shashes(struct shash_alg *algs, int count); -int crypto_unregister_shashes(struct shash_alg *algs, int count); +void crypto_unregister_shashes(struct shash_alg *algs, int count); int shash_register_instance(struct crypto_template *tmpl, struct shash_instance *inst); void shash_free_instance(struct crypto_instance *inst); diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index 6727ef0fc4d1..f834274c2493 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -112,10 +112,8 @@ int crypto_register_scomp(struct scomp_alg *alg); * compression algorithm * * @alg: algorithm definition - * - * Return: zero on success; error code in case of error */ -int crypto_unregister_scomp(struct scomp_alg *alg); +void crypto_unregister_scomp(struct scomp_alg *alg); int crypto_register_scomps(struct scomp_alg *algs, int count); void crypto_unregister_scomps(struct scomp_alg *algs, int count); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index c23f1eed7970..a905e524e332 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -584,9 +584,9 @@ static inline void crypto_init_wait(struct crypto_wait *wait) * Algorithm registration interface. */ int crypto_register_alg(struct crypto_alg *alg); -int crypto_unregister_alg(struct crypto_alg *alg); +void crypto_unregister_alg(struct crypto_alg *alg); int crypto_register_algs(struct crypto_alg *algs, int count); -int crypto_unregister_algs(struct crypto_alg *algs, int count); +void crypto_unregister_algs(struct crypto_alg *algs, int count); /* * Algorithm query interface. */ -- cgit v1.2.3 From 5f567fffaae995dce3498e175e47d5a779fb0270 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 18 Dec 2019 15:53:01 +0800 Subject: crypto: api - Retain alg refcount in crypto_grab_spawn This patch changes crypto_grab_spawn to retain the reference count on the algorithm. This is because the caller needs to access the algorithm parameters and without the reference count the algorithm can be freed at any time. The reference count will be subsequently dropped by the crypto API once the instance has been registered. The helper crypto_drop_spawn will also conditionally drop the reference count depending on whether it has been registered. Note that the code is actually added to crypto_init_spawn. However, unless the caller activates this by setting spawn->dropref beforehand, nothing happens. The only caller that sets dropref is currently crypto_grab_spawn. Once all legacy users of crypto_init_spawn disappear, we can kill the dropref flag. Internally each instance will maintain a list of its spawns prior to registration. The memory used by this list is shared with other fields that are only used after registration. In order for this to work, a new flag spawn->registered is added to indicate whether spawn->inst can be used. 
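From the point of view of a template's ->create() function, the net effect looks roughly like this (a hedged fragment; ctx, inst and the error handling are illustrative, not taken from a specific template):

	err = crypto_grab_spawn(&ctx->spawn, cipher_name, 0, mask);
	if (err)
		goto err_free_inst;

	/* spawn.alg is now pinned by the API, so the underlying
	 * algorithm's parameters can be read without racing against
	 * its removal.
	 */
	inst->alg.base.cra_blocksize = ctx->spawn.alg->cra_blocksize;

	err = crypto_register_instance(tmpl, inst);
	if (err)
		crypto_drop_spawn(&ctx->spawn);	/* releases the retained ref */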
Fixes: d6ef2f198d4c ("crypto: api - Add crypto_grab_spawn primitive") Signed-off-by: Herbert Xu --- crypto/algapi.c | 48 ++++++++++++++++++++++++++++++++++++++++-------- include/crypto/algapi.h | 17 +++++++++++++++-- 2 files changed, 55 insertions(+), 10 deletions(-) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index fe57b4f696ac..363849983941 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -124,8 +124,6 @@ static void crypto_remove_instance(struct crypto_instance *inst, return; inst->alg.cra_flags |= CRYPTO_ALG_DEAD; - if (hlist_unhashed(&inst->list)) - return; if (!tmpl || !crypto_tmpl_get(tmpl)) return; @@ -175,17 +173,26 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, list); inst = spawn->inst; - BUG_ON(&inst->alg == alg); - list_move(&spawn->list, &stack); + spawn->dead = !spawn->registered || &inst->alg != nalg; + + if (!spawn->registered) + break; + + BUG_ON(&inst->alg == alg); if (&inst->alg == nalg) break; - spawn->dead = true; spawns = &inst->alg.cra_users; /* + * Even if spawn->registered is true, the + * instance itself may still be unregistered. + * This is because it may have failed during + * registration. Therefore we still need to + * make the following test. + * * We may encounter an unregistered instance here, since * an instance's spawns are set up prior to the instance * being registered. An unregistered instance will have @@ -208,7 +215,7 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, list_for_each_entry_safe(spawn, n, &secondary_spawns, list) { if (!spawn->dead) list_move(&spawn->list, &spawn->alg->cra_users); - else + else if (spawn->registered) crypto_remove_instance(spawn->inst, list); } } @@ -599,6 +606,7 @@ int crypto_register_instance(struct crypto_template *tmpl, struct crypto_instance *inst) { struct crypto_larval *larval; + struct crypto_spawn *spawn; int err; err = crypto_check_alg(&inst->alg); @@ -610,6 +618,23 @@ int crypto_register_instance(struct crypto_template *tmpl, down_write(&crypto_alg_sem); + larval = ERR_PTR(-EAGAIN); + for (spawn = inst->spawns; spawn;) { + struct crypto_spawn *next; + + if (spawn->dead) + goto unlock; + + next = spawn->next; + spawn->inst = inst; + spawn->registered = true; + + if (spawn->dropref) + crypto_mod_put(spawn->alg); + + spawn = next; + } + larval = __crypto_register_alg(&inst->alg); if (IS_ERR(larval)) goto unlock; @@ -655,7 +680,9 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, if (WARN_ON_ONCE(inst == NULL)) return -EINVAL; - spawn->inst = inst; + spawn->next = inst->spawns; + inst->spawns = spawn; + spawn->mask = mask; down_write(&crypto_alg_sem); @@ -697,8 +724,10 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name, if (IS_ERR(alg)) return PTR_ERR(alg); + spawn->dropref = true; err = crypto_init_spawn(spawn, alg, spawn->inst, mask); - crypto_mod_put(alg); + if (err) + crypto_mod_put(alg); return err; } EXPORT_SYMBOL_GPL(crypto_grab_spawn); @@ -709,6 +738,9 @@ void crypto_drop_spawn(struct crypto_spawn *spawn) if (!spawn->dead) list_del(&spawn->list); up_write(&crypto_alg_sem); + + if (spawn->dropref && !spawn->registered) + crypto_mod_put(spawn->alg); } EXPORT_SYMBOL_GPL(crypto_drop_spawn); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 25661b4650ec..5022cada4fc6 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -47,7 +47,13 @@ struct crypto_instance { struct crypto_alg alg; struct crypto_template *tmpl; - struct hlist_node list; + + 
union { + /* Node in list of instances after registration. */ + struct hlist_node list; + /* List of attached spawns before registration. */ + struct crypto_spawn *spawns; + }; void *__ctx[] CRYPTO_MINALIGN_ATTR; }; @@ -67,10 +73,17 @@ struct crypto_template { struct crypto_spawn { struct list_head list; struct crypto_alg *alg; - struct crypto_instance *inst; + union { + /* Back pointer to instance after registration.*/ + struct crypto_instance *inst; + /* Spawn list pointer prior to registration. */ + struct crypto_spawn *next; + }; const struct crypto_type *frontend; u32 mask; bool dead; + bool dropref; + bool registered; }; struct crypto_queue { -- cgit v1.2.3 From b3c16bfc6a79ae517ec3c44be615aed0ffa52c53 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 20 Dec 2019 13:29:40 +0800 Subject: crypto: skcipher - Add skcipher_ialg_simple helper This patch introduces the skcipher_ialg_simple helper which fetches the crypto_alg structure from a simple skcipher instance's spawn. This allows us to remove the third argument from the function skcipher_alloc_instance_simple. In doing so the reference count to the algorithm is now maintained by the Crypto API and the caller no longer needs to drop the alg refcount. Signed-off-by: Herbert Xu --- crypto/cbc.c | 15 +++++++-------- crypto/cfb.c | 5 +++-- crypto/ctr.c | 15 +++++++-------- crypto/ecb.c | 5 ++--- crypto/keywrap.c | 15 +++++++-------- crypto/ofb.c | 5 +++-- crypto/pcbc.c | 5 ++--- crypto/skcipher.c | 9 +++------ include/crypto/internal/skcipher.h | 14 +++++++++++--- 9 files changed, 45 insertions(+), 43 deletions(-) (limited to 'crypto') diff --git a/crypto/cbc.c b/crypto/cbc.c index dd96bcf4d4b6..e6f6273a7d39 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -54,10 +54,12 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_alg *alg; int err; - inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + inst = skcipher_alloc_instance_simple(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); + alg = skcipher_ialg_simple(inst); + err = -EINVAL; if (!is_power_of_2(alg->cra_blocksize)) goto out_free_inst; @@ -66,14 +68,11 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.decrypt = crypto_cbc_decrypt; err = skcipher_register_instance(tmpl, inst); - if (err) - goto out_free_inst; - goto out_put_alg; - + if (err) { out_free_inst: - inst->free(inst); -out_put_alg: - crypto_mod_put(alg); + inst->free(inst); + } + return err; } diff --git a/crypto/cfb.c b/crypto/cfb.c index 7b68fbb61732..4e5219bbcd19 100644 --- a/crypto/cfb.c +++ b/crypto/cfb.c @@ -203,10 +203,12 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_alg *alg; int err; - inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + inst = skcipher_alloc_instance_simple(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); + alg = skcipher_ialg_simple(inst); + /* CFB mode is a stream cipher. 
*/ inst->alg.base.cra_blocksize = 1; @@ -223,7 +225,6 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) inst->free(inst); - crypto_mod_put(alg); return err; } diff --git a/crypto/ctr.c b/crypto/ctr.c index 70a3fccb82f3..1e9d6b86b3c6 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -129,10 +129,12 @@ static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_alg *alg; int err; - inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + inst = skcipher_alloc_instance_simple(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); + alg = skcipher_ialg_simple(inst); + /* Block size must be >= 4 bytes. */ err = -EINVAL; if (alg->cra_blocksize < 4) @@ -155,14 +157,11 @@ static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.decrypt = crypto_ctr_crypt; err = skcipher_register_instance(tmpl, inst); - if (err) - goto out_free_inst; - goto out_put_alg; - + if (err) { out_free_inst: - inst->free(inst); -out_put_alg: - crypto_mod_put(alg); + inst->free(inst); + } + return err; } diff --git a/crypto/ecb.c b/crypto/ecb.c index 9d6981ca7d5d..69a687cbdf21 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c @@ -61,10 +61,9 @@ static int crypto_ecb_decrypt(struct skcipher_request *req) static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct crypto_alg *alg; int err; - inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + inst = skcipher_alloc_instance_simple(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); @@ -76,7 +75,7 @@ static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb) err = skcipher_register_instance(tmpl, inst); if (err) inst->free(inst); - crypto_mod_put(alg); + return err; } diff --git a/crypto/keywrap.c b/crypto/keywrap.c index a155c88105ea..0355cce21b1e 100644 --- a/crypto/keywrap.c +++ b/crypto/keywrap.c @@ -266,10 +266,12 @@ static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_alg *alg; int err; - inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + inst = skcipher_alloc_instance_simple(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); + alg = skcipher_ialg_simple(inst); + err = -EINVAL; /* Section 5.1 requirement for KW */ if (alg->cra_blocksize != sizeof(struct crypto_kw_block)) @@ -283,14 +285,11 @@ static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.decrypt = crypto_kw_decrypt; err = skcipher_register_instance(tmpl, inst); - if (err) - goto out_free_inst; - goto out_put_alg; - + if (err) { out_free_inst: - inst->free(inst); -out_put_alg: - crypto_mod_put(alg); + inst->free(inst); + } + return err; } diff --git a/crypto/ofb.c b/crypto/ofb.c index 133ff4c7f2c6..2ec68e3f2c55 100644 --- a/crypto/ofb.c +++ b/crypto/ofb.c @@ -55,10 +55,12 @@ static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_alg *alg; int err; - inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + inst = skcipher_alloc_instance_simple(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); + alg = skcipher_ialg_simple(inst); + /* OFB mode is a stream cipher. 
*/ inst->alg.base.cra_blocksize = 1; @@ -75,7 +77,6 @@ static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) inst->free(inst); - crypto_mod_put(alg); return err; } diff --git a/crypto/pcbc.c b/crypto/pcbc.c index 862cdb8d8b6c..ae921fb74dc9 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -153,10 +153,9 @@ static int crypto_pcbc_decrypt(struct skcipher_request *req) static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct crypto_alg *alg; int err; - inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + inst = skcipher_alloc_instance_simple(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); @@ -166,7 +165,7 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) err = skcipher_register_instance(tmpl, inst); if (err) inst->free(inst); - crypto_mod_put(alg); + return err; } diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 39a718d99220..37adb71f7759 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -938,15 +938,12 @@ static void skcipher_free_instance_simple(struct skcipher_instance *inst) * * @tmpl: the template being instantiated * @tb: the template parameters - * @cipher_alg_ret: on success, a pointer to the underlying cipher algorithm is - * returned here. It must be dropped with crypto_mod_put(). * * Return: a pointer to the new instance, or an ERR_PTR(). The caller still * needs to register the instance. */ -struct skcipher_instance * -skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb, - struct crypto_alg **cipher_alg_ret) +struct skcipher_instance *skcipher_alloc_instance_simple( + struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; struct crypto_alg *cipher_alg; @@ -982,6 +979,7 @@ skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto err_free_inst; + spawn->dropref = true; err = crypto_init_spawn(spawn, cipher_alg, skcipher_crypto_instance(inst), CRYPTO_ALG_TYPE_MASK); @@ -1003,7 +1001,6 @@ skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.init = skcipher_init_tfm_simple; inst->alg.exit = skcipher_exit_tfm_simple; - *cipher_alg_ret = cipher_alg; return inst; err_free_inst: diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 921c409fe1b1..ad4a6330ff53 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -214,9 +214,17 @@ skcipher_cipher_simple(struct crypto_skcipher *tfm) return ctx->cipher; } -struct skcipher_instance * -skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb, - struct crypto_alg **cipher_alg_ret); + +struct skcipher_instance *skcipher_alloc_instance_simple( + struct crypto_template *tmpl, struct rtattr **tb); + +static inline struct crypto_alg *skcipher_ialg_simple( + struct skcipher_instance *inst) +{ + struct crypto_spawn *spawn = skcipher_instance_ctx(inst); + + return spawn->alg; +} #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ -- cgit v1.2.3 From 70ffa8fd72b8df7ddfedc0b7db042eea75182fd5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 30 Dec 2019 13:41:15 -0600 Subject: crypto: skcipher - remove skcipher_walk_aead() skcipher_walk_aead() is unused and is identical to skcipher_walk_aead_encrypt(), so remove it. 
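Any remaining out-of-tree callers can convert mechanically, for example:

	/* was: err = skcipher_walk_aead(&walk, req, atomic); */
	err = skcipher_walk_aead_encrypt(&walk, req, atomic);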
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/skcipher.c | 9 --------- include/crypto/internal/skcipher.h | 2 -- 2 files changed, 11 deletions(-) (limited to 'crypto') diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 37adb71f7759..457e4ddc1482 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -549,15 +549,6 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk, return err; } -int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req, - bool atomic) -{ - walk->total = req->cryptlen; - - return skcipher_walk_aead_common(walk, req, atomic); -} -EXPORT_SYMBOL_GPL(skcipher_walk_aead); - int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, struct aead_request *req, bool atomic) { diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index ad4a6330ff53..df4fdeaa13f3 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -140,8 +140,6 @@ int skcipher_walk_virt(struct skcipher_walk *walk, void skcipher_walk_atomise(struct skcipher_walk *walk); int skcipher_walk_async(struct skcipher_walk *walk, struct skcipher_request *req); -int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req, - bool atomic); int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, struct aead_request *req, bool atomic); int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, -- cgit v1.2.3 From 674f368a952c48ede71784935a799a5205b92b6c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 30 Dec 2019 21:19:36 -0600 Subject: crypto: remove CRYPTO_TFM_RES_BAD_KEY_LEN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The CRYPTO_TFM_RES_BAD_KEY_LEN flag was apparently meant as a way to make the ->setkey() functions provide more information about errors. However, no one actually checks for this flag, which makes it pointless. Also, many algorithms fail to set this flag when given a bad length key. Reviewing just the generic implementations, this is the case for aes-fixed-time, cbcmac, echainiv, nhpoly1305, pcrypt, rfc3686, rfc4309, rfc7539, rfc7539esp, salsa20, seqiv, and xcbc. But there are probably many more in arch/*/crypto/ and drivers/crypto/. Some algorithms can even set this flag when the key is the correct length. For example, authenc and authencesn set it when the key payload is malformed in any way (not just a bad length), the atmel-sha and ccree drivers can set it if a memory allocation fails, and the chelsio driver sets it for bad auth tag lengths, not just bad key lengths. So even if someone actually wanted to start checking this flag (which seems unlikely, since it's been unused for a long time), there would be a lot of work needed to get it working correctly. But it would probably be much better to go back to the drawing board and just define different return values, like -EINVAL if the key is invalid for the algorithm vs. -EKEYREJECTED if the key was rejected by a policy like "no weak keys". That would be much simpler, less error-prone, and easier to test. So just remove this flag. 
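The conversion is mechanical throughout the tree. A representative before/after for a ->setkey() that accepts a single fixed-size key, distilled from the CRC32 changes below:

	/* Before: flag set alongside the error code, checked by nobody. */
	if (keylen != sizeof(u32)) {
		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/* After: the bare -EINVAL conveys everything the flag did. */
	if (keylen != sizeof(u32))
		return -EINVAL;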
Signed-off-by: Eric Biggers Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- arch/arm/crypto/aes-ce-glue.c | 14 +------ arch/arm/crypto/crc32-ce-glue.c | 4 +- arch/arm/crypto/ghash-ce-glue.c | 4 +- arch/arm64/crypto/aes-ce-ccm-glue.c | 8 +--- arch/arm64/crypto/aes-ce-glue.c | 8 +--- arch/arm64/crypto/aes-glue.c | 31 +++----------- arch/arm64/crypto/ghash-ce-glue.c | 8 +--- arch/mips/crypto/crc32-mips.c | 4 +- arch/powerpc/crypto/aes-spe-glue.c | 18 ++------- arch/powerpc/crypto/crc32c-vpmsum_glue.c | 4 +- arch/s390/crypto/aes_s390.c | 4 +- arch/s390/crypto/crc32-vx.c | 8 +--- arch/s390/crypto/ghash_s390.c | 4 +- arch/s390/crypto/paes_s390.c | 25 +++--------- arch/sparc/crypto/aes_glue.c | 2 - arch/sparc/crypto/camellia_glue.c | 5 +-- arch/sparc/crypto/crc32c_glue.c | 4 +- arch/x86/crypto/aegis128-aesni-glue.c | 4 +- arch/x86/crypto/aesni-intel_glue.c | 10 ++--- arch/x86/crypto/blake2s-glue.c | 4 +- arch/x86/crypto/camellia_aesni_avx2_glue.c | 3 +- arch/x86/crypto/camellia_aesni_avx_glue.c | 9 ++--- arch/x86/crypto/camellia_glue.c | 9 ++--- arch/x86/crypto/cast6_avx_glue.c | 6 +-- arch/x86/crypto/crc32-pclmul_glue.c | 4 +- arch/x86/crypto/crc32c-intel_glue.c | 4 +- arch/x86/crypto/ghash-clmulni-intel_glue.c | 4 +- arch/x86/crypto/twofish_avx_glue.c | 6 +-- arch/x86/include/asm/crypto/camellia.h | 2 +- crypto/aegis128-core.c | 4 +- crypto/aes_generic.c | 18 +++------ crypto/anubis.c | 2 - crypto/authenc.c | 6 +-- crypto/authencesn.c | 6 +-- crypto/blake2b_generic.c | 4 +- crypto/blake2s_generic.c | 4 +- crypto/camellia_generic.c | 5 +-- crypto/cast6_generic.c | 10 ++--- crypto/cipher.c | 4 +- crypto/crc32_generic.c | 4 +- crypto/crc32c_generic.c | 4 +- crypto/essiv.c | 4 +- crypto/ghash-generic.c | 4 +- crypto/michael_mic.c | 4 +- crypto/skcipher.c | 4 +- crypto/sm4_generic.c | 16 +++----- crypto/twofish_common.c | 8 +--- crypto/vmac.c | 4 +- crypto/xxhash_generic.c | 4 +- .../crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c | 1 - .../crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 1 - .../crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 2 - drivers/crypto/amcc/crypto4xx_alg.c | 11 ++--- drivers/crypto/amlogic/amlogic-gxl-cipher.c | 1 - drivers/crypto/atmel-aes.c | 9 +---- drivers/crypto/axis/artpec6_crypto.c | 8 +--- drivers/crypto/bcm/cipher.c | 3 -- drivers/crypto/caam/caamalg.c | 33 ++++----------- drivers/crypto/caam/caamalg_qi.c | 44 +++++--------------- drivers/crypto/caam/caamalg_qi2.c | 47 +++++----------------- drivers/crypto/caam/caamhash.c | 9 +---- drivers/crypto/cavium/cpt/cptvf_algs.c | 2 - drivers/crypto/cavium/nitrox/nitrox_aead.c | 4 +- drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 12 ++---- drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 1 - drivers/crypto/ccp/ccp-crypto-aes-galois.c | 1 - drivers/crypto/ccp/ccp-crypto-aes.c | 1 - drivers/crypto/ccp/ccp-crypto-sha.c | 4 +- drivers/crypto/ccree/cc_aead.c | 20 +++------ drivers/crypto/ccree/cc_cipher.c | 3 -- drivers/crypto/ccree/cc_hash.c | 6 --- drivers/crypto/chelsio/chcr_algo.c | 16 ++------ drivers/crypto/geode-aes.c | 8 +--- drivers/crypto/inside-secure/safexcel_cipher.c | 38 +++++------------ drivers/crypto/inside-secure/safexcel_hash.c | 16 ++------ drivers/crypto/ixp4xx_crypto.c | 3 -- drivers/crypto/marvell/cipher.c | 4 +- drivers/crypto/mediatek/mtk-aes.c | 2 - drivers/crypto/n2_core.c | 1 - drivers/crypto/padlock-aes.c | 9 +---- drivers/crypto/picoxcell_crypto.c | 6 +-- drivers/crypto/qat/qat_common/qat_algs.c | 6 +-- drivers/crypto/qce/sha.c | 2 - drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 4 +- 
drivers/crypto/stm32/stm32-crc32.c | 4 +- drivers/crypto/talitos.c | 15 ++----- drivers/crypto/ux500/cryp/cryp_core.c | 2 - drivers/crypto/virtio/virtio_crypto_algs.c | 8 +--- include/crypto/cast6.h | 3 +- include/crypto/internal/des.h | 8 +--- include/crypto/twofish.h | 2 +- include/crypto/xts.h | 8 +--- include/linux/crypto.h | 1 - 93 files changed, 167 insertions(+), 561 deletions(-) (limited to 'crypto') diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c index cdb1a07e7ad0..b668c97663ec 100644 --- a/arch/arm/crypto/aes-ce-glue.c +++ b/arch/arm/crypto/aes-ce-glue.c @@ -138,14 +138,8 @@ static int ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); - int ret; - - ret = ce_aes_expandkey(ctx, in_key, key_len); - if (!ret) - return 0; - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; + return ce_aes_expandkey(ctx, in_key, key_len); } struct crypto_aes_xts_ctx { @@ -167,11 +161,7 @@ static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key, if (!ret) ret = ce_aes_expandkey(&ctx->key2, &in_key[key_len / 2], key_len / 2); - if (!ret) - return 0; - - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; + return ret; } static int ecb_encrypt(struct skcipher_request *req) diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c index 95592499b9bd..2208445808d7 100644 --- a/arch/arm/crypto/crc32-ce-glue.c +++ b/arch/arm/crypto/crc32-ce-glue.c @@ -54,10 +54,8 @@ static int crc32_setkey(struct crypto_shash *hash, const u8 *key, { u32 *mctx = crypto_shash_ctx(hash); - if (keylen != sizeof(u32)) { - crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != sizeof(u32)) return -EINVAL; - } *mctx = le32_to_cpup((__le32 *)key); return 0; } diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c index c691077679a6..7e8b2f55685c 100644 --- a/arch/arm/crypto/ghash-ce-glue.c +++ b/arch/arm/crypto/ghash-ce-glue.c @@ -163,10 +163,8 @@ static int ghash_setkey(struct crypto_shash *tfm, struct ghash_key *key = crypto_shash_ctx(tfm); be128 h; - if (keylen != GHASH_BLOCK_SIZE) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; - } /* needed for the fallback */ memcpy(&key->k, inkey, GHASH_BLOCK_SIZE); diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index 541cf9165748..f6d19b0dc893 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -47,14 +47,8 @@ static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key, unsigned int key_len) { struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm); - int ret; - ret = ce_aes_expandkey(ctx, in_key, key_len); - if (!ret) - return 0; - - tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; + return ce_aes_expandkey(ctx, in_key, key_len); } static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) diff --git a/arch/arm64/crypto/aes-ce-glue.c b/arch/arm64/crypto/aes-ce-glue.c index 6d085dc56c51..56a5f6f0b0c1 100644 --- a/arch/arm64/crypto/aes-ce-glue.c +++ b/arch/arm64/crypto/aes-ce-glue.c @@ -143,14 +143,8 @@ int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); - int ret; - ret = ce_aes_expandkey(ctx, in_key, key_len); - if (!ret) - return 0; - - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - 
return -EINVAL; + return ce_aes_expandkey(ctx, in_key, key_len); } EXPORT_SYMBOL(ce_aes_setkey); diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index aa57dc639f77..ed5409c6abf4 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -132,13 +132,8 @@ static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); - int ret; - - ret = aes_expandkey(ctx, in_key, key_len); - if (ret) - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return ret; + return aes_expandkey(ctx, in_key, key_len); } static int __maybe_unused xts_set_key(struct crypto_skcipher *tfm, @@ -155,11 +150,7 @@ static int __maybe_unused xts_set_key(struct crypto_skcipher *tfm, if (!ret) ret = aes_expandkey(&ctx->key2, &in_key[key_len / 2], key_len / 2); - if (!ret) - return 0; - - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; + return ret; } static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm, @@ -173,19 +164,12 @@ static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm, ret = aes_expandkey(&ctx->key1, in_key, key_len); if (ret) - goto out; + return ret; desc->tfm = ctx->hash; crypto_shash_digest(desc, in_key, key_len, digest); - ret = aes_expandkey(&ctx->key2, digest, sizeof(digest)); - if (ret) - goto out; - - return 0; -out: - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; + return aes_expandkey(&ctx->key2, digest, sizeof(digest)); } static int __maybe_unused ecb_encrypt(struct skcipher_request *req) @@ -791,13 +775,8 @@ static int cbcmac_setkey(struct crypto_shash *tfm, const u8 *in_key, unsigned int key_len) { struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm); - int err; - err = aes_expandkey(&ctx->key, in_key, key_len); - if (err) - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - - return err; + return aes_expandkey(&ctx->key, in_key, key_len); } static void cmac_gf128_mul_by_x(be128 *y, const be128 *x) diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 196aedd0c20c..22831d3b7f62 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -248,10 +248,8 @@ static int ghash_setkey(struct crypto_shash *tfm, { struct ghash_key *key = crypto_shash_ctx(tfm); - if (keylen != GHASH_BLOCK_SIZE) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; - } return __ghash_setkey(key, inkey, keylen); } @@ -306,10 +304,8 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey, int ret; ret = aes_expandkey(&ctx->aes_key, inkey, keylen); - if (ret) { - tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + if (ret) return -EINVAL; - } aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){}); diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c index 7d1d2425746f..faa88a6a74c0 100644 --- a/arch/mips/crypto/crc32-mips.c +++ b/arch/mips/crypto/crc32-mips.c @@ -177,10 +177,8 @@ static int chksum_setkey(struct crypto_shash *tfm, const u8 *key, { struct chksum_ctx *mctx = crypto_shash_ctx(tfm); - if (keylen != sizeof(mctx->key)) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != sizeof(mctx->key)) return -EINVAL; - } mctx->key = get_unaligned_le32(key); return 0; } diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c index 1fad5d4c658d..c2b23b69d7b1 100644 --- 
a/arch/powerpc/crypto/aes-spe-glue.c +++ b/arch/powerpc/crypto/aes-spe-glue.c @@ -94,13 +94,6 @@ static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key, { struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm); - if (key_len != AES_KEYSIZE_128 && - key_len != AES_KEYSIZE_192 && - key_len != AES_KEYSIZE_256) { - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; - } - switch (key_len) { case AES_KEYSIZE_128: ctx->rounds = 4; @@ -114,6 +107,8 @@ static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key, ctx->rounds = 6; ppc_expand_key_256(ctx->key_enc, in_key); break; + default: + return -EINVAL; } ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len); @@ -139,13 +134,6 @@ static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key, key_len >>= 1; - if (key_len != AES_KEYSIZE_128 && - key_len != AES_KEYSIZE_192 && - key_len != AES_KEYSIZE_256) { - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - switch (key_len) { case AES_KEYSIZE_128: ctx->rounds = 4; @@ -162,6 +150,8 @@ static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key, ppc_expand_key_256(ctx->key_enc, in_key); ppc_expand_key_256(ctx->key_twk, in_key + AES_KEYSIZE_256); break; + default: + return -EINVAL; } ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len); diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c index 2c232898b933..63760b7dbb76 100644 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c @@ -73,10 +73,8 @@ static int crc32c_vpmsum_setkey(struct crypto_shash *hash, const u8 *key, { u32 *mctx = crypto_shash_ctx(hash); - if (keylen != sizeof(u32)) { - crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != sizeof(u32)) return -EINVAL; - } *mctx = le32_to_cpup((__le32 *)key); return 0; } diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index ead0b2c9881d..2db167e5871c 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -414,10 +414,8 @@ static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, return err; /* In fips mode only 128 bit or 256 bit keys are valid */ - if (fips_enabled && key_len != 32 && key_len != 64) { - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (fips_enabled && key_len != 32 && key_len != 64) return -EINVAL; - } /* Pick the correct function code based on the key length */ fc = (key_len == 32) ? 
CPACF_KM_XTS_128 : diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c index 423ee05887e6..fafecad20752 100644 --- a/arch/s390/crypto/crc32-vx.c +++ b/arch/s390/crypto/crc32-vx.c @@ -111,10 +111,8 @@ static int crc32_vx_setkey(struct crypto_shash *tfm, const u8 *newkey, { struct crc_ctx *mctx = crypto_shash_ctx(tfm); - if (newkeylen != sizeof(mctx->key)) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (newkeylen != sizeof(mctx->key)) return -EINVAL; - } mctx->key = le32_to_cpu(*(__le32 *)newkey); return 0; } @@ -124,10 +122,8 @@ static int crc32be_vx_setkey(struct crypto_shash *tfm, const u8 *newkey, { struct crc_ctx *mctx = crypto_shash_ctx(tfm); - if (newkeylen != sizeof(mctx->key)) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (newkeylen != sizeof(mctx->key)) return -EINVAL; - } mctx->key = be32_to_cpu(*(__be32 *)newkey); return 0; } diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c index a3e7400e031c..6b07a2f1ce8a 100644 --- a/arch/s390/crypto/ghash_s390.c +++ b/arch/s390/crypto/ghash_s390.c @@ -43,10 +43,8 @@ static int ghash_setkey(struct crypto_shash *tfm, { struct ghash_ctx *ctx = crypto_shash_ctx(tfm); - if (keylen != GHASH_BLOCK_SIZE) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; - } memcpy(ctx->key, key, GHASH_BLOCK_SIZE); diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index c7119c617b6e..e2a85783f804 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -151,11 +151,7 @@ static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, if (rc) return rc; - if (__paes_set_key(ctx)) { - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - return 0; + return __paes_set_key(ctx); } static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier) @@ -254,11 +250,7 @@ static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, if (rc) return rc; - if (__cbc_paes_set_key(ctx)) { - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - return 0; + return __cbc_paes_set_key(ctx); } static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier) @@ -386,10 +378,9 @@ static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, if (rc) return rc; - if (__xts_paes_set_key(ctx)) { - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } + rc = __xts_paes_set_key(ctx); + if (rc) + return rc; /* * xts_check_key verifies the key length is not odd and makes @@ -526,11 +517,7 @@ static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, if (rc) return rc; - if (__ctr_paes_set_key(ctx)) { - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - return 0; + return __ctr_paes_set_key(ctx); } static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes) diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c index 0f5a501c95a9..e3d2138ff9e2 100644 --- a/arch/sparc/crypto/aes_glue.c +++ b/arch/sparc/crypto/aes_glue.c @@ -169,7 +169,6 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm); - u32 *flags = &tfm->crt_flags; switch (key_len) { case AES_KEYSIZE_128: @@ -188,7 +187,6 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, break; default: - *flags |= 
CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c index 1700f863748c..aaa9714378e6 100644 --- a/arch/sparc/crypto/camellia_glue.c +++ b/arch/sparc/crypto/camellia_glue.c @@ -39,12 +39,9 @@ static int camellia_set_key(struct crypto_tfm *tfm, const u8 *_in_key, { struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); const u32 *in_key = (const u32 *) _in_key; - u32 *flags = &tfm->crt_flags; - if (key_len != 16 && key_len != 24 && key_len != 32) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + if (key_len != 16 && key_len != 24 && key_len != 32) return -EINVAL; - } ctx->key_len = key_len; diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c index 1299073285a3..4e9323229e71 100644 --- a/arch/sparc/crypto/crc32c_glue.c +++ b/arch/sparc/crypto/crc32c_glue.c @@ -33,10 +33,8 @@ static int crc32c_sparc64_setkey(struct crypto_shash *hash, const u8 *key, { u32 *mctx = crypto_shash_ctx(hash); - if (keylen != sizeof(u32)) { - crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != sizeof(u32)) return -EINVAL; - } *(__le32 *)mctx = le32_to_cpup((__le32 *)key); return 0; } diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c index 46d227122643..4623189000d8 100644 --- a/arch/x86/crypto/aegis128-aesni-glue.c +++ b/arch/x86/crypto/aegis128-aesni-glue.c @@ -144,10 +144,8 @@ static int crypto_aegis128_aesni_setkey(struct crypto_aead *aead, const u8 *key, { struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(aead); - if (keylen != AEGIS128_KEY_SIZE) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != AEGIS128_KEY_SIZE) return -EINVAL; - } memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE); diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 670f8fcf2544..bbbebbd35b5d 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -316,14 +316,11 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx, const u8 *in_key, unsigned int key_len) { struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx); - u32 *flags = &tfm->crt_flags; int err; if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && - key_len != AES_KEYSIZE_256) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + key_len != AES_KEYSIZE_256) return -EINVAL; - } if (!crypto_simd_usable()) err = aes_expandkey(ctx, in_key, key_len); @@ -641,10 +638,9 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key, { struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead); - if (key_len < 4) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (key_len < 4) return -EINVAL; - } + /*Account for 4 byte nonce at the end.*/ key_len -= 4; diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c index 1d9ff8a45e1f..06ef2d4a4701 100644 --- a/arch/x86/crypto/blake2s-glue.c +++ b/arch/x86/crypto/blake2s-glue.c @@ -64,10 +64,8 @@ static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key, { struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm); - if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) return -EINVAL; - } memcpy(tctx->key, key, keylen); tctx->keylen = keylen; diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c index a8cc2c83fe1b..ccda647422d6 100644 --- 
a/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c @@ -142,8 +142,7 @@ static const struct common_glue_ctx camellia_dec_xts = { static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { - return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen, - &tfm->base.crt_flags); + return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen); } static int ecb_encrypt(struct skcipher_request *req) diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index 31a82a79f4ac..4e5de6ef206e 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -144,8 +144,7 @@ static const struct common_glue_ctx camellia_dec_xts = { static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { - return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen, - &tfm->base.crt_flags); + return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen); } static int ecb_encrypt(struct skcipher_request *req) @@ -177,7 +176,6 @@ int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 *flags = &tfm->base.crt_flags; int err; err = xts_verify_key(tfm, key, keylen); @@ -185,13 +183,12 @@ int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, return err; /* first half of xts-key is for crypt */ - err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2, flags); + err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2); if (err) return err; /* second half of xts-key is for tweak */ - return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2, - flags); + return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); } EXPORT_SYMBOL_GPL(xts_camellia_setkey); diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c index 5f3ed5af68d7..242c056e5fa8 100644 --- a/arch/x86/crypto/camellia_glue.c +++ b/arch/x86/crypto/camellia_glue.c @@ -1227,12 +1227,10 @@ static void camellia_setup192(const unsigned char *key, u64 *subkey) } int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key, - unsigned int key_len, u32 *flags) + unsigned int key_len) { - if (key_len != 16 && key_len != 24 && key_len != 32) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + if (key_len != 16 && key_len != 24 && key_len != 32) return -EINVAL; - } cctx->key_length = key_len; @@ -1255,8 +1253,7 @@ EXPORT_SYMBOL_GPL(__camellia_setkey); static int camellia_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len) { - return __camellia_setkey(crypto_tfm_ctx(tfm), key, key_len, - &tfm->crt_flags); + return __camellia_setkey(crypto_tfm_ctx(tfm), key, key_len); } static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index da5297475f9e..48e0f37796fa 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c @@ -173,7 +173,6 @@ static int xts_cast6_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 *flags = &tfm->base.crt_flags; int err; err = xts_verify_key(tfm, key, keylen); @@ -181,13 +180,12 @@ static int xts_cast6_setkey(struct crypto_skcipher *tfm, const u8 *key, return err; /* first half of xts-key is for crypt */ - err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2, 
flags); + err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2); if (err) return err; /* second half of xts-key is for tweak */ - return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2, - flags); + return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); } static int xts_encrypt(struct skcipher_request *req) diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c index cb4ab6645106..418bd88acac8 100644 --- a/arch/x86/crypto/crc32-pclmul_glue.c +++ b/arch/x86/crypto/crc32-pclmul_glue.c @@ -94,10 +94,8 @@ static int crc32_pclmul_setkey(struct crypto_shash *hash, const u8 *key, { u32 *mctx = crypto_shash_ctx(hash); - if (keylen != sizeof(u32)) { - crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != sizeof(u32)) return -EINVAL; - } *mctx = le32_to_cpup((__le32 *)key); return 0; } diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c index eefa0862f309..c20d1b8a82c3 100644 --- a/arch/x86/crypto/crc32c-intel_glue.c +++ b/arch/x86/crypto/crc32c-intel_glue.c @@ -91,10 +91,8 @@ static int crc32c_intel_setkey(struct crypto_shash *hash, const u8 *key, { u32 *mctx = crypto_shash_ctx(hash); - if (keylen != sizeof(u32)) { - crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != sizeof(u32)) return -EINVAL; - } *mctx = le32_to_cpup((__le32 *)key); return 0; } diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index 04d72a5a8ce9..4a9c9833a7d6 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -57,10 +57,8 @@ static int ghash_setkey(struct crypto_shash *tfm, be128 *x = (be128 *)key; u64 a, b; - if (keylen != GHASH_BLOCK_SIZE) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; - } /* perform multiplication by 'x' in GF(2^128) */ a = be64_to_cpu(x->a); diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index 3b36e97ec7ab..2dbc8ce3730e 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c @@ -64,7 +64,6 @@ static int xts_twofish_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 *flags = &tfm->base.crt_flags; int err; err = xts_verify_key(tfm, key, keylen); @@ -72,13 +71,12 @@ static int xts_twofish_setkey(struct crypto_skcipher *tfm, const u8 *key, return err; /* first half of xts-key is for crypt */ - err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags); + err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2); if (err) return err; /* second half of xts-key is for tweak */ - return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2, - flags); + return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); } static const struct common_glue_ctx twofish_enc = { diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h index f1592619dd65..f6d91861cb14 100644 --- a/arch/x86/include/asm/crypto/camellia.h +++ b/arch/x86/include/asm/crypto/camellia.h @@ -26,7 +26,7 @@ struct camellia_xts_ctx { extern int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key, - unsigned int key_len, u32 *flags); + unsigned int key_len); extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c index 
71c11cb5bad1..44fb4956f0dd 100644 --- a/crypto/aegis128-core.c +++ b/crypto/aegis128-core.c @@ -372,10 +372,8 @@ static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key, { struct aegis_ctx *ctx = crypto_aead_ctx(aead); - if (keylen != AEGIS128_KEY_SIZE) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != AEGIS128_KEY_SIZE) return -EINVAL; - } memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE); return 0; diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index 22e5867177f1..27ab27931813 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -1127,24 +1127,18 @@ EXPORT_SYMBOL_GPL(crypto_it_tab); * @in_key: The input key. * @key_len: The size of the key. * - * Returns 0 on success, on failure the %CRYPTO_TFM_RES_BAD_KEY_LEN flag in tfm - * is set. The function uses aes_expand_key() to expand the key. - * &crypto_aes_ctx _must_ be the private data embedded in @tfm which is - * retrieved with crypto_tfm_ctx(). + * This function uses aes_expand_key() to expand the key. &crypto_aes_ctx + * _must_ be the private data embedded in @tfm which is retrieved with + * crypto_tfm_ctx(). + * + * Return: 0 on success; -EINVAL on failure (only happens for bad key lengths) */ int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); - u32 *flags = &tfm->crt_flags; - int ret; - - ret = aes_expandkey(ctx, in_key, key_len); - if (!ret) - return 0; - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; + return aes_expandkey(ctx, in_key, key_len); } EXPORT_SYMBOL_GPL(crypto_aes_set_key); diff --git a/crypto/anubis.c b/crypto/anubis.c index f9ce78fde6ee..5da0241ef453 100644 --- a/crypto/anubis.c +++ b/crypto/anubis.c @@ -464,7 +464,6 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); const __be32 *key = (const __be32 *)in_key; - u32 *flags = &tfm->crt_flags; int N, R, i, r; u32 kappa[ANUBIS_MAX_N]; u32 inter[ANUBIS_MAX_N]; @@ -474,7 +473,6 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, case 32: case 36: case 40: break; default: - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } diff --git a/crypto/authenc.c b/crypto/authenc.c index 3f0ed9402582..0da80632e872 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -91,7 +91,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, int err = -EINVAL; if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) - goto badkey; + goto out; crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) & @@ -113,10 +113,6 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, out: memzero_explicit(&keys, sizeof(keys)); return err; - -badkey: - crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); - goto out; } static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err) diff --git a/crypto/authencesn.c b/crypto/authencesn.c index adb7554fca29..749527e1b617 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -65,7 +65,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 * int err = -EINVAL; if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) - goto badkey; + goto out; crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) & @@ -87,10 +87,6 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, 
const u8 * out: memzero_explicit(&keys, sizeof(keys)); return err; - -badkey: - crypto_aead_set_flags(authenc_esn, CRYPTO_TFM_RES_BAD_KEY_LEN); - goto out; } static int crypto_authenc_esn_genicv_tail(struct aead_request *req, diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c index d04b1788dc42..1d262374fa4e 100644 --- a/crypto/blake2b_generic.c +++ b/crypto/blake2b_generic.c @@ -147,10 +147,8 @@ static int blake2b_setkey(struct crypto_shash *tfm, const u8 *key, { struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm); - if (keylen == 0 || keylen > BLAKE2B_KEYBYTES) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen == 0 || keylen > BLAKE2B_KEYBYTES) return -EINVAL; - } memcpy(tctx->key, key, keylen); tctx->keylen = keylen; diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c index ed0c74640470..005783ff45ad 100644 --- a/crypto/blake2s_generic.c +++ b/crypto/blake2s_generic.c @@ -17,10 +17,8 @@ static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key, { struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm); - if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) return -EINVAL; - } memcpy(tctx->key, key, keylen); tctx->keylen = keylen; diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index b6a1121e2478..9a5783e5196a 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -970,12 +970,9 @@ camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key, { struct camellia_ctx *cctx = crypto_tfm_ctx(tfm); const unsigned char *key = (const unsigned char *)in_key; - u32 *flags = &tfm->crt_flags; - if (key_len != 16 && key_len != 24 && key_len != 32) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + if (key_len != 16 && key_len != 24 && key_len != 32) return -EINVAL; - } cctx->key_length = key_len; diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c index 85328522c5ca..c77ff6c8a2b2 100644 --- a/crypto/cast6_generic.c +++ b/crypto/cast6_generic.c @@ -103,17 +103,14 @@ static inline void W(u32 *key, unsigned int i) key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]); } -int __cast6_setkey(struct cast6_ctx *c, const u8 *in_key, - unsigned key_len, u32 *flags) +int __cast6_setkey(struct cast6_ctx *c, const u8 *in_key, unsigned int key_len) { int i; u32 key[8]; __be32 p_key[8]; /* padded key */ - if (key_len % 4 != 0) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + if (key_len % 4 != 0) return -EINVAL; - } memset(p_key, 0, 32); memcpy(p_key, in_key, key_len); @@ -148,8 +145,7 @@ EXPORT_SYMBOL_GPL(__cast6_setkey); int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { - return __cast6_setkey(crypto_tfm_ctx(tfm), key, keylen, - &tfm->crt_flags); + return __cast6_setkey(crypto_tfm_ctx(tfm), key, keylen); } EXPORT_SYMBOL_GPL(cast6_setkey); diff --git a/crypto/cipher.c b/crypto/cipher.c index aadd51cb7250..0fb7042a709d 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -46,10 +46,8 @@ int crypto_cipher_setkey(struct crypto_cipher *tfm, unsigned long alignmask = crypto_cipher_alignmask(tfm); crypto_cipher_clear_flags(tfm, CRYPTO_TFM_RES_MASK); - if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) { - crypto_cipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) return -EINVAL; - } if ((unsigned long)key & alignmask) return setkey_unaligned(tfm, key, keylen); diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c 
index 9e97912280bd..0e103fb5dd77 100644 --- a/crypto/crc32_generic.c +++ b/crypto/crc32_generic.c @@ -60,10 +60,8 @@ static int crc32_setkey(struct crypto_shash *hash, const u8 *key, { u32 *mctx = crypto_shash_ctx(hash); - if (keylen != sizeof(u32)) { - crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != sizeof(u32)) return -EINVAL; - } *mctx = get_unaligned_le32(key); return 0; } diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c index 7b25fe82072c..7fa9b0788685 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c_generic.c @@ -74,10 +74,8 @@ static int chksum_setkey(struct crypto_shash *tfm, const u8 *key, { struct chksum_ctx *mctx = crypto_shash_ctx(tfm); - if (keylen != sizeof(mctx->key)) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != sizeof(mctx->key)) return -EINVAL; - } mctx->key = get_unaligned_le32(key); return 0; } diff --git a/crypto/essiv.c b/crypto/essiv.c index e4b32c2ea7ec..f49bd6fc6972 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -117,10 +117,8 @@ static int essiv_aead_setkey(struct crypto_aead *tfm, const u8 *key, if (err) return err; - if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) return -EINVAL; - } desc->tfm = tctx->hash; err = crypto_shash_init(desc) ?: diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index 5027b3461c92..c70d163c1ac9 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c @@ -58,10 +58,8 @@ static int ghash_setkey(struct crypto_shash *tfm, struct ghash_ctx *ctx = crypto_shash_ctx(tfm); be128 k; - if (keylen != GHASH_BLOCK_SIZE) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; - } if (ctx->gf128) gf128mul_free_4k(ctx->gf128); diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c index 20e6220f46f6..63350c4ad461 100644 --- a/crypto/michael_mic.c +++ b/crypto/michael_mic.c @@ -137,10 +137,8 @@ static int michael_setkey(struct crypto_shash *tfm, const u8 *key, const __le32 *data = (const __le32 *)key; - if (keylen != 8) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != 8) return -EINVAL; - } mctx->l = le32_to_cpu(data[0]); mctx->r = le32_to_cpu(data[1]); diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 457e4ddc1482..8c8735f75478 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -603,10 +603,8 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned long alignmask = crypto_skcipher_alignmask(tfm); int err; - if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) return -EINVAL; - } if ((unsigned long)key & alignmask) err = skcipher_setkey_unaligned(tfm, key, keylen); diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c index 71ffb343709a..016dbc595705 100644 --- a/crypto/sm4_generic.c +++ b/crypto/sm4_generic.c @@ -143,29 +143,23 @@ int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key, EXPORT_SYMBOL_GPL(crypto_sm4_expand_key); /** - * crypto_sm4_set_key - Set the AES key. + * crypto_sm4_set_key - Set the SM4 key. * @tfm: The %crypto_tfm that is used in the context. * @in_key: The input key. * @key_len: The size of the key. * - * Returns 0 on success, on failure the %CRYPTO_TFM_RES_BAD_KEY_LEN flag in tfm - * is set. 
The function uses crypto_sm4_expand_key() to expand the key. + * This function uses crypto_sm4_expand_key() to expand the key. * &crypto_sm4_ctx _must_ be the private data embedded in @tfm which is * retrieved with crypto_tfm_ctx(). + * + * Return: 0 on success; -EINVAL on failure (only happens for bad key lengths) */ int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm); - u32 *flags = &tfm->crt_flags; - int ret; - - ret = crypto_sm4_expand_key(ctx, in_key, key_len); - if (!ret) - return 0; - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; + return crypto_sm4_expand_key(ctx, in_key, key_len); } EXPORT_SYMBOL_GPL(crypto_sm4_set_key); diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c index 222fc765c57a..d23fa531b91f 100644 --- a/crypto/twofish_common.c +++ b/crypto/twofish_common.c @@ -567,7 +567,7 @@ static const u8 calc_sb_tbl[512] = { /* Perform the key setup. */ int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key, - unsigned int key_len, u32 *flags) + unsigned int key_len) { int i, j, k; @@ -584,10 +584,7 @@ int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key, /* Check key length. */ if (key_len % 8) - { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; /* unsupported key length */ - } /* Compute the first two words of the S vector. The magic numbers are * the entries of the RS matrix, preprocessed through poly_to_exp. The @@ -688,8 +685,7 @@ EXPORT_SYMBOL_GPL(__twofish_setkey); int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len) { - return __twofish_setkey(crypto_tfm_ctx(tfm), key, key_len, - &tfm->crt_flags); + return __twofish_setkey(crypto_tfm_ctx(tfm), key, key_len); } EXPORT_SYMBOL_GPL(twofish_setkey); diff --git a/crypto/vmac.c b/crypto/vmac.c index f50a85060b39..0bbb34dc87c4 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -435,10 +435,8 @@ static int vmac_setkey(struct crypto_shash *tfm, unsigned int i; int err; - if (keylen != VMAC_KEY_LEN) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != VMAC_KEY_LEN) return -EINVAL; - } err = crypto_cipher_setkey(tctx->cipher, key, keylen); if (err) diff --git a/crypto/xxhash_generic.c b/crypto/xxhash_generic.c index 4aad2c0f40a9..55d1c8a76127 100644 --- a/crypto/xxhash_generic.c +++ b/crypto/xxhash_generic.c @@ -22,10 +22,8 @@ static int xxhash64_setkey(struct crypto_shash *tfm, const u8 *key, { struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(tfm); - if (keylen != sizeof(tctx->seed)) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != sizeof(tctx->seed)) return -EINVAL; - } tctx->seed = get_unaligned_le64(key); return 0; } diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c index cb2b0874f68f..7f22d305178e 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c @@ -541,7 +541,6 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, break; default: dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen); - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } op->keylen = keylen; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 37d0b6c386a0..b102da74b731 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -394,7 +394,6 
@@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, break; default: dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen); - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } if (op->key) { diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index f222979a5623..84d52fc3a2da 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -390,7 +390,6 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, break; default: dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen); - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } if (op->key) { @@ -416,7 +415,6 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, if (unlikely(keylen != 3 * DES_KEY_SIZE)) { dev_dbg(ss->dev, "Invalid keylen %u\n", keylen); - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index a42f8619589d..121eb81df64f 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -128,12 +128,9 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher, struct dynamic_sa_ctl *sa; int rc; - if (keylen != AES_KEYSIZE_256 && - keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) { - crypto_skcipher_set_flags(cipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_128) return -EINVAL; - } /* Create SA */ if (ctx->sa_in || ctx->sa_out) @@ -551,10 +548,8 @@ int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher, struct dynamic_sa_ctl *sa; int rc = 0; - if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) { - crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) return -EINVAL; - } rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen); if (rc) diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index e589015aac1c..9819dd50fbad 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -366,7 +366,6 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, break; default: dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen); - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } if (op->key) { diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index b001fdcd9d95..898f66cb2eb2 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -1140,10 +1140,8 @@ static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && - keylen != AES_KEYSIZE_256) { - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + keylen != AES_KEYSIZE_256) return -EINVAL; - } memcpy(ctx->key, key, keylen); ctx->keylen = keylen; @@ -1716,10 +1714,8 @@ static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, if (keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192 && - keylen != AES_KEYSIZE_128) { - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + keylen != AES_KEYSIZE_128) return -EINVAL; - } memcpy(ctx->key, key, keylen); ctx->keylen = keylen; @@ -2073,7 +2069,6 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key, return 0; badkey: - 
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 22ebe40f09f5..fcf1effc7661 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -1249,10 +1249,8 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key, { struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base); - if (len != 16 && len != 24 && len != 32) { - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (len != 16 && len != 24 && len != 32) return -EINVAL; - } ctx->key_length = len; @@ -1606,8 +1604,6 @@ artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key, case 32: break; default: - crypto_skcipher_set_flags(cipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -1634,8 +1630,6 @@ artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key, case 64: break; default: - crypto_skcipher_set_flags(cipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 1564a6f8c9cb..184a3e1245cf 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -1846,7 +1846,6 @@ static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key, ctx->cipher_type = CIPHER_TYPE_AES256; break; default: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) && @@ -2916,7 +2915,6 @@ badkey: ctx->authkeylen = 0; ctx->digestsize = 0; - crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -2992,7 +2990,6 @@ badkey: ctx->authkeylen = 0; ctx->digestsize = 0; - crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 2912006b946b..ef1a65f4fc92 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -548,10 +548,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, unsigned int ivsize = crypto_aead_ivsize(aead); unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; - if (keylen != CHACHA_KEY_SIZE + saltlen) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != CHACHA_KEY_SIZE + saltlen) return -EINVAL; - } ctx->cdata.key_virt = key; ctx->cdata.keylen = keylen - saltlen; @@ -619,7 +617,6 @@ skip_split_key: memzero_explicit(&keys, sizeof(keys)); return aead_set_sh_desc(aead); badkey: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } @@ -649,10 +646,8 @@ static int gcm_setkey(struct crypto_aead *aead, int err; err = aes_check_keylen(keylen); - if (err) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -672,10 +667,8 @@ static int rfc4106_setkey(struct crypto_aead *aead, int err; err = aes_check_keylen(keylen - 4); - if (err) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -700,10 +693,8 @@ static int rfc4543_setkey(struct crypto_aead *aead, int err; err = aes_check_keylen(keylen - 4); - if (err) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - 
} print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -762,11 +753,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, int err; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, 0); } @@ -786,11 +774,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, keylen -= CTR_RFC3686_NONCE_SIZE; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } @@ -809,11 +794,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, ctx1_iv_off = 16; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } @@ -846,7 +828,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, u32 *desc; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); dev_err(jrdev, "key size mismatch\n"); return -EINVAL; } diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 8e3449670d2f..4a29e0ef9d63 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -268,7 +268,6 @@ skip_split_key: memzero_explicit(&keys, sizeof(keys)); return ret; badkey: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } @@ -356,10 +355,8 @@ static int gcm_setkey(struct crypto_aead *aead, int ret; ret = aes_check_keylen(keylen); - if (ret) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -462,10 +459,8 @@ static int rfc4106_setkey(struct crypto_aead *aead, int ret; ret = aes_check_keylen(keylen - 4); - if (ret) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -570,10 +565,8 @@ static int rfc4543_setkey(struct crypto_aead *aead, int ret; ret = aes_check_keylen(keylen - 4); - if (ret) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -644,7 +637,7 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, ctx->sh_desc_enc); if (ret) { dev_err(jrdev, "driver enc context update failed\n"); - goto badkey; + return -EINVAL; } } @@ -653,14 +646,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, ctx->sh_desc_dec); if (ret) { dev_err(jrdev, "driver dec context update failed\n"); - goto badkey; + return -EINVAL; } } return ret; -badkey: - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; } static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, @@ -669,11 +659,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, int err; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return 
skcipher_setkey(skcipher, key, keylen, 0); } @@ -693,11 +680,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, keylen -= CTR_RFC3686_NONCE_SIZE; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } @@ -716,11 +700,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, ctx1_iv_off = 16; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } @@ -748,7 +729,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { dev_err(jrdev, "key size mismatch\n"); - goto badkey; + return -EINVAL; } ctx->cdata.keylen = keylen; @@ -765,7 +746,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, ctx->sh_desc_enc); if (ret) { dev_err(jrdev, "driver enc context update failed\n"); - goto badkey; + return -EINVAL; } } @@ -774,14 +755,11 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, ctx->sh_desc_dec); if (ret) { dev_err(jrdev, "driver dec context update failed\n"); - goto badkey; + return -EINVAL; } } return ret; -badkey: - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; } /* diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 3aeacc36ce23..fe2a628e8905 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -313,7 +313,6 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, memzero_explicit(&keys, sizeof(keys)); return aead_set_sh_desc(aead); badkey: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } @@ -326,11 +325,11 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, err = crypto_authenc_extractkeys(&keys, key, keylen); if (unlikely(err)) - goto badkey; + goto out; err = -EINVAL; if (keys.enckeylen != DES3_EDE_KEY_SIZE) - goto badkey; + goto out; err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?: aead_setkey(aead, key, keylen); @@ -338,10 +337,6 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, out: memzero_explicit(&keys, sizeof(keys)); return err; - -badkey: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - goto out; } static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, @@ -634,10 +629,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, unsigned int ivsize = crypto_aead_ivsize(aead); unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; - if (keylen != CHACHA_KEY_SIZE + saltlen) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != CHACHA_KEY_SIZE + saltlen) return -EINVAL; - } ctx->cdata.key_virt = key; ctx->cdata.keylen = keylen - saltlen; @@ -725,10 +718,8 @@ static int gcm_setkey(struct crypto_aead *aead, int ret; ret = aes_check_keylen(keylen); - if (ret) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -822,10 +813,8 @@ static int rfc4106_setkey(struct crypto_aead *aead, int ret; ret = aes_check_keylen(keylen - 4); - if (ret) { - 
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -923,10 +912,8 @@ static int rfc4543_setkey(struct crypto_aead *aead, int ret; ret = aes_check_keylen(keylen - 4); - if (ret) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -992,11 +979,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, int err; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, 0); } @@ -1016,11 +1000,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, keylen -= CTR_RFC3686_NONCE_SIZE; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } @@ -1039,11 +1020,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, ctx1_iv_off = 16; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } @@ -1051,11 +1029,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { - if (keylen != CHACHA_KEY_SIZE) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != CHACHA_KEY_SIZE) return -EINVAL; - } return skcipher_setkey(skcipher, key, keylen, 0); } @@ -1084,7 +1059,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { dev_err(dev, "key size mismatch\n"); - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -3277,7 +3251,6 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, return ret; bad_free_key: kfree(hashed_key); - crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 50a8852ad276..8d9143407fc5 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -473,7 +473,6 @@ static int ahash_setkey(struct crypto_ahash *ahash, return ahash_set_sh_desc(ahash); bad_free_key: kfree(hashed_key); - crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -483,10 +482,8 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct device *jrdev = ctx->jrdev; - if (keylen != AES_KEYSIZE_128) { - crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != AES_KEYSIZE_128) return -EINVAL; - } memcpy(ctx->key, key, keylen); dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen, @@ -506,10 +503,8 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key, int err; err = aes_check_keylen(keylen); - if (err) { - crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } /* key is immediate data for all cmac shared descriptors */ ctx->adata.key_virt = key; diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c 
b/drivers/crypto/cavium/cpt/cptvf_algs.c index 1ad66677d88e..1be1adffff1d 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -295,8 +295,6 @@ static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key, memcpy(ctx->enc_key, key, keylen); return 0; } else { - crypto_skcipher_set_flags(cipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } } diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c index 6f80cc3b5c84..dce5423a5883 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_aead.c +++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c @@ -40,10 +40,8 @@ static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, union fc_ctx_flags flags; aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (aes_keylen < 0) return -EINVAL; - } /* fill crypto context */ fctx = nctx->u.fctx; diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c index 97af4d50d003..18088b0a2257 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c +++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c @@ -200,10 +200,8 @@ static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, int aes_keylen; aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (aes_keylen < 0) return -EINVAL; - } return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); } @@ -351,10 +349,8 @@ static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher, keylen /= 2; aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (aes_keylen < 0) return -EINVAL; - } fctx = nctx->u.fctx; /* copy KEY2 */ @@ -382,10 +378,8 @@ static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher, keylen -= CTR_RFC3686_NONCE_SIZE; aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (aes_keylen < 0) return -EINVAL; - } return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); } diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c index 32f19f402073..5eba7ee49e81 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c @@ -276,7 +276,6 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, ctx->u.aes.type = CCP_AES_TYPE_256; break; default: - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } ctx->u.aes.mode = alg->mode; diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c index ff50ee80d223..9e8f07c1afac 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c @@ -42,7 +42,6 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, ctx->u.aes.type = CCP_AES_TYPE_256; break; default: - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c index 33328a153225..51e12fbd1159 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes.c +++ b/drivers/crypto/ccp/ccp-crypto-aes.c @@ -51,7 +51,6 @@ static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, ctx->u.aes.type = CCP_AES_TYPE_256; break; default: - 
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } ctx->u.aes.mode = alg->mode; diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 453b9797f93f..474e6f1a6a84 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -293,10 +293,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, ret = crypto_shash_digest(sdesc, key, key_len, ctx->u.sha.key); - if (ret) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return -EINVAL; - } key_len = digest_size; } else { diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index b0085db7e211..d014c8e063a7 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -562,7 +562,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, rc = crypto_authenc_extractkeys(&keys, key, keylen); if (rc) - goto badkey; + return rc; enckey = keys.enckey; authkey = keys.authkey; ctx->enc_keylen = keys.enckeylen; @@ -570,10 +570,9 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, if (ctx->cipher_mode == DRV_CIPHER_CTR) { /* the nonce is stored in bytes at end of key */ - rc = -EINVAL; if (ctx->enc_keylen < (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) - goto badkey; + return -EINVAL; /* Copy nonce from last 4 bytes in CTR key to * first 4 bytes in CTR IV */ @@ -591,7 +590,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, rc = validate_keys_sizes(ctx); if (rc) - goto badkey; + return rc; /* STAT_PHASE_1: Copy key to ctx */ @@ -605,7 +604,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); if (rc) - goto badkey; + return rc; } /* STAT_PHASE_2: Create sequence */ @@ -622,8 +621,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, break; /* No auth. 
key setup */ default: dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode); - rc = -ENOTSUPP; - goto badkey; + return -ENOTSUPP; } /* STAT_PHASE_3: Submit sequence to HW */ @@ -632,18 +630,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len); if (rc) { dev_err(dev, "send_request() failed (rc=%d)\n", rc); - goto setkey_error; + return rc; } } /* Update STAT_PHASE_3 */ return rc; - -badkey: - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - -setkey_error: - return rc; } static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key, diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 61b9dcaa0c05..7493a32f12b9 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -291,7 +291,6 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key, /* This check the size of the protected key token */ if (keylen != sizeof(hki)) { dev_err(dev, "Unsupported protected key size %d.\n", keylen); - crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -304,7 +303,6 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key, if (validate_keys_sizes(ctx_p, keylen)) { dev_err(dev, "Unsupported key size %d.\n", keylen); - crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -395,7 +393,6 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, if (validate_keys_sizes(ctx_p, keylen)) { dev_err(dev, "Unsupported key size %d.\n", keylen); - crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c index aee5db5f8538..912e5ce5079d 100644 --- a/drivers/crypto/ccree/cc_hash.c +++ b/drivers/crypto/ccree/cc_hash.c @@ -899,9 +899,6 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx); out: - if (rc) - crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); - if (ctx->key_params.key_dma_addr) { dma_unmap_single(dev, ctx->key_params.key_dma_addr, ctx->key_params.keylen, DMA_TO_DEVICE); @@ -990,9 +987,6 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash, rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx); - if (rc) - crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); - dma_unmap_single(dev, ctx->key_params.key_dma_addr, ctx->key_params.keylen, DMA_TO_DEVICE); dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 5b7dbe7cdb17..720b2ff55464 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -912,7 +912,6 @@ static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher, ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; return 0; badkey_err: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ablkctx->enckey_len = 0; return err; @@ -943,7 +942,6 @@ static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher, return 0; badkey_err: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ablkctx->enckey_len = 0; return err; @@ -981,7 +979,6 @@ static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher, return 0; badkey_err: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ablkctx->enckey_len = 0; return err; @@ -2174,7 +2171,6 @@ static int chcr_aes_xts_setkey(struct 
crypto_skcipher *cipher, const u8 *key, ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; return 0; badkey_err: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ablkctx->enckey_len = 0; return err; @@ -3284,7 +3280,6 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead, ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; } else { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); aeadctx->enckey_len = 0; return -EINVAL; } @@ -3322,7 +3317,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, int error; if (keylen < 3) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); aeadctx->enckey_len = 0; return -EINVAL; } @@ -3372,7 +3366,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, } else if (keylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); pr_err("GCM: Invalid key length %d\n", keylen); ret = -EINVAL; goto out; @@ -3429,10 +3422,8 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, if (err) goto out; - if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { - crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto out; - } if (get_alg_config(¶m, max_authsize)) { pr_err("chcr : Unsupported digest size\n"); @@ -3559,10 +3550,9 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, if (err) goto out; - if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { - crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto out; - } + subtype = get_aead_subtype(authenc); if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 73a899e6f837..eb6e6b618361 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -119,11 +119,9 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key, return 0; } - if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) { + if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) /* not supported at all */ - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; - } /* * The requested key size is not supported by HW, do a fallback @@ -154,11 +152,9 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, return 0; } - if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) { + if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) /* not supported at all */ - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; - } /* * The requested key size is not supported by HW, do a fallback diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index f4ece0d8bd6c..5ee66532f336 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -380,10 +380,8 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, int ret, i; ret = aes_expandkey(&aes, key, len); - if (ret) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { for (i = 0; i < len / sizeof(u32); i++) { @@ -433,12 +431,12 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, case SAFEXCEL_DES: err = 
verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen); if (unlikely(err)) - goto badkey_expflags; + goto badkey; break; case SAFEXCEL_3DES: err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen); if (unlikely(err)) - goto badkey_expflags; + goto badkey; break; case SAFEXCEL_AES: err = aes_expandkey(&aes, keys.enckey, keys.enckeylen); @@ -521,8 +519,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, return 0; badkey: - crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); -badkey_expflags: memzero_explicit(&keys, sizeof(keys)); return err; } @@ -1444,10 +1440,8 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm, /* exclude the nonce here */ keylen = len - CTR_RFC3686_NONCE_SIZE; ret = aes_expandkey(&aes, key, keylen); - if (ret) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { for (i = 0; i < keylen / sizeof(u32); i++) { @@ -2459,10 +2453,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm, /* Only half of the key data is cipher key */ keylen = (len >> 1); ret = aes_expandkey(&aes, key, keylen); - if (ret) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { for (i = 0; i < keylen / sizeof(u32); i++) { @@ -2478,10 +2470,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm, /* The other half is the tweak key */ ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen); - if (ret) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { for (i = 0; i < keylen / sizeof(u32); i++) { @@ -2570,7 +2560,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key, ret = aes_expandkey(&aes, key, len); if (ret) { - crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&aes, sizeof(aes)); return ret; } @@ -2684,7 +2673,6 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key, ret = aes_expandkey(&aes, key, len); if (ret) { - crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&aes, sizeof(aes)); return ret; } @@ -2815,10 +2803,9 @@ static int safexcel_skcipher_chacha20_setkey(struct crypto_skcipher *ctfm, { struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm); - if (len != CHACHA_KEY_SIZE) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (len != CHACHA_KEY_SIZE) return -EINVAL; - } + safexcel_chacha20_setkey(ctx, key); return 0; @@ -2872,10 +2859,9 @@ static int safexcel_aead_chachapoly_setkey(struct crypto_aead *ctfm, len -= EIP197_AEAD_IPSEC_NONCE_SIZE; ctx->nonce = *(u32 *)(key + len); } - if (len != CHACHA_KEY_SIZE) { - crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (len != CHACHA_KEY_SIZE) return -EINVAL; - } + safexcel_chacha20_setkey(ctx, key); return 0; @@ -3070,10 +3056,8 @@ static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm, struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; - if (len != SM4_KEY_SIZE) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (len != SM4_KEY_SIZE) return -EINVAL; - } if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) if (memcmp(ctx->key, key, SM4_KEY_SIZE)) diff --git a/drivers/crypto/inside-secure/safexcel_hash.c 
b/drivers/crypto/inside-secure/safexcel_hash.c index 25e49d1c96e8..088d7f8aab5e 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -1919,10 +1919,8 @@ static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key, { struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); - if (keylen != sizeof(u32)) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != sizeof(u32)) return -EINVAL; - } memcpy(ctx->ipad, key, sizeof(u32)); return 0; @@ -1995,10 +1993,8 @@ static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, int ret, i; ret = aes_expandkey(&aes, key, len); - if (ret) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE); for (i = 0; i < len / sizeof(u32); i++) @@ -2065,10 +2061,8 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, int ret, i; ret = aes_expandkey(&aes, key, len); - if (ret) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } /* precompute the XCBC key material */ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); @@ -2168,10 +2162,8 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, int ret, i; ret = aes_expandkey(&aes, key, len); - if (ret) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } for (i = 0; i < len / sizeof(u32); i++) ctx->ipad[i + 8] = diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 391e3b4df364..f64bde506ae8 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -740,7 +740,6 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt, u32 keylen_cfg = 0; struct ix_sa_dir *dir; struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); - u32 *flags = &tfm->crt_flags; dir = encrypt ? 
&ctx->encrypt : &ctx->decrypt; cinfo = dir->npe_ctx; @@ -757,7 +756,6 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt, case 24: keylen_cfg = MOD_AES192; break; case 32: keylen_cfg = MOD_AES256; break; default: - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } cipher_cfg |= keylen_cfg; @@ -1169,7 +1167,6 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key, memzero_explicit(&keys, sizeof(keys)); return aead_setup(tfm, crypto_aead_authsize(tfm)); badkey: - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index d8e8c857770c..c24f34a48cef 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -255,10 +255,8 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, int i; ret = aes_expandkey(&ctx->aes, key, len); - if (ret) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } remaining = (ctx->aes.key_length - 16) / 4; offset = ctx->aes.key_length + 24 - remaining; diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c index 90880a81c534..00e580bf8536 100644 --- a/drivers/crypto/mediatek/mtk-aes.c +++ b/drivers/crypto/mediatek/mtk-aes.c @@ -652,7 +652,6 @@ static int mtk_aes_setkey(struct crypto_skcipher *tfm, break; default: - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -1022,7 +1021,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, break; default: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 63bd565048f4..f5c468f2cc82 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -746,7 +746,6 @@ static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key, ctx->enc_type |= ENC_TYPE_ALG_AES256; break; default: - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index c5b60f50e1b5..594d6b1695d5 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -108,14 +108,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, { struct aes_ctx *ctx = aes_ctx(tfm); const __le32 *key = (const __le32 *)in_key; - u32 *flags = &tfm->crt_flags; struct crypto_aes_ctx gen_aes; int cpu; - if (key_len % 8) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + if (key_len % 8) return -EINVAL; - } /* * If the hardware is capable of generating the extended key @@ -146,10 +143,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, ctx->cword.encrypt.keygen = 1; ctx->cword.decrypt.keygen = 1; - if (aes_expandkey(&gen_aes, in_key, key_len)) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + if (aes_expandkey(&gen_aes, in_key, key_len)) return -EINVAL; - } memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index d187312b9864..ced4cbed9ea0 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -490,7 +490,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, return 0; badkey: - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } @@ -780,10 +779,8 @@ static 
int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); int err = 0; - if (len > AES_MAX_KEY_SIZE) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (len > AES_MAX_KEY_SIZE) return -EINVAL; - } /* * IPSec engine only supports 128 and 256 bit AES keys. If we get a @@ -830,7 +827,6 @@ static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher, int err = 0; if (len > AES_MAX_KEY_SIZE) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); err = -EINVAL; goto out; } diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 35bca76b640f..833bb1d3a11b 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -570,7 +570,6 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key, memzero_explicit(&keys, sizeof(keys)); return 0; bad_key: - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; error: @@ -586,14 +585,11 @@ static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx, int alg; if (qat_alg_validate_key(keylen, &alg, mode)) - goto bad_key; + return -EINVAL; qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode); qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode); return 0; -bad_key: - crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; } static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key, diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 95ab16fc8fd6..1ab62e7d5f3c 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -396,8 +396,6 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); ret = crypto_wait_req(crypto_ahash_digest(req), &wait); - if (ret) - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); kfree(buf); err_free_req: diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c index ca4de4ddfe1f..4a75c8e1fa6c 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c @@ -34,10 +34,8 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher, struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && - keylen != AES_KEYSIZE_256) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + keylen != AES_KEYSIZE_256) return -EINVAL; - } ctx->keylen = keylen; memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen); return 0; diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 9e11c3480353..8e92e4ac79f1 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -85,10 +85,8 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key, { struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm); - if (keylen != sizeof(u32)) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != sizeof(u32)) return -EINVAL; - } mctx->key = get_unaligned_le32(key); return 0; diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index d71d65846e47..9c6db7f698c4 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -914,7 +914,6 @@ static int aead_setkey(struct crypto_aead *authenc, return 0; badkey: - crypto_aead_set_flags(authenc, 
CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } @@ -929,11 +928,11 @@ static int aead_des3_setkey(struct crypto_aead *authenc, err = crypto_authenc_extractkeys(&keys, key, keylen); if (unlikely(err)) - goto badkey; + goto out; err = -EINVAL; if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) - goto badkey; + goto out; err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen); if (err) @@ -954,10 +953,6 @@ static int aead_des3_setkey(struct crypto_aead *authenc, out: memzero_explicit(&keys, sizeof(keys)); return err; - -badkey: - crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); - goto out; } static void talitos_sg_unmap(struct device *dev, @@ -1528,8 +1523,6 @@ static int skcipher_aes_setkey(struct crypto_skcipher *cipher, keylen == AES_KEYSIZE_256) return skcipher_setkey(cipher, key, keylen); - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; } @@ -2234,10 +2227,8 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, /* Must get the hash of the long key */ ret = keyhash(tfm, key, keylen, hash); - if (ret) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return -EINVAL; - } keysize = digestsize; memcpy(ctx->key, hash, digestsize); diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 95fb694a2667..800dfc4d16c4 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -951,7 +951,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher); - u32 *flags = &cipher->base.crt_flags; pr_debug(DEV_DBG_NAME " [%s]", __func__); @@ -970,7 +969,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher, default: pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__); - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 4b71e80951b7..fd045e64972a 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -272,11 +272,11 @@ static int virtio_crypto_alg_skcipher_init_sessions( if (keylen > vcrypto->max_cipher_key_len) { pr_err("virtio_crypto: the key is too long\n"); - goto bad_key; + return -EINVAL; } if (virtio_crypto_alg_validate_key(keylen, &alg)) - goto bad_key; + return -EINVAL; /* Create encryption session */ ret = virtio_crypto_alg_skcipher_init_session(ctx, @@ -291,10 +291,6 @@ static int virtio_crypto_alg_skcipher_init_sessions( return ret; } return 0; - -bad_key: - crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; } /* Note: kernel crypto API realization */ diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h index 4c8d0c72f78d..38f490cd50a8 100644 --- a/include/crypto/cast6.h +++ b/include/crypto/cast6.h @@ -15,8 +15,7 @@ struct cast6_ctx { u8 Kr[12][4]; }; -int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, - unsigned int keylen, u32 *flags); +int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, unsigned int keylen); int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); void __cast6_encrypt(const void *ctx, u8 *dst, const u8 *src); diff --git a/include/crypto/internal/des.h b/include/crypto/internal/des.h index f62a2bb1866b..355ddaae3806 100644 --- a/include/crypto/internal/des.h +++ b/include/crypto/internal/des.h @@ -120,20 +120,16 @@ static 
inline int verify_skcipher_des3_key(struct crypto_skcipher *tfm, static inline int verify_aead_des_key(struct crypto_aead *tfm, const u8 *key, int keylen) { - if (keylen != DES_KEY_SIZE) { - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != DES_KEY_SIZE) return -EINVAL; - } return crypto_des_verify_key(crypto_aead_tfm(tfm), key); } static inline int verify_aead_des3_key(struct crypto_aead *tfm, const u8 *key, int keylen) { - if (keylen != DES3_EDE_KEY_SIZE) { - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != DES3_EDE_KEY_SIZE) return -EINVAL; - } return crypto_des3_ede_verify_key(crypto_aead_tfm(tfm), key); } diff --git a/include/crypto/twofish.h b/include/crypto/twofish.h index 2e2c09673d88..f6b307a58554 100644 --- a/include/crypto/twofish.h +++ b/include/crypto/twofish.h @@ -19,7 +19,7 @@ struct twofish_ctx { }; int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key, - unsigned int key_len, u32 *flags); + unsigned int key_len); int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len); #endif diff --git a/include/crypto/xts.h b/include/crypto/xts.h index 15ae7fdc0478..57b2c52928db 100644 --- a/include/crypto/xts.h +++ b/include/crypto/xts.h @@ -17,10 +17,8 @@ static inline int xts_check_key(struct crypto_tfm *tfm, * key consists of keys of equal size concatenated, therefore * the length must be even. */ - if (keylen % 2) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + if (keylen % 2) return -EINVAL; - } /* ensure that the AES and tweak key are not identical */ if (fips_enabled && @@ -39,10 +37,8 @@ static inline int xts_verify_key(struct crypto_skcipher *tfm, * key consists of keys of equal size concatenated, therefore * the length must be even. */ - if (keylen % 2) { - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen % 2) return -EINVAL; - } /* ensure that the AES and tweak key are not identical */ if ((fips_enabled || (crypto_skcipher_get_flags(tfm) & diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 719a301af3f2..61fccc7d0efb 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -113,7 +113,6 @@ #define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 #define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 #define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 -#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000 /* * Miscellaneous stuff. -- cgit v1.2.3 From c4c4db0d59774f6ab726edd012711490437345c2 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 30 Dec 2019 21:19:37 -0600 Subject: crypto: remove CRYPTO_TFM_RES_WEAK_KEY The CRYPTO_TFM_RES_WEAK_KEY flag was apparently meant as a way to make the ->setkey() functions provide more information about errors. However, no one actually checks for this flag, which makes it pointless. There are also no tests that verify that all algorithms actually set (or don't set) it correctly. This is also the last remaining CRYPTO_TFM_RES_* flag, which means that it's the only thing still needing all the boilerplate code which propagates these flags around from child => parent tfms. And if someone ever needs to distinguish this error in the future (which is somewhat unlikely, as it's been unneeded for a long time), it would be much better to just define a new return value like -EKEYREJECTED. That would be much simpler, less error-prone, and easier to test. So just remove this flag. 
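For illustration, a minimal sketch of the resulting ->setkey() error
pattern (this mirrors the des_generic.c hunk below; it is illustrative,
not new code added by this patch):

static int example_des_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct des_ctx *dctx = crypto_tfm_ctx(tfm);
	int err;

	err = des_expand_key(dctx, key, keylen);
	if (err == -ENOKEY) {
		/* Weak key: reject only if the user asked for that. */
		if (crypto_tfm_get_flags(tfm) &
		    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
			err = -EINVAL;
		else
			err = 0;
	}
	if (err)
		memset(dctx, 0, sizeof(*dctx));	/* no RES flag to set */
	return err;
}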
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/des_generic.c | 10 ++-------- drivers/crypto/ixp4xx_crypto.c | 28 ++++------------------------ include/crypto/internal/des.h | 15 +++------------ include/crypto/xts.h | 11 ++--------- include/linux/crypto.h | 1 - 5 files changed, 11 insertions(+), 54 deletions(-) (limited to 'crypto') diff --git a/crypto/des_generic.c b/crypto/des_generic.c index 6e13a4a29ecb..c85354a5e94c 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -29,11 +29,8 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key, else err = 0; } - - if (err) { + if (err) memset(dctx, 0, sizeof(*dctx)); - crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); - } return err; } @@ -64,11 +61,8 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, else err = 0; } - - if (err) { + if (err) memset(dctx, 0, sizeof(*dctx)); - crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); - } return err; } diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index f64bde506ae8..ad73fc946682 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -740,6 +740,7 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt, u32 keylen_cfg = 0; struct ix_sa_dir *dir; struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); + int err; dir = encrypt ? &ctx->encrypt : &ctx->decrypt; cinfo = dir->npe_ctx; @@ -760,7 +761,9 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt, } cipher_cfg |= keylen_cfg; } else { - crypto_des_verify_key(tfm, key); + err = crypto_des_verify_key(tfm, key); + if (err) + return err; } /* write cfg word to cryptinfo */ *(u32*)cinfo = cpu_to_be32(cipher_cfg); @@ -817,7 +820,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) { struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 *flags = &tfm->base.crt_flags; int ret; init_completion(&ctx->completion); @@ -833,16 +835,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key, if (ret) goto out; ret = setup_cipher(&tfm->base, 1, key, key_len); - if (ret) - goto out; - - if (*flags & CRYPTO_TFM_RES_WEAK_KEY) { - if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { - ret = -EINVAL; - } else { - *flags &= ~CRYPTO_TFM_RES_WEAK_KEY; - } - } out: if (!atomic_dec_and_test(&ctx->configuring)) wait_for_completion(&ctx->completion); @@ -1094,7 +1086,6 @@ free_buf_src: static int aead_setup(struct crypto_aead *tfm, unsigned int authsize) { struct ixp_ctx *ctx = crypto_aead_ctx(tfm); - u32 *flags = &tfm->base.crt_flags; unsigned digest_len = crypto_aead_maxauthsize(tfm); int ret; @@ -1118,17 +1109,6 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize) goto out; ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey, ctx->authkey_len, digest_len); - if (ret) - goto out; - - if (*flags & CRYPTO_TFM_RES_WEAK_KEY) { - if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { - ret = -EINVAL; - goto out; - } else { - *flags &= ~CRYPTO_TFM_RES_WEAK_KEY; - } - } out: if (!atomic_dec_and_test(&ctx->configuring)) wait_for_completion(&ctx->completion); diff --git a/include/crypto/internal/des.h b/include/crypto/internal/des.h index 355ddaae3806..723fe5bf16da 100644 --- a/include/crypto/internal/des.h +++ b/include/crypto/internal/des.h @@ -35,10 +35,6 @@ static inline int crypto_des_verify_key(struct crypto_tfm *tfm, const u8 *key) else err = 0; } - - if (err) - crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); - memzero_explicit(&tmp, sizeof(tmp)); return err; } @@ -95,14 +91,9 @@ bad: static 
inline int crypto_des3_ede_verify_key(struct crypto_tfm *tfm, const u8 *key) { - int err; - - err = des3_ede_verify_key(key, DES3_EDE_KEY_SIZE, - crypto_tfm_get_flags(tfm) & - CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); - if (err) - crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); - return err; + return des3_ede_verify_key(key, DES3_EDE_KEY_SIZE, + crypto_tfm_get_flags(tfm) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); } static inline int verify_skcipher_des_key(struct crypto_skcipher *tfm, diff --git a/include/crypto/xts.h b/include/crypto/xts.h index 57b2c52928db..0f8dba69feb4 100644 --- a/include/crypto/xts.h +++ b/include/crypto/xts.h @@ -11,8 +11,6 @@ static inline int xts_check_key(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { - u32 *flags = &tfm->crt_flags; - /* * key consists of keys of equal size concatenated, therefore * the length must be even. @@ -21,11 +19,8 @@ static inline int xts_check_key(struct crypto_tfm *tfm, return -EINVAL; /* ensure that the AES and tweak key are not identical */ - if (fips_enabled && - !crypto_memneq(key, key + (keylen / 2), keylen / 2)) { - *flags |= CRYPTO_TFM_RES_WEAK_KEY; + if (fips_enabled && !crypto_memneq(key, key + (keylen / 2), keylen / 2)) return -EINVAL; - } return 0; } @@ -43,10 +38,8 @@ static inline int xts_verify_key(struct crypto_skcipher *tfm, /* ensure that the AES and tweak key are not identical */ if ((fips_enabled || (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) && - !crypto_memneq(key, key + (keylen / 2), keylen / 2)) { - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); + !crypto_memneq(key, key + (keylen / 2), keylen / 2)) return -EINVAL; - } return 0; } diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 61fccc7d0efb..accd0c8038fd 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -112,7 +112,6 @@ #define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100 #define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 #define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 -#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 /* * Miscellaneous stuff. -- cgit v1.2.3 From af5034e8e4a5838fc77e476c1a91822e449d5869 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 30 Dec 2019 21:19:38 -0600 Subject: crypto: remove propagation of CRYPTO_TFM_RES_* flags The CRYPTO_TFM_RES_* flags were apparently meant as a way to make the ->setkey() functions provide more information about errors. But these flags weren't actually being used or tested, and in many cases they weren't being set correctly anyway. So they've now been removed. Also, if someone ever actually needs to start better distinguishing ->setkey() errors (which is somewhat unlikely, as this has been unneeded for a long time), we'd be much better off just defining different return values, like -EINVAL if the key is invalid for the algorithm vs. -EKEYREJECTED if the key was rejected by a policy like "no weak keys". That would be much simpler, less error-prone, and easier to test. So just remove CRYPTO_TFM_RES_MASK and all the unneeded logic that propagates these flags around. 
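For illustration, the common template-wrapper pattern collapses from
propagate-and-copy-back into a plain delegation. A minimal sketch of
the post-patch shape (example_ctx is a hypothetical context type; the
pattern matches the cts/cryptd hunks below):

static int example_wrapper_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct example_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;

	/* Forward only the request flags; result flags no longer exist. */
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child,
				  crypto_skcipher_get_flags(parent) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}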
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/arm/crypto/ghash-ce-glue.c | 7 +------ arch/s390/crypto/aes_s390.c | 23 +++-------------------- arch/x86/crypto/ghash-clmulni-intel_glue.c | 7 +------ crypto/adiantum.c | 8 -------- crypto/authenc.c | 6 ------ crypto/authencesn.c | 6 ------ crypto/ccm.c | 20 ++++---------------- crypto/chacha20poly1305.c | 7 +------ crypto/cipher.c | 1 - crypto/cryptd.c | 13 ++----------- crypto/ctr.c | 7 +------ crypto/cts.c | 6 +----- crypto/essiv.c | 22 ++++------------------ crypto/gcm.c | 19 ++----------------- crypto/lrw.c | 2 -- crypto/simd.c | 12 ++---------- crypto/skcipher.c | 6 +----- crypto/xts.c | 8 +------- drivers/crypto/amcc/crypto4xx_alg.c | 20 ++------------------ drivers/crypto/atmel-aes.c | 5 +---- drivers/crypto/atmel-authenc.h | 3 +-- drivers/crypto/atmel-sha.c | 11 +++-------- drivers/crypto/bcm/cipher.c | 14 ++------------ drivers/crypto/chelsio/chcr_algo.c | 24 +----------------------- drivers/crypto/geode-aes.c | 16 ++-------------- drivers/crypto/inside-secure/safexcel_cipher.c | 5 ----- drivers/crypto/inside-secure/safexcel_hash.c | 6 ------ drivers/crypto/mediatek/mtk-aes.c | 2 -- drivers/crypto/mxs-dcp.c | 12 +----------- drivers/crypto/picoxcell_crypto.c | 9 --------- drivers/crypto/sahara.c | 9 +-------- include/linux/crypto.h | 2 -- 32 files changed, 38 insertions(+), 280 deletions(-) (limited to 'crypto') diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c index 7e8b2f55685c..a00fd329255f 100644 --- a/arch/arm/crypto/ghash-ce-glue.c +++ b/arch/arm/crypto/ghash-ce-glue.c @@ -294,16 +294,11 @@ static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key, { struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct crypto_ahash *child = &ctx->cryptd_tfm->base; - int err; crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); - err = crypto_ahash_setkey(child, key, keylen); - crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child) - & CRYPTO_TFM_RES_MASK); - - return err; + return crypto_ahash_setkey(child, key, keylen); } static int ghash_async_init_tfm(struct crypto_tfm *tfm) diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 2db167e5871c..1c23d84a9097 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -72,19 +72,12 @@ static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); - int ret; sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); - ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len); - if (ret) { - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags & - CRYPTO_TFM_RES_MASK); - } - return ret; + return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len); } static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, @@ -182,18 +175,13 @@ static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int len) { struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm); - int ret; crypto_skcipher_clear_flags(sctx->fallback.skcipher, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(sctx->fallback.skcipher, crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); - ret = crypto_skcipher_setkey(sctx->fallback.skcipher, key, len); - crypto_skcipher_set_flags(tfm, - 
crypto_skcipher_get_flags(sctx->fallback.skcipher) & - CRYPTO_TFM_RES_MASK); - return ret; + return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len); } static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx, @@ -389,17 +377,12 @@ static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int len) { struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm); - int ret; crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(xts_ctx->fallback, crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); - ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len); - crypto_skcipher_set_flags(tfm, - crypto_skcipher_get_flags(xts_ctx->fallback) & - CRYPTO_TFM_RES_MASK); - return ret; + return crypto_skcipher_setkey(xts_ctx->fallback, key, len); } static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index 4a9c9833a7d6..a4b728518e28 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -255,16 +255,11 @@ static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key, { struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct crypto_ahash *child = &ctx->cryptd_tfm->base; - int err; crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); - err = crypto_ahash_setkey(child, key, keylen); - crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child) - & CRYPTO_TFM_RES_MASK); - - return err; + return crypto_ahash_setkey(child, key, keylen); } static int ghash_async_init_tfm(struct crypto_tfm *tfm) diff --git a/crypto/adiantum.c b/crypto/adiantum.c index aded26092268..30cffb45b88f 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -135,9 +135,6 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key, crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen); - crypto_skcipher_set_flags(tfm, - crypto_skcipher_get_flags(tctx->streamcipher) & - CRYPTO_TFM_RES_MASK); if (err) return err; @@ -167,9 +164,6 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key, CRYPTO_TFM_REQ_MASK); err = crypto_cipher_setkey(tctx->blockcipher, keyp, BLOCKCIPHER_KEY_SIZE); - crypto_skcipher_set_flags(tfm, - crypto_cipher_get_flags(tctx->blockcipher) & - CRYPTO_TFM_RES_MASK); if (err) goto out; keyp += BLOCKCIPHER_KEY_SIZE; @@ -182,8 +176,6 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key, crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE); - crypto_skcipher_set_flags(tfm, crypto_shash_get_flags(tctx->hash) & - CRYPTO_TFM_RES_MASK); keyp += NHPOLY1305_KEY_SIZE; WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]); out: diff --git a/crypto/authenc.c b/crypto/authenc.c index 0da80632e872..15aaddd34171 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -97,9 +97,6 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); - crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) & - CRYPTO_TFM_RES_MASK); - if (err) goto out; @@ -107,9 +104,6 @@ static int crypto_authenc_setkey(struct 
crypto_aead *authenc, const u8 *key, crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen); - crypto_aead_set_flags(authenc, crypto_skcipher_get_flags(enc) & - CRYPTO_TFM_RES_MASK); - out: memzero_explicit(&keys, sizeof(keys)); return err; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 749527e1b617..fc81324ce881 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -71,9 +71,6 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 * crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) & CRYPTO_TFM_REQ_MASK); err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); - crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) & - CRYPTO_TFM_RES_MASK); - if (err) goto out; @@ -81,9 +78,6 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 * crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & CRYPTO_TFM_REQ_MASK); err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen); - crypto_aead_set_flags(authenc_esn, crypto_skcipher_get_flags(enc) & - CRYPTO_TFM_RES_MASK); - out: memzero_explicit(&keys, sizeof(keys)); return err; diff --git a/crypto/ccm.c b/crypto/ccm.c index 380eb619f657..44104524e95a 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -91,26 +91,19 @@ static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key, struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_skcipher *ctr = ctx->ctr; struct crypto_ahash *mac = ctx->mac; - int err = 0; + int err; crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); err = crypto_skcipher_setkey(ctr, key, keylen); - crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) & - CRYPTO_TFM_RES_MASK); if (err) - goto out; + return err; crypto_ahash_clear_flags(mac, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(mac, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); - err = crypto_ahash_setkey(mac, key, keylen); - crypto_aead_set_flags(aead, crypto_ahash_get_flags(mac) & - CRYPTO_TFM_RES_MASK); - -out: - return err; + return crypto_ahash_setkey(mac, key, keylen); } static int crypto_ccm_setauthsize(struct crypto_aead *tfm, @@ -604,7 +597,6 @@ static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key, { struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent); struct crypto_aead *child = ctx->child; - int err; if (keylen < 3) return -EINVAL; @@ -615,11 +607,7 @@ static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key, crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(child, crypto_aead_get_flags(parent) & CRYPTO_TFM_REQ_MASK); - err = crypto_aead_setkey(child, key, keylen); - crypto_aead_set_flags(parent, crypto_aead_get_flags(child) & - CRYPTO_TFM_RES_MASK); - - return err; + return crypto_aead_setkey(child, key, keylen); } static int crypto_rfc4309_setauthsize(struct crypto_aead *parent, diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 74e824e537e6..88cbdaba43b8 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -477,7 +477,6 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct chachapoly_ctx *ctx = crypto_aead_ctx(aead); - int err; if (keylen != ctx->saltlen + CHACHA_KEY_SIZE) return -EINVAL; @@ -488,11 +487,7 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 
*key, crypto_skcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); - - err = crypto_skcipher_setkey(ctx->chacha, key, keylen); - crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctx->chacha) & - CRYPTO_TFM_RES_MASK); - return err; + return crypto_skcipher_setkey(ctx->chacha, key, keylen); } static int chachapoly_setauthsize(struct crypto_aead *tfm, diff --git a/crypto/cipher.c b/crypto/cipher.c index 0fb7042a709d..fd78150deb1c 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -45,7 +45,6 @@ int crypto_cipher_setkey(struct crypto_cipher *tfm, struct cipher_alg *cia = crypto_cipher_alg(tfm); unsigned long alignmask = crypto_cipher_alignmask(tfm); - crypto_cipher_clear_flags(tfm, CRYPTO_TFM_RES_MASK); if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) return -EINVAL; diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 2c6649b10923..cd94243a1605 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -252,17 +252,12 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_sync_skcipher *child = ctx->child; - int err; crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_sync_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & CRYPTO_TFM_REQ_MASK); - err = crypto_sync_skcipher_setkey(child, key, keylen); - crypto_skcipher_set_flags(parent, - crypto_sync_skcipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; + return crypto_sync_skcipher_setkey(child, key, keylen); } static void cryptd_skcipher_complete(struct skcipher_request *req, int err) @@ -491,15 +486,11 @@ static int cryptd_hash_setkey(struct crypto_ahash *parent, { struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); struct crypto_shash *child = ctx->child; - int err; crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & CRYPTO_TFM_REQ_MASK); - err = crypto_shash_setkey(child, key, keylen); - crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; + return crypto_shash_setkey(child, key, keylen); } static int cryptd_hash_enqueue(struct ahash_request *req, diff --git a/crypto/ctr.c b/crypto/ctr.c index 1e9d6b86b3c6..b63b19de68a9 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -170,7 +170,6 @@ static int crypto_rfc3686_setkey(struct crypto_skcipher *parent, { struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_skcipher *child = ctx->child; - int err; /* the nonce is stored in bytes at end of key */ if (keylen < CTR_RFC3686_NONCE_SIZE) @@ -184,11 +183,7 @@ static int crypto_rfc3686_setkey(struct crypto_skcipher *parent, crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & CRYPTO_TFM_REQ_MASK); - err = crypto_skcipher_setkey(child, key, keylen); - crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - - return err; + return crypto_skcipher_setkey(child, key, keylen); } static int crypto_rfc3686_crypt(struct skcipher_request *req) diff --git a/crypto/cts.c b/crypto/cts.c index 6b6087dbb62a..a0bb994f8b11 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -78,15 +78,11 @@ static int crypto_cts_setkey(struct crypto_skcipher *parent, const u8 *key, { struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_skcipher *child = ctx->child; - int err; 
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & CRYPTO_TFM_REQ_MASK); - err = crypto_skcipher_setkey(child, key, keylen); - crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; + return crypto_skcipher_setkey(child, key, keylen); } static void cts_cbc_crypt_done(struct crypto_async_request *areq, int err) diff --git a/crypto/essiv.c b/crypto/essiv.c index f49bd6fc6972..61d9000ae4ad 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -75,9 +75,6 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm, crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); err = crypto_skcipher_setkey(tctx->u.skcipher, key, keylen); - crypto_skcipher_set_flags(tfm, - crypto_skcipher_get_flags(tctx->u.skcipher) & - CRYPTO_TFM_RES_MASK); if (err) return err; @@ -90,13 +87,8 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm, crypto_cipher_set_flags(tctx->essiv_cipher, crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(tctx->essiv_cipher, salt, - crypto_shash_digestsize(tctx->hash)); - crypto_skcipher_set_flags(tfm, - crypto_cipher_get_flags(tctx->essiv_cipher) & - CRYPTO_TFM_RES_MASK); - - return err; + return crypto_cipher_setkey(tctx->essiv_cipher, salt, + crypto_shash_digestsize(tctx->hash)); } static int essiv_aead_setkey(struct crypto_aead *tfm, const u8 *key, @@ -112,8 +104,6 @@ static int essiv_aead_setkey(struct crypto_aead *tfm, const u8 *key, crypto_aead_set_flags(tctx->u.aead, crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); err = crypto_aead_setkey(tctx->u.aead, key, keylen); - crypto_aead_set_flags(tfm, crypto_aead_get_flags(tctx->u.aead) & - CRYPTO_TFM_RES_MASK); if (err) return err; @@ -130,12 +120,8 @@ static int essiv_aead_setkey(struct crypto_aead *tfm, const u8 *key, crypto_cipher_clear_flags(tctx->essiv_cipher, CRYPTO_TFM_REQ_MASK); crypto_cipher_set_flags(tctx->essiv_cipher, crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(tctx->essiv_cipher, salt, - crypto_shash_digestsize(tctx->hash)); - crypto_aead_set_flags(tfm, crypto_cipher_get_flags(tctx->essiv_cipher) & - CRYPTO_TFM_RES_MASK); - - return err; + return crypto_cipher_setkey(tctx->essiv_cipher, salt, + crypto_shash_digestsize(tctx->hash)); } static int essiv_aead_setauthsize(struct crypto_aead *tfm, diff --git a/crypto/gcm.c b/crypto/gcm.c index 73884208f075..7041cb1b6fd5 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -111,8 +111,6 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); err = crypto_skcipher_setkey(ctr, key, keylen); - crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) & - CRYPTO_TFM_RES_MASK); if (err) return err; @@ -141,9 +139,6 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128)); - crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) & - CRYPTO_TFM_RES_MASK); - out: kzfree(data); return err; @@ -727,7 +722,6 @@ static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key, { struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent); struct crypto_aead *child = ctx->child; - int err; if (keylen < 4) return -EINVAL; @@ -738,11 +732,7 @@ static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key, 
crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(child, crypto_aead_get_flags(parent) & CRYPTO_TFM_REQ_MASK); - err = crypto_aead_setkey(child, key, keylen); - crypto_aead_set_flags(parent, crypto_aead_get_flags(child) & - CRYPTO_TFM_RES_MASK); - - return err; + return crypto_aead_setkey(child, key, keylen); } static int crypto_rfc4106_setauthsize(struct crypto_aead *parent, @@ -956,7 +946,6 @@ static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key, { struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent); struct crypto_aead *child = ctx->child; - int err; if (keylen < 4) return -EINVAL; @@ -967,11 +956,7 @@ static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key, crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(child, crypto_aead_get_flags(parent) & CRYPTO_TFM_REQ_MASK); - err = crypto_aead_setkey(child, key, keylen); - crypto_aead_set_flags(parent, crypto_aead_get_flags(child) & - CRYPTO_TFM_RES_MASK); - - return err; + return crypto_aead_setkey(child, key, keylen); } static int crypto_rfc4543_setauthsize(struct crypto_aead *parent, diff --git a/crypto/lrw.c b/crypto/lrw.c index be829f6afc8e..8ebd79276c78 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -79,8 +79,6 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & CRYPTO_TFM_REQ_MASK); err = crypto_skcipher_setkey(child, key, keylen - bsize); - crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); if (err) return err; diff --git a/crypto/simd.c b/crypto/simd.c index 48876266cf2d..56885af49c24 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -52,15 +52,11 @@ static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, { struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *child = &ctx->cryptd_tfm->base; - int err; crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); - err = crypto_skcipher_setkey(child, key, key_len); - crypto_skcipher_set_flags(tfm, crypto_skcipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; + return crypto_skcipher_setkey(child, key, key_len); } static int simd_skcipher_encrypt(struct skcipher_request *req) @@ -295,15 +291,11 @@ static int simd_aead_setkey(struct crypto_aead *tfm, const u8 *key, { struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_aead *child = &ctx->cryptd_tfm->base; - int err; crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(child, crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); - err = crypto_aead_setkey(child, key, key_len); - crypto_aead_set_flags(tfm, crypto_aead_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; + return crypto_aead_setkey(child, key, key_len); } static int simd_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 8c8735f75478..89137a197fc8 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -876,15 +876,11 @@ static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); - int err; crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK); crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(cipher, key, keylen); - 
crypto_skcipher_set_flags(tfm, crypto_cipher_get_flags(cipher) & - CRYPTO_TFM_RES_MASK); - return err; + return crypto_cipher_setkey(cipher, key, keylen); } static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm) diff --git a/crypto/xts.c b/crypto/xts.c index ab117633d64e..19d55489e78b 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -61,8 +61,6 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) & CRYPTO_TFM_REQ_MASK); err = crypto_cipher_setkey(tweak, key + keylen, keylen); - crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) & - CRYPTO_TFM_RES_MASK); if (err) return err; @@ -71,11 +69,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & CRYPTO_TFM_REQ_MASK); - err = crypto_skcipher_setkey(child, key, keylen); - crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - - return err; + return crypto_skcipher_setkey(child, key, keylen); } /* diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index 121eb81df64f..f7fc0c464125 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -289,19 +289,11 @@ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx, const u8 *key, unsigned int keylen) { - int rc; - crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher, CRYPTO_TFM_REQ_MASK); crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher, crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK); - rc = crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen); - crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK); - crypto_skcipher_set_flags(cipher, - crypto_sync_skcipher_get_flags(ctx->sw_cipher.cipher) & - CRYPTO_TFM_RES_MASK); - - return rc; + return crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen); } int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher, @@ -376,18 +368,10 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx, const u8 *key, unsigned int keylen) { - int rc; - crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(ctx->sw_cipher.aead, crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK); - rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen); - crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(cipher, - crypto_aead_get_flags(ctx->sw_cipher.aead) & - CRYPTO_TFM_RES_MASK); - - return rc; + return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen); } /** diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 898f66cb2eb2..466c15b474da 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -2041,7 +2041,6 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key, { struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_authenc_keys keys; - u32 flags; int err; if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) @@ -2051,11 +2050,9 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key, goto badkey; /* Save auth key. 
*/ - flags = crypto_aead_get_flags(tfm); err = atmel_sha_authenc_setkey(ctx->auth, keys.authkey, keys.authkeylen, - &flags); - crypto_aead_set_flags(tfm, flags & CRYPTO_TFM_RES_MASK); + crypto_aead_get_flags(tfm)); if (err) { memzero_explicit(&keys, sizeof(keys)); return err; diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h index d6de810df44f..c6530a1c8c20 100644 --- a/drivers/crypto/atmel-authenc.h +++ b/drivers/crypto/atmel-authenc.h @@ -30,8 +30,7 @@ unsigned int atmel_sha_authenc_get_reqsize(void); struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode); void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth); int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth, - const u8 *key, unsigned int keylen, - u32 *flags); + const u8 *key, unsigned int keylen, u32 flags); int atmel_sha_authenc_schedule(struct ahash_request *req, struct atmel_sha_authenc_ctx *auth, diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index d3bcd14201c2..079fdb8114e9 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -2207,18 +2207,13 @@ void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth) EXPORT_SYMBOL_GPL(atmel_sha_authenc_free); int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth, - const u8 *key, unsigned int keylen, - u32 *flags) + const u8 *key, unsigned int keylen, u32 flags) { struct crypto_ahash *tfm = auth->tfm; - int err; crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK); - crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK); - err = crypto_ahash_setkey(tfm, key, keylen); - *flags = crypto_ahash_get_flags(tfm); - - return err; + crypto_ahash_set_flags(tfm, flags & CRYPTO_TFM_REQ_MASK); + return crypto_ahash_setkey(tfm, key, keylen); } EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey); diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 184a3e1245cf..c8b9408541a9 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -2893,13 +2893,8 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, ctx->fallback_cipher->base.crt_flags |= tfm->crt_flags & CRYPTO_TFM_REQ_MASK; ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen); - if (ret) { + if (ret) flow_log(" fallback setkey() returned:%d\n", ret); - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= - (ctx->fallback_cipher->base.crt_flags & - CRYPTO_TFM_RES_MASK); - } } ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, @@ -2965,13 +2960,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher, tfm->crt_flags & CRYPTO_TFM_REQ_MASK; ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen + ctx->salt_len); - if (ret) { + if (ret) flow_log(" fallback setkey() returned:%d\n", ret); - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= - (ctx->fallback_cipher->base.crt_flags & - CRYPTO_TFM_RES_MASK); - } } ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 720b2ff55464..b4b9b22125d1 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -870,20 +870,13 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); - int err = 0; crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); 
crypto_sync_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); - err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen); - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= - crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) & - CRYPTO_TFM_RES_MASK; - return err; + return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen); } static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher, @@ -3302,9 +3295,6 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & - CRYPTO_TFM_RES_MASK); if (error) return error; return chcr_ccm_common_setkey(aead, key, keylen); @@ -3324,9 +3314,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & - CRYPTO_TFM_RES_MASK); if (error) return error; keylen -= 3; @@ -3348,9 +3335,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & - CRYPTO_TFM_RES_MASK); if (ret) goto out; @@ -3416,9 +3400,6 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher) - & CRYPTO_TFM_RES_MASK); if (err) goto out; @@ -3544,9 +3525,6 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher) - & CRYPTO_TFM_RES_MASK); if (err) goto out; diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index eb6e6b618361..f4f18bfc2247 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -110,7 +110,6 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key, unsigned int len) { struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); - unsigned int ret; tctx->keylen = len; @@ -130,20 +129,13 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key, tctx->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); - ret = crypto_cipher_setkey(tctx->fallback.cip, key, len); - if (ret) { - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags & - CRYPTO_TFM_RES_MASK); - } - return ret; + return crypto_cipher_setkey(tctx->fallback.cip, key, len); } static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int len) { 
struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); - unsigned int ret; tctx->keylen = len; @@ -164,11 +156,7 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, crypto_skcipher_set_flags(tctx->fallback.skcipher, crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); - ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len); - crypto_skcipher_set_flags(tfm, - crypto_skcipher_get_flags(tctx->fallback.skcipher) & - CRYPTO_TFM_RES_MASK); - return ret; + return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len); } static void diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 5ee66532f336..0c5e80c3f6e3 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -499,9 +499,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, goto badkey; } - crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) & - CRYPTO_TFM_RES_MASK); - if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma && (memcmp(ctx->ipad, istate.state, ctx->state_sz) || memcmp(ctx->opad, ostate.state, ctx->state_sz))) @@ -2583,8 +2580,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key, crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) & CRYPTO_TFM_REQ_MASK); ret = crypto_cipher_setkey(ctx->hkaes, key, len); - crypto_aead_set_flags(ctfm, crypto_cipher_get_flags(ctx->hkaes) & - CRYPTO_TFM_RES_MASK); if (ret) return ret; diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 088d7f8aab5e..43962bc709c6 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -2069,8 +2069,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); ret = crypto_cipher_setkey(ctx->kaes, key, len); - crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) & - CRYPTO_TFM_RES_MASK); if (ret) return ret; @@ -2090,8 +2088,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, ret = crypto_cipher_setkey(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE, AES_MIN_KEY_SIZE); - crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) & - CRYPTO_TFM_RES_MASK); if (ret) return ret; @@ -2174,8 +2170,6 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); ret = crypto_cipher_setkey(ctx->kaes, key, len); - crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) & - CRYPTO_TFM_RES_MASK); if (ret) return ret; diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c index 00e580bf8536..78d660d963e2 100644 --- a/drivers/crypto/mediatek/mtk-aes.c +++ b/drivers/crypto/mediatek/mtk-aes.c @@ -1031,8 +1031,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); err = crypto_skcipher_setkey(ctr, key, keylen); - crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) & - CRYPTO_TFM_RES_MASK); if (err) return err; diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index f438b425c655..435ac1c83df9 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -492,7 +492,6 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher 
*tfm, const u8 *key, unsigned int len) { struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm); - unsigned int ret; /* * AES 128 is supposed by the hardware, store key into temporary @@ -513,16 +512,7 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); crypto_sync_skcipher_set_flags(actx->fallback, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - - ret = crypto_sync_skcipher_setkey(actx->fallback, key, len); - if (!ret) - return 0; - - tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) & - CRYPTO_TFM_RES_MASK; - - return ret; + return crypto_sync_skcipher_setkey(actx->fallback, key, len); } static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm) diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index ced4cbed9ea0..7384e91c8b32 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -465,9 +465,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); err = crypto_aead_setkey(ctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) & - CRYPTO_TFM_RES_MASK); if (err) return err; @@ -802,12 +799,6 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, CRYPTO_TFM_REQ_MASK); err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len); - - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= - crypto_sync_skcipher_get_flags(ctx->sw_cipher) & - CRYPTO_TFM_RES_MASK; - if (err) goto sw_setkey_failed; } diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index d4ea2f11ca68..466e30bd529c 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -601,7 +601,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm); - int ret; ctx->keylen = keylen; @@ -621,13 +620,7 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - - ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); - - tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) & - CRYPTO_TFM_RES_MASK; - return ret; + return crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); } static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) diff --git a/include/linux/crypto.h b/include/linux/crypto.h index accd0c8038fd..763863dbc079 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -107,8 +107,6 @@ #define CRYPTO_TFM_NEED_KEY 0x00000001 #define CRYPTO_TFM_REQ_MASK 0x000fff00 -#define CRYPTO_TFM_RES_MASK 0xfff00000 - #define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100 #define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 #define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 -- cgit v1.2.3 From ff67062796e97cbb009ab0ca30176abb24b9a325 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:41 -0800 Subject: crypto: algapi - make crypto_drop_spawn() a no-op on uninitialized spawns Make crypto_drop_spawn() do nothing when the spawn hasn't been initialized with an algorithm yet. 
This will allow simplifying error handling in all the template ->create() functions, since on error they will be able to just call their usual "free instance" function, rather than having to handle dropping just the spawns that have been initialized so far. This does assume the spawn starts out zero-filled, but that's always the case since instances are allocated with kzalloc(). And some other code already assumes this anyway. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/algapi.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index 363849983941..4c761f48110d 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -734,6 +734,9 @@ EXPORT_SYMBOL_GPL(crypto_grab_spawn); void crypto_drop_spawn(struct crypto_spawn *spawn) { + if (!spawn->alg) /* not yet initialized? */ + return; + down_write(&crypto_alg_sem); if (!spawn->dead) list_del(&spawn->list); -- cgit v1.2.3 From ca94e9374a7d86e0594ed824b437656ca83d47b4 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:42 -0800 Subject: crypto: algapi - make crypto_grab_spawn() handle an ERR_PTR() name To allow further simplifying template ->create() functions, make crypto_grab_spawn() handle an ERR_PTR() name by passing back the error. For most templates, this will allow the result of crypto_attr_alg_name() to be passed directly to crypto_grab_*(), rather than first having to assign it to a variable [where it can then potentially be misused, as it was in the rfc7539 template prior to commit 5e27f38f1f3f ("crypto: chacha20poly1305 - set cra_name correctly")] and check it for error. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/algapi.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index 4c761f48110d..a5223c5f2275 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -720,6 +720,10 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name, struct crypto_alg *alg; int err; + /* Allow the result of crypto_attr_alg_name() to be passed directly */ + if (IS_ERR(name)) + return PTR_ERR(name); + alg = crypto_find_alg(name, spawn->frontend, type, mask); if (IS_ERR(alg)) return PTR_ERR(alg); -- cgit v1.2.3 From b9f76dddb1f9f70e008b982381bbc9a67c9b8c66 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:45 -0800 Subject: crypto: skcipher - pass instance to crypto_grab_skcipher() Initializing a crypto_skcipher_spawn currently requires: 1. Set spawn->base.inst to point to the instance. 2. Call crypto_grab_skcipher(). But there's no reason for these steps to be separate, and in fact this unneeded complication has caused at least one bug, the one fixed by commit 6db43410179b ("crypto: adiantum - initialize crypto_spawn::inst") So just make crypto_grab_skcipher() take the instance as an argument. To keep the function calls from getting too unwieldy due to this extra argument, also introduce a 'mask' variable into the affected places which weren't already using one. 
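For illustration, a minimal sketch of a ->create() function using the new signature (example_create() and the zero mask are hypothetical, shown only to demonstrate the calling convention; the real conversions are in the diff below):

	static int example_create(struct crypto_template *tmpl,
				  struct rtattr **tb)
	{
		struct skcipher_instance *inst;
		struct crypto_skcipher_spawn *spawn;
		int err;

		inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
		if (!inst)
			return -ENOMEM;
		spawn = skcipher_instance_ctx(inst);

		/*
		 * One call now records the owning instance and grabs the
		 * algorithm; crypto_grab_spawn() also passes an ERR_PTR()
		 * name back as the error, so crypto_attr_alg_name() can be
		 * fed in directly.
		 */
		err = crypto_grab_skcipher(spawn,
					   skcipher_crypto_instance(inst),
					   crypto_attr_alg_name(tb[1]), 0, 0);
		if (err)
			goto err_free_inst;

		/* ... fill in inst->alg and register the instance as usual ... */
		return 0;

	err_free_inst:
		kfree(inst);
		return err;
	}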
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/adiantum.c | 11 ++++++----- crypto/authenc.c | 12 ++++++------ crypto/authencesn.c | 12 ++++++------ crypto/ccm.c | 9 +++++---- crypto/chacha20poly1305.c | 13 ++++++------- crypto/cryptd.c | 4 ++-- crypto/ctr.c | 4 ++-- crypto/cts.c | 9 +++++---- crypto/essiv.c | 13 +++++-------- crypto/gcm.c | 13 ++++++------- crypto/lrw.c | 15 ++++++++------- crypto/skcipher.c | 4 +++- crypto/xts.c | 9 +++++---- include/crypto/internal/skcipher.h | 11 +++-------- 14 files changed, 68 insertions(+), 71 deletions(-) (limited to 'crypto') diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 30cffb45b88f..567071464fab 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -493,6 +493,7 @@ static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg, static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; + u32 mask; const char *streamcipher_name; const char *blockcipher_name; const char *nhpoly1305_name; @@ -511,6 +512,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) return -EINVAL; + mask = crypto_requires_sync(algt->type, algt->mask); + streamcipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(streamcipher_name)) return PTR_ERR(streamcipher_name); @@ -531,11 +534,9 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) ictx = skcipher_instance_ctx(inst); /* Stream cipher, e.g. "xchacha12" */ - crypto_set_skcipher_spawn(&ictx->streamcipher_spawn, - skcipher_crypto_instance(inst)); - err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, - 0, crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(&ictx->streamcipher_spawn, + skcipher_crypto_instance(inst), + streamcipher_name, 0, mask); if (err) goto out_free_inst; streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); diff --git a/crypto/authenc.c b/crypto/authenc.c index 15aaddd34171..e31bcec58564 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -373,6 +373,7 @@ static int crypto_authenc_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; + u32 mask; struct aead_instance *inst; struct hash_alg_common *auth; struct crypto_alg *auth_base; @@ -388,9 +389,10 @@ static int crypto_authenc_create(struct crypto_template *tmpl, if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return -EINVAL; + mask = crypto_requires_sync(algt->type, algt->mask); + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK | - crypto_requires_sync(algt->type, algt->mask)); + CRYPTO_ALG_TYPE_AHASH_MASK | mask); if (IS_ERR(auth)) return PTR_ERR(auth); @@ -413,10 +415,8 @@ static int crypto_authenc_create(struct crypto_template *tmpl, if (err) goto err_free_inst; - crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); - err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(&ctx->enc, aead_crypto_instance(inst), + enc_name, 0, mask); if (err) goto err_drop_auth; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index fc81324ce881..83bda7f905bb 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -391,6 +391,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; + u32 mask; struct aead_instance *inst; struct hash_alg_common *auth; 
struct crypto_alg *auth_base; @@ -406,9 +407,10 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return -EINVAL; + mask = crypto_requires_sync(algt->type, algt->mask); + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK | - crypto_requires_sync(algt->type, algt->mask)); + CRYPTO_ALG_TYPE_AHASH_MASK | mask); if (IS_ERR(auth)) return PTR_ERR(auth); @@ -431,10 +433,8 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, if (err) goto err_free_inst; - crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); - err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(&ctx->enc, aead_crypto_instance(inst), + enc_name, 0, mask); if (err) goto err_drop_auth; diff --git a/crypto/ccm.c b/crypto/ccm.c index 44104524e95a..4414f0ddfe5a 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -450,6 +450,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, const char *mac_name) { struct crypto_attr_type *algt; + u32 mask; struct aead_instance *inst; struct skcipher_alg *ctr; struct crypto_alg *mac_alg; @@ -464,6 +465,8 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return -EINVAL; + mask = crypto_requires_sync(algt->type, algt->mask); + mac_alg = crypto_find_alg(mac_name, &crypto_ahash_type, CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_AHASH_MASK | @@ -488,10 +491,8 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, if (err) goto err_free_inst; - crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst)); - err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(&ictx->ctr, aead_crypto_instance(inst), + ctr_name, 0, mask); if (err) goto err_drop_mac; diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 88cbdaba43b8..09d5a34ab339 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -558,6 +558,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, const char *name, unsigned int ivsize) { struct crypto_attr_type *algt; + u32 mask; struct aead_instance *inst; struct skcipher_alg *chacha; struct crypto_alg *poly; @@ -576,6 +577,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return -EINVAL; + mask = crypto_requires_sync(algt->type, algt->mask); + chacha_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(chacha_name)) return PTR_ERR(chacha_name); @@ -585,9 +588,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, poly = crypto_find_alg(poly_name, &crypto_ahash_type, CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK | - crypto_requires_sync(algt->type, - algt->mask)); + CRYPTO_ALG_TYPE_AHASH_MASK | mask); if (IS_ERR(poly)) return PTR_ERR(poly); poly_hash = __crypto_hash_alg_common(poly); @@ -608,10 +609,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto err_free_inst; - crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst)); - err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(&ctx->chacha, aead_crypto_instance(inst), + chacha_name, 0, mask); if (err) goto err_drop_poly; diff --git a/crypto/cryptd.c 
b/crypto/cryptd.c index cd94243a1605..a0fe10624cfd 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -416,8 +416,8 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, ctx = skcipher_instance_ctx(inst); ctx->queue = queue; - crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst)); - err = crypto_grab_skcipher(&ctx->spawn, name, type, mask); + err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), + name, type, mask); if (err) goto out_free_inst; diff --git a/crypto/ctr.c b/crypto/ctr.c index b63b19de68a9..a8feab621c6c 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -286,8 +286,8 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, spawn = skcipher_instance_ctx(inst); - crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); - err = crypto_grab_skcipher(spawn, cipher_name, 0, mask); + err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), + cipher_name, 0, mask); if (err) goto err_free_inst; diff --git a/crypto/cts.c b/crypto/cts.c index a0bb994f8b11..48188adc8e91 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -328,6 +328,7 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_attr_type *algt; struct skcipher_alg *alg; const char *cipher_name; + u32 mask; int err; algt = crypto_get_attr_type(tb); @@ -337,6 +338,8 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) return -EINVAL; + mask = crypto_requires_sync(algt->type, algt->mask); + cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) return PTR_ERR(cipher_name); @@ -347,10 +350,8 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = skcipher_instance_ctx(inst); - crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); - err = crypto_grab_skcipher(spawn, cipher_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), + cipher_name, 0, mask); if (err) goto err_free_inst; diff --git a/crypto/essiv.c b/crypto/essiv.c index 61d9000ae4ad..0e45f5b4f67f 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -452,6 +452,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_alg *hash_alg; int ivsize; u32 type; + u32 mask; int err; algt = crypto_get_attr_type(tb); @@ -467,6 +468,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) return PTR_ERR(shash_name); type = algt->type & algt->mask; + mask = crypto_requires_sync(algt->type, algt->mask); switch (type) { case CRYPTO_ALG_TYPE_SKCIPHER: @@ -479,11 +481,8 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) ictx = crypto_instance_ctx(inst); /* Symmetric cipher, e.g., "cbc(aes)" */ - crypto_set_skcipher_spawn(&ictx->u.skcipher_spawn, inst); - err = crypto_grab_skcipher(&ictx->u.skcipher_spawn, - inner_cipher_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(&ictx->u.skcipher_spawn, inst, + inner_cipher_name, 0, mask); if (err) goto out_free_inst; skcipher_alg = crypto_spawn_skcipher_alg(&ictx->u.skcipher_spawn); @@ -503,9 +502,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) /* AEAD cipher, e.g., "authenc(hmac(sha256),cbc(aes))" */ crypto_set_aead_spawn(&ictx->u.aead_spawn, inst); err = crypto_grab_aead(&ictx->u.aead_spawn, - inner_cipher_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + 
inner_cipher_name, 0, mask); if (err) goto out_free_inst; aead_alg = crypto_spawn_aead_alg(&ictx->u.aead_spawn); diff --git a/crypto/gcm.c b/crypto/gcm.c index 7041cb1b6fd5..887f472734b6 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -580,6 +580,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, const char *ghash_name) { struct crypto_attr_type *algt; + u32 mask; struct aead_instance *inst; struct skcipher_alg *ctr; struct crypto_alg *ghash_alg; @@ -594,11 +595,11 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return -EINVAL; + mask = crypto_requires_sync(algt->type, algt->mask); + ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK | - crypto_requires_sync(algt->type, - algt->mask)); + CRYPTO_ALG_TYPE_AHASH_MASK | mask); if (IS_ERR(ghash_alg)) return PTR_ERR(ghash_alg); @@ -620,10 +621,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, ghash->digestsize != 16) goto err_drop_ghash; - crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst)); - err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(&ctx->ctr, aead_crypto_instance(inst), + ctr_name, 0, mask); if (err) goto err_drop_ghash; diff --git a/crypto/lrw.c b/crypto/lrw.c index 8ebd79276c78..63c485c0d8a6 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -301,6 +301,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) struct skcipher_alg *alg; const char *cipher_name; char ecb_name[CRYPTO_MAX_ALG_NAME]; + u32 mask; int err; algt = crypto_get_attr_type(tb); @@ -310,6 +311,8 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) return -EINVAL; + mask = crypto_requires_sync(algt->type, algt->mask); + cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) return PTR_ERR(cipher_name); @@ -320,19 +323,17 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) spawn = skcipher_instance_ctx(inst); - crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); - err = crypto_grab_skcipher(spawn, cipher_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), + cipher_name, 0, mask); if (err == -ENOENT) { err = -ENAMETOOLONG; if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - err = crypto_grab_skcipher(spawn, ecb_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher(spawn, + skcipher_crypto_instance(inst), + ecb_name, 0, mask); } if (err) diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 89137a197fc8..8759d473a154 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -747,8 +747,10 @@ static const struct crypto_type crypto_skcipher_type = { }; int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, - const char *name, u32 type, u32 mask) + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) { + spawn->base.inst = inst; spawn->base.frontend = &crypto_skcipher_type; return crypto_grab_spawn(&spawn->base, name, type, mask); } diff --git a/crypto/xts.c b/crypto/xts.c index 19d55489e78b..29efa15f1495 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -355,20 +355,21 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) ctx = skcipher_instance_ctx(inst); - 
crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst)); - mask = crypto_requires_off(algt->type, algt->mask, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); - err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask); + err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), + cipher_name, 0, mask); if (err == -ENOENT) { err = -ENAMETOOLONG; if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask); + err = crypto_grab_skcipher(&ctx->spawn, + skcipher_crypto_instance(inst), + ctx->name, 0, mask); } if (err) diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index df4fdeaa13f3..e387424f6247 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -88,14 +88,9 @@ static inline void skcipher_request_complete(struct skcipher_request *req, int e req->base.complete(&req->base, err); } -static inline void crypto_set_skcipher_spawn( - struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst) -{ - crypto_set_spawn(&spawn->base, inst); -} - -int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, - u32 type, u32 mask); +int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) { -- cgit v1.2.3 From cd900f0cacd7601dabdd028e8cbdbf2a7041cee2 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:46 -0800 Subject: crypto: aead - pass instance to crypto_grab_aead() Initializing a crypto_aead_spawn currently requires: 1. Set spawn->base.inst to point to the instance. 2. Call crypto_grab_aead(). But there's no reason for these steps to be separate, and in fact this unneeded complication has caused at least one bug, the one fixed by commit 6db43410179b ("crypto: adiantum - initialize crypto_spawn::inst") So just make crypto_grab_aead() take the instance as an argument. To keep the function calls from getting too unwieldy due to this extra argument, also introduce a 'mask' variable into the affected places which weren't already using one. 
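For reference, the before/after shape of each call site, taken directly from the hunks that follow:

	/* Before: two steps, with spawn->base.inst set separately. */
	crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(spawn, name, type, mask);

	/* After: one call that also records the owning instance. */
	err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
			       name, type, mask);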
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/aead.c | 6 ++++-- crypto/ccm.c | 8 +++++--- crypto/cryptd.c | 4 ++-- crypto/essiv.c | 3 +-- crypto/gcm.c | 16 ++++++++++------ crypto/geniv.c | 4 ++-- crypto/pcrypt.c | 5 ++--- include/crypto/internal/aead.h | 11 +++-------- 8 files changed, 29 insertions(+), 28 deletions(-) (limited to 'crypto') diff --git a/crypto/aead.c b/crypto/aead.c index 47f16d139e8e..c7135e00b8ea 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -207,9 +207,11 @@ static const struct crypto_type crypto_aead_type = { .tfmsize = offsetof(struct crypto_aead, base), }; -int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, - u32 type, u32 mask) +int crypto_grab_aead(struct crypto_aead_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) { + spawn->base.inst = inst; spawn->base.frontend = &crypto_aead_type; return crypto_grab_spawn(&spawn->base, name, type, mask); } diff --git a/crypto/ccm.c b/crypto/ccm.c index 4414f0ddfe5a..48766e81b933 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -734,6 +734,7 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; + u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; @@ -747,6 +748,8 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl, if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return -EINVAL; + mask = crypto_requires_sync(algt->type, algt->mask); + ccm_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(ccm_name)) return PTR_ERR(ccm_name); @@ -756,9 +759,8 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl, return -ENOMEM; spawn = aead_instance_ctx(inst); - crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); - err = crypto_grab_aead(spawn, ccm_name, 0, - crypto_requires_sync(algt->type, algt->mask)); + err = crypto_grab_aead(spawn, aead_crypto_instance(inst), + ccm_name, 0, mask); if (err) goto out_free_inst; diff --git a/crypto/cryptd.c b/crypto/cryptd.c index a0fe10624cfd..a03ac2878017 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -865,8 +865,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl, ctx = aead_instance_ctx(inst); ctx->queue = queue; - crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst)); - err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask); + err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst), + name, type, mask); if (err) goto out_free_inst; diff --git a/crypto/essiv.c b/crypto/essiv.c index 0e45f5b4f67f..20d7c1fdbf5d 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -500,8 +500,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) ictx = crypto_instance_ctx(inst); /* AEAD cipher, e.g., "authenc(hmac(sha256),cbc(aes))" */ - crypto_set_aead_spawn(&ictx->u.aead_spawn, inst); - err = crypto_grab_aead(&ictx->u.aead_spawn, + err = crypto_grab_aead(&ictx->u.aead_spawn, inst, inner_cipher_name, 0, mask); if (err) goto out_free_inst; diff --git a/crypto/gcm.c b/crypto/gcm.c index 887f472734b6..72649b8cbf2a 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -856,6 +856,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; + u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; @@ -869,6 +870,8 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return -EINVAL; + mask 
= crypto_requires_sync(algt->type, algt->mask); + ccm_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(ccm_name)) return PTR_ERR(ccm_name); @@ -878,9 +881,8 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, return -ENOMEM; spawn = aead_instance_ctx(inst); - crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); - err = crypto_grab_aead(spawn, ccm_name, 0, - crypto_requires_sync(algt->type, algt->mask)); + err = crypto_grab_aead(spawn, aead_crypto_instance(inst), + ccm_name, 0, mask); if (err) goto out_free_inst; @@ -1087,6 +1089,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; + u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; @@ -1101,6 +1104,8 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl, if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return -EINVAL; + mask = crypto_requires_sync(algt->type, algt->mask); + ccm_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(ccm_name)) return PTR_ERR(ccm_name); @@ -1111,9 +1116,8 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl, ctx = aead_instance_ctx(inst); spawn = &ctx->aead; - crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); - err = crypto_grab_aead(spawn, ccm_name, 0, - crypto_requires_sync(algt->type, algt->mask)); + err = crypto_grab_aead(spawn, aead_crypto_instance(inst), + ccm_name, 0, mask); if (err) goto out_free_inst; diff --git a/crypto/geniv.c b/crypto/geniv.c index b9e45a2a98b5..7afa48414f3a 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -64,8 +64,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, /* Ignore async algorithms if necessary. */ mask |= crypto_requires_sync(algt->type, algt->mask); - crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); - err = crypto_grab_aead(spawn, name, type, mask); + err = crypto_grab_aead(spawn, aead_crypto_instance(inst), + name, type, mask); if (err) goto err_free_inst; diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index d6696e217128..1b632139a8c1 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -258,9 +258,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, if (!ctx->psdec) goto out_free_psenc; - crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst)); - - err = crypto_grab_aead(&ctx->spawn, name, 0, 0); + err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst), + name, 0, 0); if (err) goto out_free_psdec; diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h index 374185a7567f..27b7b0224ea6 100644 --- a/include/crypto/internal/aead.h +++ b/include/crypto/internal/aead.h @@ -81,14 +81,9 @@ static inline struct aead_request *aead_request_cast( return container_of(req, struct aead_request, base); } -static inline void crypto_set_aead_spawn( - struct crypto_aead_spawn *spawn, struct crypto_instance *inst) -{ - crypto_set_spawn(&spawn->base, inst); -} - -int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, - u32 type, u32 mask); +int crypto_grab_aead(struct crypto_aead_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn) { -- cgit v1.2.3 From 73bed26f73a120f14cabf8d214ec5078bb42dea9 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:47 -0800 Subject: crypto: akcipher - pass instance to crypto_grab_akcipher() Initializing a crypto_akcipher_spawn currently requires: 1. 
Set spawn->base.inst to point to the instance. 2. Call crypto_grab_akcipher(). But there's no reason for these steps to be separate, and in fact this unneeded complication has caused at least one bug, the one fixed by commit 6db43410179b ("crypto: adiantum - initialize crypto_spawn::inst") So just make crypto_grab_akcipher() take the instance as an argument. To keep the function call from getting too unwieldy due to this extra argument, also introduce a 'mask' variable into pkcs1pad_create(). Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/akcipher.c | 6 ++++-- crypto/rsa-pkcs1pad.c | 8 +++++--- include/crypto/internal/akcipher.h | 12 +++--------- 3 files changed, 12 insertions(+), 14 deletions(-) (limited to 'crypto') diff --git a/crypto/akcipher.c b/crypto/akcipher.c index 7d5cf4939423..84ccf9b02bbe 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -90,9 +90,11 @@ static const struct crypto_type crypto_akcipher_type = { .tfmsize = offsetof(struct crypto_akcipher, base), }; -int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name, - u32 type, u32 mask) +int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) { + spawn->base.inst = inst; spawn->base.frontend = &crypto_akcipher_type; return crypto_grab_spawn(&spawn->base, name, type, mask); } diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 0aa489711ec4..176b63afec8d 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -598,6 +598,7 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) { const struct rsa_asn1_template *digest_info; struct crypto_attr_type *algt; + u32 mask; struct akcipher_instance *inst; struct pkcs1pad_inst_ctx *ctx; struct crypto_akcipher_spawn *spawn; @@ -613,6 +614,8 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask) return -EINVAL; + mask = crypto_requires_sync(algt->type, algt->mask); + rsa_alg_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(rsa_alg_name)) return PTR_ERR(rsa_alg_name); @@ -636,9 +639,8 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = &ctx->spawn; ctx->digest_info = digest_info; - crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst)); - err = crypto_grab_akcipher(spawn, rsa_alg_name, 0, - crypto_requires_sync(algt->type, algt->mask)); + err = crypto_grab_akcipher(spawn, akcipher_crypto_instance(inst), + rsa_alg_name, 0, mask); if (err) goto out_free_inst; diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h index d6c8a42789ad..8d3220c9ab77 100644 --- a/include/crypto/internal/akcipher.h +++ b/include/crypto/internal/akcipher.h @@ -78,15 +78,9 @@ static inline void *akcipher_instance_ctx(struct akcipher_instance *inst) return crypto_instance_ctx(akcipher_crypto_instance(inst)); } -static inline void crypto_set_akcipher_spawn( - struct crypto_akcipher_spawn *spawn, - struct crypto_instance *inst) -{ - crypto_set_spawn(&spawn->base, inst); -} - -int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name, - u32 type, u32 mask); +int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); static inline struct crypto_akcipher *crypto_spawn_akcipher( struct crypto_akcipher_spawn *spawn) -- cgit v1.2.3 From de95c9574108ec304083ed574304ab3b60b4167c Mon Sep 17 00:00:00 2001 
From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:48 -0800 Subject: crypto: algapi - pass instance to crypto_grab_spawn() Currently, crypto_spawn::inst is first used temporarily to pass the instance to crypto_grab_spawn(). Then crypto_init_spawn() overwrites it with crypto_spawn::next, which shares the same union. Finally, crypto_spawn::inst is set again when the instance is registered. Make this less convoluted by just passing the instance as an argument to crypto_grab_spawn() instead. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/adiantum.c | 6 +++--- crypto/aead.c | 3 +-- crypto/akcipher.c | 3 +-- crypto/algapi.c | 6 +++--- crypto/skcipher.c | 3 +-- include/crypto/algapi.h | 10 ++-------- 6 files changed, 11 insertions(+), 20 deletions(-) (limited to 'crypto') diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 567071464fab..76a41881ee8c 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -542,9 +542,9 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); /* Block cipher, e.g. "aes" */ - crypto_set_spawn(&ictx->blockcipher_spawn, - skcipher_crypto_instance(inst)); - err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, + err = crypto_grab_spawn(&ictx->blockcipher_spawn, + skcipher_crypto_instance(inst), + blockcipher_name, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (err) goto out_drop_streamcipher; diff --git a/crypto/aead.c b/crypto/aead.c index c7135e00b8ea..02a0db076d7e 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -211,9 +211,8 @@ int crypto_grab_aead(struct crypto_aead_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask) { - spawn->base.inst = inst; spawn->base.frontend = &crypto_aead_type; - return crypto_grab_spawn(&spawn->base, name, type, mask); + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); } EXPORT_SYMBOL_GPL(crypto_grab_aead); diff --git a/crypto/akcipher.c b/crypto/akcipher.c index 84ccf9b02bbe..eeed6c151d2f 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -94,9 +94,8 @@ int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask) { - spawn->base.inst = inst; spawn->base.frontend = &crypto_akcipher_type; - return crypto_grab_spawn(&spawn->base, name, type, mask); + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); } EXPORT_SYMBOL_GPL(crypto_grab_akcipher); diff --git a/crypto/algapi.c b/crypto/algapi.c index a5223c5f2275..a25ce02918f8 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -714,8 +714,8 @@ out: } EXPORT_SYMBOL_GPL(crypto_init_spawn2); -int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name, - u32 type, u32 mask) +int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, + const char *name, u32 type, u32 mask) { struct crypto_alg *alg; int err; @@ -729,7 +729,7 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name, return PTR_ERR(alg); spawn->dropref = true; - err = crypto_init_spawn(spawn, alg, spawn->inst, mask); + err = crypto_init_spawn(spawn, alg, inst, mask); if (err) crypto_mod_put(alg); return err; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 8759d473a154..15c033c960f7 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -750,9 +750,8 @@ int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask) { - spawn->base.inst = inst; 
spawn->base.frontend = &crypto_skcipher_type; - return crypto_grab_spawn(&spawn->base, name, type, mask); + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); } EXPORT_SYMBOL_GPL(crypto_grab_skcipher); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 5022cada4fc6..2779c8d34ba9 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -116,20 +116,14 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, struct crypto_instance *inst, const struct crypto_type *frontend); -int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name, - u32 type, u32 mask); +int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, + const char *name, u32 type, u32 mask); void crypto_drop_spawn(struct crypto_spawn *spawn); struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, u32 mask); void *crypto_spawn_tfm2(struct crypto_spawn *spawn); -static inline void crypto_set_spawn(struct crypto_spawn *spawn, - struct crypto_instance *inst) -{ - spawn->inst = inst; -} - struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); int crypto_check_attr_type(struct rtattr **tb, u32 type); const char *crypto_attr_alg_name(struct rtattr *rta); -- cgit v1.2.3 From fdfad1fffc2aa199fb447c152a00a5e383a5b973 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:49 -0800 Subject: crypto: shash - introduce crypto_grab_shash() Currently, shash spawns are initialized by using shash_attr_alg() or crypto_alg_mod_lookup() to look up the shash algorithm, then calling crypto_init_shash_spawn(). This is different from how skcipher, aead, and akcipher spawns are initialized (they use crypto_grab_*()), and for no good reason. This difference introduces unnecessary complexity. The crypto_grab_*() functions used to have some problems, like not holding a reference to the algorithm and requiring the caller to initialize spawn->base.inst. But those problems are fixed now. So, let's introduce crypto_grab_shash() so that we can convert all templates to the same way of initializing their spawns. 
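A sketch of how a template's ->create() initializes its shash spawn with the new helper (variable names are illustrative; hmac and cryptd are converted this way later in the series):

	struct crypto_shash_spawn *spawn = shash_instance_ctx(inst);
	struct shash_alg *alg;

	/* Grabs a reference and records the owning instance in one call. */
	err = crypto_grab_shash(spawn, shash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_shash_alg(spawn);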
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/shash.c | 9 +++++++++ include/crypto/internal/hash.h | 10 ++++++++++ 2 files changed, 19 insertions(+) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 7243f60dab87..e0872ac2729a 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -469,6 +469,15 @@ static const struct crypto_type crypto_shash_type = { .tfmsize = offsetof(struct crypto_shash, base), }; +int crypto_grab_shash(struct crypto_shash_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_shash_type; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_shash); + struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, u32 mask) { diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 3b426b09bd32..4d1a0d8e4f3a 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -128,11 +128,21 @@ int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, struct shash_alg *alg, struct crypto_instance *inst); +int crypto_grab_shash(struct crypto_shash_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn) { crypto_drop_spawn(&spawn->base); } +static inline struct shash_alg *crypto_spawn_shash_alg( + struct crypto_shash_spawn *spawn) +{ + return __crypto_shash_alg(spawn->base.alg); +} + struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask); int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); -- cgit v1.2.3 From 84a9c938e5063709f799fd6bab17a8ea723c2eb5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:50 -0800 Subject: crypto: ahash - introduce crypto_grab_ahash() Currently, ahash spawns are initialized by using ahash_attr_alg() or crypto_find_alg() to look up the ahash algorithm, then calling crypto_init_ahash_spawn(). This is different from how skcipher, aead, and akcipher spawns are initialized (they use crypto_grab_*()), and for no good reason. This difference introduces unnecessary complexity. The crypto_grab_*() functions used to have some problems, like not holding a reference to the algorithm and requiring the caller to initialize spawn->base.inst. But those problems are fixed now. So, let's introduce crypto_grab_ahash() so that we can convert all templates to the same way of initializing their spawns. 
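The corresponding ahash usage, as the authenc conversion later in this series does (ctx->auth being that template's ahash spawn):

	struct hash_alg_common *auth;

	err = crypto_grab_ahash(&ctx->auth, aead_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	auth = crypto_spawn_ahash_alg(&ctx->auth);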
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 9 +++++++++ include/crypto/internal/hash.h | 10 ++++++++++ 2 files changed, 19 insertions(+) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 181bd851b429..e98a1398ed7f 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -556,6 +556,15 @@ const struct crypto_type crypto_ahash_type = { }; EXPORT_SYMBOL_GPL(crypto_ahash_type); +int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_ahash_type; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_ahash); + struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, u32 mask) { diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 4d1a0d8e4f3a..e1024fa0032f 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -109,11 +109,21 @@ int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, struct hash_alg_common *alg, struct crypto_instance *inst); +int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn) { crypto_drop_spawn(&spawn->base); } +static inline struct hash_alg_common *crypto_spawn_ahash_alg( + struct crypto_ahash_spawn *spawn) +{ + return __crypto_hash_alg_common(spawn->base.alg); +} + struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask); int crypto_register_shash(struct shash_alg *alg); -- cgit v1.2.3 From ba44840747bdd60095881830af0d75f0e5017c99 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:52 -0800 Subject: crypto: adiantum - use crypto_grab_{cipher,shash} and simplify error paths Make the adiantum template use the new functions crypto_grab_cipher() and crypto_grab_shash() to initialize its cipher and shash spawns. This is needed to make all spawns be initialized in a consistent way. Also simplify the error handling by taking advantage of crypto_drop_*() now accepting (as a no-op) spawns that haven't been initialized yet, and by taking advantage of crypto_grab_*() now handling ERR_PTR() names. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/adiantum.c | 85 ++++++++++++++++--------------------------------------- 1 file changed, 25 insertions(+), 60 deletions(-) (limited to 'crypto') diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 76a41881ee8c..5b8aa14ccb55 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -39,8 +39,6 @@ #include #include -#include "internal.h" - /* * Size of right-hand part of input data, in bytes; also the size of the block * cipher's block size and the hash function's output. 
@@ -64,7 +62,7 @@ struct adiantum_instance_ctx { struct crypto_skcipher_spawn streamcipher_spawn; - struct crypto_spawn blockcipher_spawn; + struct crypto_cipher_spawn blockcipher_spawn; struct crypto_shash_spawn hash_spawn; }; @@ -410,7 +408,7 @@ static int adiantum_init_tfm(struct crypto_skcipher *tfm) if (IS_ERR(streamcipher)) return PTR_ERR(streamcipher); - blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn); + blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn.base); if (IS_ERR(blockcipher)) { err = PTR_ERR(blockcipher); goto err_free_streamcipher; @@ -461,7 +459,7 @@ static void adiantum_free_instance(struct skcipher_instance *inst) struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst); crypto_drop_skcipher(&ictx->streamcipher_spawn); - crypto_drop_spawn(&ictx->blockcipher_spawn); + crypto_drop_cipher(&ictx->blockcipher_spawn); crypto_drop_shash(&ictx->hash_spawn); kfree(inst); } @@ -494,14 +492,11 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; u32 mask; - const char *streamcipher_name; - const char *blockcipher_name; const char *nhpoly1305_name; struct skcipher_instance *inst; struct adiantum_instance_ctx *ictx; struct skcipher_alg *streamcipher_alg; struct crypto_alg *blockcipher_alg; - struct crypto_alg *_hash_alg; struct shash_alg *hash_alg; int err; @@ -514,20 +509,6 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) mask = crypto_requires_sync(algt->type, algt->mask); - streamcipher_name = crypto_attr_alg_name(tb[1]); - if (IS_ERR(streamcipher_name)) - return PTR_ERR(streamcipher_name); - - blockcipher_name = crypto_attr_alg_name(tb[2]); - if (IS_ERR(blockcipher_name)) - return PTR_ERR(blockcipher_name); - - nhpoly1305_name = crypto_attr_alg_name(tb[3]); - if (nhpoly1305_name == ERR_PTR(-ENOENT)) - nhpoly1305_name = "nhpoly1305"; - if (IS_ERR(nhpoly1305_name)) - return PTR_ERR(nhpoly1305_name); - inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); if (!inst) return -ENOMEM; @@ -536,33 +517,29 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) /* Stream cipher, e.g. "xchacha12" */ err = crypto_grab_skcipher(&ictx->streamcipher_spawn, skcipher_crypto_instance(inst), - streamcipher_name, 0, mask); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) - goto out_free_inst; + goto err_free_inst; streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); /* Block cipher, e.g. 
"aes" */ - err = crypto_grab_spawn(&ictx->blockcipher_spawn, - skcipher_crypto_instance(inst), - blockcipher_name, - CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); + err = crypto_grab_cipher(&ictx->blockcipher_spawn, + skcipher_crypto_instance(inst), + crypto_attr_alg_name(tb[2]), 0, mask); if (err) - goto out_drop_streamcipher; - blockcipher_alg = ictx->blockcipher_spawn.alg; + goto err_free_inst; + blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn); /* NHPoly1305 ε-∆U hash function */ - _hash_alg = crypto_alg_mod_lookup(nhpoly1305_name, - CRYPTO_ALG_TYPE_SHASH, - CRYPTO_ALG_TYPE_MASK); - if (IS_ERR(_hash_alg)) { - err = PTR_ERR(_hash_alg); - goto out_drop_blockcipher; - } - hash_alg = __crypto_shash_alg(_hash_alg); - err = crypto_init_shash_spawn(&ictx->hash_spawn, hash_alg, - skcipher_crypto_instance(inst)); + nhpoly1305_name = crypto_attr_alg_name(tb[3]); + if (nhpoly1305_name == ERR_PTR(-ENOENT)) + nhpoly1305_name = "nhpoly1305"; + err = crypto_grab_shash(&ictx->hash_spawn, + skcipher_crypto_instance(inst), + nhpoly1305_name, 0, mask); if (err) - goto out_put_hash; + goto err_free_inst; + hash_alg = crypto_spawn_shash_alg(&ictx->hash_spawn); /* Check the set of algorithms */ if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg, @@ -571,7 +548,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) streamcipher_alg->base.cra_name, blockcipher_alg->cra_name, hash_alg->base.cra_name); err = -EINVAL; - goto out_drop_hash; + goto err_free_inst; } /* Instance fields */ @@ -580,13 +557,13 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "adiantum(%s,%s)", streamcipher_alg->base.cra_name, blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME) - goto out_drop_hash; + goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "adiantum(%s,%s,%s)", streamcipher_alg->base.cra_driver_name, blockcipher_alg->cra_driver_name, hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto out_drop_hash; + goto err_free_inst; inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags & CRYPTO_ALG_ASYNC; @@ -616,22 +593,10 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) inst->free = adiantum_free_instance; err = skcipher_register_instance(tmpl, inst); - if (err) - goto out_drop_hash; - - crypto_mod_put(_hash_alg); - return 0; - -out_drop_hash: - crypto_drop_shash(&ictx->hash_spawn); -out_put_hash: - crypto_mod_put(_hash_alg); -out_drop_blockcipher: - crypto_drop_spawn(&ictx->blockcipher_spawn); -out_drop_streamcipher: - crypto_drop_skcipher(&ictx->streamcipher_spawn); -out_free_inst: - kfree(inst); + if (err) { +err_free_inst: + adiantum_free_instance(inst); + } return err; } -- cgit v1.2.3 From 218c5035fe33df10823378353957f994d95c079b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:53 -0800 Subject: crypto: cryptd - use crypto_grab_shash() and simplify error paths Make the cryptd template (in the hash case) use the new function crypto_grab_shash() to initialize its shash spawn. This is needed to make all spawns be initialized in a consistent way. This required making cryptd_create_hash() allocate the instance directly rather than use cryptd_alloc_instance(). Also simplify the error handling by taking advantage of crypto_drop_*() now accepting (as a no-op) spawns that haven't been initialized yet, and by taking advantage of crypto_grab_*() now handling ERR_PTR() names. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/cryptd.c | 68 +++++++++++++++------------------------------------------ 1 file changed, 18 insertions(+), 50 deletions(-) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index a03ac2878017..fb03acac7d9a 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -221,32 +221,6 @@ static int cryptd_init_instance(struct crypto_instance *inst, return 0; } -static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, - unsigned int tail) -{ - char *p; - struct crypto_instance *inst; - int err; - - p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL); - if (!p) - return ERR_PTR(-ENOMEM); - - inst = (void *)(p + head); - - err = cryptd_init_instance(inst, alg); - if (err) - goto out_free_inst; - -out: - return p; - -out_free_inst: - kfree(p); - p = ERR_PTR(err); - goto out; -} - static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, const u8 *key, unsigned int keylen) { @@ -662,39 +636,36 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, { struct hashd_instance_ctx *ctx; struct ahash_instance *inst; - struct shash_alg *salg; - struct crypto_alg *alg; + struct shash_alg *alg; u32 type = 0; u32 mask = 0; int err; cryptd_check_internal(tb, &type, &mask); - salg = shash_attr_alg(tb[1], type, mask); - if (IS_ERR(salg)) - return PTR_ERR(salg); - - alg = &salg->base; - inst = cryptd_alloc_instance(alg, ahash_instance_headroom(), - sizeof(*ctx)); - err = PTR_ERR(inst); - if (IS_ERR(inst)) - goto out_put_alg; + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (!inst) + return -ENOMEM; ctx = ahash_instance_ctx(inst); ctx->queue = queue; - err = crypto_init_shash_spawn(&ctx->spawn, salg, - ahash_crypto_instance(inst)); + err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), type, mask); if (err) - goto out_free_inst; + goto err_free_inst; + alg = crypto_spawn_shash_alg(&ctx->spawn); + + err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base); + if (err) + goto err_free_inst; inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->cra_flags & (CRYPTO_ALG_INTERNAL | - CRYPTO_ALG_OPTIONAL_KEY)); + (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL | + CRYPTO_ALG_OPTIONAL_KEY)); - inst->alg.halg.digestsize = salg->digestsize; - inst->alg.halg.statesize = salg->statesize; + inst->alg.halg.digestsize = alg->digestsize; + inst->alg.halg.statesize = alg->statesize; inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; @@ -706,19 +677,16 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.finup = cryptd_hash_finup_enqueue; inst->alg.export = cryptd_hash_export; inst->alg.import = cryptd_hash_import; - if (crypto_shash_alg_has_setkey(salg)) + if (crypto_shash_alg_has_setkey(alg)) inst->alg.setkey = cryptd_hash_setkey; inst->alg.digest = cryptd_hash_digest_enqueue; err = ahash_register_instance(tmpl, inst); if (err) { +err_free_inst: crypto_drop_shash(&ctx->spawn); -out_free_inst: kfree(inst); } - -out_put_alg: - crypto_mod_put(alg); return err; } -- cgit v1.2.3 From 39e7a283b3089be44a0473b77f4218791ffeba3f Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:54 -0800 Subject: crypto: hmac - use crypto_grab_shash() and simplify error paths Make the hmac template use the new function crypto_grab_shash() to initialize its shash spawn. 
This is needed to make all spawns be initialized in a consistent way. This required making hmac_create() allocate the instance directly rather than use shash_alloc_instance(). Also simplify the error handling by taking advantage of crypto_drop_*() now accepting (as a no-op) spawns that haven't been initialized yet, and by taking advantage of crypto_grab_*() now handling ERR_PTR() names. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/hmac.c | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) (limited to 'crypto') diff --git a/crypto/hmac.c b/crypto/hmac.c index 685e49953605..0a42b7075763 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -165,6 +165,7 @@ static void hmac_exit_tfm(struct crypto_shash *parent) static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) { struct shash_instance *inst; + struct crypto_shash_spawn *spawn; struct crypto_alg *alg; struct shash_alg *salg; int err; @@ -175,31 +176,32 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) return err; - salg = shash_attr_alg(tb[1], 0, 0); - if (IS_ERR(salg)) - return PTR_ERR(salg); + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + spawn = shash_instance_ctx(inst); + + err = crypto_grab_shash(spawn, shash_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, 0); + if (err) + goto err_free_inst; + salg = crypto_spawn_shash_alg(spawn); alg = &salg->base; /* The underlying hash algorithm must not require a key */ err = -EINVAL; if (crypto_shash_alg_needs_key(salg)) - goto out_put_alg; + goto err_free_inst; ds = salg->digestsize; ss = salg->statesize; if (ds > alg->cra_blocksize || ss < alg->cra_blocksize) - goto out_put_alg; + goto err_free_inst; - inst = shash_alloc_instance("hmac", alg); - err = PTR_ERR(inst); - if (IS_ERR(inst)) - goto out_put_alg; - - err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg, - shash_crypto_instance(inst)); + err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg); if (err) - goto out_free_inst; + goto err_free_inst; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; @@ -224,12 +226,9 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) err = shash_register_instance(tmpl, inst); if (err) { -out_free_inst: +err_free_inst: shash_free_instance(shash_crypto_instance(inst)); } - -out_put_alg: - crypto_mod_put(alg); return err; } -- cgit v1.2.3 From 37a861adc95ab7165a33794abda776ea1dad8475 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:55 -0800 Subject: crypto: authenc - use crypto_grab_ahash() and simplify error paths Make the authenc template use the new function crypto_grab_ahash() to initialize its ahash spawn. This is needed to make all spawns be initialized in a consistent way. Also simplify the error handling by taking advantage of crypto_drop_*() now accepting (as a no-op) spawns that haven't been initialized yet, and by taking advantage of crypto_grab_*() now handling ERR_PTR() names. 
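[Editorial illustration] Condensed from the hunks below for readability, the post-patch setup path of crypto_authenc_create() reduces to the following shape (peripheral fields elided with "..."). Because crypto_authenc_free() drops both spawns and crypto_drop_*() tolerates spawns that were never grabbed, a single label now covers every failure point:

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	ctx = aead_instance_ctx(inst);

	err = crypto_grab_ahash(&ctx->auth, aead_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	auth = crypto_spawn_ahash_alg(&ctx->auth);

	err = crypto_grab_skcipher(&ctx->enc, aead_crypto_instance(inst),
				   crypto_attr_alg_name(tb[2]), 0, mask);
	if (err)
		goto err_free_inst;
	enc = crypto_spawn_skcipher_alg(&ctx->enc);
	...
	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		crypto_authenc_free(inst);
	}
	return err;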
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/authenc.c | 52 ++++++++++++++-------------------------------------- 1 file changed, 14 insertions(+), 38 deletions(-) (limited to 'crypto') diff --git a/crypto/authenc.c b/crypto/authenc.c index e31bcec58564..775e7138fd10 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -375,11 +375,10 @@ static int crypto_authenc_create(struct crypto_template *tmpl, struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; + struct authenc_instance_ctx *ctx; struct hash_alg_common *auth; struct crypto_alg *auth_base; struct skcipher_alg *enc; - struct authenc_instance_ctx *ctx; - const char *enc_name; int err; algt = crypto_get_attr_type(tb); @@ -391,35 +390,22 @@ static int crypto_authenc_create(struct crypto_template *tmpl, mask = crypto_requires_sync(algt->type, algt->mask); - auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK | mask); - if (IS_ERR(auth)) - return PTR_ERR(auth); - - auth_base = &auth->base; - - enc_name = crypto_attr_alg_name(tb[2]); - err = PTR_ERR(enc_name); - if (IS_ERR(enc_name)) - goto out_put_auth; - inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); - err = -ENOMEM; if (!inst) - goto out_put_auth; - + return -ENOMEM; ctx = aead_instance_ctx(inst); - err = crypto_init_ahash_spawn(&ctx->auth, auth, - aead_crypto_instance(inst)); + err = crypto_grab_ahash(&ctx->auth, aead_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; + auth = crypto_spawn_ahash_alg(&ctx->auth); + auth_base = &auth->base; err = crypto_grab_skcipher(&ctx->enc, aead_crypto_instance(inst), - enc_name, 0, mask); + crypto_attr_alg_name(tb[2]), 0, mask); if (err) - goto err_drop_auth; - + goto err_free_inst; enc = crypto_spawn_skcipher_alg(&ctx->enc); ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask, @@ -430,12 +416,12 @@ static int crypto_authenc_create(struct crypto_template *tmpl, "authenc(%s,%s)", auth_base->cra_name, enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME) - goto err_drop_enc; + goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)", auth_base->cra_driver_name, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto err_drop_enc; + goto err_free_inst; inst->alg.base.cra_flags = (auth_base->cra_flags | enc->base.cra_flags) & CRYPTO_ALG_ASYNC; @@ -460,21 +446,11 @@ static int crypto_authenc_create(struct crypto_template *tmpl, inst->free = crypto_authenc_free; err = aead_register_instance(tmpl, inst); - if (err) - goto err_drop_enc; - -out: - crypto_mod_put(auth_base); - return err; - -err_drop_enc: - crypto_drop_skcipher(&ctx->enc); -err_drop_auth: - crypto_drop_ahash(&ctx->auth); + if (err) { err_free_inst: - kfree(inst); -out_put_auth: - goto out; + crypto_authenc_free(inst); + } + return err; } static struct crypto_template crypto_authenc_tmpl = { -- cgit v1.2.3 From 370738824b8e2c0ea5d8b4e4b4142fb7bab1a403 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:56 -0800 Subject: crypto: authencesn - use crypto_grab_ahash() and simplify error paths Make the authencesn template use the new function crypto_grab_ahash() to initialize its ahash spawn. This is needed to make all spawns be initialized in a consistent way. Also simplify the error handling by taking advantage of crypto_drop_*() now accepting (as a no-op) spawns that haven't been initialized yet, and by taking advantage of crypto_grab_*() now handling ERR_PTR() names. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/authencesn.c | 52 ++++++++++++++-------------------------------------- 1 file changed, 14 insertions(+), 38 deletions(-) (limited to 'crypto') diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 83bda7f905bb..589008146fce 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -393,11 +393,10 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; + struct authenc_esn_instance_ctx *ctx; struct hash_alg_common *auth; struct crypto_alg *auth_base; struct skcipher_alg *enc; - struct authenc_esn_instance_ctx *ctx; - const char *enc_name; int err; algt = crypto_get_attr_type(tb); @@ -409,47 +408,34 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, mask = crypto_requires_sync(algt->type, algt->mask); - auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK | mask); - if (IS_ERR(auth)) - return PTR_ERR(auth); - - auth_base = &auth->base; - - enc_name = crypto_attr_alg_name(tb[2]); - err = PTR_ERR(enc_name); - if (IS_ERR(enc_name)) - goto out_put_auth; - inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); - err = -ENOMEM; if (!inst) - goto out_put_auth; - + return -ENOMEM; ctx = aead_instance_ctx(inst); - err = crypto_init_ahash_spawn(&ctx->auth, auth, - aead_crypto_instance(inst)); + err = crypto_grab_ahash(&ctx->auth, aead_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; + auth = crypto_spawn_ahash_alg(&ctx->auth); + auth_base = &auth->base; err = crypto_grab_skcipher(&ctx->enc, aead_crypto_instance(inst), - enc_name, 0, mask); + crypto_attr_alg_name(tb[2]), 0, mask); if (err) - goto err_drop_auth; - + goto err_free_inst; enc = crypto_spawn_skcipher_alg(&ctx->enc); err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "authencesn(%s,%s)", auth_base->cra_name, enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME) - goto err_drop_enc; + goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "authencesn(%s,%s)", auth_base->cra_driver_name, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto err_drop_enc; + goto err_free_inst; inst->alg.base.cra_flags = (auth_base->cra_flags | enc->base.cra_flags) & CRYPTO_ALG_ASYNC; @@ -475,21 +461,11 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, inst->free = crypto_authenc_esn_free, err = aead_register_instance(tmpl, inst); - if (err) - goto err_drop_enc; - -out: - crypto_mod_put(auth_base); - return err; - -err_drop_enc: - crypto_drop_skcipher(&ctx->enc); -err_drop_auth: - crypto_drop_ahash(&ctx->auth); + if (err) { err_free_inst: - kfree(inst); -out_put_auth: - goto out; + crypto_authenc_esn_free(inst); + } + return err; } static struct crypto_template crypto_authenc_esn_tmpl = { -- cgit v1.2.3 From ab6ffd360d3ca3c98cba401f923b64683d666ab6 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:57 -0800 Subject: crypto: gcm - use crypto_grab_ahash() and simplify error paths Make the gcm and gcm_base templates use the new function crypto_grab_ahash() to initialize their ahash spawn. This is needed to make all spawns be initialized in a consistent way. Also simplify the error handling by taking advantage of crypto_drop_*() now accepting (as a no-op) spawns that haven't been initialized yet. 
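[Editorial illustration] One detail worth noting in the gcm conversion, condensed from the hunks below: sanity checks on the inner algorithm now run after the grab, against the algorithm the spawn actually resolved to, and on failure they simply jump to the common label instead of unwinding the old ghash lookup by hand:

	err = crypto_grab_ahash(&ctx->ghash, aead_crypto_instance(inst),
				ghash_name, 0, mask);
	if (err)
		goto err_free_inst;
	ghash = crypto_spawn_ahash_alg(&ctx->ghash);

	err = -EINVAL;
	if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
	    ghash->digestsize != 16)
		goto err_free_inst;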
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/gcm.c | 52 ++++++++++++++++------------------------------------ 1 file changed, 16 insertions(+), 36 deletions(-) (limited to 'crypto') diff --git a/crypto/gcm.c b/crypto/gcm.c index 72649b8cbf2a..8e5c0ac65661 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -13,7 +13,6 @@ #include #include #include -#include "internal.h" #include #include #include @@ -582,10 +581,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; + struct gcm_instance_ctx *ctx; struct skcipher_alg *ctr; - struct crypto_alg *ghash_alg; struct hash_alg_common *ghash; - struct gcm_instance_ctx *ctx; int err; algt = crypto_get_attr_type(tb); @@ -597,35 +595,26 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, mask = crypto_requires_sync(algt->type, algt->mask); - ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, - CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK | mask); - if (IS_ERR(ghash_alg)) - return PTR_ERR(ghash_alg); - - ghash = __crypto_hash_alg_common(ghash_alg); - - err = -ENOMEM; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) - goto out_put_ghash; - + return -ENOMEM; ctx = aead_instance_ctx(inst); - err = crypto_init_ahash_spawn(&ctx->ghash, ghash, - aead_crypto_instance(inst)); + + err = crypto_grab_ahash(&ctx->ghash, aead_crypto_instance(inst), + ghash_name, 0, mask); if (err) goto err_free_inst; + ghash = crypto_spawn_ahash_alg(&ctx->ghash); err = -EINVAL; if (strcmp(ghash->base.cra_name, "ghash") != 0 || ghash->digestsize != 16) - goto err_drop_ghash; + goto err_free_inst; err = crypto_grab_skcipher(&ctx->ctr, aead_crypto_instance(inst), ctr_name, 0, mask); if (err) - goto err_drop_ghash; - + goto err_free_inst; ctr = crypto_spawn_skcipher_alg(&ctx->ctr); /* The skcipher algorithm must be CTR mode, using 16-byte blocks. 
*/ @@ -633,18 +622,18 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || crypto_skcipher_alg_ivsize(ctr) != 16 || ctr->base.cra_blocksize != 1) - goto out_put_ctr; + goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME) - goto out_put_ctr; + goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", ctr->base.cra_driver_name, - ghash_alg->cra_driver_name) >= + ghash->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto out_put_ctr; + goto err_free_inst; inst->alg.base.cra_flags = (ghash->base.cra_flags | ctr->base.cra_flags) & CRYPTO_ALG_ASYNC; @@ -667,20 +656,11 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, inst->free = crypto_gcm_free; err = aead_register_instance(tmpl, inst); - if (err) - goto out_put_ctr; - -out_put_ghash: - crypto_mod_put(ghash_alg); - return err; - -out_put_ctr: - crypto_drop_skcipher(&ctx->ctr); -err_drop_ghash: - crypto_drop_ahash(&ctx->ghash); + if (err) { err_free_inst: - kfree(inst); - goto out_put_ghash; + crypto_gcm_free(inst); + } + return err; } static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb) -- cgit v1.2.3 From 05b3bbb53a0570c04aba24e5849a3715e1ed4583 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:58 -0800 Subject: crypto: ccm - use crypto_grab_ahash() and simplify error paths Make the ccm and ccm_base templates use the new function crypto_grab_ahash() to initialize their ahash spawn. This is needed to make all spawns be initialized in a consistent way. Also simplify the error handling by taking advantage of crypto_drop_*() now accepting (as a no-op) spawns that haven't been initialized yet. 
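[Editorial illustration] The ccm template's consistency check, visible in the hunks below, compares algorithm names by skipping the "ctr(" and "cbcmac(" prefixes, which are 4 and 7 characters long. Since both cra_names end with the matching ")", a plain strcmp() of the remainders verifies that ctr and cbcmac wrap the same underlying block cipher:

	/* mac must be "cbcmac(<cipher>)" producing a 16-byte digest */
	if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
	    mac->digestsize != 16)
		goto err_free_inst;

	/* ctr and cbcmac must use the same underlying block cipher */
	if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0)
		goto err_free_inst;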
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ccm.c | 61 ++++++++++++++++++++---------------------------------------- 1 file changed, 20 insertions(+), 41 deletions(-) (limited to 'crypto') diff --git a/crypto/ccm.c b/crypto/ccm.c index 48766e81b933..a9fb46f22eaa 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -15,8 +15,6 @@ #include #include -#include "internal.h" - struct ccm_instance_ctx { struct crypto_skcipher_spawn ctr; struct crypto_ahash_spawn mac; @@ -452,10 +450,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; + struct ccm_instance_ctx *ictx; struct skcipher_alg *ctr; - struct crypto_alg *mac_alg; struct hash_alg_common *mac; - struct ccm_instance_ctx *ictx; int err; algt = crypto_get_attr_type(tb); @@ -467,35 +464,26 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, mask = crypto_requires_sync(algt->type, algt->mask); - mac_alg = crypto_find_alg(mac_name, &crypto_ahash_type, - CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK | - CRYPTO_ALG_ASYNC); - if (IS_ERR(mac_alg)) - return PTR_ERR(mac_alg); - - mac = __crypto_hash_alg_common(mac_alg); - err = -EINVAL; - if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 || - mac->digestsize != 16) - goto out_put_mac; - inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); - err = -ENOMEM; if (!inst) - goto out_put_mac; - + return -ENOMEM; ictx = aead_instance_ctx(inst); - err = crypto_init_ahash_spawn(&ictx->mac, mac, - aead_crypto_instance(inst)); + + err = crypto_grab_ahash(&ictx->mac, aead_crypto_instance(inst), + mac_name, 0, CRYPTO_ALG_ASYNC); if (err) goto err_free_inst; + mac = crypto_spawn_ahash_alg(&ictx->mac); + + err = -EINVAL; + if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 || + mac->digestsize != 16) + goto err_free_inst; err = crypto_grab_skcipher(&ictx->ctr, aead_crypto_instance(inst), ctr_name, 0, mask); if (err) - goto err_drop_mac; - + goto err_free_inst; ctr = crypto_spawn_skcipher_alg(&ictx->ctr); /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */ @@ -503,21 +491,21 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || crypto_skcipher_alg_ivsize(ctr) != 16 || ctr->base.cra_blocksize != 1) - goto err_drop_ctr; + goto err_free_inst; /* ctr and cbcmac must use the same underlying block cipher. 
*/ if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0) - goto err_drop_ctr; + goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME) - goto err_drop_ctr; + goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", ctr->base.cra_driver_name, mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto err_drop_ctr; + goto err_free_inst; inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (mac->base.cra_priority + @@ -539,20 +527,11 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, inst->free = crypto_ccm_free; err = aead_register_instance(tmpl, inst); - if (err) - goto err_drop_ctr; - -out_put_mac: - crypto_mod_put(mac_alg); - return err; - -err_drop_ctr: - crypto_drop_skcipher(&ictx->ctr); -err_drop_mac: - crypto_drop_ahash(&ictx->mac); + if (err) { err_free_inst: - kfree(inst); - goto out_put_mac; + crypto_ccm_free(inst); + } + return err; } static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb) -- cgit v1.2.3 From c282586fc341f9af741928f74a90163d26a1b347 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:58:59 -0800 Subject: crypto: chacha20poly1305 - use crypto_grab_ahash() and simplify error paths Make the rfc7539 and rfc7539esp templates use the new function crypto_grab_ahash() to initialize their ahash spawn. This is needed to make all spawns be initialized in a consistent way. Also simplify the error handling by taking advantage of crypto_drop_*() now accepting (as a no-op) spawns that haven't been initialized yet, and by taking advantage of crypto_grab_*() now handling ERR_PTR() names. 
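[Editorial illustration] The reason the chacha20poly1305 conversion can drop its chacha_name/poly_name bookkeeping entirely is the ERR_PTR() handling inside crypto_grab_spawn(), shown later in this series: a failed attribute-name lookup propagates through the grab call itself, so call sites pass crypto_attr_alg_name() in directly with no IS_ERR() check of their own:

	/* inside crypto_grab_spawn(), per the algapi patch below: */
	/* Allow the result of crypto_attr_alg_name() to be passed directly */
	if (IS_ERR(name))
		return PTR_ERR(name);

	/* which lets templates write simply: */
	err = crypto_grab_skcipher(&ctx->chacha, aead_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);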
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/chacha20poly1305.c | 84 +++++++++++++++-------------------------------- 1 file changed, 27 insertions(+), 57 deletions(-) (limited to 'crypto') diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 09d5a34ab339..ccaea5cb66d1 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -16,8 +16,6 @@ #include #include -#include "internal.h" - struct chachapoly_instance_ctx { struct crypto_skcipher_spawn chacha; struct crypto_ahash_spawn poly; @@ -560,11 +558,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; - struct skcipher_alg *chacha; - struct crypto_alg *poly; - struct hash_alg_common *poly_hash; struct chachapoly_instance_ctx *ctx; - const char *chacha_name, *poly_name; + struct skcipher_alg *chacha; + struct hash_alg_common *poly; int err; if (ivsize > CHACHAPOLY_IV_SIZE) @@ -579,68 +575,51 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, mask = crypto_requires_sync(algt->type, algt->mask); - chacha_name = crypto_attr_alg_name(tb[1]); - if (IS_ERR(chacha_name)) - return PTR_ERR(chacha_name); - poly_name = crypto_attr_alg_name(tb[2]); - if (IS_ERR(poly_name)) - return PTR_ERR(poly_name); - - poly = crypto_find_alg(poly_name, &crypto_ahash_type, - CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK | mask); - if (IS_ERR(poly)) - return PTR_ERR(poly); - poly_hash = __crypto_hash_alg_common(poly); - - err = -EINVAL; - if (poly_hash->digestsize != POLY1305_DIGEST_SIZE) - goto out_put_poly; - - err = -ENOMEM; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) - goto out_put_poly; - + return -ENOMEM; ctx = aead_instance_ctx(inst); ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; - err = crypto_init_ahash_spawn(&ctx->poly, poly_hash, - aead_crypto_instance(inst)); - if (err) - goto err_free_inst; err = crypto_grab_skcipher(&ctx->chacha, aead_crypto_instance(inst), - chacha_name, 0, mask); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) - goto err_drop_poly; - + goto err_free_inst; chacha = crypto_spawn_skcipher_alg(&ctx->chacha); + err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst), + crypto_attr_alg_name(tb[2]), 0, mask); + if (err) + goto err_free_inst; + poly = crypto_spawn_ahash_alg(&ctx->poly); + err = -EINVAL; + if (poly->digestsize != POLY1305_DIGEST_SIZE) + goto err_free_inst; /* Need 16-byte IV size, including Initial Block Counter value */ if (crypto_skcipher_alg_ivsize(chacha) != CHACHA_IV_SIZE) - goto out_drop_chacha; + goto err_free_inst; /* Not a stream cipher? 
*/ if (chacha->base.cra_blocksize != 1) - goto out_drop_chacha; + goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s,%s)", name, chacha->base.cra_name, - poly->cra_name) >= CRYPTO_MAX_ALG_NAME) - goto out_drop_chacha; + poly->base.cra_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s,%s)", name, chacha->base.cra_driver_name, - poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto out_drop_chacha; + poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; - inst->alg.base.cra_flags = (chacha->base.cra_flags | poly->cra_flags) & - CRYPTO_ALG_ASYNC; + inst->alg.base.cra_flags = (chacha->base.cra_flags | + poly->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (chacha->base.cra_priority + - poly->cra_priority) / 2; + poly->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = chacha->base.cra_alignmask | - poly->cra_alignmask; + poly->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen; inst->alg.ivsize = ivsize; @@ -656,20 +635,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, inst->free = chachapoly_free; err = aead_register_instance(tmpl, inst); - if (err) - goto out_drop_chacha; - -out_put_poly: - crypto_mod_put(poly); - return err; - -out_drop_chacha: - crypto_drop_skcipher(&ctx->chacha); -err_drop_poly: - crypto_drop_ahash(&ctx->poly); + if (err) { err_free_inst: - kfree(inst); - goto out_put_poly; + chachapoly_free(inst); + } + return err; } static int rfc7539_create(struct crypto_template *tmpl, struct rtattr **tb) -- cgit v1.2.3 From aacd5b4cfb87306888eb9e3612cb90afbb2ecba5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:59:00 -0800 Subject: crypto: skcipher - use crypto_grab_cipher() and simplify error paths Make skcipher_alloc_instance_simple() use the new function crypto_grab_cipher() to initialize its cipher spawn. This is needed to make all spawns be initialized in a consistent way. Also simplify the error handling by taking advantage of crypto_drop_*() now accepting (as a no-op) spawns that haven't been initialized yet, and by taking advantage of crypto_grab_*() now handling ERR_PTR() names. 
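[Editorial illustration] With the helper reshaped as in the hunks below, a simple single-block-cipher mode template reduces to a few lines. A hedged sketch of a hypothetical caller (example_mode_create, example_encrypt, and example_decrypt are invented names), mirroring how modes such as cbc use skcipher_alloc_instance_simple() after this patch; note the helper pre-installs inst->free, so the error path can rely on it:

	static int example_mode_create(struct crypto_template *tmpl,
				       struct rtattr **tb)
	{
		struct skcipher_instance *inst;
		int err;

		inst = skcipher_alloc_instance_simple(tmpl, tb);
		if (IS_ERR(inst))
			return PTR_ERR(inst);

		/* Default properties come pre-filled; override as needed:
		 * inst->alg.encrypt = example_encrypt;
		 * inst->alg.decrypt = example_decrypt;
		 */

		err = skcipher_register_instance(tmpl, inst);
		if (err)
			inst->free(inst);
		return err;
	}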
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/skcipher.c | 39 +++++++++++++++----------------------- include/crypto/internal/skcipher.h | 4 ++-- 2 files changed, 17 insertions(+), 26 deletions(-) (limited to 'crypto') diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 15c033c960f7..950ff1438131 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -908,7 +908,7 @@ static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm) static void skcipher_free_instance_simple(struct skcipher_instance *inst) { - crypto_drop_spawn(skcipher_instance_ctx(inst)); + crypto_drop_cipher(skcipher_instance_ctx(inst)); kfree(inst); } @@ -932,10 +932,10 @@ struct skcipher_instance *skcipher_alloc_instance_simple( struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; - struct crypto_alg *cipher_alg; - struct skcipher_instance *inst; - struct crypto_spawn *spawn; u32 mask; + struct skcipher_instance *inst; + struct crypto_cipher_spawn *spawn; + struct crypto_alg *cipher_alg; int err; algt = crypto_get_attr_type(tb); @@ -945,32 +945,25 @@ struct skcipher_instance *skcipher_alloc_instance_simple( if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) return ERR_PTR(-EINVAL); - mask = CRYPTO_ALG_TYPE_MASK | - crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); - - cipher_alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask); - if (IS_ERR(cipher_alg)) - return ERR_CAST(cipher_alg); + mask = crypto_requires_off(algt->type, algt->mask, + CRYPTO_ALG_NEED_FALLBACK); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) { - err = -ENOMEM; - goto err_put_cipher_alg; - } + if (!inst) + return ERR_PTR(-ENOMEM); spawn = skcipher_instance_ctx(inst); - err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name, - cipher_alg); + err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; + cipher_alg = crypto_spawn_cipher_alg(spawn); - spawn->dropref = true; - err = crypto_init_spawn(spawn, cipher_alg, - skcipher_crypto_instance(inst), - CRYPTO_ALG_TYPE_MASK); + err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name, + cipher_alg); if (err) goto err_free_inst; + inst->free = skcipher_free_instance_simple; /* Default algorithm properties, can be overridden */ @@ -990,9 +983,7 @@ struct skcipher_instance *skcipher_alloc_instance_simple( return inst; err_free_inst: - kfree(inst); -err_put_cipher_alg: - crypto_mod_put(cipher_alg); + skcipher_free_instance_simple(inst); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple); diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index e387424f6247..10226c12c5df 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -214,9 +214,9 @@ struct skcipher_instance *skcipher_alloc_instance_simple( static inline struct crypto_alg *skcipher_ialg_simple( struct skcipher_instance *inst) { - struct crypto_spawn *spawn = skcipher_instance_ctx(inst); + struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst); - return spawn->alg; + return crypto_spawn_cipher_alg(spawn); } #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ -- cgit v1.2.3 From 166729709775263066af1747c8029e960a20910a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:59:01 -0800 Subject: crypto: cbcmac - use crypto_grab_cipher() and simplify error paths Make the cbcmac template use the new function crypto_grab_cipher() to 
initialize its cipher spawn. This is needed to make all spawns be initialized in a consistent way. This required making cbcmac_create() allocate the instance directly rather than use shash_alloc_instance(). Also simplify the error handling by taking advantage of crypto_drop_*() now accepting (as a no-op) spawns that haven't been initialized yet, and by taking advantage of crypto_grab_*() now handling ERR_PTR() names. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ccm.c | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) (limited to 'crypto') diff --git a/crypto/ccm.c b/crypto/ccm.c index a9fb46f22eaa..411c3973b95c 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -887,6 +887,7 @@ static void cbcmac_exit_tfm(struct crypto_tfm *tfm) static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) { struct shash_instance *inst; + struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; int err; @@ -894,21 +895,20 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) return err; - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK); - if (IS_ERR(alg)) - return PTR_ERR(alg); + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + spawn = shash_instance_ctx(inst); - inst = shash_alloc_instance("cbcmac", alg); - err = PTR_ERR(inst); - if (IS_ERR(inst)) - goto out_put_alg; + err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, 0); + if (err) + goto err_free_inst; + alg = crypto_spawn_cipher_alg(spawn); - err = crypto_init_spawn(shash_instance_ctx(inst), alg, - shash_crypto_instance(inst), - CRYPTO_ALG_TYPE_MASK); + err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg); if (err) - goto out_free_inst; + goto err_free_inst; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = 1; @@ -928,13 +928,10 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.setkey = crypto_cbcmac_digest_setkey; err = shash_register_instance(tmpl, inst); - -out_free_inst: - if (err) + if (err) { +err_free_inst: shash_free_instance(shash_crypto_instance(inst)); - -out_put_alg: - crypto_mod_put(alg); + } return err; } -- cgit v1.2.3 From 1d0459cd83f5426d668ec3304a35f9dbeee6296b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:59:02 -0800 Subject: crypto: cmac - use crypto_grab_cipher() and simplify error paths Make the cmac template use the new function crypto_grab_cipher() to initialize its cipher spawn. This is needed to make all spawns be initialized in a consistent way. This required making cmac_create() allocate the instance directly rather than use shash_alloc_instance(). Also simplify the error handling by taking advantage of crypto_drop_*() now accepting (as a no-op) spawns that haven't been initialized yet, and by taking advantage of crypto_grab_*() now handling ERR_PTR() names. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/cmac.c | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) (limited to 'crypto') diff --git a/crypto/cmac.c b/crypto/cmac.c index 0928aebc6205..c6bf78b5321a 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -222,6 +222,7 @@ static void cmac_exit_tfm(struct crypto_tfm *tfm) static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) { struct shash_instance *inst; + struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; unsigned long alignmask; int err; @@ -230,10 +231,16 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) return err; - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK); - if (IS_ERR(alg)) - return PTR_ERR(alg); + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + spawn = shash_instance_ctx(inst); + + err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, 0); + if (err) + goto err_free_inst; + alg = crypto_spawn_cipher_alg(spawn); switch (alg->cra_blocksize) { case 16: @@ -241,19 +248,12 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) break; default: err = -EINVAL; - goto out_put_alg; + goto err_free_inst; } - inst = shash_alloc_instance("cmac", alg); - err = PTR_ERR(inst); - if (IS_ERR(inst)) - goto out_put_alg; - - err = crypto_init_spawn(shash_instance_ctx(inst), alg, - shash_crypto_instance(inst), - CRYPTO_ALG_TYPE_MASK); + err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg); if (err) - goto out_free_inst; + goto err_free_inst; alignmask = alg->cra_alignmask; inst->alg.base.cra_alignmask = alignmask; @@ -282,12 +282,9 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) err = shash_register_instance(tmpl, inst); if (err) { -out_free_inst: +err_free_inst: shash_free_instance(shash_crypto_instance(inst)); } - -out_put_alg: - crypto_mod_put(alg); return err; } -- cgit v1.2.3 From 3b4e73d8ca810f63af05f367c576f0b33920657b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:59:03 -0800 Subject: crypto: vmac - use crypto_grab_cipher() and simplify error paths Make the vmac64 template use the new function crypto_grab_cipher() to initialize its cipher spawn. This is needed to make all spawns be initialized in a consistent way. This required making vmac_create() allocate the instance directly rather than use shash_alloc_instance(). Also simplify the error handling by taking advantage of crypto_drop_*() now accepting (as a no-op) spawns that haven't been initialized yet, and by taking advantage of crypto_grab_*() now handling ERR_PTR() names. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/vmac.c | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) (limited to 'crypto') diff --git a/crypto/vmac.c b/crypto/vmac.c index 0bbb34dc87c4..9b000aaa20a8 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -618,6 +618,7 @@ static void vmac_exit_tfm(struct crypto_tfm *tfm) static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) { struct shash_instance *inst; + struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; int err; @@ -625,25 +626,24 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) return err; - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK); - if (IS_ERR(alg)) - return PTR_ERR(alg); + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + spawn = shash_instance_ctx(inst); + + err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, 0); + if (err) + goto err_free_inst; + alg = crypto_spawn_cipher_alg(spawn); err = -EINVAL; if (alg->cra_blocksize != VMAC_NONCEBYTES) - goto out_put_alg; + goto err_free_inst; - inst = shash_alloc_instance(tmpl->name, alg); - err = PTR_ERR(inst); - if (IS_ERR(inst)) - goto out_put_alg; - - err = crypto_init_spawn(shash_instance_ctx(inst), alg, - shash_crypto_instance(inst), - CRYPTO_ALG_TYPE_MASK); + err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg); if (err) - goto out_free_inst; + goto err_free_inst; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; @@ -662,12 +662,9 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) err = shash_register_instance(tmpl, inst); if (err) { -out_free_inst: +err_free_inst: shash_free_instance(shash_crypto_instance(inst)); } - -out_put_alg: - crypto_mod_put(alg); return err; } -- cgit v1.2.3 From 1e212a6a562f781f00cba6c7ece93817857e0f32 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:59:04 -0800 Subject: crypto: xcbc - use crypto_grab_cipher() and simplify error paths Make the xcbc template use the new function crypto_grab_cipher() to initialize its cipher spawn. This is needed to make all spawns be initialized in a consistent way. This required making xcbc_create() allocate the instance directly rather than use shash_alloc_instance(). Also simplify the error handling by taking advantage of crypto_drop_*() now accepting (as a no-op) spawns that haven't been initialized yet, and by taking advantage of crypto_grab_*() now handling ERR_PTR() names. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/xcbc.c | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) (limited to 'crypto') diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 0bb26e8f6f5a..9b97fa511f10 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -188,6 +188,7 @@ static void xcbc_exit_tfm(struct crypto_tfm *tfm) static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) { struct shash_instance *inst; + struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; unsigned long alignmask; int err; @@ -196,28 +197,24 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) return err; - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK); - if (IS_ERR(alg)) - return PTR_ERR(alg); + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + spawn = shash_instance_ctx(inst); - switch(alg->cra_blocksize) { - case XCBC_BLOCKSIZE: - break; - default: - goto out_put_alg; - } + err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, 0); + if (err) + goto err_free_inst; + alg = crypto_spawn_cipher_alg(spawn); - inst = shash_alloc_instance("xcbc", alg); - err = PTR_ERR(inst); - if (IS_ERR(inst)) - goto out_put_alg; + err = -EINVAL; + if (alg->cra_blocksize != XCBC_BLOCKSIZE) + goto err_free_inst; - err = crypto_init_spawn(shash_instance_ctx(inst), alg, - shash_crypto_instance(inst), - CRYPTO_ALG_TYPE_MASK); + err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg); if (err) - goto out_free_inst; + goto err_free_inst; alignmask = alg->cra_alignmask | 3; inst->alg.base.cra_alignmask = alignmask; @@ -244,12 +241,9 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) err = shash_register_instance(tmpl, inst); if (err) { -out_free_inst: +err_free_inst: shash_free_instance(shash_crypto_instance(inst)); } - -out_put_alg: - crypto_mod_put(alg); return err; } -- cgit v1.2.3 From d5ed3b65f7012a6592809f7f928f3e3660df8fd9 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:59:05 -0800 Subject: crypto: cipher - make crypto_spawn_cipher() take a crypto_cipher_spawn Now that all users of single-block cipher spawns have been converted to use 'struct crypto_cipher_spawn' rather than the less specifically typed 'struct crypto_spawn', make crypto_spawn_cipher() take a pointer to a 'struct crypto_cipher_spawn' rather than a 'struct crypto_spawn'. 
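[Editorial illustration] The stronger typing works by embedding the generic spawn as a base member of the specific spawn type and making the wrapper function take the specific type, so handing the wrong kind of spawn to crypto_spawn_cipher() becomes a compile error rather than a runtime surprise. A standalone compilable model of the pattern, with invented names (not the kernel types):

	#include <stdio.h>

	/* Model of the generic spawn and its cipher-specific wrapper. */
	struct crypto_spawn_model { const char *alg_name; };
	struct cipher_spawn_model { struct crypto_spawn_model base; };

	/* Generic helper: knows only about the base type. */
	static void spawn_tfm_model(struct crypto_spawn_model *spawn)
	{
		printf("instantiating %s\n", spawn->alg_name);
	}

	/* Typed front end: callers must hold the specific wrapper type,
	 * and the cast to the base happens in exactly one place. */
	static void spawn_cipher_model(struct cipher_spawn_model *spawn)
	{
		spawn_tfm_model(&spawn->base);
	}

	int main(void)
	{
		struct cipher_spawn_model s = { { "aes" } };
		spawn_cipher_model(&s);
		return 0;
	}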
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/adiantum.c | 2 +- crypto/ccm.c | 2 +- crypto/cmac.c | 2 +- crypto/skcipher.c | 2 +- crypto/vmac.c | 2 +- crypto/xcbc.c | 2 +- include/crypto/algapi.h | 4 ++-- 7 files changed, 8 insertions(+), 8 deletions(-) (limited to 'crypto') diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 5b8aa14ccb55..4d7a6cac82ed 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -408,7 +408,7 @@ static int adiantum_init_tfm(struct crypto_skcipher *tfm) if (IS_ERR(streamcipher)) return PTR_ERR(streamcipher); - blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn.base); + blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn); if (IS_ERR(blockcipher)) { err = PTR_ERR(blockcipher); goto err_free_streamcipher; diff --git a/crypto/ccm.c b/crypto/ccm.c index 411c3973b95c..f4abaefd9df5 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -866,7 +866,7 @@ static int cbcmac_init_tfm(struct crypto_tfm *tfm) { struct crypto_cipher *cipher; struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_spawn *spawn = crypto_instance_ctx(inst); + struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst); struct cbcmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); cipher = crypto_spawn_cipher(spawn); diff --git a/crypto/cmac.c b/crypto/cmac.c index c6bf78b5321a..58dc644416bb 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -201,7 +201,7 @@ static int cmac_init_tfm(struct crypto_tfm *tfm) { struct crypto_cipher *cipher; struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_spawn *spawn = crypto_instance_ctx(inst); + struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst); struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); cipher = crypto_spawn_cipher(spawn); diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 950ff1438131..42add1e0814f 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -887,7 +887,7 @@ static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key, static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm) { struct skcipher_instance *inst = skcipher_alg_instance(tfm); - struct crypto_spawn *spawn = skcipher_instance_ctx(inst); + struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst); struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm); struct crypto_cipher *cipher; diff --git a/crypto/vmac.c b/crypto/vmac.c index 9b000aaa20a8..28358a6aef9f 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -596,7 +596,7 @@ static int vmac_final(struct shash_desc *desc, u8 *out) static int vmac_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); - struct crypto_spawn *spawn = crypto_instance_ctx(inst); + struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst); struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm); struct crypto_cipher *cipher; diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 9b97fa511f10..9265e00ea663 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -167,7 +167,7 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm) { struct crypto_cipher *cipher; struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_spawn *spawn = crypto_instance_ctx(inst); + struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst); struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); cipher = crypto_spawn_cipher(spawn); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 7705387f9459..bbf85a854a42 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -234,12 +234,12 @@ static inline struct crypto_alg 
*crypto_spawn_cipher_alg( } static inline struct crypto_cipher *crypto_spawn_cipher( - struct crypto_spawn *spawn) + struct crypto_cipher_spawn *spawn) { u32 type = CRYPTO_ALG_TYPE_CIPHER; u32 mask = CRYPTO_ALG_TYPE_MASK; - return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask)); + return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask)); } static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) -- cgit v1.2.3 From 629f1afc15ee8f34ec67bef0c33b9bccca7eeecc Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:59:06 -0800 Subject: crypto: algapi - remove obsoleted instance creation helpers Remove lots of helper functions that were previously used for instantiating crypto templates, but are now unused: - crypto_get_attr_alg() and similar functions looked up an inner algorithm directly from a template parameter. These were replaced with getting the algorithm's name, then calling crypto_grab_*(). - crypto_init_spawn2() and similar functions initialized a spawn, given an algorithm. Similarly, these were replaced with crypto_grab_*(). - crypto_alloc_instance() and similar functions allocated an instance with a single spawn, given the inner algorithm. These aren't useful anymore since crypto_grab_*() need the instance allocated first. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 25 ------------------ crypto/algapi.c | 57 ------------------------------------------ crypto/shash.c | 19 -------------- include/crypto/algapi.h | 22 ---------------- include/crypto/internal/hash.h | 31 ----------------------- 5 files changed, 154 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index e98a1398ed7f..2b8449fdb93c 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -655,31 +655,6 @@ int ahash_register_instance(struct crypto_template *tmpl, } EXPORT_SYMBOL_GPL(ahash_register_instance); -void ahash_free_instance(struct crypto_instance *inst) -{ - crypto_drop_spawn(crypto_instance_ctx(inst)); - kfree(ahash_instance(inst)); -} -EXPORT_SYMBOL_GPL(ahash_free_instance); - -int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, - struct hash_alg_common *alg, - struct crypto_instance *inst) -{ - return crypto_init_spawn2(&spawn->base, &alg->base, inst, - &crypto_ahash_type); -} -EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn); - -struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask) -{ - struct crypto_alg *alg; - - alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask); - return IS_ERR(alg) ? 
ERR_CAST(alg) : __crypto_hash_alg_common(alg); -} -EXPORT_SYMBOL_GPL(ahash_attr_alg); - bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) { struct crypto_alg *alg = &halg->base; diff --git a/crypto/algapi.c b/crypto/algapi.c index a25ce02918f8..f66a4ff57e6e 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -697,23 +697,6 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, } EXPORT_SYMBOL_GPL(crypto_init_spawn); -int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, - struct crypto_instance *inst, - const struct crypto_type *frontend) -{ - int err = -EINVAL; - - if ((alg->cra_flags ^ frontend->type) & frontend->maskset) - goto out; - - spawn->frontend = frontend; - err = crypto_init_spawn(spawn, alg, inst, frontend->maskset); - -out: - return err; -} -EXPORT_SYMBOL_GPL(crypto_init_spawn2); - int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask) { @@ -876,20 +859,6 @@ const char *crypto_attr_alg_name(struct rtattr *rta) } EXPORT_SYMBOL_GPL(crypto_attr_alg_name); -struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, - const struct crypto_type *frontend, - u32 type, u32 mask) -{ - const char *name; - - name = crypto_attr_alg_name(rta); - if (IS_ERR(name)) - return ERR_CAST(name); - - return crypto_find_alg(name, frontend, type, mask); -} -EXPORT_SYMBOL_GPL(crypto_attr_alg2); - int crypto_attr_u32(struct rtattr *rta, u32 *num) { struct crypto_attr_u32 *nu32; @@ -923,32 +892,6 @@ int crypto_inst_setname(struct crypto_instance *inst, const char *name, } EXPORT_SYMBOL_GPL(crypto_inst_setname); -void *crypto_alloc_instance(const char *name, struct crypto_alg *alg, - unsigned int head) -{ - struct crypto_instance *inst; - char *p; - int err; - - p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn), - GFP_KERNEL); - if (!p) - return ERR_PTR(-ENOMEM); - - inst = (void *)(p + head); - - err = crypto_inst_setname(inst, name, alg); - if (err) - goto err_free_inst; - - return p; - -err_free_inst: - kfree(p); - return ERR_PTR(err); -} -EXPORT_SYMBOL_GPL(crypto_alloc_instance); - void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) { INIT_LIST_HEAD(&queue->list); diff --git a/crypto/shash.c b/crypto/shash.c index e0872ac2729a..4d6ccb59e126 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -584,24 +584,5 @@ void shash_free_instance(struct crypto_instance *inst) } EXPORT_SYMBOL_GPL(shash_free_instance); -int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, - struct shash_alg *alg, - struct crypto_instance *inst) -{ - return crypto_init_spawn2(&spawn->base, &alg->base, inst, - &crypto_shash_type); -} -EXPORT_SYMBOL_GPL(crypto_init_shash_spawn); - -struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask) -{ - struct crypto_alg *alg; - - alg = crypto_attr_alg2(rta, &crypto_shash_type, type, mask); - return IS_ERR(alg) ? 
ERR_CAST(alg) : - container_of(alg, struct shash_alg, base); -} -EXPORT_SYMBOL_GPL(shash_attr_alg); - MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Synchronous cryptographic hash type"); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index bbf85a854a42..224c72743cce 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -113,12 +113,8 @@ void crypto_unregister_instance(struct crypto_instance *inst); int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, struct crypto_instance *inst, u32 mask); -int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, - struct crypto_instance *inst, - const struct crypto_type *frontend); int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); - void crypto_drop_spawn(struct crypto_spawn *spawn); struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, u32 mask); @@ -127,21 +123,9 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn); struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); int crypto_check_attr_type(struct rtattr **tb, u32 type); const char *crypto_attr_alg_name(struct rtattr *rta); -struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, - const struct crypto_type *frontend, - u32 type, u32 mask); - -static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta, - u32 type, u32 mask) -{ - return crypto_attr_alg2(rta, NULL, type, mask); -} - int crypto_attr_u32(struct rtattr *rta, u32 *num); int crypto_inst_setname(struct crypto_instance *inst, const char *name, struct crypto_alg *alg); -void *crypto_alloc_instance(const char *name, struct crypto_alg *alg, - unsigned int head); void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); int crypto_enqueue_request(struct crypto_queue *queue, @@ -254,12 +238,6 @@ static inline struct crypto_async_request *crypto_get_backlog( container_of(queue->backlog, struct crypto_async_request, list); } -static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, - u32 type, u32 mask) -{ - return crypto_attr_alg(tb[1], type, mask); -} - static inline int crypto_requires_off(u32 type, u32 mask, u32 off) { return (type ^ off) & mask & off; diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index e1024fa0032f..79e561abef61 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -87,7 +87,6 @@ int crypto_register_ahashes(struct ahash_alg *algs, int count); void crypto_unregister_ahashes(struct ahash_alg *algs, int count); int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst); -void ahash_free_instance(struct crypto_instance *inst); int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen); @@ -105,10 +104,6 @@ static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg) bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); -int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, - struct hash_alg_common *alg, - struct crypto_instance *inst); - int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); @@ -124,8 +119,6 @@ static inline struct hash_alg_common *crypto_spawn_ahash_alg( return __crypto_hash_alg_common(spawn->base.alg); } -struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask); - int crypto_register_shash(struct shash_alg *alg); void crypto_unregister_shash(struct shash_alg *alg); int 
crypto_register_shashes(struct shash_alg *algs, int count); @@ -134,10 +127,6 @@ int shash_register_instance(struct crypto_template *tmpl, struct shash_instance *inst); void shash_free_instance(struct crypto_instance *inst); -int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, - struct shash_alg *alg, - struct crypto_instance *inst); - int crypto_grab_shash(struct crypto_shash_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); @@ -153,8 +142,6 @@ static inline struct shash_alg *crypto_spawn_shash_alg( return __crypto_shash_alg(spawn->base.alg); } -struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask); - int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); @@ -195,17 +182,6 @@ static inline void *ahash_instance_ctx(struct ahash_instance *inst) return crypto_instance_ctx(ahash_crypto_instance(inst)); } -static inline unsigned int ahash_instance_headroom(void) -{ - return sizeof(struct ahash_alg) - sizeof(struct crypto_alg); -} - -static inline struct ahash_instance *ahash_alloc_instance( - const char *name, struct crypto_alg *alg) -{ - return crypto_alloc_instance(name, alg, ahash_instance_headroom()); -} - static inline void ahash_request_complete(struct ahash_request *req, int err) { req->base.complete(&req->base, err); @@ -262,13 +238,6 @@ static inline void *shash_instance_ctx(struct shash_instance *inst) return crypto_instance_ctx(shash_crypto_instance(inst)); } -static inline struct shash_instance *shash_alloc_instance( - const char *name, struct crypto_alg *alg) -{ - return crypto_alloc_instance(name, alg, - sizeof(struct shash_alg) - sizeof(*alg)); -} - static inline struct crypto_shash *crypto_spawn_shash( struct crypto_shash_spawn *spawn) { -- cgit v1.2.3 From 6d1b41fce0aa916efd1ce0728e1e4bd20a3642d5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:59:07 -0800 Subject: crypto: ahash - unexport crypto_ahash_type Now that all the templates that need ahash spawns have been converted to use crypto_grab_ahash() rather than look up the algorithm directly, crypto_ahash_type is no longer used outside of ahash.c. Make it static. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 5 +++-- include/crypto/internal/hash.h | 2 -- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 2b8449fdb93c..c77717fcea8e 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -23,6 +23,8 @@ #include "internal.h" +static const struct crypto_type crypto_ahash_type; + struct ahash_request_priv { crypto_completion_t complete; void *data; @@ -542,7 +544,7 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) __crypto_hash_alg_common(alg)->digestsize); } -const struct crypto_type crypto_ahash_type = { +static const struct crypto_type crypto_ahash_type = { .extsize = crypto_ahash_extsize, .init_tfm = crypto_ahash_init_tfm, #ifdef CONFIG_PROC_FS @@ -554,7 +556,6 @@ const struct crypto_type crypto_ahash_type = { .type = CRYPTO_ALG_TYPE_AHASH, .tfmsize = offsetof(struct crypto_ahash, base), }; -EXPORT_SYMBOL_GPL(crypto_ahash_type); int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, struct crypto_instance *inst, diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 79e561abef61..c84b7cb29887 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -57,8 +57,6 @@ struct crypto_shash_spawn { struct crypto_spawn base; }; -extern const struct crypto_type crypto_ahash_type; - int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); int crypto_hash_walk_first(struct ahash_request *req, struct crypto_hash_walk *walk); -- cgit v1.2.3 From aed11cf57ddb24aa97ca3b55c9e26c37759c4baa Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 19:59:08 -0800 Subject: crypto: algapi - fold crypto_init_spawn() into crypto_grab_spawn() Now that crypto_init_spawn() is only called by crypto_grab_spawn(), simplify things by moving its functionality into crypto_grab_spawn(). In the process of doing this, also be more consistent about when the spawn and instance are updated, and remove the crypto_spawn::dropref flag since now it's always set. 
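[Editorial illustration] After the fold, the reference taken on the inner algorithm has exactly two release points, which the hunks below make explicit. Condensed from those hunks: the grab takes the reference and links the spawn into the algorithm's user list and the instance's spawn list in one critical section; registration of the instance releases the grab reference; a spawn dropped before registration releases it itself.

	/* crypto_grab_spawn(): take the ref and link the spawn atomically */
	alg = crypto_find_alg(name, spawn->frontend, type, mask);
	...
	down_write(&crypto_alg_sem);
	if (!crypto_is_moribund(alg)) {
		list_add(&spawn->list, &alg->cra_users);
		spawn->alg = alg;
		spawn->mask = mask;
		spawn->next = inst->spawns;
		inst->spawns = spawn;
		err = 0;
	}
	up_write(&crypto_alg_sem);
	if (err)
		crypto_mod_put(alg);

	/* crypto_register_instance(): registration releases the grab ref */
	crypto_mod_put(spawn->alg);

	/* crypto_drop_spawn(): an unregistered spawn releases it itself */
	if (!spawn->registered)
		crypto_mod_put(spawn->alg);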
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/algapi.c | 43 ++++++++++++++----------------------------- include/crypto/algapi.h | 3 --- 2 files changed, 14 insertions(+), 32 deletions(-) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index f66a4ff57e6e..72592795c7e7 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -629,8 +629,7 @@ int crypto_register_instance(struct crypto_template *tmpl, spawn->inst = inst; spawn->registered = true; - if (spawn->dropref) - crypto_mod_put(spawn->alg); + crypto_mod_put(spawn->alg); spawn = next; } @@ -672,47 +671,33 @@ void crypto_unregister_instance(struct crypto_instance *inst) } EXPORT_SYMBOL_GPL(crypto_unregister_instance); -int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, - struct crypto_instance *inst, u32 mask) +int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, + const char *name, u32 type, u32 mask) { + struct crypto_alg *alg; int err = -EAGAIN; if (WARN_ON_ONCE(inst == NULL)) return -EINVAL; - spawn->next = inst->spawns; - inst->spawns = spawn; + /* Allow the result of crypto_attr_alg_name() to be passed directly */ + if (IS_ERR(name)) + return PTR_ERR(name); - spawn->mask = mask; + alg = crypto_find_alg(name, spawn->frontend, type, mask); + if (IS_ERR(alg)) + return PTR_ERR(alg); down_write(&crypto_alg_sem); if (!crypto_is_moribund(alg)) { list_add(&spawn->list, &alg->cra_users); spawn->alg = alg; + spawn->mask = mask; + spawn->next = inst->spawns; + inst->spawns = spawn; err = 0; } up_write(&crypto_alg_sem); - - return err; -} -EXPORT_SYMBOL_GPL(crypto_init_spawn); - -int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, - const char *name, u32 type, u32 mask) -{ - struct crypto_alg *alg; - int err; - - /* Allow the result of crypto_attr_alg_name() to be passed directly */ - if (IS_ERR(name)) - return PTR_ERR(name); - - alg = crypto_find_alg(name, spawn->frontend, type, mask); - if (IS_ERR(alg)) - return PTR_ERR(alg); - - spawn->dropref = true; - err = crypto_init_spawn(spawn, alg, inst, mask); if (err) crypto_mod_put(alg); return err; @@ -729,7 +714,7 @@ void crypto_drop_spawn(struct crypto_spawn *spawn) list_del(&spawn->list); up_write(&crypto_alg_sem); - if (spawn->dropref && !spawn->registered) + if (!spawn->registered) crypto_mod_put(spawn->alg); } EXPORT_SYMBOL_GPL(crypto_drop_spawn); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 224c72743cce..c16c50f8dac1 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -82,7 +82,6 @@ struct crypto_spawn { const struct crypto_type *frontend; u32 mask; bool dead; - bool dropref; bool registered; }; @@ -111,8 +110,6 @@ int crypto_register_instance(struct crypto_template *tmpl, struct crypto_instance *inst); void crypto_unregister_instance(struct crypto_instance *inst); -int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, - struct crypto_instance *inst, u32 mask); int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); void crypto_drop_spawn(struct crypto_spawn *spawn); -- cgit v1.2.3 From 48fb3e5785be7ef69a43c04f617a1c05000ee2d0 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 20:04:35 -0800 Subject: crypto: hash - add support for new way of freeing instances Add support to shash and ahash for the new way of freeing instances (already used for skcipher, aead, and akcipher) where a ->free() method is installed to the instance struct itself. 
These methods are more strongly-typed than crypto_template::free(), which they replace. This will allow removing support for the old way of freeing instances. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 13 +++++++++++++ crypto/shash.c | 13 +++++++++++++ include/crypto/internal/hash.h | 2 ++ 3 files changed, 28 insertions(+) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index c77717fcea8e..61e374d76b04 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -511,6 +511,18 @@ static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) return crypto_alg_extsize(alg); } +static void crypto_ahash_free_instance(struct crypto_instance *inst) +{ + struct ahash_instance *ahash = ahash_instance(inst); + + if (!ahash->free) { + inst->tmpl->free(inst); + return; + } + + ahash->free(ahash); +} + #ifdef CONFIG_NET static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) { @@ -547,6 +559,7 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) static const struct crypto_type crypto_ahash_type = { .extsize = crypto_ahash_extsize, .init_tfm = crypto_ahash_init_tfm, + .free = crypto_ahash_free_instance, #ifdef CONFIG_PROC_FS .show = crypto_ahash_show, #endif diff --git a/crypto/shash.c b/crypto/shash.c index 4d6ccb59e126..2f6adb49727b 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -423,6 +423,18 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm) return 0; } +static void crypto_shash_free_instance(struct crypto_instance *inst) +{ + struct shash_instance *shash = shash_instance(inst); + + if (!shash->free) { + inst->tmpl->free(inst); + return; + } + + shash->free(shash); +} + #ifdef CONFIG_NET static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) { @@ -459,6 +471,7 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) static const struct crypto_type crypto_shash_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_shash_init_tfm, + .free = crypto_shash_free_instance, #ifdef CONFIG_PROC_FS .show = crypto_shash_show, #endif diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index c84b7cb29887..c550386221bb 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -30,6 +30,7 @@ struct crypto_hash_walk { }; struct ahash_instance { + void (*free)(struct ahash_instance *inst); union { struct { char head[offsetof(struct ahash_alg, halg.base)]; @@ -40,6 +41,7 @@ struct ahash_instance { }; struct shash_instance { + void (*free)(struct shash_instance *inst); union { struct { char head[offsetof(struct shash_alg, base)]; -- cgit v1.2.3 From 0f8f6d86d415f9d88dc0f7847f11d0c52dba1965 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 20:04:36 -0800 Subject: crypto: geniv - convert to new way of freeing instances Convert the "seqiv" template to the new way of freeing instances where a ->free() method is installed to the instance struct itself. Also remove the unused implementation of the old way of freeing instances from the "echainiv" template, since it's already using the new way too. In doing this, also simplify the code by making the helper function aead_geniv_alloc() install the ->free() method, instead of making seqiv and echainiv do this themselves. This is analogous to how skcipher_alloc_instance_simple() works. This will allow removing support for the old way of freeing instances. 
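For illustration, here is roughly what a geniv-style template's ->create() reduces to under this scheme (a sketch only: example_aead_create is hypothetical, and attribute checks plus algorithm property setup are omitted; aead_geniv_alloc() now installs ->free on the instance it returns):

	static int example_aead_create(struct crypto_template *tmpl,
				       struct rtattr **tb)
	{
		struct aead_instance *inst;
		int err;

		/* aead_geniv_alloc() sets inst->free = aead_geniv_free */
		inst = aead_geniv_alloc(tmpl, tb, 0, 0);
		if (IS_ERR(inst))
			return PTR_ERR(inst);

		err = aead_register_instance(tmpl, inst);
		if (err)
			inst->free(inst);	/* strongly-typed release */
		return err;
	}

The seqiv and echainiv hunks below follow exactly this shape.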
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/echainiv.c | 20 ++++---------------- crypto/geniv.c | 15 ++++++++------- crypto/seqiv.c | 20 ++++---------------- include/crypto/internal/geniv.h | 1 - 4 files changed, 16 insertions(+), 40 deletions(-) (limited to 'crypto') diff --git a/crypto/echainiv.c b/crypto/echainiv.c index a49cbf7b0929..4a2f02baba14 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c @@ -133,29 +133,17 @@ static int echainiv_aead_create(struct crypto_template *tmpl, inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx); inst->alg.base.cra_ctxsize += inst->alg.ivsize; - inst->free = aead_geniv_free; - err = aead_register_instance(tmpl, inst); - if (err) - goto free_inst; - -out: - return err; - + if (err) { free_inst: - aead_geniv_free(inst); - goto out; -} - -static void echainiv_free(struct crypto_instance *inst) -{ - aead_geniv_free(aead_instance(inst)); + inst->free(inst); + } + return err; } static struct crypto_template echainiv_tmpl = { .name = "echainiv", .create = echainiv_aead_create, - .free = echainiv_free, .module = THIS_MODULE, }; diff --git a/crypto/geniv.c b/crypto/geniv.c index 7afa48414f3a..dbcc640274cd 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -32,6 +32,12 @@ static int aead_geniv_setauthsize(struct crypto_aead *tfm, return crypto_aead_setauthsize(ctx->child, authsize); } +static void aead_geniv_free(struct aead_instance *inst) +{ + crypto_drop_aead(aead_instance_ctx(inst)); + kfree(inst); +} + struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, struct rtattr **tb, u32 type, u32 mask) { @@ -100,6 +106,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, inst->alg.ivsize = ivsize; inst->alg.maxauthsize = maxauthsize; + inst->free = aead_geniv_free; + out: return inst; @@ -112,13 +120,6 @@ err_free_inst: } EXPORT_SYMBOL_GPL(aead_geniv_alloc); -void aead_geniv_free(struct aead_instance *inst) -{ - crypto_drop_aead(aead_instance_ctx(inst)); - kfree(inst); -} -EXPORT_SYMBOL_GPL(aead_geniv_free); - int aead_init_geniv(struct crypto_aead *aead) { struct aead_geniv_ctx *ctx = crypto_aead_ctx(aead); diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 96d222c32acc..f124b9b54e15 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -18,8 +18,6 @@ #include #include -static void seqiv_free(struct crypto_instance *inst); - static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) { struct aead_request *subreq = aead_request_ctx(req); @@ -159,15 +157,11 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_ctxsize += inst->alg.ivsize; err = aead_register_instance(tmpl, inst); - if (err) - goto free_inst; - -out: - return err; - + if (err) { free_inst: - aead_geniv_free(inst); - goto out; + inst->free(inst); + } + return err; } static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb) @@ -184,15 +178,9 @@ static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb) return seqiv_aead_create(tmpl, tb); } -static void seqiv_free(struct crypto_instance *inst) -{ - aead_geniv_free(aead_instance(inst)); -} - static struct crypto_template seqiv_tmpl = { .name = "seqiv", .create = seqiv_create, - .free = seqiv_free, .module = THIS_MODULE, }; diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h index 0108c0c7b2ed..229d37681a9d 100644 --- a/include/crypto/internal/geniv.h +++ b/include/crypto/internal/geniv.h @@ -21,7 +21,6 @@ struct aead_geniv_ctx { struct aead_instance 
*aead_geniv_alloc(struct crypto_template *tmpl, struct rtattr **tb, u32 type, u32 mask); -void aead_geniv_free(struct aead_instance *inst); int aead_init_geniv(struct crypto_aead *tfm); void aead_exit_geniv(struct crypto_aead *tfm); -- cgit v1.2.3 From 758ec5ac5be8923b92c5214d91f6ba1236b95356 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 20:04:37 -0800 Subject: crypto: cryptd - convert to new way of freeing instances Convert the "cryptd" template to the new way of freeing instances, where a ->free() method is installed to the instance struct itself. This replaces the weakly-typed method crypto_template::free(). This will allow removing support for the old way of freeing instances. Note that the 'default' case in cryptd_free() was already unreachable. So, we aren't missing anything by keeping only the ahash and aead parts. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/cryptd.c | 42 ++++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 22 deletions(-) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index fb03acac7d9a..d94c75c840a5 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -631,6 +631,14 @@ static int cryptd_hash_import(struct ahash_request *req, const void *in) return crypto_shash_import(desc, in); } +static void cryptd_hash_free(struct ahash_instance *inst) +{ + struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst); + + crypto_drop_shash(&ctx->spawn); + kfree(inst); +} + static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, struct cryptd_queue *queue) { @@ -681,6 +689,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.setkey = cryptd_hash_setkey; inst->alg.digest = cryptd_hash_digest_enqueue; + inst->free = cryptd_hash_free; + err = ahash_register_instance(tmpl, inst); if (err) { err_free_inst: @@ -808,6 +818,14 @@ static void cryptd_aead_exit_tfm(struct crypto_aead *tfm) crypto_free_aead(ctx->child); } +static void cryptd_aead_free(struct aead_instance *inst) +{ + struct aead_instance_ctx *ctx = aead_instance_ctx(inst); + + crypto_drop_aead(&ctx->aead_spawn); + kfree(inst); +} + static int cryptd_create_aead(struct crypto_template *tmpl, struct rtattr **tb, struct cryptd_queue *queue) @@ -857,6 +875,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl, inst->alg.encrypt = cryptd_aead_encrypt_enqueue; inst->alg.decrypt = cryptd_aead_decrypt_enqueue; + inst->free = cryptd_aead_free; + err = aead_register_instance(tmpl, inst); if (err) { out_drop_aead: @@ -889,31 +909,9 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) return -EINVAL; } -static void cryptd_free(struct crypto_instance *inst) -{ - struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); - struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); - struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst); - - switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { - case CRYPTO_ALG_TYPE_AHASH: - crypto_drop_shash(&hctx->spawn); - kfree(ahash_instance(inst)); - return; - case CRYPTO_ALG_TYPE_AEAD: - crypto_drop_aead(&aead_ctx->aead_spawn); - kfree(aead_instance(inst)); - return; - default: - crypto_drop_spawn(&ctx->spawn); - kfree(inst); - } -} - static struct crypto_template cryptd_tmpl = { .name = "cryptd", .create = cryptd_create, - .free = cryptd_free, .module = THIS_MODULE, }; -- cgit v1.2.3 From a39c66cc2f6108c8346dc882bdcf72861aaca956 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 20:04:38 
-0800 Subject: crypto: shash - convert shash_free_instance() to new style Convert shash_free_instance() and its users to the new way of freeing instances, where a ->free() method is installed to the instance struct itself. This replaces the weakly-typed method crypto_template::free(). This will allow removing support for the old way of freeing instances. Also give shash_free_instance() a more descriptive name to reflect that it's only for instances with a single spawn, not for any instance. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ccm.c | 5 +++-- crypto/cmac.c | 5 +++-- crypto/hmac.c | 5 +++-- crypto/shash.c | 8 ++++---- crypto/vmac.c | 5 +++-- crypto/xcbc.c | 5 +++-- include/crypto/internal/hash.h | 2 +- 7 files changed, 20 insertions(+), 15 deletions(-) (limited to 'crypto') diff --git a/crypto/ccm.c b/crypto/ccm.c index f4abaefd9df5..241ecdc5c4e0 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -927,10 +927,12 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.final = crypto_cbcmac_digest_final; inst->alg.setkey = crypto_cbcmac_digest_setkey; + inst->free = shash_free_singlespawn_instance; + err = shash_register_instance(tmpl, inst); if (err) { err_free_inst: - shash_free_instance(shash_crypto_instance(inst)); + shash_free_singlespawn_instance(inst); } return err; } @@ -939,7 +941,6 @@ static struct crypto_template crypto_ccm_tmpls[] = { { .name = "cbcmac", .create = cbcmac_create, - .free = shash_free_instance, .module = THIS_MODULE, }, { .name = "ccm_base", diff --git a/crypto/cmac.c b/crypto/cmac.c index 58dc644416bb..143a6544c873 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -280,10 +280,12 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.final = crypto_cmac_digest_final; inst->alg.setkey = crypto_cmac_digest_setkey; + inst->free = shash_free_singlespawn_instance; + err = shash_register_instance(tmpl, inst); if (err) { err_free_inst: - shash_free_instance(shash_crypto_instance(inst)); + shash_free_singlespawn_instance(inst); } return err; } @@ -291,7 +293,6 @@ err_free_inst: static struct crypto_template crypto_cmac_tmpl = { .name = "cmac", .create = cmac_create, - .free = shash_free_instance, .module = THIS_MODULE, }; diff --git a/crypto/hmac.c b/crypto/hmac.c index 0a42b7075763..e38bfb948278 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -224,10 +224,12 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.init_tfm = hmac_init_tfm; inst->alg.exit_tfm = hmac_exit_tfm; + inst->free = shash_free_singlespawn_instance; + err = shash_register_instance(tmpl, inst); if (err) { err_free_inst: - shash_free_instance(shash_crypto_instance(inst)); + shash_free_singlespawn_instance(inst); } return err; } @@ -235,7 +237,6 @@ err_free_inst: static struct crypto_template hmac_tmpl = { .name = "hmac", .create = hmac_create, - .free = shash_free_instance, .module = THIS_MODULE, }; diff --git a/crypto/shash.c b/crypto/shash.c index 2f6adb49727b..e05e75b0f402 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -590,12 +590,12 @@ int shash_register_instance(struct crypto_template *tmpl, } EXPORT_SYMBOL_GPL(shash_register_instance); -void shash_free_instance(struct crypto_instance *inst) +void shash_free_singlespawn_instance(struct shash_instance *inst) { - crypto_drop_spawn(crypto_instance_ctx(inst)); - kfree(shash_instance(inst)); + crypto_drop_spawn(shash_instance_ctx(inst)); + kfree(inst); } -EXPORT_SYMBOL_GPL(shash_free_instance); 
+EXPORT_SYMBOL_GPL(shash_free_singlespawn_instance); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Synchronous cryptographic hash type"); diff --git a/crypto/vmac.c b/crypto/vmac.c index 28358a6aef9f..2d906830df96 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -660,10 +660,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.final = vmac_final; inst->alg.setkey = vmac_setkey; + inst->free = shash_free_singlespawn_instance; + err = shash_register_instance(tmpl, inst); if (err) { err_free_inst: - shash_free_instance(shash_crypto_instance(inst)); + shash_free_singlespawn_instance(inst); } return err; } @@ -671,7 +673,6 @@ err_free_inst: static struct crypto_template vmac64_tmpl = { .name = "vmac64", .create = vmac_create, - .free = shash_free_instance, .module = THIS_MODULE, }; diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 9265e00ea663..598ec88abf0f 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -239,10 +239,12 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.final = crypto_xcbc_digest_final; inst->alg.setkey = crypto_xcbc_digest_setkey; + inst->free = shash_free_singlespawn_instance; + err = shash_register_instance(tmpl, inst); if (err) { err_free_inst: - shash_free_instance(shash_crypto_instance(inst)); + shash_free_singlespawn_instance(inst); } return err; } @@ -250,7 +252,6 @@ err_free_inst: static struct crypto_template crypto_xcbc_tmpl = { .name = "xcbc", .create = xcbc_create, - .free = shash_free_instance, .module = THIS_MODULE, }; diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index c550386221bb..89f6f46ab2b8 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -125,7 +125,7 @@ int crypto_register_shashes(struct shash_alg *algs, int count); void crypto_unregister_shashes(struct shash_alg *algs, int count); int shash_register_instance(struct crypto_template *tmpl, struct shash_instance *inst); -void shash_free_instance(struct crypto_instance *inst); +void shash_free_singlespawn_instance(struct shash_instance *inst); int crypto_grab_shash(struct crypto_shash_spawn *spawn, struct crypto_instance *inst, -- cgit v1.2.3 From a24a1fd731274ebbca873000e2c7fbe8224ae4c8 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 20:04:39 -0800 Subject: crypto: algapi - remove crypto_template::{alloc,free}() Now that all templates provide a ->create() method which creates an instance, installs a strongly-typed ->free() method directly to it, and registers it, the older ->alloc() and ->free() methods in 'struct crypto_template' are no longer used. Remove them. 
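The net effect on template authors: a template now declares only ->create(), which is expected to register an instance that carries its own ->free(). A minimal declaration looks like this (sketch; example_create stands in for a real ->create() implementation):

	static struct crypto_template example_tmpl = {
		.name   = "example",
		.create = example_create,
		.module = THIS_MODULE,
	};

and cryptomgr_probe() can call tmpl->create() unconditionally, as the algboss hunk below shows.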
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/aead.c | 5 ----- crypto/ahash.c | 5 ----- crypto/algapi.c | 5 ----- crypto/algboss.c | 12 +----------- crypto/shash.c | 5 ----- include/crypto/algapi.h | 2 -- 6 files changed, 1 insertion(+), 33 deletions(-) (limited to 'crypto') diff --git a/crypto/aead.c b/crypto/aead.c index 02a0db076d7e..7707d3223101 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -185,11 +185,6 @@ static void crypto_aead_free_instance(struct crypto_instance *inst) { struct aead_instance *aead = aead_instance(inst); - if (!aead->free) { - inst->tmpl->free(inst); - return; - } - aead->free(aead); } diff --git a/crypto/ahash.c b/crypto/ahash.c index 61e374d76b04..cd5d9847d513 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -515,11 +515,6 @@ static void crypto_ahash_free_instance(struct crypto_instance *inst) { struct ahash_instance *ahash = ahash_instance(inst); - if (!ahash->free) { - inst->tmpl->free(inst); - return; - } - ahash->free(ahash); } diff --git a/crypto/algapi.c b/crypto/algapi.c index 72592795c7e7..69605e21af92 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -65,11 +65,6 @@ static int crypto_check_alg(struct crypto_alg *alg) static void crypto_free_instance(struct crypto_instance *inst) { - if (!inst->alg.cra_type->free) { - inst->tmpl->free(inst); - return; - } - inst->alg.cra_type->free(inst); } diff --git a/crypto/algboss.c b/crypto/algboss.c index a62149d6c839..535f1f87e6c1 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -58,7 +58,6 @@ static int cryptomgr_probe(void *data) { struct cryptomgr_param *param = data; struct crypto_template *tmpl; - struct crypto_instance *inst; int err; tmpl = crypto_lookup_template(param->template); @@ -66,16 +65,7 @@ static int cryptomgr_probe(void *data) goto out; do { - if (tmpl->create) { - err = tmpl->create(tmpl, param->tb); - continue; - } - - inst = tmpl->alloc(param->tb); - if (IS_ERR(inst)) - err = PTR_ERR(inst); - else if ((err = crypto_register_instance(tmpl, inst))) - tmpl->free(inst); + err = tmpl->create(tmpl, param->tb); } while (err == -EAGAIN && !signal_pending(current)); crypto_tmpl_put(tmpl); diff --git a/crypto/shash.c b/crypto/shash.c index e05e75b0f402..70faf28b2d14 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -427,11 +427,6 @@ static void crypto_shash_free_instance(struct crypto_instance *inst) { struct shash_instance *shash = shash_instance(inst); - if (!shash->free) { - inst->tmpl->free(inst); - return; - } - shash->free(shash); } diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index c16c50f8dac1..e115f9215ed5 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -63,8 +63,6 @@ struct crypto_template { struct hlist_head instances; struct module *module; - struct crypto_instance *(*alloc)(struct rtattr **tb); - void (*free)(struct crypto_instance *inst); int (*create)(struct crypto_template *tmpl, struct rtattr **tb); char name[CRYPTO_MAX_ALG_NAME]; -- cgit v1.2.3 From d4fdc2dfaa755e0bf22de6a2774cac2e5ae45cf4 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Jan 2020 20:04:40 -0800 Subject: crypto: algapi - enforce that all instances have a ->free() method All instances need to have a ->free() method, but people could forget to set it and then not notice if the instance is never unregistered. To help detect this bug earlier, don't allow an instance without a ->free() method to be registered, and complain loudly if someone tries to do it. 
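From a template author's point of view, the contract is now (sketch, with a hypothetical example_free; the aead, ahash, akcipher and skcipher registration helpers behave the same way):

	inst->free = example_free;		/* must be set first... */
	err = shash_register_instance(tmpl, inst);
	/* ...otherwise this WARNs and fails with -EINVAL */

so a forgotten ->free() shows up immediately at registration time, instead of as a silent leak on an unregistration path that may never run.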
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/aead.c | 3 +++ crypto/ahash.c | 3 +++ crypto/akcipher.c | 2 ++ crypto/shash.c | 3 +++ crypto/skcipher.c | 3 +++ 5 files changed, 14 insertions(+) (limited to 'crypto') diff --git a/crypto/aead.c b/crypto/aead.c index 7707d3223101..16991095270d 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -288,6 +288,9 @@ int aead_register_instance(struct crypto_template *tmpl, { int err; + if (WARN_ON(!inst->free)) + return -EINVAL; + err = aead_prepare_alg(&inst->alg); if (err) return err; diff --git a/crypto/ahash.c b/crypto/ahash.c index cd5d9847d513..68a0f0cb75c4 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -656,6 +656,9 @@ int ahash_register_instance(struct crypto_template *tmpl, { int err; + if (WARN_ON(!inst->free)) + return -EINVAL; + err = ahash_prepare_alg(&inst->alg); if (err) return err; diff --git a/crypto/akcipher.c b/crypto/akcipher.c index eeed6c151d2f..f866085c8a4a 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -147,6 +147,8 @@ EXPORT_SYMBOL_GPL(crypto_unregister_akcipher); int akcipher_register_instance(struct crypto_template *tmpl, struct akcipher_instance *inst) { + if (WARN_ON(!inst->free)) + return -EINVAL; akcipher_prepare_alg(&inst->alg); return crypto_register_instance(tmpl, akcipher_crypto_instance(inst)); } diff --git a/crypto/shash.c b/crypto/shash.c index 70faf28b2d14..c075b26c2a1d 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -577,6 +577,9 @@ int shash_register_instance(struct crypto_template *tmpl, { int err; + if (WARN_ON(!inst->free)) + return -EINVAL; + err = shash_prepare_alg(&inst->alg); if (err) return err; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 42add1e0814f..7221def7b9a7 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -865,6 +865,9 @@ int skcipher_register_instance(struct crypto_template *tmpl, { int err; + if (WARN_ON(!inst->free)) + return -EINVAL; + err = skcipher_prepare_alg(&inst->alg); if (err) return err; -- cgit v1.2.3 From 1c08a104360f3e18f4ee6346c21cc3923efb952e Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 5 Jan 2020 22:40:46 -0500 Subject: crypto: poly1305 - add new 32 and 64-bit generic versions These two C implementations from Zinc -- a 32x32 one and a 64x64 one, depending on the platform -- come from Andrew Moon's public domain poly1305-donna portable code, modified for usage in the kernel. The precomputation in the 32-bit version and the use of 64x64 multiplies in the 64-bit version make these perform better than the code it replaces. Moon's code is also very widespread and has received many eyeballs of scrutiny. There's a bit of interference with the x86 implementation, which relies on internal details of the old scalar implementation. In the next commit, the x86 implementation will be replaced with a faster one that doesn't rely on this, so none of this matters much. But for now, to keep this passing the tests, we inline the bits of the old implementation that the x86 implementation relied on. Also, since we now support a slightly larger key space, via the union, some offsets had to be fixed up. Nonce calculation was folded in with the emit function, to take advantage of 64x64 arithmetic. However, Adiantum appeared to rely on no nonce handling in emit, so this path was conditionalized. We also introduced a new struct, poly1305_core_key, to represent the precise amount of space that particular implementation uses.
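For reference, a minimal use of the revised core interface looks roughly like this (a sketch, not the full streaming API: the input is assumed block-aligned, raw_key is the 32-byte Poly1305 key, and the nonce words are the last 16 key bytes loaded little-endian; passing a NULL nonce yields only the ε-almost-∆-universal hash, as Adiantum and NHPoly1305 do):

	struct poly1305_core_key key;
	struct poly1305_state state;
	u32 nonce[4];	/* get_unaligned_le32(raw_key + 16) ... (raw_key + 28) */
	u8 mac[POLY1305_DIGEST_SIZE];

	poly1305_core_setkey(&key, raw_key);	/* clamps r, precomputes r multiples; uses first 16 key bytes */
	poly1305_core_init(&state);
	poly1305_core_blocks(&state, &key, msg, nblocks, 1);	/* hibit=1 for full blocks */
	poly1305_core_emit(&state, nonce, mac);	/* nonce added at emit time */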
Testing with kbench9000, depending on the CPU, the update function for the 32x32 version has been improved by 4%-7%, and for the 64x64 by 19%-30%. The 32x32 gains are small, but I think there's great value in having a parallel implementation to the 64x64 one so that the two can be compared side-by-side as nice stand-alone units. Signed-off-by: Jason A. Donenfeld Signed-off-by: Herbert Xu --- arch/x86/crypto/poly1305-avx2-x86_64.S | 20 +-- arch/x86/crypto/poly1305_glue.c | 215 +++++++++++++++++++++++++++++++-- crypto/adiantum.c | 4 +- crypto/nhpoly1305.c | 2 +- crypto/poly1305_generic.c | 25 +++- include/crypto/internal/poly1305.h | 45 ++----- include/crypto/nhpoly1305.h | 4 +- include/crypto/poly1305.h | 26 +++- lib/crypto/Makefile | 4 +- lib/crypto/poly1305-donna32.c | 204 +++++++++++++++++++++++++++++++ lib/crypto/poly1305-donna64.c | 185 ++++++++++++++++++++++++++++ lib/crypto/poly1305.c | 169 ++------------------------ 12 files changed, 675 insertions(+), 228 deletions(-) create mode 100644 lib/crypto/poly1305-donna32.c create mode 100644 lib/crypto/poly1305-donna64.c (limited to 'crypto') diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S index d6063feda9da..8f56989ea599 100644 --- a/arch/x86/crypto/poly1305-avx2-x86_64.S +++ b/arch/x86/crypto/poly1305-avx2-x86_64.S @@ -34,16 +34,16 @@ ORMASK: .octa 0x00000000010000000000000001000000 #define u2 0x08(%r8) #define u3 0x0c(%r8) #define u4 0x10(%r8) -#define w0 0x14(%r8) -#define w1 0x18(%r8) -#define w2 0x1c(%r8) -#define w3 0x20(%r8) -#define w4 0x24(%r8) -#define y0 0x28(%r8) -#define y1 0x2c(%r8) -#define y2 0x30(%r8) -#define y3 0x34(%r8) -#define y4 0x38(%r8) +#define w0 0x18(%r8) +#define w1 0x1c(%r8) +#define w2 0x20(%r8) +#define w3 0x24(%r8) +#define w4 0x28(%r8) +#define y0 0x30(%r8) +#define y1 0x34(%r8) +#define y2 0x38(%r8) +#define y3 0x3c(%r8) +#define y4 0x40(%r8) #define m %rsi #define hc0 %ymm0 #define hc1 %ymm1 diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c index 0cc4537e6617..edb7113e36f3 100644 --- a/arch/x86/crypto/poly1305_glue.c +++ b/arch/x86/crypto/poly1305_glue.c @@ -25,6 +25,21 @@ asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r, static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_simd); static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2); +static inline u64 mlt(u64 a, u64 b) +{ + return a * b; +} + +static inline u32 sr(u64 v, u_char n) +{ + return v >> n; +} + +static inline u32 and(u32 v, u32 mask) +{ + return v & mask; +} + static void poly1305_simd_mult(u32 *a, const u32 *b) { u8 m[POLY1305_BLOCK_SIZE]; @@ -36,6 +51,168 @@ static void poly1305_simd_mult(u32 *a, const u32 *b) poly1305_block_sse2(a, m, b, 1); } +static void poly1305_integer_setkey(struct poly1305_key *key, const u8 *raw_key) +{ + /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ + key->r[0] = (get_unaligned_le32(raw_key + 0) >> 0) & 0x3ffffff; + key->r[1] = (get_unaligned_le32(raw_key + 3) >> 2) & 0x3ffff03; + key->r[2] = (get_unaligned_le32(raw_key + 6) >> 4) & 0x3ffc0ff; + key->r[3] = (get_unaligned_le32(raw_key + 9) >> 6) & 0x3f03fff; + key->r[4] = (get_unaligned_le32(raw_key + 12) >> 8) & 0x00fffff; +} + +static void poly1305_integer_blocks(struct poly1305_state *state, + const struct poly1305_key *key, + const void *src, + unsigned int nblocks, u32 hibit) +{ + u32 r0, r1, r2, r3, r4; + u32 s1, s2, s3, s4; + u32 h0, h1, h2, h3, h4; + u64 d0, d1, d2, d3, d4; + + if (!nblocks) + return; + + r0 = key->r[0]; + r1 = key->r[1]; + 
r2 = key->r[2]; + r3 = key->r[3]; + r4 = key->r[4]; + + s1 = r1 * 5; + s2 = r2 * 5; + s3 = r3 * 5; + s4 = r4 * 5; + + h0 = state->h[0]; + h1 = state->h[1]; + h2 = state->h[2]; + h3 = state->h[3]; + h4 = state->h[4]; + + do { + /* h += m[i] */ + h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff; + h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff; + h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff; + h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff; + h4 += (get_unaligned_le32(src + 12) >> 8) | (hibit << 24); + + /* h *= r */ + d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + + mlt(h3, s2) + mlt(h4, s1); + d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) + + mlt(h3, s3) + mlt(h4, s2); + d2 = mlt(h0, r2) + mlt(h1, r1) + mlt(h2, r0) + + mlt(h3, s4) + mlt(h4, s3); + d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) + + mlt(h3, r0) + mlt(h4, s4); + d4 = mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) + + mlt(h3, r1) + mlt(h4, r0); + + /* (partial) h %= p */ + d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff); + d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff); + d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff); + d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff); + h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff); + h1 += h0 >> 26; h0 = h0 & 0x3ffffff; + + src += POLY1305_BLOCK_SIZE; + } while (--nblocks); + + state->h[0] = h0; + state->h[1] = h1; + state->h[2] = h2; + state->h[3] = h3; + state->h[4] = h4; +} + +static void poly1305_integer_emit(const struct poly1305_state *state, void *dst) +{ + u32 h0, h1, h2, h3, h4; + u32 g0, g1, g2, g3, g4; + u32 mask; + + /* fully carry h */ + h0 = state->h[0]; + h1 = state->h[1]; + h2 = state->h[2]; + h3 = state->h[3]; + h4 = state->h[4]; + + h2 += (h1 >> 26); h1 = h1 & 0x3ffffff; + h3 += (h2 >> 26); h2 = h2 & 0x3ffffff; + h4 += (h3 >> 26); h3 = h3 & 0x3ffffff; + h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff; + h1 += (h0 >> 26); h0 = h0 & 0x3ffffff; + + /* compute h + -p */ + g0 = h0 + 5; + g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff; + g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff; + g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff; + g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff; + + /* select h if h < p, or h + -p if h >= p */ + mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1; + g0 &= mask; + g1 &= mask; + g2 &= mask; + g3 &= mask; + g4 &= mask; + mask = ~mask; + h0 = (h0 & mask) | g0; + h1 = (h1 & mask) | g1; + h2 = (h2 & mask) | g2; + h3 = (h3 & mask) | g3; + h4 = (h4 & mask) | g4; + + /* h = h % (2^128) */ + put_unaligned_le32((h0 >> 0) | (h1 << 26), dst + 0); + put_unaligned_le32((h1 >> 6) | (h2 << 20), dst + 4); + put_unaligned_le32((h2 >> 12) | (h3 << 14), dst + 8); + put_unaligned_le32((h3 >> 18) | (h4 << 8), dst + 12); +} + +void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key) +{ + poly1305_integer_setkey(desc->opaque_r, key); + desc->s[0] = get_unaligned_le32(key + 16); + desc->s[1] = get_unaligned_le32(key + 20); + desc->s[2] = get_unaligned_le32(key + 24); + desc->s[3] = get_unaligned_le32(key + 28); + poly1305_core_init(&desc->h); + desc->buflen = 0; + desc->sset = true; + desc->rset = 1; +} +EXPORT_SYMBOL_GPL(poly1305_init_arch); + +static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, + const u8 *src, unsigned int srclen) +{ + if (!dctx->sset) { + if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { + poly1305_integer_setkey(dctx->r, src); + src += POLY1305_BLOCK_SIZE; + srclen -= POLY1305_BLOCK_SIZE; + dctx->rset = 1; + } + if (srclen >= POLY1305_BLOCK_SIZE) { + dctx->s[0] = get_unaligned_le32(src + 0); + dctx->s[1] = get_unaligned_le32(src + 4); + dctx->s[2] = 
get_unaligned_le32(src + 8); + dctx->s[3] = get_unaligned_le32(src + 12); + src += POLY1305_BLOCK_SIZE; + srclen -= POLY1305_BLOCK_SIZE; + dctx->sset = true; + } + } + return srclen; +} + static unsigned int poly1305_scalar_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, unsigned int srclen) { @@ -47,8 +224,8 @@ static unsigned int poly1305_scalar_blocks(struct poly1305_desc_ctx *dctx, srclen = datalen; } if (srclen >= POLY1305_BLOCK_SIZE) { - poly1305_core_blocks(&dctx->h, dctx->r, src, - srclen / POLY1305_BLOCK_SIZE, 1); + poly1305_integer_blocks(&dctx->h, dctx->opaque_r, src, + srclen / POLY1305_BLOCK_SIZE, 1); srclen %= POLY1305_BLOCK_SIZE; } return srclen; @@ -105,12 +282,6 @@ static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx, return srclen; } -void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key) -{ - poly1305_init_generic(desc, key); -} -EXPORT_SYMBOL(poly1305_init_arch); - void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, unsigned int srclen) { @@ -158,9 +329,31 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, } EXPORT_SYMBOL(poly1305_update_arch); -void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest) +void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *dst) { - poly1305_final_generic(desc, digest); + __le32 digest[4]; + u64 f = 0; + + if (unlikely(desc->buflen)) { + desc->buf[desc->buflen++] = 1; + memset(desc->buf + desc->buflen, 0, + POLY1305_BLOCK_SIZE - desc->buflen); + poly1305_integer_blocks(&desc->h, desc->opaque_r, desc->buf, 1, 0); + } + + poly1305_integer_emit(&desc->h, digest); + + /* mac = (h + s) % (2^128) */ + f = (f >> 32) + le32_to_cpu(digest[0]) + desc->s[0]; + put_unaligned_le32(f, dst + 0); + f = (f >> 32) + le32_to_cpu(digest[1]) + desc->s[1]; + put_unaligned_le32(f, dst + 4); + f = (f >> 32) + le32_to_cpu(digest[2]) + desc->s[2]; + put_unaligned_le32(f, dst + 8); + f = (f >> 32) + le32_to_cpu(digest[3]) + desc->s[3]; + put_unaligned_le32(f, dst + 12); + + *desc = (struct poly1305_desc_ctx){}; } EXPORT_SYMBOL(poly1305_final_arch); @@ -183,7 +376,7 @@ static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) if (unlikely(!dctx->sset)) return -ENOKEY; - poly1305_final_generic(dctx, dst); + poly1305_final_arch(dctx, dst); return 0; } diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 4d7a6cac82ed..53d5e705a425 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -70,7 +70,7 @@ struct adiantum_tfm_ctx { struct crypto_skcipher *streamcipher; struct crypto_cipher *blockcipher; struct crypto_shash *hash; - struct poly1305_key header_hash_key; + struct poly1305_core_key header_hash_key; }; struct adiantum_request_ctx { @@ -239,7 +239,7 @@ static void adiantum_hash_header(struct skcipher_request *req) poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv, TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1); - poly1305_core_emit(&state, &rctx->header_hash); + poly1305_core_emit(&state, NULL, &rctx->header_hash); } /* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */ diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c index f6b6a52092b4..8a3006c3b51b 100644 --- a/crypto/nhpoly1305.c +++ b/crypto/nhpoly1305.c @@ -210,7 +210,7 @@ int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst, nh_t nh_fn) if (state->nh_remaining) process_nh_hash_value(state, key); - poly1305_core_emit(&state->poly_state, dst); + poly1305_core_emit(&state->poly_state, NULL, dst); return 0; } 
EXPORT_SYMBOL(crypto_nhpoly1305_final_helper); diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index 21edbd8c99fb..94af47eb6fa6 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c @@ -31,6 +31,29 @@ static int crypto_poly1305_init(struct shash_desc *desc) return 0; } +static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, + const u8 *src, unsigned int srclen) +{ + if (!dctx->sset) { + if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { + poly1305_core_setkey(&dctx->core_r, src); + src += POLY1305_BLOCK_SIZE; + srclen -= POLY1305_BLOCK_SIZE; + dctx->rset = 2; + } + if (srclen >= POLY1305_BLOCK_SIZE) { + dctx->s[0] = get_unaligned_le32(src + 0); + dctx->s[1] = get_unaligned_le32(src + 4); + dctx->s[2] = get_unaligned_le32(src + 8); + dctx->s[3] = get_unaligned_le32(src + 12); + src += POLY1305_BLOCK_SIZE; + srclen -= POLY1305_BLOCK_SIZE; + dctx->sset = true; + } + } + return srclen; +} + static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, unsigned int srclen) { @@ -42,7 +65,7 @@ static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, srclen = datalen; } - poly1305_core_blocks(&dctx->h, dctx->r, src, + poly1305_core_blocks(&dctx->h, &dctx->core_r, src, srclen / POLY1305_BLOCK_SIZE, 1); } diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h index 479b0cab2a1a..064e52ca5248 100644 --- a/include/crypto/internal/poly1305.h +++ b/include/crypto/internal/poly1305.h @@ -11,48 +11,23 @@ #include /* - * Poly1305 core functions. These implement the ε-almost-∆-universal hash - * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce - * ("s key") at the end. They also only support block-aligned inputs. + * Poly1305 core functions. These only accept whole blocks; the caller must + * handle any needed block buffering and padding. 'hibit' must be 1 for any + * full blocks, or 0 for the final block if it had to be padded. If 'nonce' is + * non-NULL, then it's added at the end to compute the Poly1305 MAC. Otherwise, + * only the ε-almost-∆-universal hash function (not the full MAC) is computed. */ -void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key); + +void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key); static inline void poly1305_core_init(struct poly1305_state *state) { *state = (struct poly1305_state){}; } void poly1305_core_blocks(struct poly1305_state *state, - const struct poly1305_key *key, const void *src, + const struct poly1305_core_key *key, const void *src, unsigned int nblocks, u32 hibit); -void poly1305_core_emit(const struct poly1305_state *state, void *dst); - -/* - * Poly1305 requires a unique key for each tag, which implies that we can't set - * it on the tfm that gets accessed by multiple users simultaneously. Instead we - * expect the key as the first 32 bytes in the update() call. 
- */ -static inline -unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, - const u8 *src, unsigned int srclen) -{ - if (!dctx->sset) { - if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { - poly1305_core_setkey(dctx->r, src); - src += POLY1305_BLOCK_SIZE; - srclen -= POLY1305_BLOCK_SIZE; - dctx->rset = 1; - } - if (srclen >= POLY1305_BLOCK_SIZE) { - dctx->s[0] = get_unaligned_le32(src + 0); - dctx->s[1] = get_unaligned_le32(src + 4); - dctx->s[2] = get_unaligned_le32(src + 8); - dctx->s[3] = get_unaligned_le32(src + 12); - src += POLY1305_BLOCK_SIZE; - srclen -= POLY1305_BLOCK_SIZE; - dctx->sset = true; - } - } - return srclen; -} +void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4], + void *dst); #endif diff --git a/include/crypto/nhpoly1305.h b/include/crypto/nhpoly1305.h index 53c04423c582..306925fea190 100644 --- a/include/crypto/nhpoly1305.h +++ b/include/crypto/nhpoly1305.h @@ -7,7 +7,7 @@ #define _NHPOLY1305_H #include -#include +#include /* NH parameterization: */ @@ -33,7 +33,7 @@ #define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES) struct nhpoly1305_key { - struct poly1305_key poly_key; + struct poly1305_core_key poly_key; u32 nh_key[NH_KEY_WORDS]; }; diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h index 74c6e1cd73ee..f1f67fc749cf 100644 --- a/include/crypto/poly1305.h +++ b/include/crypto/poly1305.h @@ -13,12 +13,29 @@ #define POLY1305_KEY_SIZE 32 #define POLY1305_DIGEST_SIZE 16 +/* The poly1305_key and poly1305_state types are mostly opaque and + * implementation-defined. Limbs might be in base 2^64 or base 2^26, or + * different yet. The union type provided keeps these 64-bit aligned for the + * case in which this is implemented using 64x64 multiplies. + */ + struct poly1305_key { - u32 r[5]; /* key, base 2^26 */ + union { + u32 r[5]; + u64 r64[3]; + }; +}; + +struct poly1305_core_key { + struct poly1305_key key; + struct poly1305_key precomputed_s; }; struct poly1305_state { - u32 h[5]; /* accumulator, base 2^26 */ + union { + u32 h[5]; + u64 h64[3]; + }; }; struct poly1305_desc_ctx { @@ -35,7 +52,10 @@ struct poly1305_desc_ctx { /* accumulator */ struct poly1305_state h; /* key */ - struct poly1305_key r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE]; + union { + struct poly1305_key opaque_r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE]; + struct poly1305_core_key core_r; + }; }; void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key); diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index f97f9b941110..6ecaf83a5a9a 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -28,7 +28,9 @@ obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o libdes-y := des.o obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305.o -libpoly1305-y := poly1305.o +libpoly1305-y := poly1305-donna32.o +libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o +libpoly1305-y += poly1305.o obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o libsha256-y := sha256.o diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c new file mode 100644 index 000000000000..3cc77d94390b --- /dev/null +++ b/lib/crypto/poly1305-donna32.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * This is based in part on Andrew Moon's poly1305-donna, which is in the + * public domain. 
+ */ + +#include +#include +#include + +void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16]) +{ + /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ + key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff; + key->key.r[1] = (get_unaligned_le32(&raw_key[3]) >> 2) & 0x3ffff03; + key->key.r[2] = (get_unaligned_le32(&raw_key[6]) >> 4) & 0x3ffc0ff; + key->key.r[3] = (get_unaligned_le32(&raw_key[9]) >> 6) & 0x3f03fff; + key->key.r[4] = (get_unaligned_le32(&raw_key[12]) >> 8) & 0x00fffff; + + /* s = 5*r */ + key->precomputed_s.r[0] = key->key.r[1] * 5; + key->precomputed_s.r[1] = key->key.r[2] * 5; + key->precomputed_s.r[2] = key->key.r[3] * 5; + key->precomputed_s.r[3] = key->key.r[4] * 5; +} +EXPORT_SYMBOL(poly1305_core_setkey); + +void poly1305_core_blocks(struct poly1305_state *state, + const struct poly1305_core_key *key, const void *src, + unsigned int nblocks, u32 hibit) +{ + const u8 *input = src; + u32 r0, r1, r2, r3, r4; + u32 s1, s2, s3, s4; + u32 h0, h1, h2, h3, h4; + u64 d0, d1, d2, d3, d4; + u32 c; + + if (!nblocks) + return; + + hibit <<= 24; + + r0 = key->key.r[0]; + r1 = key->key.r[1]; + r2 = key->key.r[2]; + r3 = key->key.r[3]; + r4 = key->key.r[4]; + + s1 = key->precomputed_s.r[0]; + s2 = key->precomputed_s.r[1]; + s3 = key->precomputed_s.r[2]; + s4 = key->precomputed_s.r[3]; + + h0 = state->h[0]; + h1 = state->h[1]; + h2 = state->h[2]; + h3 = state->h[3]; + h4 = state->h[4]; + + do { + /* h += m[i] */ + h0 += (get_unaligned_le32(&input[0])) & 0x3ffffff; + h1 += (get_unaligned_le32(&input[3]) >> 2) & 0x3ffffff; + h2 += (get_unaligned_le32(&input[6]) >> 4) & 0x3ffffff; + h3 += (get_unaligned_le32(&input[9]) >> 6) & 0x3ffffff; + h4 += (get_unaligned_le32(&input[12]) >> 8) | hibit; + + /* h *= r */ + d0 = ((u64)h0 * r0) + ((u64)h1 * s4) + + ((u64)h2 * s3) + ((u64)h3 * s2) + + ((u64)h4 * s1); + d1 = ((u64)h0 * r1) + ((u64)h1 * r0) + + ((u64)h2 * s4) + ((u64)h3 * s3) + + ((u64)h4 * s2); + d2 = ((u64)h0 * r2) + ((u64)h1 * r1) + + ((u64)h2 * r0) + ((u64)h3 * s4) + + ((u64)h4 * s3); + d3 = ((u64)h0 * r3) + ((u64)h1 * r2) + + ((u64)h2 * r1) + ((u64)h3 * r0) + + ((u64)h4 * s4); + d4 = ((u64)h0 * r4) + ((u64)h1 * r3) + + ((u64)h2 * r2) + ((u64)h3 * r1) + + ((u64)h4 * r0); + + /* (partial) h %= p */ + c = (u32)(d0 >> 26); + h0 = (u32)d0 & 0x3ffffff; + d1 += c; + c = (u32)(d1 >> 26); + h1 = (u32)d1 & 0x3ffffff; + d2 += c; + c = (u32)(d2 >> 26); + h2 = (u32)d2 & 0x3ffffff; + d3 += c; + c = (u32)(d3 >> 26); + h3 = (u32)d3 & 0x3ffffff; + d4 += c; + c = (u32)(d4 >> 26); + h4 = (u32)d4 & 0x3ffffff; + h0 += c * 5; + c = (h0 >> 26); + h0 = h0 & 0x3ffffff; + h1 += c; + + input += POLY1305_BLOCK_SIZE; + } while (--nblocks); + + state->h[0] = h0; + state->h[1] = h1; + state->h[2] = h2; + state->h[3] = h3; + state->h[4] = h4; +} +EXPORT_SYMBOL(poly1305_core_blocks); + +void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4], + void *dst) +{ + u8 *mac = dst; + u32 h0, h1, h2, h3, h4, c; + u32 g0, g1, g2, g3, g4; + u64 f; + u32 mask; + + /* fully carry h */ + h0 = state->h[0]; + h1 = state->h[1]; + h2 = state->h[2]; + h3 = state->h[3]; + h4 = state->h[4]; + + c = h1 >> 26; + h1 = h1 & 0x3ffffff; + h2 += c; + c = h2 >> 26; + h2 = h2 & 0x3ffffff; + h3 += c; + c = h3 >> 26; + h3 = h3 & 0x3ffffff; + h4 += c; + c = h4 >> 26; + h4 = h4 & 0x3ffffff; + h0 += c * 5; + c = h0 >> 26; + h0 = h0 & 0x3ffffff; + h1 += c; + + /* compute h + -p */ + g0 = h0 + 5; + c = g0 >> 26; + g0 &= 0x3ffffff; + g1 = h1 + c; + c = g1 >> 26; + g1 &= 0x3ffffff; + g2 = h2 + c; + c = g2 
>> 26; + g2 &= 0x3ffffff; + g3 = h3 + c; + c = g3 >> 26; + g3 &= 0x3ffffff; + g4 = h4 + c - (1UL << 26); + + /* select h if h < p, or h + -p if h >= p */ + mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1; + g0 &= mask; + g1 &= mask; + g2 &= mask; + g3 &= mask; + g4 &= mask; + mask = ~mask; + + h0 = (h0 & mask) | g0; + h1 = (h1 & mask) | g1; + h2 = (h2 & mask) | g2; + h3 = (h3 & mask) | g3; + h4 = (h4 & mask) | g4; + + /* h = h % (2^128) */ + h0 = ((h0) | (h1 << 26)) & 0xffffffff; + h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff; + h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff; + h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff; + + if (likely(nonce)) { + /* mac = (h + nonce) % (2^128) */ + f = (u64)h0 + nonce[0]; + h0 = (u32)f; + f = (u64)h1 + nonce[1] + (f >> 32); + h1 = (u32)f; + f = (u64)h2 + nonce[2] + (f >> 32); + h2 = (u32)f; + f = (u64)h3 + nonce[3] + (f >> 32); + h3 = (u32)f; + } + + put_unaligned_le32(h0, &mac[0]); + put_unaligned_le32(h1, &mac[4]); + put_unaligned_le32(h2, &mac[8]); + put_unaligned_le32(h3, &mac[12]); +} +EXPORT_SYMBOL(poly1305_core_emit); diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c new file mode 100644 index 000000000000..6ae181bb4345 --- /dev/null +++ b/lib/crypto/poly1305-donna64.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. + * + * This is based in part on Andrew Moon's poly1305-donna, which is in the + * public domain. + */ + +#include +#include +#include + +typedef __uint128_t u128; + +void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16]) +{ + u64 t0, t1; + + /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ + t0 = get_unaligned_le64(&raw_key[0]); + t1 = get_unaligned_le64(&raw_key[8]); + + key->key.r64[0] = t0 & 0xffc0fffffffULL; + key->key.r64[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffffULL; + key->key.r64[2] = ((t1 >> 24)) & 0x00ffffffc0fULL; + + /* s = 20*r */ + key->precomputed_s.r64[0] = key->key.r64[1] * 20; + key->precomputed_s.r64[1] = key->key.r64[2] * 20; +} +EXPORT_SYMBOL(poly1305_core_setkey); + +void poly1305_core_blocks(struct poly1305_state *state, + const struct poly1305_core_key *key, const void *src, + unsigned int nblocks, u32 hibit) +{ + const u8 *input = src; + u64 hibit64; + u64 r0, r1, r2; + u64 s1, s2; + u64 h0, h1, h2; + u64 c; + u128 d0, d1, d2, d; + + if (!nblocks) + return; + + hibit64 = ((u64)hibit) << 40; + + r0 = key->key.r64[0]; + r1 = key->key.r64[1]; + r2 = key->key.r64[2]; + + h0 = state->h64[0]; + h1 = state->h64[1]; + h2 = state->h64[2]; + + s1 = key->precomputed_s.r64[0]; + s2 = key->precomputed_s.r64[1]; + + do { + u64 t0, t1; + + /* h += m[i] */ + t0 = get_unaligned_le64(&input[0]); + t1 = get_unaligned_le64(&input[8]); + + h0 += t0 & 0xfffffffffffULL; + h1 += ((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL; + h2 += (((t1 >> 24)) & 0x3ffffffffffULL) | hibit64; + + /* h *= r */ + d0 = (u128)h0 * r0; + d = (u128)h1 * s2; + d0 += d; + d = (u128)h2 * s1; + d0 += d; + d1 = (u128)h0 * r1; + d = (u128)h1 * r0; + d1 += d; + d = (u128)h2 * s2; + d1 += d; + d2 = (u128)h0 * r2; + d = (u128)h1 * r1; + d2 += d; + d = (u128)h2 * r0; + d2 += d; + + /* (partial) h %= p */ + c = (u64)(d0 >> 44); + h0 = (u64)d0 & 0xfffffffffffULL; + d1 += c; + c = (u64)(d1 >> 44); + h1 = (u64)d1 & 0xfffffffffffULL; + d2 += c; + c = (u64)(d2 >> 42); + h2 = (u64)d2 & 0x3ffffffffffULL; + h0 += c * 5; + c = h0 >> 44; + h0 = h0 & 0xfffffffffffULL; + h1 += c; + + input += POLY1305_BLOCK_SIZE; + } while (--nblocks); + + state->h64[0] = 
h0; + state->h64[1] = h1; + state->h64[2] = h2; +} +EXPORT_SYMBOL(poly1305_core_blocks); + +void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4], + void *dst) +{ + u8 *mac = dst; + u64 h0, h1, h2, c; + u64 g0, g1, g2; + u64 t0, t1; + + /* fully carry h */ + h0 = state->h64[0]; + h1 = state->h64[1]; + h2 = state->h64[2]; + + c = h1 >> 44; + h1 &= 0xfffffffffffULL; + h2 += c; + c = h2 >> 42; + h2 &= 0x3ffffffffffULL; + h0 += c * 5; + c = h0 >> 44; + h0 &= 0xfffffffffffULL; + h1 += c; + c = h1 >> 44; + h1 &= 0xfffffffffffULL; + h2 += c; + c = h2 >> 42; + h2 &= 0x3ffffffffffULL; + h0 += c * 5; + c = h0 >> 44; + h0 &= 0xfffffffffffULL; + h1 += c; + + /* compute h + -p */ + g0 = h0 + 5; + c = g0 >> 44; + g0 &= 0xfffffffffffULL; + g1 = h1 + c; + c = g1 >> 44; + g1 &= 0xfffffffffffULL; + g2 = h2 + c - (1ULL << 42); + + /* select h if h < p, or h + -p if h >= p */ + c = (g2 >> ((sizeof(u64) * 8) - 1)) - 1; + g0 &= c; + g1 &= c; + g2 &= c; + c = ~c; + h0 = (h0 & c) | g0; + h1 = (h1 & c) | g1; + h2 = (h2 & c) | g2; + + if (likely(nonce)) { + /* h = (h + nonce) */ + t0 = ((u64)nonce[1] << 32) | nonce[0]; + t1 = ((u64)nonce[3] << 32) | nonce[2]; + + h0 += t0 & 0xfffffffffffULL; + c = h0 >> 44; + h0 &= 0xfffffffffffULL; + h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL) + c; + c = h1 >> 44; + h1 &= 0xfffffffffffULL; + h2 += (((t1 >> 24)) & 0x3ffffffffffULL) + c; + h2 &= 0x3ffffffffffULL; + } + + /* mac = h % (2^128) */ + h0 = h0 | (h1 << 44); + h1 = (h1 >> 20) | (h2 << 24); + + put_unaligned_le64(h0, &mac[0]); + put_unaligned_le64(h1, &mac[8]); +} +EXPORT_SYMBOL(poly1305_core_emit); diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c index 32ec293c65ae..9d2d14df0fee 100644 --- a/lib/crypto/poly1305.c +++ b/lib/crypto/poly1305.c @@ -12,151 +12,9 @@ #include #include -static inline u64 mlt(u64 a, u64 b) -{ - return a * b; -} - -static inline u32 sr(u64 v, u_char n) -{ - return v >> n; -} - -static inline u32 and(u32 v, u32 mask) -{ - return v & mask; -} - -void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key) -{ - /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ - key->r[0] = (get_unaligned_le32(raw_key + 0) >> 0) & 0x3ffffff; - key->r[1] = (get_unaligned_le32(raw_key + 3) >> 2) & 0x3ffff03; - key->r[2] = (get_unaligned_le32(raw_key + 6) >> 4) & 0x3ffc0ff; - key->r[3] = (get_unaligned_le32(raw_key + 9) >> 6) & 0x3f03fff; - key->r[4] = (get_unaligned_le32(raw_key + 12) >> 8) & 0x00fffff; -} -EXPORT_SYMBOL_GPL(poly1305_core_setkey); - -void poly1305_core_blocks(struct poly1305_state *state, - const struct poly1305_key *key, const void *src, - unsigned int nblocks, u32 hibit) -{ - u32 r0, r1, r2, r3, r4; - u32 s1, s2, s3, s4; - u32 h0, h1, h2, h3, h4; - u64 d0, d1, d2, d3, d4; - - if (!nblocks) - return; - - r0 = key->r[0]; - r1 = key->r[1]; - r2 = key->r[2]; - r3 = key->r[3]; - r4 = key->r[4]; - - s1 = r1 * 5; - s2 = r2 * 5; - s3 = r3 * 5; - s4 = r4 * 5; - - h0 = state->h[0]; - h1 = state->h[1]; - h2 = state->h[2]; - h3 = state->h[3]; - h4 = state->h[4]; - - do { - /* h += m[i] */ - h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff; - h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff; - h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff; - h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff; - h4 += (get_unaligned_le32(src + 12) >> 8) | (hibit << 24); - - /* h *= r */ - d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + - mlt(h3, s2) + mlt(h4, s1); - d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) + - mlt(h3, s3) + mlt(h4, s2); - d2 = mlt(h0, r2) + mlt(h1, 
r1) + mlt(h2, r0) + - mlt(h3, s4) + mlt(h4, s3); - d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) + - mlt(h3, r0) + mlt(h4, s4); - d4 = mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) + - mlt(h3, r1) + mlt(h4, r0); - - /* (partial) h %= p */ - d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff); - d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff); - d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff); - d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff); - h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff); - h1 += h0 >> 26; h0 = h0 & 0x3ffffff; - - src += POLY1305_BLOCK_SIZE; - } while (--nblocks); - - state->h[0] = h0; - state->h[1] = h1; - state->h[2] = h2; - state->h[3] = h3; - state->h[4] = h4; -} -EXPORT_SYMBOL_GPL(poly1305_core_blocks); - -void poly1305_core_emit(const struct poly1305_state *state, void *dst) -{ - u32 h0, h1, h2, h3, h4; - u32 g0, g1, g2, g3, g4; - u32 mask; - - /* fully carry h */ - h0 = state->h[0]; - h1 = state->h[1]; - h2 = state->h[2]; - h3 = state->h[3]; - h4 = state->h[4]; - - h2 += (h1 >> 26); h1 = h1 & 0x3ffffff; - h3 += (h2 >> 26); h2 = h2 & 0x3ffffff; - h4 += (h3 >> 26); h3 = h3 & 0x3ffffff; - h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff; - h1 += (h0 >> 26); h0 = h0 & 0x3ffffff; - - /* compute h + -p */ - g0 = h0 + 5; - g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff; - g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff; - g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff; - g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff; - - /* select h if h < p, or h + -p if h >= p */ - mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1; - g0 &= mask; - g1 &= mask; - g2 &= mask; - g3 &= mask; - g4 &= mask; - mask = ~mask; - h0 = (h0 & mask) | g0; - h1 = (h1 & mask) | g1; - h2 = (h2 & mask) | g2; - h3 = (h3 & mask) | g3; - h4 = (h4 & mask) | g4; - - /* h = h % (2^128) */ - put_unaligned_le32((h0 >> 0) | (h1 << 26), dst + 0); - put_unaligned_le32((h1 >> 6) | (h2 << 20), dst + 4); - put_unaligned_le32((h2 >> 12) | (h3 << 14), dst + 8); - put_unaligned_le32((h3 >> 18) | (h4 << 8), dst + 12); -} -EXPORT_SYMBOL_GPL(poly1305_core_emit); - void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key) { - poly1305_core_setkey(desc->r, key); + poly1305_core_setkey(&desc->core_r, key); desc->s[0] = get_unaligned_le32(key + 16); desc->s[1] = get_unaligned_le32(key + 20); desc->s[2] = get_unaligned_le32(key + 24); @@ -164,7 +22,7 @@ void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key) poly1305_core_init(&desc->h); desc->buflen = 0; desc->sset = true; - desc->rset = 1; + desc->rset = 2; } EXPORT_SYMBOL_GPL(poly1305_init_generic); @@ -181,13 +39,14 @@ void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src, desc->buflen += bytes; if (desc->buflen == POLY1305_BLOCK_SIZE) { - poly1305_core_blocks(&desc->h, desc->r, desc->buf, 1, 1); + poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf, + 1, 1); desc->buflen = 0; } } if (likely(nbytes >= POLY1305_BLOCK_SIZE)) { - poly1305_core_blocks(&desc->h, desc->r, src, + poly1305_core_blocks(&desc->h, &desc->core_r, src, nbytes / POLY1305_BLOCK_SIZE, 1); src += nbytes - (nbytes % POLY1305_BLOCK_SIZE); nbytes %= POLY1305_BLOCK_SIZE; @@ -202,28 +61,14 @@ EXPORT_SYMBOL_GPL(poly1305_update_generic); void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *dst) { - __le32 digest[4]; - u64 f = 0; - if (unlikely(desc->buflen)) { desc->buf[desc->buflen++] = 1; memset(desc->buf + desc->buflen, 0, POLY1305_BLOCK_SIZE - desc->buflen); - poly1305_core_blocks(&desc->h, desc->r, desc->buf, 1, 0); + poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf, 1, 0); } - poly1305_core_emit(&desc->h, 
digest); - - /* mac = (h + s) % (2^128) */ - f = (f >> 32) + le32_to_cpu(digest[0]) + desc->s[0]; - put_unaligned_le32(f, dst + 0); - f = (f >> 32) + le32_to_cpu(digest[1]) + desc->s[1]; - put_unaligned_le32(f, dst + 4); - f = (f >> 32) + le32_to_cpu(digest[2]) + desc->s[2]; - put_unaligned_le32(f, dst + 8); - f = (f >> 32) + le32_to_cpu(digest[3]) + desc->s[3]; - put_unaligned_le32(f, dst + 12); - + poly1305_core_emit(&desc->h, desc->s, dst); *desc = (struct poly1305_desc_ctx){}; } EXPORT_SYMBOL_GPL(poly1305_final_generic); -- cgit v1.2.3 From ab3d436bf3e9d05f58ceaa85ff7475bfcd6e45af Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Sun, 12 Jan 2020 17:58:58 +0100 Subject: crypto: essiv - fix AEAD capitalization and preposition use in help text "AEAD" is capitalized everywhere else. Use "an" when followed by a written or spoken vowel. Fixes: be1eb7f78aa8fbe3 ("crypto: essiv - create wrapper template for ESSIV generation") Signed-off-by: Geert Uytterhoeven Signed-off-by: Herbert Xu --- crypto/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 5575d48473bd..cdb51d4272d0 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -511,10 +511,10 @@ config CRYPTO_ESSIV encryption. This driver implements a crypto API template that can be - instantiated either as a skcipher or as a aead (depending on the + instantiated either as an skcipher or as an AEAD (depending on the type of the first template argument), and which defers encryption and decryption requests to the encapsulated cipher after applying - ESSIV to the input IV. Note that in the aead case, it is assumed + ESSIV to the input IV. Note that in the AEAD case, it is assumed that the keys are presented in the same format used by the authenc template, and that the IV appears at the end of the authenticated associated data (AAD) region (which is how dm-crypt uses it.) -- cgit v1.2.3