From aa2faec1a05f1ebc2856be911f83e696f8dc2122 Mon Sep 17 00:00:00 2001 From: Vakul Garg Date: Wed, 10 Jul 2013 06:27:21 +0000 Subject: crypto: caam - Moved macro DESC_JOB_IO_LEN to desc_constr.h DESC_JOB_IO_LEN is a generic macro which indicates the space required in a descriptor for the SEQIN/SEQOUT commands, the job descriptor header, and the shared descriptor pointer. Move it to the descriptor construction header so that it can be included by the different algorithm offload files. Change-Id: Ic8900990d465e9079827b0c7fcacc61766d7efb6 Signed-off-by: Vakul Garg Reviewed-by: Geanta Neag Horia Ioan-B05471 Reviewed-by: Fleming Andrew-AFLEMING Tested-by: Fleming Andrew-AFLEMING Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 2 -- drivers/crypto/caam/caamhash.c | 2 -- drivers/crypto/caam/desc_constr.h | 1 + 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index bf416a8391a7..7a9052c44238 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -65,8 +65,6 @@ #define CAAM_MAX_IV_LENGTH 16 /* length of descriptors text */ -#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) - #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ) diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 5996521a1caf..77ad2b68f8f3 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -72,8 +72,6 @@ #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE /* length of descriptors text */ -#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) - #define DESC_AHASH_BASE (4 * CAAM_CMD_SZ) #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index fe3bfd1b08ca..cd5f678847ce 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -10,6 +10,7 @@ #define CAAM_CMD_SZ sizeof(u32) #define CAAM_PTR_SZ sizeof(dma_addr_t) #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) +#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) #ifdef DEBUG #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ -- cgit v1.2.3 From 997ad2900ac13b8afcfc45ce79bf662551a501eb Mon Sep 17 00:00:00 2001 From: Ruchika Gupta Date: Thu, 4 Jul 2013 11:26:03 +0530 Subject: crypto: caam - RNG instantiation by directly programming DECO Remove the dependency of RNG instantiation on the Job Ring. Now RNG instantiation for devices with RNG version > 4 is done by directly programming DECO 0.
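In outline, the direct-DECO flow implemented below is the following (a minimal sketch assembled from this patch's own diff; the helper name run_descriptor_on_deco0 and its signature are illustrative and not part of the driver, while the register helpers and fields are the ones the patch uses):

static int run_descriptor_on_deco0(struct caam_full __iomem *topregs, u32 *desc)
{
	unsigned int timeout = 100000;
	int i;

	/* Request direct (non-job-ring) access to DECO 0 */
	setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);

	/* Wait until the request is actually granted */
	while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) && --timeout)
		cpu_relax();
	if (!timeout)
		return -EIO;

	/* Copy the job descriptor into DECO 0's descriptor buffer */
	for (i = 0; i < desc_len(desc); i++)
		topregs->deco.descbuf[i] = desc[i];

	/* Kick off execution of the whole descriptor */
	wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR);

	/* Poll the DECO debug register until the descriptor retires */
	timeout = 10000000;
	while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) && --timeout)
		cpu_relax();

	/* Give DECO 0 back to the job ring interface */
	clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);

	return timeout ? 0 : -EIO;
}

Because no job ring, completion callback, or DMA mapping is involved, the RNG can be seeded before any job ring is up, which is exactly the dependency this patch removes.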
Signed-off-by: Ruchika Gupta Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 74 ++++++++++++++++++++++------------------------ drivers/crypto/caam/regs.h | 12 +++++++- 2 files changed, 47 insertions(+), 39 deletions(-) diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index f5d6deced1cb..86c96009faad 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -75,55 +75,53 @@ static void build_instantiation_desc(u32 *desc) OP_ALG_RNG4_SK); } -struct instantiate_result { - struct completion completion; - int err; -}; - -static void rng4_init_done(struct device *dev, u32 *desc, u32 err, - void *context) +static int instantiate_rng(struct device *ctrldev) { - struct instantiate_result *instantiation = context; - - if (err) { - char tmp[CAAM_ERROR_STR_MAX]; - - dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); - } - - instantiation->err = err; - complete(&instantiation->completion); -} - -static int instantiate_rng(struct device *jrdev) -{ - struct instantiate_result instantiation; - - dma_addr_t desc_dma; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); + struct caam_full __iomem *topregs; + unsigned int timeout = 100000; u32 *desc; - int ret; + int i, ret = 0; desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA); if (!desc) { - dev_err(jrdev, "cannot allocate RNG init descriptor memory\n"); + dev_err(ctrldev, "can't allocate RNG init descriptor memory\n"); return -ENOMEM; } - build_instantiation_desc(desc); - desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE); - init_completion(&instantiation.completion); - ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation); - if (!ret) { - wait_for_completion_interruptible(&instantiation.completion); - ret = instantiation.err; - if (ret) - dev_err(jrdev, "unable to instantiate RNG\n"); + + /* Set the bit to request direct access to DECO0 */ + topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; + setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); + + while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) && + --timeout) + cpu_relax(); + + if (!timeout) { + dev_err(ctrldev, "failed to acquire DECO 0\n"); + ret = -EIO; + goto out; } - dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE); + for (i = 0; i < desc_len(desc); i++) + topregs->deco.descbuf[i] = *(desc + i); - kfree(desc); + wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR); + + timeout = 10000000; + while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) && + --timeout) + cpu_relax(); + if (!timeout) { + dev_err(ctrldev, "failed to instantiate RNG\n"); + ret = -EIO; + } + + clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); +out: + kfree(desc); return ret; } @@ -303,7 +301,7 @@ static int caam_probe(struct platform_device *pdev) if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 && !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) { kick_trng(pdev); - ret = instantiate_rng(ctrlpriv->jrdev[0]); + ret = instantiate_rng(dev); if (ret) { caam_remove(pdev); return ret; diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index c09142fc13e3..4455396918de 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -341,6 +341,8 @@ struct caam_ctrl { #define MCFGR_DMA_RESET 0x10000000 #define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ #define SCFGR_RDBENABLE 0x00000400 +#define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */ +#define DECORR_DEN0 0x00010000 /* DECO0 available 
for access*/ /* AXI read cache control */ #define MCFGR_ARCACHE_SHIFT 12 @@ -703,9 +705,16 @@ struct caam_deco { struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */ u32 rsvd29[48]; u32 descbuf[64]; /* DxDESB - Descriptor buffer */ - u32 rsvd30[320]; + u32 rscvd30[193]; + u32 desc_dbg; /* DxDDR - DECO Debug Register */ + u32 rsvd31[126]; }; +/* DECO DBG Register Valid Bit*/ +#define DECO_DBG_VALID 0x80000000 +#define DECO_JQCR_WHL 0x20000000 +#define DECO_JQCR_FOUR 0x10000000 + /* * Current top-level view of memory map is: * @@ -733,6 +742,7 @@ struct caam_full { u64 rsvd[512]; struct caam_assurance assure; struct caam_queue_if qi; + struct caam_deco deco; }; #endif /* REGS_H */ -- cgit v1.2.3 From 69d2884debaa029ddcf9de4631c4c83249bc8c4d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 16 Jul 2013 14:06:48 -0700 Subject: crypto: ux500 - Fix logging, make arrays const, neatening Logging messages without newlines are possibly interleaved with other messages. Add terminating newlines to avoid this. Other miscellaneous changes: Make arrays const to reduce data size Add pr_fmt to prefix pr_, remove now unused DEV_DBG_NAME Coalesce formats, align arguments Remove unnecessary OOM messages as dump_stack is already done Remove unnecessary cast of void * Change kzalloc(sizeof(struct)...) to kzalloc(sizeof(*var), ...) Reduce indents in struct definitions Signed-off-by: Joe Perches Acked-by: Linus Walleij Signed-off-by: Herbert Xu --- drivers/crypto/ux500/hash/hash_core.c | 586 ++++++++++++++++------------------ 1 file changed, 279 insertions(+), 307 deletions(-) diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 496ae6aae316..1c73f4fbc252 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -11,6 +11,8 @@ * License terms: GNU General Public License (GPL) version 2 */ +#define pr_fmt(fmt) "hashX hashX: " fmt + #include #include #include @@ -35,8 +37,6 @@ #include "hash_alg.h" -#define DEV_DBG_NAME "hashX hashX:" - static int hash_mode; module_param(hash_mode, int, 0); MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); @@ -44,13 +44,13 @@ MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); /** * Pre-calculated empty message digests. 
*/ -static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { +static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07, 0x09 }; -static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { +static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, @@ -58,14 +58,14 @@ static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { }; /* HMAC-SHA1, no key */ -static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { +static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63, 0x70, 0x69, 0x0e, 0x1d }; /* HMAC-SHA256, no key */ -static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = { +static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = { 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec, 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5, 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53, @@ -97,7 +97,7 @@ static struct hash_driver_data driver_data; * */ static void hash_messagepad(struct hash_device_data *device_data, - const u32 *message, u8 index_bytes); + const u32 *message, u8 index_bytes); /** * release_hash_device - Releases a previously allocated hash device. @@ -119,7 +119,7 @@ static void release_hash_device(struct hash_device_data *device_data) } static void hash_dma_setup_channel(struct hash_device_data *device_data, - struct device *dev) + struct device *dev) { struct hash_platform_data *platform_data = dev->platform_data; struct dma_slave_config conf = { @@ -127,7 +127,7 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data, .dst_addr = device_data->phybase + HASH_DMA_FIFO, .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, .dst_maxburst = 16, - }; + }; dma_cap_zero(device_data->dma.mask); dma_cap_set(DMA_SLAVE, device_data->dma.mask); @@ -135,8 +135,8 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data, device_data->dma.cfg_mem2hash = platform_data->mem_to_engine; device_data->dma.chan_mem2hash = dma_request_channel(device_data->dma.mask, - platform_data->dma_filter, - device_data->dma.cfg_mem2hash); + platform_data->dma_filter, + device_data->dma.cfg_mem2hash); dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf); @@ -145,21 +145,21 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data, static void hash_dma_callback(void *data) { - struct hash_ctx *ctx = (struct hash_ctx *) data; + struct hash_ctx *ctx = data; complete(&ctx->device->dma.complete); } static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, - int len, enum dma_data_direction direction) + int len, enum dma_data_direction direction) { struct dma_async_tx_descriptor *desc = NULL; struct dma_chan *channel = NULL; dma_cookie_t cookie; if (direction != DMA_TO_DEVICE) { - dev_err(ctx->device->dev, "[%s] Invalid DMA direction", - __func__); + dev_err(ctx->device->dev, "%s: Invalid DMA direction\n", + __func__); return -EFAULT; } @@ -172,20 +172,19 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, direction); if (!ctx->device->dma.sg_len) { - dev_err(ctx->device->dev, - "[%s]: Could not map the sg list (TO_DEVICE)", - __func__); + dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n", + __func__); return -EFAULT; } - 
dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " - "(TO_DEVICE)", __func__); + dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n", + __func__); desc = dmaengine_prep_slave_sg(channel, ctx->device->dma.sg, ctx->device->dma.sg_len, direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!desc) { dev_err(ctx->device->dev, - "[%s]: device_prep_slave_sg() failed!", __func__); + "%s: device_prep_slave_sg() failed!\n", __func__); return -EFAULT; } @@ -205,17 +204,16 @@ static void hash_dma_done(struct hash_ctx *ctx) chan = ctx->device->dma.chan_mem2hash; dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, - ctx->device->dma.sg_len, DMA_TO_DEVICE); - + ctx->device->dma.sg_len, DMA_TO_DEVICE); } static int hash_dma_write(struct hash_ctx *ctx, - struct scatterlist *sg, int len) + struct scatterlist *sg, int len) { int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); if (error) { - dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() " - "failed", __func__); + dev_dbg(ctx->device->dev, + "%s: hash_set_dma_transfer() failed\n", __func__); return error; } @@ -245,19 +243,18 @@ static int get_empty_message_digest( if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { if (HASH_ALGO_SHA1 == ctx->config.algorithm) { memcpy(zero_hash, &zero_message_hash_sha1[0], - SHA1_DIGEST_SIZE); + SHA1_DIGEST_SIZE); *zero_hash_size = SHA1_DIGEST_SIZE; *zero_digest = true; } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { memcpy(zero_hash, &zero_message_hash_sha256[0], - SHA256_DIGEST_SIZE); + SHA256_DIGEST_SIZE); *zero_hash_size = SHA256_DIGEST_SIZE; *zero_digest = true; } else { - dev_err(device_data->dev, "[%s] " - "Incorrect algorithm!" - , __func__); + dev_err(device_data->dev, "%s: Incorrect algorithm!\n", + __func__); ret = -EINVAL; goto out; } @@ -265,25 +262,24 @@ static int get_empty_message_digest( if (!ctx->keylen) { if (HASH_ALGO_SHA1 == ctx->config.algorithm) { memcpy(zero_hash, &zero_message_hmac_sha1[0], - SHA1_DIGEST_SIZE); + SHA1_DIGEST_SIZE); *zero_hash_size = SHA1_DIGEST_SIZE; *zero_digest = true; } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { memcpy(zero_hash, &zero_message_hmac_sha256[0], - SHA256_DIGEST_SIZE); + SHA256_DIGEST_SIZE); *zero_hash_size = SHA256_DIGEST_SIZE; *zero_digest = true; } else { - dev_err(device_data->dev, "[%s] " - "Incorrect algorithm!" - , __func__); + dev_err(device_data->dev, "%s: Incorrect algorithm!\n", + __func__); ret = -EINVAL; goto out; } } else { - dev_dbg(device_data->dev, "[%s] Continue hash " - "calculation, since hmac key avalable", - __func__); + dev_dbg(device_data->dev, + "%s: Continue hash calculation, since hmac key available\n", + __func__); } } out: @@ -299,9 +295,8 @@ out: * This function request for disabling power (regulator) and clock, * and could also save current hw state. 
*/ -static int hash_disable_power( - struct hash_device_data *device_data, - bool save_device_state) +static int hash_disable_power(struct hash_device_data *device_data, + bool save_device_state) { int ret = 0; struct device *dev = device_data->dev; @@ -319,7 +314,7 @@ static int hash_disable_power( clk_disable(device_data->clk); ret = regulator_disable(device_data->regulator); if (ret) - dev_err(dev, "[%s] regulator_disable() failed!", __func__); + dev_err(dev, "%s: regulator_disable() failed!\n", __func__); device_data->power_state = false; @@ -337,9 +332,8 @@ out: * This function request for enabling power (regulator) and clock, * and could also restore a previously saved hw state. */ -static int hash_enable_power( - struct hash_device_data *device_data, - bool restore_device_state) +static int hash_enable_power(struct hash_device_data *device_data, + bool restore_device_state) { int ret = 0; struct device *dev = device_data->dev; @@ -348,14 +342,13 @@ static int hash_enable_power( if (!device_data->power_state) { ret = regulator_enable(device_data->regulator); if (ret) { - dev_err(dev, "[%s]: regulator_enable() failed!", - __func__); + dev_err(dev, "%s: regulator_enable() failed!\n", + __func__); goto out; } ret = clk_enable(device_data->clk); if (ret) { - dev_err(dev, "[%s]: clk_enable() failed!", - __func__); + dev_err(dev, "%s: clk_enable() failed!\n", __func__); ret = regulator_disable( device_data->regulator); goto out; @@ -366,8 +359,7 @@ static int hash_enable_power( if (device_data->restore_dev_state) { if (restore_device_state) { device_data->restore_dev_state = false; - hash_resume_state(device_data, - &device_data->state); + hash_resume_state(device_data, &device_data->state); } } out: @@ -447,7 +439,7 @@ static int hash_get_device_data(struct hash_ctx *ctx, * spec or due to a bug in the hw. */ static void hash_hw_write_key(struct hash_device_data *device_data, - const u8 *key, unsigned int keylen) + const u8 *key, unsigned int keylen) { u32 word = 0; int nwords = 1; @@ -491,14 +483,14 @@ static void hash_hw_write_key(struct hash_device_data *device_data, * calculation. 
*/ static int init_hash_hw(struct hash_device_data *device_data, - struct hash_ctx *ctx) + struct hash_ctx *ctx) { int ret = 0; ret = hash_setconfiguration(device_data, &ctx->config); if (ret) { - dev_err(device_data->dev, "[%s] hash_setconfiguration() " - "failed!", __func__); + dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n", + __func__); return ret; } @@ -528,9 +520,8 @@ static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned) size -= sg->length; /* hash_set_dma_transfer will align last nent */ - if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) - || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && - size > 0)) + if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) || + (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0)) aligned_data = false; sg = sg_next(sg); @@ -585,21 +576,17 @@ static int hash_init(struct ahash_request *req) if (req->nbytes < HASH_DMA_ALIGN_SIZE) { req_ctx->dma_mode = false; /* Don't use DMA */ - pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct " - "to CPU mode for data size < %d", - __func__, HASH_DMA_ALIGN_SIZE); + pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n", + __func__, HASH_DMA_ALIGN_SIZE); } else { if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE && - hash_dma_valid_data(req->src, - req->nbytes)) { + hash_dma_valid_data(req->src, req->nbytes)) { req_ctx->dma_mode = true; } else { req_ctx->dma_mode = false; - pr_debug(DEV_DBG_NAME " [%s] DMA mode, but use" - " CPU mode for datalength < %d" - " or non-aligned data, except " - "in last nent", __func__, - HASH_DMA_PERFORMANCE_MIN_SIZE); + pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n", + __func__, + HASH_DMA_PERFORMANCE_MIN_SIZE); } } } @@ -614,9 +601,8 @@ static int hash_init(struct ahash_request *req) * the HASH hardware. * */ -static void hash_processblock( - struct hash_device_data *device_data, - const u32 *message, int length) +static void hash_processblock(struct hash_device_data *device_data, + const u32 *message, int length) { int len = length / HASH_BYTES_PER_WORD; /* @@ -641,7 +627,7 @@ static void hash_processblock( * */ static void hash_messagepad(struct hash_device_data *device_data, - const u32 *message, u8 index_bytes) + const u32 *message, u8 index_bytes) { int nwords = 1; @@ -666,15 +652,13 @@ static void hash_messagepad(struct hash_device_data *device_data, /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ HASH_SET_NBLW(index_bytes * 8); - dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__, - readl_relaxed(&device_data->base->din), - (int)(readl_relaxed(&device_data->base->str) & - HASH_STR_NBLW_MASK)); + dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n", + __func__, readl_relaxed(&device_data->base->din), + readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK); HASH_SET_DCAL; - dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d", - __func__, readl_relaxed(&device_data->base->din), - (int)(readl_relaxed(&device_data->base->str) & - HASH_STR_NBLW_MASK)); + dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n", + __func__, readl_relaxed(&device_data->base->din), + readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK); while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) cpu_relax(); @@ -704,7 +688,7 @@ static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr) * @config: Pointer to a configuration structure. 
*/ int hash_setconfiguration(struct hash_device_data *device_data, - struct hash_config *config) + struct hash_config *config) { int ret = 0; @@ -731,8 +715,8 @@ int hash_setconfiguration(struct hash_device_data *device_data, break; default: - dev_err(device_data->dev, "[%s] Incorrect algorithm.", - __func__); + dev_err(device_data->dev, "%s: Incorrect algorithm\n", + __func__); return -EPERM; } @@ -744,23 +728,22 @@ int hash_setconfiguration(struct hash_device_data *device_data, HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_MODE_MASK); else if (HASH_OPER_MODE_HMAC == config->oper_mode) { - HASH_SET_BITS(&device_data->base->cr, - HASH_CR_MODE_MASK); + HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK); if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) { /* Truncate key to blocksize */ - dev_dbg(device_data->dev, "[%s] LKEY set", __func__); + dev_dbg(device_data->dev, "%s: LKEY set\n", __func__); HASH_SET_BITS(&device_data->base->cr, - HASH_CR_LKEY_MASK); + HASH_CR_LKEY_MASK); } else { - dev_dbg(device_data->dev, "[%s] LKEY cleared", - __func__); + dev_dbg(device_data->dev, "%s: LKEY cleared\n", + __func__); HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_LKEY_MASK); } } else { /* Wrong hash mode */ ret = -EPERM; - dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", - __func__); + dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", + __func__); } return ret; } @@ -793,8 +776,9 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx) } static int hash_process_data(struct hash_device_data *device_data, - struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, - int msg_length, u8 *data_buffer, u8 *buffer, u8 *index) + struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, + int msg_length, u8 *data_buffer, u8 *buffer, + u8 *index) { int ret = 0; u32 count; @@ -809,24 +793,23 @@ static int hash_process_data(struct hash_device_data *device_data, msg_length = 0; } else { if (req_ctx->updated) { - ret = hash_resume_state(device_data, &device_data->state); memmove(req_ctx->state.buffer, - device_data->state.buffer, - HASH_BLOCK_SIZE / sizeof(u32)); + device_data->state.buffer, + HASH_BLOCK_SIZE / sizeof(u32)); if (ret) { - dev_err(device_data->dev, "[%s] " - "hash_resume_state()" - " failed!", __func__); + dev_err(device_data->dev, + "%s: hash_resume_state() failed!\n", + __func__); goto out; } } else { ret = init_hash_hw(device_data, ctx); if (ret) { - dev_err(device_data->dev, "[%s] " - "init_hash_hw()" - " failed!", __func__); + dev_err(device_data->dev, + "%s: init_hash_hw() failed!\n", + __func__); goto out; } req_ctx->updated = 1; @@ -838,22 +821,21 @@ static int hash_process_data(struct hash_device_data *device_data, * HW peripheral, otherwise we first copy data * to a local buffer */ - if ((0 == (((u32)data_buffer) % 4)) - && (0 == *index)) + if ((0 == (((u32)data_buffer) % 4)) && + (0 == *index)) hash_processblock(device_data, - (const u32 *) - data_buffer, HASH_BLOCK_SIZE); + (const u32 *)data_buffer, + HASH_BLOCK_SIZE); else { - for (count = 0; count < - (u32)(HASH_BLOCK_SIZE - - *index); - count++) { + for (count = 0; + count < (u32)(HASH_BLOCK_SIZE - *index); + count++) { buffer[*index + count] = *(data_buffer + count); } hash_processblock(device_data, - (const u32 *)buffer, - HASH_BLOCK_SIZE); + (const u32 *)buffer, + HASH_BLOCK_SIZE); } hash_incrementlength(req_ctx, HASH_BLOCK_SIZE); data_buffer += (HASH_BLOCK_SIZE - *index); @@ -865,12 +847,11 @@ static int hash_process_data(struct hash_device_data *device_data, &device_data->state); 
memmove(device_data->state.buffer, - req_ctx->state.buffer, - HASH_BLOCK_SIZE / sizeof(u32)); + req_ctx->state.buffer, + HASH_BLOCK_SIZE / sizeof(u32)); if (ret) { - dev_err(device_data->dev, "[%s] " - "hash_save_state()" - " failed!", __func__); + dev_err(device_data->dev, "%s: hash_save_state() failed!\n", + __func__); goto out; } } @@ -898,25 +879,24 @@ static int hash_dma_final(struct ahash_request *req) if (ret) return ret; - dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); + dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); if (req_ctx->updated) { ret = hash_resume_state(device_data, &device_data->state); if (ret) { - dev_err(device_data->dev, "[%s] hash_resume_state() " - "failed!", __func__); + dev_err(device_data->dev, "%s: hash_resume_state() failed!\n", + __func__); goto out; } - } if (!req_ctx->updated) { ret = hash_setconfiguration(device_data, &ctx->config); if (ret) { - dev_err(device_data->dev, "[%s] " - "hash_setconfiguration() failed!", - __func__); + dev_err(device_data->dev, + "%s: hash_setconfiguration() failed!\n", + __func__); goto out; } @@ -926,9 +906,9 @@ static int hash_dma_final(struct ahash_request *req) HASH_CR_DMAE_MASK); } else { HASH_SET_BITS(&device_data->base->cr, - HASH_CR_DMAE_MASK); + HASH_CR_DMAE_MASK); HASH_SET_BITS(&device_data->base->cr, - HASH_CR_PRIVN_MASK); + HASH_CR_PRIVN_MASK); } HASH_INITIALIZE; @@ -944,16 +924,16 @@ static int hash_dma_final(struct ahash_request *req) /* Store the nents in the dma struct. */ ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL); if (!ctx->device->dma.nents) { - dev_err(device_data->dev, "[%s] " - "ctx->device->dma.nents = 0", __func__); + dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n", + __func__); ret = ctx->device->dma.nents; goto out; } bytes_written = hash_dma_write(ctx, req->src, req->nbytes); if (bytes_written != req->nbytes) { - dev_err(device_data->dev, "[%s] " - "hash_dma_write() failed!", __func__); + dev_err(device_data->dev, "%s: hash_dma_write() failed!\n", + __func__); ret = bytes_written; goto out; } @@ -968,8 +948,8 @@ static int hash_dma_final(struct ahash_request *req) unsigned int keylen = ctx->keylen; u8 *key = ctx->key; - dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, - ctx->keylen); + dev_dbg(device_data->dev, "%s: keylen: %d\n", + __func__, ctx->keylen); hash_hw_write_key(device_data, key, keylen); } @@ -1004,14 +984,14 @@ static int hash_hw_final(struct ahash_request *req) if (ret) return ret; - dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); + dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); if (req_ctx->updated) { ret = hash_resume_state(device_data, &device_data->state); if (ret) { - dev_err(device_data->dev, "[%s] hash_resume_state() " - "failed!", __func__); + dev_err(device_data->dev, + "%s: hash_resume_state() failed!\n", __func__); goto out; } } else if (req->nbytes == 0 && ctx->keylen == 0) { @@ -1025,31 +1005,33 @@ static int hash_hw_final(struct ahash_request *req) ret = get_empty_message_digest(device_data, &zero_hash[0], &zero_hash_size, &zero_digest); if (!ret && likely(zero_hash_size == ctx->digestsize) && - zero_digest) { + zero_digest) { memcpy(req->result, &zero_hash[0], ctx->digestsize); goto out; } else if (!ret && !zero_digest) { - dev_dbg(device_data->dev, "[%s] HMAC zero msg with " - "key, continue...", __func__); + dev_dbg(device_data->dev, + "%s: HMAC zero msg with key, continue...\n", + __func__); } else { - dev_err(device_data->dev, "[%s] 
ret=%d, or wrong " - "digest size? %s", __func__, ret, - (zero_hash_size == ctx->digestsize) ? - "true" : "false"); + dev_err(device_data->dev, + "%s: ret=%d, or wrong digest size? %s\n", + __func__, ret, + zero_hash_size == ctx->digestsize ? + "true" : "false"); /* Return error */ goto out; } } else if (req->nbytes == 0 && ctx->keylen > 0) { - dev_err(device_data->dev, "[%s] Empty message with " - "keylength > 0, NOT supported.", __func__); + dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n", + __func__); goto out; } if (!req_ctx->updated) { ret = init_hash_hw(device_data, ctx); if (ret) { - dev_err(device_data->dev, "[%s] init_hash_hw() " - "failed!", __func__); + dev_err(device_data->dev, + "%s: init_hash_hw() failed!\n", __func__); goto out; } } @@ -1067,8 +1049,8 @@ static int hash_hw_final(struct ahash_request *req) unsigned int keylen = ctx->keylen; u8 *key = ctx->key; - dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, - ctx->keylen); + dev_dbg(device_data->dev, "%s: keylen: %d\n", + __func__, ctx->keylen); hash_hw_write_key(device_data, key, keylen); } @@ -1115,10 +1097,8 @@ int hash_hw_update(struct ahash_request *req) /* Check if ctx->state.length + msg_length overflows */ if (msg_length > (req_ctx->state.length.low_word + msg_length) && - HASH_HIGH_WORD_MAX_VAL == - req_ctx->state.length.high_word) { - pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!", - __func__); + HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) { + pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__); return -EPERM; } @@ -1133,8 +1113,8 @@ int hash_hw_update(struct ahash_request *req) data_buffer, buffer, &index); if (ret) { - dev_err(device_data->dev, "[%s] hash_internal_hw_" - "update() failed!", __func__); + dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n", + __func__); goto out; } @@ -1142,9 +1122,8 @@ int hash_hw_update(struct ahash_request *req) } req_ctx->state.index = index; - dev_dbg(device_data->dev, "[%s] indata length=%d, bin=%d))", - __func__, req_ctx->state.index, - req_ctx->state.bit_index); + dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n", + __func__, req_ctx->state.index, req_ctx->state.bit_index); out: release_hash_device(device_data); @@ -1158,23 +1137,23 @@ out: * @device_state: The state to be restored in the hash hardware */ int hash_resume_state(struct hash_device_data *device_data, - const struct hash_state *device_state) + const struct hash_state *device_state) { u32 temp_cr; s32 count; int hash_mode = HASH_OPER_MODE_HASH; if (NULL == device_state) { - dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", - __func__); + dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", + __func__); return -EPERM; } /* Check correctness of index and length members */ - if (device_state->index > HASH_BLOCK_SIZE - || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { - dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", - __func__); + if (device_state->index > HASH_BLOCK_SIZE || + (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { + dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", + __func__); return -EPERM; } @@ -1198,7 +1177,7 @@ int hash_resume_state(struct hash_device_data *device_data, break; writel_relaxed(device_state->csr[count], - &device_data->base->csrx[count]); + &device_data->base->csrx[count]); } writel_relaxed(device_state->csfull, &device_data->base->csfull); @@ -1216,15 +1195,15 @@ int hash_resume_state(struct hash_device_data *device_data, * @device_state: The 
strucure where the hardware state should be saved. */ int hash_save_state(struct hash_device_data *device_data, - struct hash_state *device_state) + struct hash_state *device_state) { u32 temp_cr; u32 count; int hash_mode = HASH_OPER_MODE_HASH; if (NULL == device_state) { - dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", - __func__); + dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", + __func__); return -ENOTSUPP; } @@ -1270,20 +1249,18 @@ int hash_save_state(struct hash_device_data *device_data, int hash_check_hw(struct hash_device_data *device_data) { /* Checking Peripheral Ids */ - if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) - && HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) - && HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) - && HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) - && HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) - && HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) - && HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) - && HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3) - ) { + if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) && + HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) && + HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) && + HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) && + HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) && + HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) && + HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) && + HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) { return 0; } - dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!", - __func__); + dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__); return -ENOTSUPP; } @@ -1294,14 +1271,14 @@ int hash_check_hw(struct hash_device_data *device_data) * @algorithm: The algorithm in use. 
*/ void hash_get_digest(struct hash_device_data *device_data, - u8 *digest, int algorithm) + u8 *digest, int algorithm) { u32 temp_hx_val, count; int loop_ctr; if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) { - dev_err(device_data->dev, "[%s] Incorrect algorithm %d", - __func__, algorithm); + dev_err(device_data->dev, "%s: Incorrect algorithm %d\n", + __func__, algorithm); return; } @@ -1310,8 +1287,8 @@ void hash_get_digest(struct hash_device_data *device_data, else loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); - dev_dbg(device_data->dev, "[%s] digest array:(0x%x)", - __func__, (u32) digest); + dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n", + __func__, (u32) digest); /* Copy result into digest array */ for (count = 0; count < loop_ctr; count++) { @@ -1337,8 +1314,7 @@ static int ahash_update(struct ahash_request *req) /* Skip update for DMA, all data will be passed to DMA in final */ if (ret) { - pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!", - __func__); + pr_err("%s: hash_hw_update() failed!\n", __func__); } return ret; @@ -1353,7 +1329,7 @@ static int ahash_final(struct ahash_request *req) int ret = 0; struct hash_req_ctx *req_ctx = ahash_request_ctx(req); - pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes); + pr_debug("%s: data size: %d\n", __func__, req->nbytes); if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode) ret = hash_dma_final(req); @@ -1361,15 +1337,14 @@ static int ahash_final(struct ahash_request *req) ret = hash_hw_final(req); if (ret) { - pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed", - __func__); + pr_err("%s: hash_hw/dma_final() failed\n", __func__); } return ret; } static int hash_setkey(struct crypto_ahash *tfm, - const u8 *key, unsigned int keylen, int alg) + const u8 *key, unsigned int keylen, int alg) { int ret = 0; struct hash_ctx *ctx = crypto_ahash_ctx(tfm); @@ -1379,8 +1354,8 @@ static int hash_setkey(struct crypto_ahash *tfm, */ ctx->key = kmemdup(key, keylen, GFP_KERNEL); if (!ctx->key) { - pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key " - "for %d\n", __func__, alg); + pr_err("%s: Failed to allocate ctx->key for %d\n", + __func__, alg); return -ENOMEM; } ctx->keylen = keylen; @@ -1501,13 +1476,13 @@ out: } static int hmac_sha1_setkey(struct crypto_ahash *tfm, - const u8 *key, unsigned int keylen) + const u8 *key, unsigned int keylen) { return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); } static int hmac_sha256_setkey(struct crypto_ahash *tfm, - const u8 *key, unsigned int keylen) + const u8 *key, unsigned int keylen) { return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256); } @@ -1528,7 +1503,7 @@ static int hash_cra_init(struct crypto_tfm *tfm) hash); crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct hash_req_ctx)); + sizeof(struct hash_req_ctx)); ctx->config.data_format = HASH_DATA_8_BITS; ctx->config.algorithm = hash_alg->conf.algorithm; @@ -1541,98 +1516,97 @@ static int hash_cra_init(struct crypto_tfm *tfm) static struct hash_algo_template hash_algs[] = { { - .conf.algorithm = HASH_ALGO_SHA1, - .conf.oper_mode = HASH_OPER_MODE_HASH, - .hash = { - .init = hash_init, - .update = ahash_update, - .final = ahash_final, - .digest = ahash_sha1_digest, - .halg.digestsize = SHA1_DIGEST_SIZE, - .halg.statesize = sizeof(struct hash_ctx), - .halg.base = { - .cra_name = "sha1", - .cra_driver_name = "sha1-ux500", - .cra_flags = CRYPTO_ALG_TYPE_AHASH | - CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct hash_ctx), - .cra_init = hash_cra_init, 
- .cra_module = THIS_MODULE, + .conf.algorithm = HASH_ALGO_SHA1, + .conf.oper_mode = HASH_OPER_MODE_HASH, + .hash = { + .init = hash_init, + .update = ahash_update, + .final = ahash_final, + .digest = ahash_sha1_digest, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct hash_ctx), + .halg.base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-ux500", + .cra_flags = (CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC), + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_ctx), + .cra_init = hash_cra_init, + .cra_module = THIS_MODULE, } } }, { - .conf.algorithm = HASH_ALGO_SHA256, - .conf.oper_mode = HASH_OPER_MODE_HASH, - .hash = { - .init = hash_init, - .update = ahash_update, - .final = ahash_final, - .digest = ahash_sha256_digest, - .halg.digestsize = SHA256_DIGEST_SIZE, - .halg.statesize = sizeof(struct hash_ctx), - .halg.base = { - .cra_name = "sha256", - .cra_driver_name = "sha256-ux500", - .cra_flags = CRYPTO_ALG_TYPE_AHASH | - CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct hash_ctx), - .cra_type = &crypto_ahash_type, - .cra_init = hash_cra_init, - .cra_module = THIS_MODULE, - } + .conf.algorithm = HASH_ALGO_SHA256, + .conf.oper_mode = HASH_OPER_MODE_HASH, + .hash = { + .init = hash_init, + .update = ahash_update, + .final = ahash_final, + .digest = ahash_sha256_digest, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct hash_ctx), + .halg.base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-ux500", + .cra_flags = (CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC), + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_ctx), + .cra_type = &crypto_ahash_type, + .cra_init = hash_cra_init, + .cra_module = THIS_MODULE, } - + } }, { - .conf.algorithm = HASH_ALGO_SHA1, - .conf.oper_mode = HASH_OPER_MODE_HMAC, + .conf.algorithm = HASH_ALGO_SHA1, + .conf.oper_mode = HASH_OPER_MODE_HMAC, .hash = { - .init = hash_init, - .update = ahash_update, - .final = ahash_final, - .digest = hmac_sha1_digest, - .setkey = hmac_sha1_setkey, - .halg.digestsize = SHA1_DIGEST_SIZE, - .halg.statesize = sizeof(struct hash_ctx), - .halg.base = { - .cra_name = "hmac(sha1)", - .cra_driver_name = "hmac-sha1-ux500", - .cra_flags = CRYPTO_ALG_TYPE_AHASH | - CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct hash_ctx), - .cra_type = &crypto_ahash_type, - .cra_init = hash_cra_init, - .cra_module = THIS_MODULE, - } + .init = hash_init, + .update = ahash_update, + .final = ahash_final, + .digest = hmac_sha1_digest, + .setkey = hmac_sha1_setkey, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct hash_ctx), + .halg.base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "hmac-sha1-ux500", + .cra_flags = (CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC), + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_ctx), + .cra_type = &crypto_ahash_type, + .cra_init = hash_cra_init, + .cra_module = THIS_MODULE, } + } }, { - .conf.algorithm = HASH_ALGO_SHA256, - .conf.oper_mode = HASH_OPER_MODE_HMAC, - .hash = { - .init = hash_init, - .update = ahash_update, - .final = ahash_final, - .digest = hmac_sha256_digest, - .setkey = hmac_sha256_setkey, - .halg.digestsize = SHA256_DIGEST_SIZE, - .halg.statesize = sizeof(struct hash_ctx), - .halg.base = { - .cra_name = "hmac(sha256)", - .cra_driver_name = "hmac-sha256-ux500", - .cra_flags = CRYPTO_ALG_TYPE_AHASH | - CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct 
hash_ctx), - .cra_type = &crypto_ahash_type, - .cra_init = hash_cra_init, - .cra_module = THIS_MODULE, - } + .conf.algorithm = HASH_ALGO_SHA256, + .conf.oper_mode = HASH_OPER_MODE_HMAC, + .hash = { + .init = hash_init, + .update = ahash_update, + .final = ahash_final, + .digest = hmac_sha256_digest, + .setkey = hmac_sha256_setkey, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct hash_ctx), + .halg.base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "hmac-sha256-ux500", + .cra_flags = (CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC), + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct hash_ctx), + .cra_type = &crypto_ahash_type, + .cra_init = hash_cra_init, + .cra_module = THIS_MODULE, } + } } }; @@ -1649,7 +1623,7 @@ static int ahash_algs_register_all(struct hash_device_data *device_data) ret = crypto_register_ahash(&hash_algs[i].hash); if (ret) { count = i; - dev_err(device_data->dev, "[%s] alg registration failed", + dev_err(device_data->dev, "%s: alg registration failed\n", hash_algs[i].hash.halg.base.cra_driver_name); goto unreg; } @@ -1683,9 +1657,8 @@ static int ux500_hash_probe(struct platform_device *pdev) struct hash_device_data *device_data; struct device *dev = &pdev->dev; - device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC); + device_data = kzalloc(sizeof(*device_data), GFP_ATOMIC); if (!device_data) { - dev_dbg(dev, "[%s] kzalloc() failed!", __func__); ret = -ENOMEM; goto out; } @@ -1695,14 +1668,14 @@ static int ux500_hash_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { - dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__); + dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__); ret = -ENODEV; goto out_kfree; } res = request_mem_region(res->start, resource_size(res), pdev->name); if (res == NULL) { - dev_dbg(dev, "[%s] request_mem_region() failed!", __func__); + dev_dbg(dev, "%s: request_mem_region() failed!\n", __func__); ret = -EBUSY; goto out_kfree; } @@ -1710,8 +1683,7 @@ static int ux500_hash_probe(struct platform_device *pdev) device_data->phybase = res->start; device_data->base = ioremap(res->start, resource_size(res)); if (!device_data->base) { - dev_err(dev, "[%s] ioremap() failed!", - __func__); + dev_err(dev, "%s: ioremap() failed!\n", __func__); ret = -ENOMEM; goto out_free_mem; } @@ -1721,7 +1693,7 @@ static int ux500_hash_probe(struct platform_device *pdev) /* Enable power for HASH1 hardware block */ device_data->regulator = regulator_get(dev, "v-ape"); if (IS_ERR(device_data->regulator)) { - dev_err(dev, "[%s] regulator_get() failed!", __func__); + dev_err(dev, "%s: regulator_get() failed!\n", __func__); ret = PTR_ERR(device_data->regulator); device_data->regulator = NULL; goto out_unmap; @@ -1730,27 +1702,27 @@ static int ux500_hash_probe(struct platform_device *pdev) /* Enable the clock for HASH1 hardware block */ device_data->clk = clk_get(dev, NULL); if (IS_ERR(device_data->clk)) { - dev_err(dev, "[%s] clk_get() failed!", __func__); + dev_err(dev, "%s: clk_get() failed!\n", __func__); ret = PTR_ERR(device_data->clk); goto out_regulator; } ret = clk_prepare(device_data->clk); if (ret) { - dev_err(dev, "[%s] clk_prepare() failed!", __func__); + dev_err(dev, "%s: clk_prepare() failed!\n", __func__); goto out_clk; } /* Enable device power (and clock) */ ret = hash_enable_power(device_data, false); if (ret) { - dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); + dev_err(dev, "%s: hash_enable_power() failed!\n", 
__func__); goto out_clk_unprepare; } ret = hash_check_hw(device_data); if (ret) { - dev_err(dev, "[%s] hash_check_hw() failed!", __func__); + dev_err(dev, "%s: hash_check_hw() failed!\n", __func__); goto out_power; } @@ -1766,8 +1738,8 @@ static int ux500_hash_probe(struct platform_device *pdev) ret = ahash_algs_register_all(device_data); if (ret) { - dev_err(dev, "[%s] ahash_algs_register_all() " - "failed!", __func__); + dev_err(dev, "%s: ahash_algs_register_all() failed!\n", + __func__); goto out_power; } @@ -1810,8 +1782,7 @@ static int ux500_hash_remove(struct platform_device *pdev) device_data = platform_get_drvdata(pdev); if (!device_data) { - dev_err(dev, "[%s]: platform_get_drvdata() failed!", - __func__); + dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); return -ENOMEM; } @@ -1841,7 +1812,7 @@ static int ux500_hash_remove(struct platform_device *pdev) ahash_algs_unregister_all(device_data); if (hash_disable_power(device_data, false)) - dev_err(dev, "[%s]: hash_disable_power() failed", + dev_err(dev, "%s: hash_disable_power() failed\n", __func__); clk_unprepare(device_data->clk); @@ -1870,8 +1841,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev) device_data = platform_get_drvdata(pdev); if (!device_data) { - dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", - __func__); + dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n", + __func__); return; } @@ -1880,8 +1851,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev) /* current_ctx allocates a device, NULL = unallocated */ if (!device_data->current_ctx) { if (down_trylock(&driver_data.device_allocation)) - dev_dbg(&pdev->dev, "[%s]: Cryp still in use!" - "Shutting down anyway...", __func__); + dev_dbg(&pdev->dev, "%s: Cryp still in use! 
Shutting down anyway...\n", + __func__); /** * (Allocate the device) * Need to set this to non-null (dummy) value, * to avoid usage if context switching. */ @@ -1906,8 +1877,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev) release_mem_region(res->start, resource_size(res)); if (hash_disable_power(device_data, false)) - dev_err(&pdev->dev, "[%s] hash_disable_power() failed", - __func__); + dev_err(&pdev->dev, "%s: hash_disable_power() failed\n", + __func__); } /** @@ -1922,7 +1893,7 @@ static int ux500_hash_suspend(struct device *dev) device_data = dev_get_drvdata(dev); if (!device_data) { - dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); + dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); return -ENOMEM; } @@ -1933,15 +1904,16 @@ static int ux500_hash_suspend(struct device *dev) if (device_data->current_ctx == ++temp_ctx) { if (down_interruptible(&driver_data.device_allocation)) - dev_dbg(dev, "[%s]: down_interruptible() failed", + dev_dbg(dev, "%s: down_interruptible() failed\n", __func__); ret = hash_disable_power(device_data, false); - } else + } else { ret = hash_disable_power(device_data, true); + } if (ret) - dev_err(dev, "[%s]: hash_disable_power()", __func__); + dev_err(dev, "%s: hash_disable_power()\n", __func__); return ret; } @@ -1958,7 +1930,7 @@ static int ux500_hash_resume(struct device *dev) device_data = dev_get_drvdata(dev); if (!device_data) { - dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); + dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); return -ENOMEM; } @@ -1973,7 +1945,7 @@ static int ux500_hash_resume(struct device *dev) ret = hash_enable_power(device_data, true); if (ret) - dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); + dev_err(dev, "%s: hash_enable_power() failed!\n", __func__); return ret; } @@ -1981,8 +1953,8 @@ static int ux500_hash_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume); static const struct of_device_id ux500_hash_match[] = { - { .compatible = "stericsson,ux500-hash" }, - { }, + { .compatible = "stericsson,ux500-hash" }, + { }, }; static struct platform_driver hash_driver = { -- cgit v1.2.3 From 9e01d0c6f63a59cc756c0028e17effdbadee7227 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sun, 21 Jul 2013 14:41:38 -0300 Subject: hwrng: mxc-rnga - Check the return value from clk_prepare_enable() clk_prepare_enable() may fail, so let's check its return value and propagate it in the case of error. Signed-off-by: Fabio Estevam Signed-off-by: Herbert Xu --- drivers/char/hw_random/mxc-rnga.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c index 19a12ac64a9e..6a86b6f56af2 100644 --- a/drivers/char/hw_random/mxc-rnga.c +++ b/drivers/char/hw_random/mxc-rnga.c @@ -164,7 +164,9 @@ static int __init mxc_rnga_probe(struct platform_device *pdev) goto out; } - clk_prepare_enable(mxc_rng->clk); + err = clk_prepare_enable(mxc_rng->clk); + if (err) + goto out; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res); -- cgit v1.2.3 From eaef7e3f3f82923ea175ce4859908bb0d70072f3 Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Fri, 26 Jul 2013 12:29:14 +0530 Subject: crypto: omap-sham - Add SHA384 and SHA512 Support Adding support for SHA384 and SHA512 in addition to the MD5, SHA1, SHA224, and SHA256 that the omap sha module supports. 
In order to add the support - Removed hard coded register offsets and passing offsets from pdata - Updating Flag offsets so that they can be used for SHA256 and SHA512 - Adding the algo info. Signed-off-by: Lokesh Vutla Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 11 +- drivers/crypto/omap-sham.c | 245 +++++++++++++++++++++++++++++++++++++-------- 2 files changed, 209 insertions(+), 47 deletions(-) diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 8ff7c230d82e..62fb673b28df 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -242,13 +242,16 @@ config CRYPTO_DEV_PPC4XX This option allows you to have support for AMCC crypto acceleration. config CRYPTO_DEV_OMAP_SHAM - tristate "Support for OMAP SHA1/MD5 hw accelerator" - depends on ARCH_OMAP2 || ARCH_OMAP3 + tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator" + depends on ARCH_OMAP2PLUS select CRYPTO_SHA1 select CRYPTO_MD5 + select CRYPTO_SHA256 + select CRYPTO_SHA512 + select CRYPTO_HMAC help - OMAP processors have SHA1/MD5 hw accelerator. Select this if you - want to use the OMAP module for SHA1/MD5 algorithms. + OMAP processors have MD5/SHA1/SHA2 hw accelerator. Select this if you + want to use the OMAP module for MD5/SHA1/SHA2 algorithms. config CRYPTO_DEV_OMAP_AES tristate "Support for OMAP AES hw engine" diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 4bb67652c200..f73b1e0fa5e8 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -44,7 +44,6 @@ #include #include -#define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE #define MD5_DIGEST_SIZE 16 #define DST_MAXBURST 16 @@ -54,7 +53,7 @@ #define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04)) #define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs) -#define SHA_REG_ODIGEST(x) (0x00 + ((x) * 0x04)) +#define SHA_REG_ODIGEST(dd, x) ((dd)->pdata->odigest_ofs + (x * 0x04)) #define SHA_REG_CTRL 0x18 #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5) @@ -75,18 +74,21 @@ #define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs) #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0) -#define SHA_REG_MODE 0x44 +#define SHA_REG_MODE(dd) ((dd)->pdata->mode_ofs) #define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7) #define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5) #define SHA_REG_MODE_CLOSE_HASH (1 << 4) #define SHA_REG_MODE_ALGO_CONSTANT (1 << 3) -#define SHA_REG_MODE_ALGO_MASK (3 << 1) -#define SHA_REG_MODE_ALGO_MD5_128 (0 << 1) -#define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1) -#define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1) -#define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1) -#define SHA_REG_LENGTH 0x48 +#define SHA_REG_MODE_ALGO_MASK (7 << 0) +#define SHA_REG_MODE_ALGO_MD5_128 (0 << 1) +#define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1) +#define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1) +#define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1) +#define SHA_REG_MODE_ALGO_SHA2_384 (1 << 0) +#define SHA_REG_MODE_ALGO_SHA2_512 (3 << 0) + +#define SHA_REG_LENGTH(dd) ((dd)->pdata->length_ofs) #define SHA_REG_IRQSTATUS 0x118 #define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3) @@ -117,18 +119,16 @@ #define FLAGS_SG 17 #define FLAGS_MODE_SHIFT 18 -#define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK \ - << (FLAGS_MODE_SHIFT - 1)) -#define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 \ - << (FLAGS_MODE_SHIFT - 1)) -#define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 \ - << (FLAGS_MODE_SHIFT - 1)) -#define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 \ - << (FLAGS_MODE_SHIFT - 1)) -#define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 \ - << (FLAGS_MODE_SHIFT - 1)) -#define FLAGS_HMAC 20 -#define 
FLAGS_ERROR 21 +#define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT) +#define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT) +#define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT) +#define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT) +#define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT) +#define FLAGS_MODE_SHA384 (SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT) +#define FLAGS_MODE_SHA512 (SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT) + +#define FLAGS_HMAC 21 +#define FLAGS_ERROR 22 #define OP_UPDATE 1 #define OP_FINAL 2 @@ -145,7 +145,7 @@ struct omap_sham_reqctx { unsigned long flags; unsigned long op; - u8 digest[SHA256_DIGEST_SIZE] OMAP_ALIGNED; + u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED; size_t digcnt; size_t bufcnt; size_t buflen; @@ -162,8 +162,8 @@ struct omap_sham_reqctx { struct omap_sham_hmac_ctx { struct crypto_shash *shash; - u8 ipad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED; - u8 opad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED; + u8 ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED; + u8 opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED; }; struct omap_sham_ctx { @@ -205,6 +205,8 @@ struct omap_sham_pdata { u32 rev_ofs; u32 mask_ofs; u32 sysstatus_ofs; + u32 mode_ofs; + u32 length_ofs; u32 major_mask; u32 major_shift; @@ -306,9 +308,9 @@ static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out) for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { if (out) opad[i] = omap_sham_read(dd, - SHA_REG_ODIGEST(i)); + SHA_REG_ODIGEST(dd, i)); else - omap_sham_write(dd, SHA_REG_ODIGEST(i), + omap_sham_write(dd, SHA_REG_ODIGEST(dd, i), opad[i]); } } @@ -342,6 +344,12 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req) case FLAGS_MODE_SHA256: d = SHA256_DIGEST_SIZE / sizeof(u32); break; + case FLAGS_MODE_SHA384: + d = SHA384_DIGEST_SIZE / sizeof(u32); + break; + case FLAGS_MODE_SHA512: + d = SHA512_DIGEST_SIZE / sizeof(u32); + break; default: d = 0; } @@ -404,6 +412,30 @@ static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd) return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY); } +static int get_block_size(struct omap_sham_reqctx *ctx) +{ + int d; + + switch (ctx->flags & FLAGS_MODE_MASK) { + case FLAGS_MODE_MD5: + case FLAGS_MODE_SHA1: + d = SHA1_BLOCK_SIZE; + break; + case FLAGS_MODE_SHA224: + case FLAGS_MODE_SHA256: + d = SHA256_BLOCK_SIZE; + break; + case FLAGS_MODE_SHA384: + case FLAGS_MODE_SHA512: + d = SHA512_BLOCK_SIZE; + break; + default: + d = 0; + } + + return d; +} + static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset, u32 *value, int count) { @@ -422,20 +454,24 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, * CLOSE_HASH only for the last one. Note that flags mode bits * correspond to algorithm encoding in mode register. 
*/ - val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1); + val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT); if (!ctx->digcnt) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct omap_sham_hmac_ctx *bctx = tctx->base; + int bs, nr_dr; val |= SHA_REG_MODE_ALGO_CONSTANT; if (ctx->flags & BIT(FLAGS_HMAC)) { + bs = get_block_size(ctx); + nr_dr = bs / (2 * sizeof(u32)); val |= SHA_REG_MODE_HMAC_KEY_PROC; - omap_sham_write_n(dd, SHA_REG_ODIGEST(0), - (u32 *)bctx->ipad, - SHA1_BLOCK_SIZE / sizeof(u32)); - ctx->digcnt += SHA1_BLOCK_SIZE; + omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0), + (u32 *)bctx->ipad, nr_dr); + omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0), + (u32 *)bctx->ipad + nr_dr, nr_dr); + ctx->digcnt += bs; } } @@ -451,7 +487,7 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, SHA_REG_MODE_HMAC_KEY_PROC; dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags); - omap_sham_write_mask(dd, SHA_REG_MODE, val, mask); + omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask); omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY); omap_sham_write_mask(dd, SHA_REG_MASK(dd), SHA_REG_MASK_IT_EN | @@ -461,7 +497,7 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length) { - omap_sham_write(dd, SHA_REG_LENGTH, length); + omap_sham_write(dd, SHA_REG_LENGTH(dd), length); } static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd) @@ -666,14 +702,14 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) /* Start address alignment */ #define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32))) /* SHA1 block size alignment */ -#define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE)) +#define SG_SA(sg, bs) (IS_ALIGNED(sg->length, bs)) static int omap_sham_update_dma_start(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); unsigned int length, final, tail; struct scatterlist *sg; - int ret; + int ret, bs; if (!ctx->total) return 0; @@ -694,23 +730,24 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) ctx->digcnt, ctx->bufcnt, ctx->total); sg = ctx->sg; + bs = get_block_size(ctx); if (!SG_AA(sg)) return omap_sham_update_dma_slow(dd); - if (!sg_is_last(sg) && !SG_SA(sg)) - /* size is not SHA1_BLOCK_SIZE aligned */ + if (!sg_is_last(sg) && !SG_SA(sg, bs)) + /* size is not BLOCK_SIZE aligned */ return omap_sham_update_dma_slow(dd); length = min(ctx->total, sg->length); if (sg_is_last(sg)) { if (!(ctx->flags & BIT(FLAGS_FINUP))) { - /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */ - tail = length & (SHA1_MD5_BLOCK_SIZE - 1); + /* not last sg must be BLOCK_SIZE aligned */ + tail = length & (bs - 1); /* without finup() we need one block to close hash */ if (!tail) - tail = SHA1_MD5_BLOCK_SIZE; + tail = bs; length -= tail; } } @@ -773,6 +810,7 @@ static int omap_sham_init(struct ahash_request *req) struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_dev *dd = NULL, *tmp; + int bs = 0; spin_lock_bh(&sham.lock); if (!tctx->dd) { @@ -796,15 +834,27 @@ static int omap_sham_init(struct ahash_request *req) switch (crypto_ahash_digestsize(tfm)) { case MD5_DIGEST_SIZE: ctx->flags |= FLAGS_MODE_MD5; + bs = SHA1_BLOCK_SIZE; break; case SHA1_DIGEST_SIZE: ctx->flags |= FLAGS_MODE_SHA1; + bs = SHA1_BLOCK_SIZE; break; case SHA224_DIGEST_SIZE: 
ctx->flags |= FLAGS_MODE_SHA224; + bs = SHA224_BLOCK_SIZE; break; case SHA256_DIGEST_SIZE: ctx->flags |= FLAGS_MODE_SHA256; + bs = SHA256_BLOCK_SIZE; + break; + case SHA384_DIGEST_SIZE: + ctx->flags |= FLAGS_MODE_SHA384; + bs = SHA384_BLOCK_SIZE; + break; + case SHA512_DIGEST_SIZE: + ctx->flags |= FLAGS_MODE_SHA512; + bs = SHA512_BLOCK_SIZE; break; } @@ -816,8 +866,8 @@ static int omap_sham_init(struct ahash_request *req) if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { struct omap_sham_hmac_ctx *bctx = tctx->base; - memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); - ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; + memcpy(ctx->buffer, bctx->ipad, bs); + ctx->bufcnt = bs; } ctx->flags |= BIT(FLAGS_HMAC); @@ -1006,6 +1056,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) static int omap_sham_update(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + int bs = get_block_size(ctx); if (!req->nbytes) return 0; @@ -1023,7 +1074,7 @@ static int omap_sham_update(struct ahash_request *req) */ omap_sham_append_sg(ctx); return 0; - } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) { + } else if (ctx->bufcnt + ctx->total <= bs) { /* * faster to use CPU for short transfers */ @@ -1214,6 +1265,16 @@ static int omap_sham_cra_md5_init(struct crypto_tfm *tfm) return omap_sham_cra_init_alg(tfm, "md5"); } +static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm) +{ + return omap_sham_cra_init_alg(tfm, "sha384"); +} + +static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm) +{ + return omap_sham_cra_init_alg(tfm, "sha512"); +} + static void omap_sham_cra_exit(struct crypto_tfm *tfm) { struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); @@ -1422,6 +1483,101 @@ static struct ahash_alg algs_sha224_sha256[] = { }, }; +static struct ahash_alg algs_sha384_sha512[] = { +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha384", + .cra_driver_name = "omap-sha384", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_init, + .cra_exit = omap_sham_cra_exit, + } +}, +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha512", + .cra_driver_name = "omap-sha512", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_init, + .cra_exit = omap_sham_cra_exit, + } +}, +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .setkey = omap_sham_setkey, + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha384)", + .cra_driver_name = "omap-hmac-sha384", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx) + + sizeof(struct omap_sham_hmac_ctx), + 
.cra_alignmask = OMAP_ALIGN_MASK, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_sha384_init, + .cra_exit = omap_sham_cra_exit, + } +}, +{ + .init = omap_sham_init, + .update = omap_sham_update, + .final = omap_sham_final, + .finup = omap_sham_finup, + .digest = omap_sham_digest, + .setkey = omap_sham_setkey, + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha512)", + .cra_driver_name = "omap-hmac-sha512", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_sham_ctx) + + sizeof(struct omap_sham_hmac_ctx), + .cra_alignmask = OMAP_ALIGN_MASK, + .cra_module = THIS_MODULE, + .cra_init = omap_sham_cra_sha512_init, + .cra_exit = omap_sham_cra_exit, + } +}, +}; + static void omap_sham_done_task(unsigned long data) { struct omap_sham_dev *dd = (struct omap_sham_dev *)data; @@ -1548,11 +1704,14 @@ static const struct omap_sham_pdata omap_sham_pdata_omap4 = { .poll_irq = omap_sham_poll_irq_omap4, .intr_hdlr = omap_sham_irq_omap4, .idigest_ofs = 0x020, + .odigest_ofs = 0x0, .din_ofs = 0x080, .digcnt_ofs = 0x040, .rev_ofs = 0x100, .mask_ofs = 0x110, .sysstatus_ofs = 0x114, + .mode_ofs = 0x44, + .length_ofs = 0x48, .major_mask = 0x0700, .major_shift = 8, .minor_mask = 0x003f, -- cgit v1.2.3 From 7d7c704d0dc9858e6bbb515842e32acb70cf0222 Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Fri, 26 Jul 2013 12:29:15 +0530 Subject: crypto: omap-sham - Add OMAP5/AM43XX SHAM Support Add support for the OMAP5 version of the SHAM module that is present on OMAP5 and AM43xx SoCs. This module is very similar to the OMAP4 version of the SHAM module, and adds SHA384 and SHA512 hardware-accelerated hash functions to it. To handle the larger digest size of SHA512, a few SHA512_DIGEST_i registers (i = 1-16; the first 8 are duplicated from the SHA_DIGEST_i registers) are added at the end of the register set. Add the above register offsets and module info to the pdata.
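To illustrate how the per-SoC offsets are consumed, here is a minimal sketch of the pdata-based register addressing; the exact macro bodies are an assumption modeled on the SHA_REG_ODIGEST/SHA_REG_MODE/SHA_REG_LENGTH accessors visible in the diffs above:

/*
 * Sketch: register addresses are computed from per-SoC offsets held in
 * pdata, so the same read/write helpers serve the OMAP2, OMAP4 and
 * OMAP5 register maps without further special-casing.
 */
#define SHA_REG_IDIGEST(dd, x)	((dd)->pdata->idigest_ofs + ((x) * 0x04))
#define SHA_REG_ODIGEST(dd, x)	((dd)->pdata->odigest_ofs + ((x) * 0x04))
#define SHA_REG_MODE(dd)	((dd)->pdata->mode_ofs)
#define SHA_REG_LENGTH(dd)	((dd)->pdata->length_ofs)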
Signed-off-by: Lokesh Vutla Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index f73b1e0fa5e8..a6eb6a8bbb61 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1718,6 +1718,46 @@ static const struct omap_sham_pdata omap_sham_pdata_omap4 = { .minor_shift = 0, }; +static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = { + { + .algs_list = algs_sha1_md5, + .size = ARRAY_SIZE(algs_sha1_md5), + }, + { + .algs_list = algs_sha224_sha256, + .size = ARRAY_SIZE(algs_sha224_sha256), + }, + { + .algs_list = algs_sha384_sha512, + .size = ARRAY_SIZE(algs_sha384_sha512), + }, +}; + +static const struct omap_sham_pdata omap_sham_pdata_omap5 = { + .algs_info = omap_sham_algs_info_omap5, + .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap5), + .flags = BIT(FLAGS_AUTO_XOR), + .digest_size = SHA512_DIGEST_SIZE, + .copy_hash = omap_sham_copy_hash_omap4, + .write_ctrl = omap_sham_write_ctrl_omap4, + .trigger = omap_sham_trigger_omap4, + .poll_irq = omap_sham_poll_irq_omap4, + .intr_hdlr = omap_sham_irq_omap4, + .idigest_ofs = 0x240, + .odigest_ofs = 0x200, + .din_ofs = 0x080, + .digcnt_ofs = 0x280, + .rev_ofs = 0x100, + .mask_ofs = 0x110, + .sysstatus_ofs = 0x114, + .mode_ofs = 0x284, + .length_ofs = 0x288, + .major_mask = 0x0700, + .major_shift = 8, + .minor_mask = 0x003f, + .minor_shift = 0, +}; + static const struct of_device_id omap_sham_of_match[] = { { .compatible = "ti,omap2-sham", @@ -1727,6 +1767,10 @@ static const struct of_device_id omap_sham_of_match[] = { .compatible = "ti,omap4-sham", .data = &omap_sham_pdata_omap4, }, + { + .compatible = "ti,omap5-sham", + .data = &omap_sham_pdata_omap5, + }, {}, }; MODULE_DEVICE_TABLE(of, omap_sham_of_match); -- cgit v1.2.3 From 0de9c3876b36147e931f475ae435bab04398cd60 Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Fri, 26 Jul 2013 12:29:16 +0530 Subject: crypto: omap-sham - Convert to devm_request_irq() Use devm_request_irq() rather than request_irq(), and remove the free_irq() calls from the probe error path and the remove handler.
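For reference, a minimal sketch of the devm_request_irq() pattern with a hypothetical foo driver (foo_dev, foo_irq and the probe body are placeholders, not part of this patch):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct foo_dev { int irq; };		/* hypothetical driver state */

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;		/* stub handler */
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dev *dd = platform_get_drvdata(pdev);

	/*
	 * Managed IRQ: freed automatically when the device is unbound,
	 * so neither the error path nor remove() needs free_irq().
	 */
	return devm_request_irq(&pdev->dev, dd->irq, foo_irq,
				IRQF_TRIGGER_NONE, dev_name(&pdev->dev), dd);
}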
Signed-off-by: Lokesh Vutla Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index a6eb6a8bbb61..b82b140e843a 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1896,10 +1896,11 @@ static int omap_sham_probe(struct platform_device *pdev) } dd->phys_base = res.start; - err = request_irq(dd->irq, dd->pdata->intr_hdlr, IRQF_TRIGGER_LOW, - dev_name(dev), dd); + err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr, + IRQF_TRIGGER_NONE, dev_name(dev), dd); if (err) { - dev_err(dev, "unable to request irq.\n"); + dev_err(dev, "unable to request irq %d, err = %d\n", + dd->irq, err); goto res_err; } @@ -1912,7 +1913,7 @@ static int omap_sham_probe(struct platform_device *pdev) dev_err(dev, "unable to obtain RX DMA engine channel %u\n", dd->dma); err = -ENXIO; - goto dma_err; + goto res_err; } dd->flags |= dd->pdata->flags; @@ -1950,8 +1951,6 @@ err_algs: &dd->pdata->algs_info[i].algs_list[j]); pm_runtime_disable(dev); dma_release_channel(dd->dma_lch); -dma_err: - free_irq(dd->irq, dd); res_err: kfree(dd); dd = NULL; @@ -1979,7 +1978,6 @@ static int omap_sham_remove(struct platform_device *pdev) tasklet_kill(&dd->done_task); pm_runtime_disable(&pdev->dev); dma_release_channel(dd->dma_lch); - free_irq(dd->irq, dd); kfree(dd); dd = NULL; -- cgit v1.2.3 From 7a7e4b73b5e3244063c224c307b1b4b8a0110d6d Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Fri, 26 Jul 2013 12:29:17 +0530 Subject: crypto: omap-sham - Convert to devm_kzalloc() Use devm_kzalloc() to make cleanup paths simpler. Signed-off-by: Lokesh Vutla Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index b82b140e843a..ae1ca8b2dfb2 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1870,7 +1870,7 @@ static int omap_sham_probe(struct platform_device *pdev) int err, i, j; u32 rev; - dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL); + dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL); if (dd == NULL) { dev_err(dev, "unable to alloc data struct.\n"); err = -ENOMEM; @@ -1887,12 +1887,12 @@ static int omap_sham_probe(struct platform_device *pdev) err = (dev->of_node) ? 
omap_sham_get_res_of(dd, dev, &res) : omap_sham_get_res_pdev(dd, pdev, &res); if (err) - goto res_err; + goto data_err; dd->io_base = devm_ioremap_resource(dev, &res); if (IS_ERR(dd->io_base)) { err = PTR_ERR(dd->io_base); - goto res_err; + goto data_err; } dd->phys_base = res.start; @@ -1901,7 +1901,7 @@ err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr, if (err) { dev_err(dev, "unable to request irq %d, err = %d\n", dd->irq, err); - goto res_err; + goto data_err; } dma_cap_zero(mask); @@ -1913,7 +1913,7 @@ dev_err(dev, "unable to obtain RX DMA engine channel %u\n", dd->dma); err = -ENXIO; - goto res_err; + goto data_err; } dd->flags |= dd->pdata->flags; @@ -1951,9 +1951,6 @@ err_algs: &dd->pdata->algs_info[i].algs_list[j]); pm_runtime_disable(dev); dma_release_channel(dd->dma_lch); -res_err: - kfree(dd); - dd = NULL; data_err: dev_err(dev, "initialization failed.\n"); @@ -1978,8 +1975,6 @@ tasklet_kill(&dd->done_task); pm_runtime_disable(&pdev->dev); dma_release_channel(dd->dma_lch); - kfree(dd); - dd = NULL; return 0; } -- cgit v1.2.3 From 674349f365ce7ac2b826edc619907a19c7136822 Mon Sep 17 00:00:00 2001 From: Andreas Robinson Date: Thu, 1 Aug 2013 10:55:34 +1000 Subject: modules: add support for soft module dependencies Additional and optional dependencies that are not discovered while building the kernel and modules can now be declared explicitly. Signed-off-by: Andreas Robinson Acked-by: Herbert Xu Signed-off-by: Rusty Russell Signed-off-by: Herbert Xu --- include/linux/module.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/linux/module.h b/include/linux/module.h index 46f1ea01e6f6..504035f3ece1 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -97,6 +97,11 @@ extern const struct gtype##_id __mod_##gtype##_table \ /* For userspace: you can also call me... */ #define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias) +/* Soft module dependencies. See man modprobe.d for details.
+ * Example: MODULE_SOFTDEP("pre: module-foo module-bar post: module-baz") + */ +#define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep) + /* * The following license idents are currently accepted as indicating free * software modules -- cgit v1.2.3 From 6dad41158db696d77eb17df12d5b1f3e196a7f2f Mon Sep 17 00:00:00 2001 From: Ruchika Gupta Date: Wed, 31 Jul 2013 15:48:56 +0530 Subject: crypto: caam - Remove unused functions from Job Ring Signed-off-by: Ruchika Gupta Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 3 -- drivers/crypto/caam/intern.h | 5 ---- drivers/crypto/caam/jr.c | 67 -------------------------------------------- drivers/crypto/caam/jr.h | 2 -- 4 files changed, 77 deletions(-) diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 86c96009faad..b010d42a1803 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -313,9 +313,6 @@ static int caam_probe(struct platform_device *pdev) /* NOTE: RTIC detection ought to go here, around Si time */ - /* Initialize queue allocator lock */ - spin_lock_init(&ctrlpriv->jr_alloc_lock); - caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id); /* Report "alive" for developer to see */ diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index e4a16b741371..34c4b9f7fbfa 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -9,9 +9,6 @@ #ifndef INTERN_H #define INTERN_H -#define JOBR_UNASSIGNED 0 -#define JOBR_ASSIGNED 1 - /* Currently comes from Kconfig param as a ^2 (driver-required) */ #define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE) @@ -46,7 +43,6 @@ struct caam_drv_private_jr { struct caam_job_ring __iomem *rregs; /* JobR's register space */ struct tasklet_struct irqtask; int irq; /* One per queue */ - int assign; /* busy/free */ /* Job ring info */ int ringsize; /* Size of rings (assume input = output) */ @@ -68,7 +64,6 @@ struct caam_drv_private { struct device *dev; struct device **jrdev; /* Alloc'ed array per sub-device */ - spinlock_t jr_alloc_lock; struct platform_device *pdev; /* Physical-presence section */ diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index b4aa773ecbc8..105ba4da6180 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -125,72 +125,6 @@ static void caam_jr_dequeue(unsigned long devarg) clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); } -/** - * caam_jr_register() - Alloc a ring for someone to use as needed. Returns - * an ordinal of the rings allocated, else returns -ENODEV if no rings - * are available. - * @ctrldev: points to the controller level dev (parent) that - * owns rings available for use. - * @dev: points to where a pointer to the newly allocated queue's - * dev can be written to if successful. 
- **/ -int caam_jr_register(struct device *ctrldev, struct device **rdev) -{ - struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); - struct caam_drv_private_jr *jrpriv = NULL; - int ring; - - /* Lock, if free ring - assign, unlock */ - spin_lock(&ctrlpriv->jr_alloc_lock); - for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { - jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]); - if (jrpriv->assign == JOBR_UNASSIGNED) { - jrpriv->assign = JOBR_ASSIGNED; - *rdev = ctrlpriv->jrdev[ring]; - spin_unlock(&ctrlpriv->jr_alloc_lock); - return ring; - } - } - - /* If assigned, write dev where caller needs it */ - spin_unlock(&ctrlpriv->jr_alloc_lock); - *rdev = NULL; - - return -ENODEV; -} -EXPORT_SYMBOL(caam_jr_register); - -/** - * caam_jr_deregister() - Deregister an API and release the queue. - * Returns 0 if OK, -EBUSY if queue still contains pending entries - * or unprocessed results at the time of the call - * @dev - points to the dev that identifies the queue to - * be released. - **/ -int caam_jr_deregister(struct device *rdev) -{ - struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); - struct caam_drv_private *ctrlpriv; - - /* Get the owning controller's private space */ - ctrlpriv = dev_get_drvdata(jrpriv->parentdev); - - /* - * Make sure ring empty before release - */ - if (rd_reg32(&jrpriv->rregs->outring_used) || - (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH)) - return -EBUSY; - - /* Release ring */ - spin_lock(&ctrlpriv->jr_alloc_lock); - jrpriv->assign = JOBR_UNASSIGNED; - spin_unlock(&ctrlpriv->jr_alloc_lock); - - return 0; -} -EXPORT_SYMBOL(caam_jr_deregister); - /** * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, * -EBUSY if the queue is full, -EIO if it cannot map the caller's @@ -379,7 +313,6 @@ static int caam_jr_init(struct device *dev) (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); - jrp->assign = JOBR_UNASSIGNED; return 0; } diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h index c23df395b622..9d8741a59037 100644 --- a/drivers/crypto/caam/jr.h +++ b/drivers/crypto/caam/jr.h @@ -8,8 +8,6 @@ #define JR_H /* Prototypes for backend-level services exposed to APIs */ -int caam_jr_register(struct device *ctrldev, struct device **rdev); -int caam_jr_deregister(struct device *rdev); int caam_jr_enqueue(struct device *dev, u32 *desc, void (*cbk)(struct device *dev, u32 *desc, u32 status, void *areq), -- cgit v1.2.3 From 032c8cacc702da8a53c24d24a4e3c3a572a34078 Mon Sep 17 00:00:00 2001 From: Cristian Stoica Date: Thu, 18 Jul 2013 18:57:07 +0300 Subject: crypto: testmgr - remove double execution of the same test suite This patch removes redundant execution of the same test suite in cases where alg and driver variables are the same (e.g. 
when alg_test is called from tcrypt_test) Signed-off-by: Cristian Stoica Reviewed-by: Horia Geanta Reviewed-by: Ruchika Gupta Signed-off-by: Herbert Xu --- crypto/testmgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 2f00607039e2..e091ef6e1791 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -3234,7 +3234,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) if (i >= 0) rc |= alg_test_descs[i].test(alg_test_descs + i, driver, type, mask); - if (j >= 0) + if (j >= 0 && j != i) rc |= alg_test_descs[j].test(alg_test_descs + j, driver, type, mask); -- cgit v1.2.3 From 2b7c15ca17128f7c11ebb3d4480f917829703b01 Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Fri, 2 Aug 2013 12:09:51 +0000 Subject: crypto: nx - fix physical addresses added to sg lists The co-processor receives data to be hashed through scatter/gather lists pointing to physical addresses. When vmalloc'ed data is given, the driver must calculate the physical address of each page of the data. However, the current version just calculates the physical address once and keeps incrementing it even when a page boundary is crossed. This patch fixes this behaviour. Reviewed-by: Fionnuala Gunter Reviewed-by: Joel Schopp Reviewed-by: Joy Latten Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index bbdab6e5ccf0..ad07dc62b95a 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -114,13 +114,29 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head, * have been described (or @sgmax elements have been written), the * loop ends. min_t is used to ensure @end_addr falls on the same page * as sg_addr, if not, we need to create another nx_sg element for the - * data on the next page */ + * data on the next page. + * + * Also when using vmalloc'ed data, every time that a system page + * boundary is crossed the physical address needs to be re-calculated. + */ for (sg = sg_head; sg_len < len; sg++) { + u64 next_page; + sg->addr = sg_addr; - sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), end_addr); - sg->len = sg_addr - sg->addr; + sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), + end_addr); + + next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE; + sg->len = min_t(u64, sg_addr, next_page) - sg->addr; sg_len += sg->len; + if (sg_addr >= next_page && + is_vmalloc_addr(start_addr + sg_len)) { + sg_addr = page_to_phys(vmalloc_to_page( + start_addr + sg_len)); + end_addr = sg_addr + len - sg_len; + } + if ((sg - sg_head) == sgmax) { pr_err("nx: scatter/gather list overflow, pid: %d\n", current->pid); -- cgit v1.2.3 From d311149337f93ae4de60a2f1c24a0d856089903f Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Fri, 2 Aug 2013 12:09:52 +0000 Subject: crypto: nx - fix limits to sg lists for SHA-2 The co-processor has several limits regarding the length of scatter/gather lists and the total number of bytes in them. These limits are available in the device tree, as follows: - "ibm,max-sg-len": maximum number of bytes of each scatter/gather list. - "ibm,max-sync-cop": used for synchronous operations; it is an array of structures that contain information regarding the limits that must be considered for each mode and operation. The most important limits in it are: - The total number of bytes that a scatter/gather list can hold.
- The maximum number of elements that a scatter/gather list can have. This patch updates the NX driver to perform several hyper calls if needed in order to always respect the length limits for scatter/gather lists. Reviewed-by: Fionnuala Gunter Reviewed-by: Joel Schopp Reviewed-by: Joy Latten Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-sha256.c | 110 ++++++++++++++++++++++----------------- drivers/crypto/nx/nx-sha512.c | 117 ++++++++++++++++++++++++------------------ 2 files changed, 132 insertions(+), 95 deletions(-) diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 67024f2f0b78..254b01abef64 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -55,70 +55,86 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg; - u64 to_process, leftover; + u64 to_process, leftover, total; + u32 max_sg_len; int rc = 0; - if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { - /* we've hit the nx chip previously and we're updating again, - * so copy over the partial digest */ - memcpy(csbcpb->cpb.sha256.input_partial_digest, - csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); - } - /* 2 cases for total data len: - * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0 - * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover + * 1: < SHA256_BLOCK_SIZE: copy into state, return 0 + * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover */ - if (len + sctx->count < SHA256_BLOCK_SIZE) { + total = sctx->count + len; + if (total < SHA256_BLOCK_SIZE) { memcpy(sctx->buf + sctx->count, data, len); sctx->count += len; goto out; } - /* to_process: the SHA256_BLOCK_SIZE data chunk to process in this - * update */ - to_process = (sctx->count + len) & ~(SHA256_BLOCK_SIZE - 1); - leftover = (sctx->count + len) & (SHA256_BLOCK_SIZE - 1); - - if (sctx->count) { - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, - sctx->count, nx_ctx->ap->sglen); - in_sg = nx_build_sg_list(in_sg, (u8 *)data, + in_sg = nx_ctx->in_sg; + max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); + + do { + /* + * to_process: the SHA256_BLOCK_SIZE data chunk to process in + * this update. This value is also restricted by the sg list + * limits. + */ + to_process = min_t(u64, total, nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + to_process = to_process & ~(SHA256_BLOCK_SIZE - 1); + leftover = total - to_process; + + if (sctx->count) { + in_sg = nx_build_sg_list(nx_ctx->in_sg, + (u8 *) sctx->buf, + sctx->count, max_sg_len); + } + in_sg = nx_build_sg_list(in_sg, (u8 *) data, to_process - sctx->count, - nx_ctx->ap->sglen); - nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * - sizeof(struct nx_sg); - } else { - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, - to_process, nx_ctx->ap->sglen); + max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); - } - NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { + /* + * we've hit the nx chip previously and we're updating + * again, so copy over the partial digest. 
+ */ + memcpy(csbcpb->cpb.sha256.input_partial_digest, + csbcpb->cpb.sha256.message_digest, + SHA256_DIGEST_SIZE); + } - if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { - rc = -EINVAL; - goto out; - } + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } - rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, - desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); - if (rc) - goto out; + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; - atomic_inc(&(nx_ctx->stats->sha256_ops)); + atomic_inc(&(nx_ctx->stats->sha256_ops)); + csbcpb->cpb.sha256.message_bit_length += (u64) + (csbcpb->cpb.sha256.spbc * 8); + + /* everything after the first update is continuation */ + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + + total -= to_process; + data += to_process; + sctx->count = 0; + in_sg = nx_ctx->in_sg; + } while (leftover >= SHA256_BLOCK_SIZE); /* copy the leftover back into the state struct */ if (leftover) - memcpy(sctx->buf, data + len - leftover, leftover); + memcpy(sctx->buf, data, leftover); sctx->count = leftover; - - csbcpb->cpb.sha256.message_bit_length += (u64) - (csbcpb->cpb.sha256.spbc * 8); - - /* everything after the first update is continuation */ - NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; out: return rc; } @@ -129,8 +145,10 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; + u32 max_sg_len; int rc; + max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { /* we've hit the nx chip previously, now we're finalizing, @@ -146,9 +164,9 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8); in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, - sctx->count, nx_ctx->ap->sglen); + sctx->count, max_sg_len); out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE, - nx_ctx->ap->sglen); + max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index 08eee1122349..2d6d91359833 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -55,72 +55,88 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg; - u64 to_process, leftover, spbc_bits; + u64 to_process, leftover, total, spbc_bits; + u32 max_sg_len; int rc = 0; - if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { - /* we've hit the nx chip previously and we're updating again, - * so copy over the partial digest */ - memcpy(csbcpb->cpb.sha512.input_partial_digest, - csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); - } - /* 2 cases for total data len: - * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0 - * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover + * 1: < SHA512_BLOCK_SIZE: copy into state, return 0 + * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover */ - if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) { + total = sctx->count[0] + len; + if (total < SHA512_BLOCK_SIZE) { memcpy(sctx->buf + sctx->count[0], data, len); sctx->count[0] += 
len; goto out; } - /* to_process: the SHA512_BLOCK_SIZE data chunk to process in this - * update */ - to_process = (sctx->count[0] + len) & ~(SHA512_BLOCK_SIZE - 1); - leftover = (sctx->count[0] + len) & (SHA512_BLOCK_SIZE - 1); - - if (sctx->count[0]) { - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, - sctx->count[0], nx_ctx->ap->sglen); - in_sg = nx_build_sg_list(in_sg, (u8 *)data, + in_sg = nx_ctx->in_sg; + max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); + + do { + /* + * to_process: the SHA512_BLOCK_SIZE data chunk to process in + * this update. This value is also restricted by the sg list + * limits. + */ + to_process = min_t(u64, total, nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + to_process = to_process & ~(SHA512_BLOCK_SIZE - 1); + leftover = total - to_process; + + if (sctx->count[0]) { + in_sg = nx_build_sg_list(nx_ctx->in_sg, + (u8 *) sctx->buf, + sctx->count[0], max_sg_len); + } + in_sg = nx_build_sg_list(in_sg, (u8 *) data, to_process - sctx->count[0], - nx_ctx->ap->sglen); + max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); - } else { - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, - to_process, nx_ctx->ap->sglen); - nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * - sizeof(struct nx_sg); - } - NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { + /* + * we've hit the nx chip previously and we're updating + * again, so copy over the partial digest. + */ + memcpy(csbcpb->cpb.sha512.input_partial_digest, + csbcpb->cpb.sha512.message_digest, + SHA512_DIGEST_SIZE); + } - if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { - rc = -EINVAL; - goto out; - } - - rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, - desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); - if (rc) - goto out; + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->sha512_ops)); + spbc_bits = csbcpb->cpb.sha512.spbc * 8; + csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits; + if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits) + csbcpb->cpb.sha512.message_bit_length_hi++; + + /* everything after the first update is continuation */ + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; - atomic_inc(&(nx_ctx->stats->sha512_ops)); + total -= to_process; + data += to_process; + sctx->count[0] = 0; + in_sg = nx_ctx->in_sg; + } while (leftover >= SHA512_BLOCK_SIZE); /* copy the leftover back into the state struct */ if (leftover) - memcpy(sctx->buf, data + len - leftover, leftover); + memcpy(sctx->buf, data, leftover); sctx->count[0] = leftover; - - spbc_bits = csbcpb->cpb.sha512.spbc * 8; - csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits; - if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits) - csbcpb->cpb.sha512.message_bit_length_hi++; - - /* everything after the first update is continuation */ - NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; out: return rc; } @@ -131,9 +147,12 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; + u32 max_sg_len; u64 count0; int rc; + max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); + if (NX_CPB_FDM(csbcpb) & 
NX_FDM_CONTINUATION) { /* we've hit the nx chip previously, now we're finalizing, * so copy over the partial digest */ @@ -152,9 +171,9 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) csbcpb->cpb.sha512.message_bit_length_hi++; in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0], - nx_ctx->ap->sglen); + max_sg_len); out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE, - nx_ctx->ap->sglen); + max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); -- cgit v1.2.3 From 4390f77b37b89d32236676a48997d83489feff4e Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Mon, 5 Aug 2013 20:17:18 +0530 Subject: hwrng: omap - Use module_platform_driver macro module_platform_driver() makes the code simpler. Using the macro in the driver. Signed-off-by: Lokesh Vutla Signed-off-by: Herbert Xu --- drivers/char/hw_random/omap-rng.c | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 6843ec87b98b..3e9a7ecbd5e5 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -198,9 +198,6 @@ static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume); #endif -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:omap_rng"); - static struct platform_driver omap_rng_driver = { .driver = { .name = "omap_rng", @@ -211,18 +208,7 @@ static struct platform_driver omap_rng_driver = { .remove = __exit_p(omap_rng_remove), }; -static int __init omap_rng_init(void) -{ - return platform_driver_register(&omap_rng_driver); -} - -static void __exit omap_rng_exit(void) -{ - platform_driver_unregister(&omap_rng_driver); -} - -module_init(omap_rng_init); -module_exit(omap_rng_exit); - +module_platform_driver(omap_rng_driver); +MODULE_ALIAS("platform:omap_rng"); MODULE_AUTHOR("Deepak Saxena (and others)"); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From d52dc81e14c648fb1015d51c658347127d624173 Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Mon, 5 Aug 2013 20:17:19 +0530 Subject: hwrng: omap - Convert to devm_kzalloc() Use devm_kzalloc() to make cleanup paths simpler. 
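As a minimal sketch of the pattern (foo_priv and the probe body are hypothetical placeholders): memory obtained with devm_kzalloc() is tied to the device and freed automatically on detach or probe failure, so the error paths need no kfree():

#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv { void __iomem *base; };	/* hypothetical per-device state */

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	/* managed allocation: lifetime follows the device, not the caller */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;			/* nothing to unwind */

	platform_set_drvdata(pdev, priv);
	return 0;				/* later failures need no kfree() */
}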
Signed-off-by: Lokesh Vutla Signed-off-by: Herbert Xu --- drivers/char/hw_random/omap-rng.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 3e9a7ecbd5e5..9c19396b940f 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -109,7 +109,8 @@ static int omap_rng_probe(struct platform_device *pdev) struct omap_rng_private_data *priv; int ret; - priv = kzalloc(sizeof(struct omap_rng_private_data), GFP_KERNEL); + priv = devm_kzalloc(&pdev->dev, sizeof(struct omap_rng_private_data), + GFP_KERNEL); if (!priv) { dev_err(&pdev->dev, "could not allocate memory\n"); return -ENOMEM; @@ -144,8 +145,6 @@ err_register: priv->base = NULL; pm_runtime_disable(&pdev->dev); err_ioremap: - kfree(priv); - return ret; } @@ -160,10 +159,6 @@ static int __exit omap_rng_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - release_mem_region(priv->mem_res->start, resource_size(priv->mem_res)); - - kfree(priv); - return 0; } -- cgit v1.2.3 From 7256c476f79d2b50a32cecb0330362afd2fa0b50 Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Mon, 5 Aug 2013 20:17:20 +0530 Subject: hwrng: omap - Remove duplicated function call platform_set_drvdata() is called twice in the driver probe. Remove the duplicated call. Signed-off-by: Lokesh Vutla Signed-off-by: Herbert Xu --- drivers/char/hw_random/omap-rng.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 9c19396b940f..5a2ab3b3cd78 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -125,7 +125,6 @@ static int omap_rng_probe(struct platform_device *pdev) ret = PTR_ERR(priv->base); goto err_ioremap; } - platform_set_drvdata(pdev, priv); pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); -- cgit v1.2.3 From c903970ceaaaddb665d356d078372f0985d5948e Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Mon, 5 Aug 2013 20:17:21 +0530 Subject: hwrng: omap - Add device tree support Add Device Tree support to the omap-rng driver. Currently, only support for OMAP2 and OMAP3 is being added, but support for OMAP4 and OMAP5 will be added in a subsequent patch.
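A minimal sketch of the driver-side wiring this adds (foo_rng names are hypothetical; the compatible string is the one from the diff below). Note that of_match_ptr() evaluates to NULL when CONFIG_OF is disabled, so the same driver still binds by platform name on non-DT systems:

#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id foo_rng_of_match[] = {
	{ .compatible = "ti,omap2-rng" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, foo_rng_of_match);

static struct platform_driver foo_rng_driver = {
	.driver = {
		.name = "foo_rng",
		/* NULL without CONFIG_OF; the name match still works */
		.of_match_table = of_match_ptr(foo_rng_of_match),
	},
};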
Signed-off-by: Lokesh Vutla Signed-off-by: Herbert Xu --- drivers/char/hw_random/omap-rng.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 5a2ab3b3cd78..3076c9de7a02 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -24,6 +24,9 @@ #include #include #include +#include +#include +#include #include @@ -104,6 +107,14 @@ static struct hwrng omap_rng_ops = { .data_read = omap_rng_data_read, }; +#if defined(CONFIG_OF) +static const struct of_device_id omap_rng_of_match[] = { + { .compatible = "ti,omap2-rng" }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_rng_of_match); +#endif + static int omap_rng_probe(struct platform_device *pdev) { struct omap_rng_private_data *priv; @@ -197,6 +208,7 @@ static struct platform_driver omap_rng_driver = { .name = "omap_rng", .owner = THIS_MODULE, .pm = OMAP_RNG_PM, + .of_match_table = of_match_ptr(omap_rng_of_match), }, .probe = omap_rng_probe, .remove = __exit_p(omap_rng_remove), -- cgit v1.2.3 From 674ee08f28f32032c40c6a9d5376edf4158bfbbc Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Mon, 5 Aug 2013 20:17:22 +0530 Subject: ARM: OMAP2+: Only manually add hwmod data when DT not used. The omap_init_rng() routine in devices.c only needs to be called when there is no device tree present. Cc: Tony Lindgren Signed-off-by: Lokesh Vutla Signed-off-by: Herbert Xu --- arch/arm/mach-omap2/devices.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index aef96e45cb20..ba8d06598e55 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -571,12 +571,12 @@ static int __init omap2_init_devices(void) omap_init_mcspi(); omap_init_sham(); omap_init_aes(); + omap_init_rng(); } else { /* These can be removed when bindings are done */ omap_init_wl12xx_of(); } omap_init_sti(); - omap_init_rng(); omap_init_vout(); return 0; -- cgit v1.2.3 From e83872c989fb704748956c0bf1b69874a35492c6 Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Mon, 5 Aug 2013 20:17:23 +0530 Subject: hwrng: omap - Add OMAP4 TRNG support Add support for the OMAP4 version of the TRNG module that is present on OMAP4, AM33xx and OMAP5 SoCs. The modules have several differences, including register offsets, output size, how the RNG is triggered and how the FROs are configured. To handle these differences, a platform_data structure is defined that contains routine pointers and register offsets. OMAP2 specific routines are prefixed with 'omap2_' and OMAP4 specific routines are prefixed with 'omap4_'. Note: a few hard-coded values are taken from the TI AM33xx SDK. Signed-off-by: Lokesh Vutla Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 6 +- drivers/char/hw_random/omap-rng.c | 352 ++++++++++++++++++++++++++++++++------ 2 files changed, 305 insertions(+), 53 deletions(-) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 2f9dbf7568fb..5d7be1d65d92 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -153,12 +153,12 @@ config HW_RANDOM_IXP4XX config HW_RANDOM_OMAP tristate "OMAP Random Number Generator support" - depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP2) + depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP2PLUS) default HW_RANDOM ---help--- This driver provides kernel-side support for the Random Number - Generator hardware found on OMAP16xx and OMAP24xx multimedia - processors.
+ Generator hardware found on OMAP16xx, OMAP2/3/4/5 and AM33xx/AM43xx + multimedia processors. To compile this driver as a module, choose M here: the module will be called omap-rng. diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 3076c9de7a02..f3f71425e7e2 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -27,57 +27,138 @@ #include #include #include +#include #include -#define RNG_OUT_REG 0x00 /* Output register */ -#define RNG_STAT_REG 0x04 /* Status register - [0] = STAT_BUSY */ -#define RNG_ALARM_REG 0x24 /* Alarm register - [7:0] = ALARM_COUNTER */ -#define RNG_CONFIG_REG 0x28 /* Configuration register - [11:6] = RESET_COUNT - [5:3] = RING2_DELAY - [2:0] = RING1_DELAY */ -#define RNG_REV_REG 0x3c /* Revision register - [7:0] = REV_NB */ -#define RNG_MASK_REG 0x40 /* Mask and reset register - [2] = IT_EN - [1] = SOFTRESET - [0] = AUTOIDLE */ -#define RNG_SYSSTATUS 0x44 /* System status - [0] = RESETDONE */ +#define RNG_REG_STATUS_RDY (1 << 0) + +#define RNG_REG_INTACK_RDY_MASK (1 << 0) +#define RNG_REG_INTACK_SHUTDOWN_OFLO_MASK (1 << 1) +#define RNG_SHUTDOWN_OFLO_MASK (1 << 1) + +#define RNG_CONTROL_STARTUP_CYCLES_SHIFT 16 +#define RNG_CONTROL_STARTUP_CYCLES_MASK (0xffff << 16) +#define RNG_CONTROL_ENABLE_TRNG_SHIFT 10 +#define RNG_CONTROL_ENABLE_TRNG_MASK (1 << 10) + +#define RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT 16 +#define RNG_CONFIG_MAX_REFIL_CYCLES_MASK (0xffff << 16) +#define RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT 0 +#define RNG_CONFIG_MIN_REFIL_CYCLES_MASK (0xff << 0) + +#define RNG_CONTROL_STARTUP_CYCLES 0xff +#define RNG_CONFIG_MIN_REFIL_CYCLES 0x21 +#define RNG_CONFIG_MAX_REFIL_CYCLES 0x22 + +#define RNG_ALARMCNT_ALARM_TH_SHIFT 0x0 +#define RNG_ALARMCNT_ALARM_TH_MASK (0xff << 0) +#define RNG_ALARMCNT_SHUTDOWN_TH_SHIFT 16 +#define RNG_ALARMCNT_SHUTDOWN_TH_MASK (0x1f << 16) +#define RNG_ALARM_THRESHOLD 0xff +#define RNG_SHUTDOWN_THRESHOLD 0x4 + +#define RNG_REG_FROENABLE_MASK 0xffffff +#define RNG_REG_FRODETUNE_MASK 0xffffff + +#define OMAP2_RNG_OUTPUT_SIZE 0x4 +#define OMAP4_RNG_OUTPUT_SIZE 0x8 + +enum { + RNG_OUTPUT_L_REG = 0, + RNG_OUTPUT_H_REG, + RNG_STATUS_REG, + RNG_INTMASK_REG, + RNG_INTACK_REG, + RNG_CONTROL_REG, + RNG_CONFIG_REG, + RNG_ALARMCNT_REG, + RNG_FROENABLE_REG, + RNG_FRODETUNE_REG, + RNG_ALARMMASK_REG, + RNG_ALARMSTOP_REG, + RNG_REV_REG, + RNG_SYSCONFIG_REG, +}; + +static const u16 reg_map_omap2[] = { + [RNG_OUTPUT_L_REG] = 0x0, + [RNG_STATUS_REG] = 0x4, + [RNG_CONFIG_REG] = 0x28, + [RNG_REV_REG] = 0x3c, + [RNG_SYSCONFIG_REG] = 0x40, +}; + +static const u16 reg_map_omap4[] = { + [RNG_OUTPUT_L_REG] = 0x0, + [RNG_OUTPUT_H_REG] = 0x4, + [RNG_STATUS_REG] = 0x8, + [RNG_INTMASK_REG] = 0xc, + [RNG_INTACK_REG] = 0x10, + [RNG_CONTROL_REG] = 0x14, + [RNG_CONFIG_REG] = 0x18, + [RNG_ALARMCNT_REG] = 0x1c, + [RNG_FROENABLE_REG] = 0x20, + [RNG_FRODETUNE_REG] = 0x24, + [RNG_ALARMMASK_REG] = 0x28, + [RNG_ALARMSTOP_REG] = 0x2c, + [RNG_REV_REG] = 0x1FE0, + [RNG_SYSCONFIG_REG] = 0x1FE4, +}; +struct omap_rng_dev; /** - * struct omap_rng_private_data - RNG IP block-specific data - * @base: virtual address of the beginning of the RNG IP block registers - * @mem_res: struct resource * for the IP block registers physical memory + * struct omap_rng_pdata - RNG IP block-specific data + * @regs: Pointer to the register offsets structure. + * @data_size: No. of bytes in RNG output. + * @data_present: Callback to determine if data is available. + * @init: Callback for IP specific initialization sequence. 
+ * @cleanup: Callback for IP specific cleanup sequence. */ -struct omap_rng_private_data { - void __iomem *base; - struct resource *mem_res; +struct omap_rng_pdata { + u16 *regs; + u32 data_size; + u32 (*data_present)(struct omap_rng_dev *priv); + int (*init)(struct omap_rng_dev *priv); + void (*cleanup)(struct omap_rng_dev *priv); }; -static inline u32 omap_rng_read_reg(struct omap_rng_private_data *priv, int reg) +struct omap_rng_dev { + void __iomem *base; + struct device *dev; + const struct omap_rng_pdata *pdata; +}; + +static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg) +{ + return __raw_readl(priv->base + priv->pdata->regs[reg]); +} + +static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg, + u32 val) { - return __raw_readl(priv->base + reg); + __raw_writel(val, priv->base + priv->pdata->regs[reg]); } -static inline void omap_rng_write_reg(struct omap_rng_private_data *priv, - int reg, u32 val) +static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv) { - __raw_writel(val, priv->base + reg); + return omap_rng_read(priv, RNG_STATUS_REG) ? 0 : 1; +} + +static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv) +{ + return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY; } static int omap_rng_data_present(struct hwrng *rng, int wait) { - struct omap_rng_private_data *priv; + struct omap_rng_dev *priv; int data, i; - priv = (struct omap_rng_private_data *)rng->priv; + priv = (struct omap_rng_dev *)rng->priv; for (i = 0; i < 20; i++) { - data = omap_rng_read_reg(priv, RNG_STAT_REG) ? 0 : 1; + data = priv->pdata->data_present(priv); if (data || !wait) break; /* RNG produces data fast enough (2+ MBit/sec, even @@ -92,36 +173,202 @@ static int omap_rng_data_present(struct hwrng *rng, int wait) static int omap_rng_data_read(struct hwrng *rng, u32 *data) { - struct omap_rng_private_data *priv; + struct omap_rng_dev *priv; + u32 data_size, i; + + priv = (struct omap_rng_dev *)rng->priv; + data_size = priv->pdata->data_size; + + for (i = 0; i < data_size / sizeof(u32); i++) + data[i] = omap_rng_read(priv, RNG_OUTPUT_L_REG + i); + + if (priv->pdata->regs[RNG_INTACK_REG]) + omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK); + return data_size; +} + +static int omap4_rng_init(struct omap_rng_dev *priv) +{ + u32 val; + + /* Return if RNG is already running. 
*/ + if (omap_rng_read(priv, RNG_CONFIG_REG) & RNG_CONTROL_ENABLE_TRNG_MASK) + return 0; + + val = RNG_CONFIG_MIN_REFIL_CYCLES << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT; + val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT; + omap_rng_write(priv, RNG_CONFIG_REG, val); + + omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0); + omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK); + val = RNG_ALARM_THRESHOLD << RNG_ALARMCNT_ALARM_TH_SHIFT; + val |= RNG_SHUTDOWN_THRESHOLD << RNG_ALARMCNT_SHUTDOWN_TH_SHIFT; + omap_rng_write(priv, RNG_ALARMCNT_REG, val); + + val = RNG_CONTROL_STARTUP_CYCLES << RNG_CONTROL_STARTUP_CYCLES_SHIFT; + val |= RNG_CONTROL_ENABLE_TRNG_MASK; + omap_rng_write(priv, RNG_CONTROL_REG, val); + + return 0; +} + +static void omap4_rng_cleanup(struct omap_rng_dev *priv) +{ + int val; + + val = omap_rng_read(priv, RNG_CONTROL_REG); + val &= ~RNG_CONTROL_ENABLE_TRNG_MASK; + omap_rng_write(priv, RNG_CONFIG_REG, val); +} + +static int omap2_rng_init(struct omap_rng_dev *priv) +{ + omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x1); + return 0; +} + +static void omap2_rng_cleanup(struct omap_rng_dev *priv) +{ + omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x0); +} + +static int omap_rng_init(struct hwrng *rng) +{ + struct omap_rng_dev *priv; + + priv = (struct omap_rng_dev *)rng->priv; + return priv->pdata->init(priv); +} + +static void omap_rng_cleanup(struct hwrng *rng) +{ + struct omap_rng_dev *priv; + + priv = (struct omap_rng_dev *)rng->priv; + priv->pdata->cleanup(priv); +} + +static irqreturn_t omap4_rng_irq(int irq, void *dev_id) +{ + struct omap_rng_dev *priv = dev_id; + u32 fro_detune, fro_enable; + + /* + * Interrupt raised by a fro shutdown threshold, do the following: + * 1. Clear the alarm events. + * 2. De tune the FROs which are shutdown. + * 3. Re enable the shutdown FROs. 
+ */ + omap_rng_write(priv, RNG_ALARMMASK_REG, 0x0); + omap_rng_write(priv, RNG_ALARMSTOP_REG, 0x0); + + fro_enable = omap_rng_read(priv, RNG_FROENABLE_REG); + fro_detune = ~fro_enable & RNG_REG_FRODETUNE_MASK; + fro_detune = fro_detune | omap_rng_read(priv, RNG_FRODETUNE_REG); + fro_enable = RNG_REG_FROENABLE_MASK; - priv = (struct omap_rng_private_data *)rng->priv; + omap_rng_write(priv, RNG_FRODETUNE_REG, fro_detune); + omap_rng_write(priv, RNG_FROENABLE_REG, fro_enable); - *data = omap_rng_read_reg(priv, RNG_OUT_REG); + omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_SHUTDOWN_OFLO_MASK); - return sizeof(u32); + return IRQ_HANDLED; } static struct hwrng omap_rng_ops = { .name = "omap", .data_present = omap_rng_data_present, .data_read = omap_rng_data_read, + .init = omap_rng_init, + .cleanup = omap_rng_cleanup, +}; + +static struct omap_rng_pdata omap2_rng_pdata = { + .regs = (u16 *)reg_map_omap2, + .data_size = OMAP2_RNG_OUTPUT_SIZE, + .data_present = omap2_rng_data_present, + .init = omap2_rng_init, + .cleanup = omap2_rng_cleanup, }; #if defined(CONFIG_OF) +static struct omap_rng_pdata omap4_rng_pdata = { + .regs = (u16 *)reg_map_omap4, + .data_size = OMAP4_RNG_OUTPUT_SIZE, + .data_present = omap4_rng_data_present, + .init = omap4_rng_init, + .cleanup = omap4_rng_cleanup, +}; + static const struct of_device_id omap_rng_of_match[] = { - { .compatible = "ti,omap2-rng" }, + { + .compatible = "ti,omap2-rng", + .data = &omap2_rng_pdata, + }, + { + .compatible = "ti,omap4-rng", + .data = &omap4_rng_pdata, + }, {}, }; MODULE_DEVICE_TABLE(of, omap_rng_of_match); + +static int of_get_omap_rng_device_details(struct omap_rng_dev *priv, + struct platform_device *pdev) +{ + const struct of_device_id *match; + struct device *dev = &pdev->dev; + int irq, err; + + match = of_match_device(of_match_ptr(omap_rng_of_match), dev); + if (!match) { + dev_err(dev, "no compatible OF match\n"); + return -EINVAL; + } + priv->pdata = match->data; + + if (of_device_is_compatible(dev->of_node, "ti,omap4-rng")) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "%s: error getting IRQ resource - %d\n", + __func__, irq); + return irq; + } + + err = devm_request_irq(dev, irq, omap4_rng_irq, + IRQF_TRIGGER_NONE, dev_name(dev), priv); + if (err) { + dev_err(dev, "unable to request irq %d, err = %d\n", + irq, err); + return err; + } + omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK); + } + return 0; +} +#else +static int of_get_omap_rng_device_details(struct omap_rng_dev *omap_rng, + struct platform_device *pdev) +{ + return -EINVAL; +} #endif +static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng) +{ + /* Only OMAP2/3 can be non-DT */ + omap_rng->pdata = &omap2_rng_pdata; + return 0; +} + static int omap_rng_probe(struct platform_device *pdev) { - struct omap_rng_private_data *priv; + struct omap_rng_dev *priv; + struct resource *res; + struct device *dev = &pdev->dev; int ret; - priv = devm_kzalloc(&pdev->dev, sizeof(struct omap_rng_private_data), - GFP_KERNEL); + priv = devm_kzalloc(dev, sizeof(struct omap_rng_dev), GFP_KERNEL); if (!priv) { dev_err(&pdev->dev, "could not allocate memory\n"); return -ENOMEM; @@ -129,9 +376,10 @@ static int omap_rng_probe(struct platform_device *pdev) omap_rng_ops.priv = (unsigned long)priv; platform_set_drvdata(pdev, priv); + priv->dev = dev; - priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + 
priv->base = devm_ioremap_resource(dev, res); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); goto err_ioremap; @@ -140,14 +388,17 @@ pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); + ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) : + get_omap_rng_device_details(priv); + if (ret) + goto err_ioremap; + ret = hwrng_register(&omap_rng_ops); if (ret) goto err_register; dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n", - omap_rng_read_reg(priv, RNG_REV_REG)); - - omap_rng_write_reg(priv, RNG_MASK_REG, 0x1); + omap_rng_read(priv, RNG_REV_REG)); return 0; @@ -155,16 +406,17 @@ err_register: priv->base = NULL; pm_runtime_disable(&pdev->dev); err_ioremap: + dev_err(dev, "initialization failed.\n"); return ret; } static int __exit omap_rng_remove(struct platform_device *pdev) { - struct omap_rng_private_data *priv = platform_get_drvdata(pdev); + struct omap_rng_dev *priv = platform_get_drvdata(pdev); hwrng_unregister(&omap_rng_ops); - omap_rng_write_reg(priv, RNG_MASK_REG, 0x0); + priv->pdata->cleanup(priv); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -176,9 +428,9 @@ static int omap_rng_suspend(struct device *dev) { - struct omap_rng_private_data *priv = dev_get_drvdata(dev); + struct omap_rng_dev *priv = dev_get_drvdata(dev); - omap_rng_write_reg(priv, RNG_MASK_REG, 0x0); + priv->pdata->cleanup(priv); pm_runtime_put_sync(dev); return 0; @@ -186,10 +438,10 @@ static int omap_rng_resume(struct device *dev) { - struct omap_rng_private_data *priv = dev_get_drvdata(dev); + struct omap_rng_dev *priv = dev_get_drvdata(dev); pm_runtime_get_sync(dev); - omap_rng_write_reg(priv, RNG_MASK_REG, 0x1); + priv->pdata->init(priv); return 0; } -- cgit v1.2.3 From cc76daf793287b2c51b0b66153072b23cdb51c1c Mon Sep 17 00:00:00 2001 From: Fionnuala Gunter Date: Wed, 7 Aug 2013 18:15:50 -0500 Subject: crypto: nx - saves chaining value from co-processor This patch fixes a bug that is triggered when cts(cbc(aes)) is used with the nx-crypto driver on input larger than 32 bytes. The chaining value from the co-processor was not being saved. This value is needed because it is used as the IV by cts(cbc(aes)). Signed-off-by: Fionnuala Gunter Reviewed-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-cbc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c index 35d483f8db66..a2f99a910e4a 100644 --- a/drivers/crypto/nx/nx-aes-cbc.c +++ b/drivers/crypto/nx/nx-aes-cbc.c @@ -95,6 +95,7 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc, if (rc) goto out; + memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE); atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(csbcpb->csb.processed_byte_count, &(nx_ctx->stats->aes_bytes)); -- cgit v1.2.3 From 75be45683d5ff3f859f9cb2082574f96f8fd768f Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 9 Aug 2013 14:53:50 +0900 Subject: hwrng: picoxcell - Staticize 'rng_dev' 'rng_dev' is used only in this file. Fix the following sparse warning: drivers/char/hw_random/picoxcell-rng.c:36:15: warning: symbol 'rng_dev' was not declared. Should it be static?
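The fix itself is the one-word change shown in the diff below; as a general sketch, any file-scope symbol with no users outside its translation unit should carry static linkage, which silences the sparse warning and keeps the symbol out of the global namespace:

/* before: exported into the global namespace, triggering the warning */
struct device *rng_dev;

/* after: visible only within this translation unit */
static struct device *rng_dev;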
Signed-off-by: Jingoo Han Signed-off-by: Herbert Xu --- drivers/char/hw_random/picoxcell-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c index 973b95113edf..3d4c2293c6f5 100644 --- a/drivers/char/hw_random/picoxcell-rng.c +++ b/drivers/char/hw_random/picoxcell-rng.c @@ -33,7 +33,7 @@ static void __iomem *rng_base; static struct clk *rng_clk; -struct device *rng_dev; +static struct device *rng_dev; static inline u32 picoxcell_trng_read_csr(void) { -- cgit v1.2.3 From e45f1d1879650accd79a9a2b2bc1afed2aedc97c Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 9 Aug 2013 16:57:48 +0900 Subject: crypto: sahara - Staticize local symbol This local symbol is used only in this file. Fix the following sparse warnings: drivers/crypto/sahara.c:420:6: warning: symbol 'sahara_watchdog' was not declared. Should it be static? Signed-off-by: Jingoo Han Signed-off-by: Herbert Xu --- drivers/crypto/sahara.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index c3dc1c04a5df..b076d8110785 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -417,7 +417,7 @@ static void sahara_aes_done_task(unsigned long data) dev->req->base.complete(&dev->req->base, dev->error); } -void sahara_watchdog(unsigned long data) +static void sahara_watchdog(unsigned long data) { struct sahara_dev *dev = (struct sahara_dev *)data; unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS); -- cgit v1.2.3 From 3a4eac799b7b8afa00e621a88092a93dd4de2506 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 9 Aug 2013 16:59:57 +0900 Subject: crypto: crypto4xx - Staticize local symbols These local symbols are used only in this file. Fix the following sparse warnings: drivers/crypto/amcc/crypto4xx_alg.c:35:6: warning: symbol 'set_dynamic_sa_command_0' was not declared. Should it be static? drivers/crypto/amcc/crypto4xx_alg.c:55:6: warning: symbol 'set_dynamic_sa_command_1' was not declared. Should it be static? 
Signed-off-by: Jingoo Han Signed-off-by: Herbert Xu --- drivers/crypto/amcc/crypto4xx_alg.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index a33243c17b00..4afca3968773 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -32,10 +32,10 @@ #include "crypto4xx_sa.h" #include "crypto4xx_core.h" -void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h, - u32 save_iv, u32 ld_h, u32 ld_iv, u32 hdr_proc, - u32 h, u32 c, u32 pad_type, u32 op_grp, u32 op, - u32 dir) +static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h, + u32 save_iv, u32 ld_h, u32 ld_iv, + u32 hdr_proc, u32 h, u32 c, u32 pad_type, + u32 op_grp, u32 op, u32 dir) { sa->sa_command_0.w = 0; sa->sa_command_0.bf.save_hash_state = save_h; @@ -52,9 +52,10 @@ void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h, sa->sa_command_0.bf.dir = dir; } -void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc, - u32 cfb, u32 esn, u32 sn_mask, u32 mute, - u32 cp_pad, u32 cp_pay, u32 cp_hdr) +static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, + u32 hmac_mc, u32 cfb, u32 esn, + u32 sn_mask, u32 mute, u32 cp_pad, + u32 cp_pay, u32 cp_hdr) { sa->sa_command_1.w = 0; sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2; -- cgit v1.2.3 From f22d08111a1d23f7432ee8d9c2dd637deb6963bd Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 10 Aug 2013 18:01:11 -0700 Subject: crypto: make tables used from assembler __visible Tables used from assembler should be marked __visible to let the compiler know. Signed-off-by: Andi Kleen Signed-off-by: Herbert Xu --- arch/x86/crypto/camellia_glue.c | 16 ++++++++-------- crypto/aes_generic.c | 8 ++++---- crypto/cast_common.c | 8 ++++---- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c index 5cb86ccd4acb..fa4c1b9d9ea9 100644 --- a/arch/x86/crypto/camellia_glue.c +++ b/arch/x86/crypto/camellia_glue.c @@ -62,7 +62,7 @@ static void camellia_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) } /* camellia sboxes */ -const u64 camellia_sp10011110[256] = { +__visible const u64 camellia_sp10011110[256] = { 0x7000007070707000ULL, 0x8200008282828200ULL, 0x2c00002c2c2c2c00ULL, 0xec0000ecececec00ULL, 0xb30000b3b3b3b300ULL, 0x2700002727272700ULL, 0xc00000c0c0c0c000ULL, 0xe50000e5e5e5e500ULL, 0xe40000e4e4e4e400ULL, @@ -151,7 +151,7 @@ const u64 camellia_sp10011110[256] = { 0x9e00009e9e9e9e00ULL, }; -const u64 camellia_sp22000222[256] = { +__visible const u64 camellia_sp22000222[256] = { 0xe0e0000000e0e0e0ULL, 0x0505000000050505ULL, 0x5858000000585858ULL, 0xd9d9000000d9d9d9ULL, 0x6767000000676767ULL, 0x4e4e0000004e4e4eULL, 0x8181000000818181ULL, 0xcbcb000000cbcbcbULL, 0xc9c9000000c9c9c9ULL, @@ -240,7 +240,7 @@ const u64 camellia_sp22000222[256] = { 0x3d3d0000003d3d3dULL, }; -const u64 camellia_sp03303033[256] = { +__visible const u64 camellia_sp03303033[256] = { 0x0038380038003838ULL, 0x0041410041004141ULL, 0x0016160016001616ULL, 0x0076760076007676ULL, 0x00d9d900d900d9d9ULL, 0x0093930093009393ULL, 0x0060600060006060ULL, 0x00f2f200f200f2f2ULL, 0x0072720072007272ULL, @@ -329,7 +329,7 @@ const u64 camellia_sp03303033[256] = { 0x004f4f004f004f4fULL, }; -const u64 camellia_sp00444404[256] = { +__visible const u64 camellia_sp00444404[256] = { 0x0000707070700070ULL, 0x00002c2c2c2c002cULL, 0x0000b3b3b3b300b3ULL, 
0x0000c0c0c0c000c0ULL, 0x0000e4e4e4e400e4ULL, 0x0000575757570057ULL, 0x0000eaeaeaea00eaULL, 0x0000aeaeaeae00aeULL, 0x0000232323230023ULL, @@ -418,7 +418,7 @@ const u64 camellia_sp00444404[256] = { 0x00009e9e9e9e009eULL, }; -const u64 camellia_sp02220222[256] = { +__visible const u64 camellia_sp02220222[256] = { 0x00e0e0e000e0e0e0ULL, 0x0005050500050505ULL, 0x0058585800585858ULL, 0x00d9d9d900d9d9d9ULL, 0x0067676700676767ULL, 0x004e4e4e004e4e4eULL, 0x0081818100818181ULL, 0x00cbcbcb00cbcbcbULL, 0x00c9c9c900c9c9c9ULL, @@ -507,7 +507,7 @@ const u64 camellia_sp02220222[256] = { 0x003d3d3d003d3d3dULL, }; -const u64 camellia_sp30333033[256] = { +__visible const u64 camellia_sp30333033[256] = { 0x3800383838003838ULL, 0x4100414141004141ULL, 0x1600161616001616ULL, 0x7600767676007676ULL, 0xd900d9d9d900d9d9ULL, 0x9300939393009393ULL, 0x6000606060006060ULL, 0xf200f2f2f200f2f2ULL, 0x7200727272007272ULL, @@ -596,7 +596,7 @@ const u64 camellia_sp30333033[256] = { 0x4f004f4f4f004f4fULL, }; -const u64 camellia_sp44044404[256] = { +__visible const u64 camellia_sp44044404[256] = { 0x7070007070700070ULL, 0x2c2c002c2c2c002cULL, 0xb3b300b3b3b300b3ULL, 0xc0c000c0c0c000c0ULL, 0xe4e400e4e4e400e4ULL, 0x5757005757570057ULL, 0xeaea00eaeaea00eaULL, 0xaeae00aeaeae00aeULL, 0x2323002323230023ULL, @@ -685,7 +685,7 @@ const u64 camellia_sp44044404[256] = { 0x9e9e009e9e9e009eULL, }; -const u64 camellia_sp11101110[256] = { +__visible const u64 camellia_sp11101110[256] = { 0x7070700070707000ULL, 0x8282820082828200ULL, 0x2c2c2c002c2c2c00ULL, 0xececec00ececec00ULL, 0xb3b3b300b3b3b300ULL, 0x2727270027272700ULL, 0xc0c0c000c0c0c000ULL, 0xe5e5e500e5e5e500ULL, 0xe4e4e400e4e4e400ULL, diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index 47f2e5c71759..fd0d6b454975 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -62,7 +62,7 @@ static inline u8 byte(const u32 x, const unsigned n) static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 }; -const u32 crypto_ft_tab[4][256] = { +__visible const u32 crypto_ft_tab[4][256] = { { 0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6, 0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591, @@ -326,7 +326,7 @@ const u32 crypto_ft_tab[4][256] = { } }; -const u32 crypto_fl_tab[4][256] = { +__visible const u32 crypto_fl_tab[4][256] = { { 0x00000063, 0x0000007c, 0x00000077, 0x0000007b, 0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5, @@ -590,7 +590,7 @@ const u32 crypto_fl_tab[4][256] = { } }; -const u32 crypto_it_tab[4][256] = { +__visible const u32 crypto_it_tab[4][256] = { { 0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a, 0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b, @@ -854,7 +854,7 @@ const u32 crypto_it_tab[4][256] = { } }; -const u32 crypto_il_tab[4][256] = { +__visible const u32 crypto_il_tab[4][256] = { { 0x00000052, 0x00000009, 0x0000006a, 0x000000d5, 0x00000030, 0x00000036, 0x000000a5, 0x00000038, diff --git a/crypto/cast_common.c b/crypto/cast_common.c index a15f523d5f56..117dd8250f27 100644 --- a/crypto/cast_common.c +++ b/crypto/cast_common.c @@ -15,7 +15,7 @@ #include #include -const u32 cast_s1[256] = { +__visible const u32 cast_s1[256] = { 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, @@ -83,7 +83,7 @@ const u32 cast_s1[256] = { }; EXPORT_SYMBOL_GPL(cast_s1); -const u32 cast_s2[256] = { +__visible const u32 cast_s2[256] = { 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 
0xde0436ba, 0x99c430ef, @@ -151,7 +151,7 @@ const u32 cast_s2[256] = { }; EXPORT_SYMBOL_GPL(cast_s2); -const u32 cast_s3[256] = { +__visible const u32 cast_s3[256] = { 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, @@ -219,7 +219,7 @@ const u32 cast_s3[256] = { }; EXPORT_SYMBOL_GPL(cast_s3); -const u32 cast_s4[256] = { +__visible const u32 cast_s4[256] = { 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, -- cgit v1.2.3 From c849163b80c05f4567b1adef5db7f377460f88cd Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Mon, 12 Aug 2013 18:49:37 -0300 Subject: crypto: nx - fix concurrency issue The NX driver uses the transformation context to store several fields containing data related to the state of the operations in progress. Since a single tfm can be used by different kernel threads at the same time, we need to protect the data stored into the context. This patch makes use of spin locks to protect the data where a race condition can happen. Reviewed-by: Fionnuala Gunter Reviewed-by: Joy Latten Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-cbc.c | 10 ++++++++-- drivers/crypto/nx/nx-aes-ccm.c | 20 ++++++++++++++++---- drivers/crypto/nx/nx-aes-ctr.c | 10 ++++++++-- drivers/crypto/nx/nx-aes-ecb.c | 10 ++++++++-- drivers/crypto/nx/nx-aes-gcm.c | 4 ++++ drivers/crypto/nx/nx-aes-xcbc.c | 8 ++++++++ drivers/crypto/nx/nx-sha256.c | 16 ++++++++++++++++ drivers/crypto/nx/nx-sha512.c | 16 ++++++++++++++++ drivers/crypto/nx/nx.c | 4 ++-- drivers/crypto/nx/nx.h | 1 + 10 files changed, 87 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c index a2f99a910e4a..7c0237dae02d 100644 --- a/drivers/crypto/nx/nx-aes-cbc.c +++ b/drivers/crypto/nx/nx-aes-cbc.c @@ -70,10 +70,15 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc, { struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + unsigned long irq_flags; int rc; - if (nbytes > nx_ctx->ap->databytelen) - return -EINVAL; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + if (nbytes > nx_ctx->ap->databytelen) { + rc = -EINVAL; + goto out; + } if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; @@ -100,6 +105,7 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc, atomic64_add(csbcpb->csb.processed_byte_count, &(nx_ctx->stats->aes_bytes)); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index ef5eae6d1400..39d42245bc79 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -271,10 +271,15 @@ static int ccm_nx_decrypt(struct aead_request *req, unsigned int nbytes = req->cryptlen; unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); struct nx_ccm_priv *priv = &nx_ctx->priv.ccm; + unsigned long irq_flags; int rc = -1; - if (nbytes > nx_ctx->ap->databytelen) - return -EINVAL; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + if (nbytes > nx_ctx->ap->databytelen) { + rc = -EINVAL; + goto out; + } nbytes -= authsize; @@ -308,6 +313,7 @@ static int ccm_nx_decrypt(struct aead_request *req, rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, authsize) ? 
-EBADMSG : 0; out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } @@ -318,10 +324,15 @@ static int ccm_nx_encrypt(struct aead_request *req, struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; unsigned int nbytes = req->cryptlen; unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); + unsigned long irq_flags; int rc = -1; - if (nbytes > nx_ctx->ap->databytelen) - return -EINVAL; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + if (nbytes > nx_ctx->ap->databytelen) { + rc = -EINVAL; + goto out; + } rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, csbcpb->cpb.aes_ccm.in_pat_or_b0); @@ -350,6 +361,7 @@ static int ccm_nx_encrypt(struct aead_request *req, req->dst, nbytes, authsize, SCATTERWALK_TO_SG); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index b6286f14680b..762611b883cb 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -88,10 +88,15 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc, { struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + unsigned long irq_flags; int rc; - if (nbytes > nx_ctx->ap->databytelen) - return -EINVAL; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + if (nbytes > nx_ctx->ap->databytelen) { + rc = -EINVAL; + goto out; + } rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, csbcpb->cpb.aes_ctr.iv); @@ -112,6 +117,7 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc, atomic64_add(csbcpb->csb.processed_byte_count, &(nx_ctx->stats->aes_bytes)); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c index 7bbc9a81da21..77dbe084ba41 100644 --- a/drivers/crypto/nx/nx-aes-ecb.c +++ b/drivers/crypto/nx/nx-aes-ecb.c @@ -70,10 +70,15 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc, { struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + unsigned long irq_flags; int rc; - if (nbytes > nx_ctx->ap->databytelen) - return -EINVAL; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + + if (nbytes > nx_ctx->ap->databytelen) { + rc = -EINVAL; + goto out; + } if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; @@ -98,6 +103,7 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc, atomic64_add(csbcpb->csb.processed_byte_count, &(nx_ctx->stats->aes_bytes)); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 6cca6c392b00..df90d03afc10 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -166,8 +166,11 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct blkcipher_desc desc; unsigned int nbytes = req->cryptlen; + unsigned long irq_flags; int rc = -EINVAL; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + if (nbytes > nx_ctx->ap->databytelen) goto out; @@ -255,6 +258,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) -EBADMSG : 0; } out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c index 93923e4628c0..658da0fd3e1f 100644 --- a/drivers/crypto/nx/nx-aes-xcbc.c +++ b/drivers/crypto/nx/nx-aes-xcbc.c @@ -89,8 +89,11 @@ static int nx_xcbc_update(struct shash_desc *desc, struct 
nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg; u32 to_process, leftover; + unsigned long irq_flags; int rc = 0; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { /* we've hit the nx chip previously and we're updating again, * so copy over the partial digest */ @@ -158,6 +161,7 @@ static int nx_xcbc_update(struct shash_desc *desc, /* everything after the first update is continuation */ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } @@ -167,8 +171,11 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out) struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; + unsigned long irq_flags; int rc = 0; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { /* we've hit the nx chip previously, now we're finalizing, * so copy over the partial digest */ @@ -211,6 +218,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out) memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 254b01abef64..6547a7104bf6 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -57,8 +57,11 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, struct nx_sg *in_sg; u64 to_process, leftover, total; u32 max_sg_len; + unsigned long irq_flags; int rc = 0; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + /* 2 cases for total data len: * 1: < SHA256_BLOCK_SIZE: copy into state, return 0 * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover @@ -136,6 +139,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, memcpy(sctx->buf, data, leftover); sctx->count = leftover; out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } @@ -146,8 +150,11 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; u32 max_sg_len; + unsigned long irq_flags; int rc; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { @@ -186,6 +193,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) &(nx_ctx->stats->sha256_bytes)); memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } @@ -195,6 +203,9 @@ static int nx_sha256_export(struct shash_desc *desc, void *out) struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct sha256_state *octx = out; + unsigned long irq_flags; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); octx->count = sctx->count + (csbcpb->cpb.sha256.message_bit_length / 8); @@ -217,6 +228,7 @@ static int nx_sha256_export(struct shash_desc *desc, void *out) octx->state[7] = SHA256_H7; } + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return 0; } @@ -226,6 +238,9 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in) struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; const struct sha256_state *ictx = in; + unsigned long irq_flags; + + spin_lock_irqsave(&nx_ctx->lock, 
irq_flags); memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); @@ -240,6 +255,7 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in) NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; } + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return 0; } diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index 2d6d91359833..236e6afeab10 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -57,8 +57,11 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, struct nx_sg *in_sg; u64 to_process, leftover, total, spbc_bits; u32 max_sg_len; + unsigned long irq_flags; int rc = 0; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + /* 2 cases for total data len: * 1: < SHA512_BLOCK_SIZE: copy into state, return 0 * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover @@ -138,6 +141,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, memcpy(sctx->buf, data, leftover); sctx->count[0] = leftover; out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } @@ -149,8 +153,11 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) struct nx_sg *in_sg, *out_sg; u32 max_sg_len; u64 count0; + unsigned long irq_flags; int rc; + spin_lock_irqsave(&nx_ctx->lock, irq_flags); + max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { @@ -193,6 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); out: + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } @@ -202,6 +210,9 @@ static int nx_sha512_export(struct shash_desc *desc, void *out) struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct sha512_state *octx = out; + unsigned long irq_flags; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); /* move message_bit_length (128 bits) into count and convert its value * to bytes */ @@ -233,6 +244,7 @@ static int nx_sha512_export(struct shash_desc *desc, void *out) octx->state[7] = SHA512_H7; } + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return 0; } @@ -242,6 +254,9 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in) struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; const struct sha512_state *ictx = in; + unsigned long irq_flags; + + spin_lock_irqsave(&nx_ctx->lock, irq_flags); memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); sctx->count[0] = ictx->count[0] & 0x3f; @@ -259,6 +274,7 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in) NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; } + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return 0; } diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index ad07dc62b95a..bdf4990f9758 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -61,8 +61,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx, do { rc = vio_h_cop_sync(viodev, op); - } while ((rc == -EBUSY && !may_sleep && retries--) || - (rc == -EBUSY && may_sleep && cond_resched())); + } while (rc == -EBUSY && !may_sleep && retries--); if (rc) { dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d " @@ -251,6 +250,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, */ void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function) { + spin_lock_init(&nx_ctx->lock); memset(nx_ctx->kmem, 0, nx_ctx->kmem_len); 
nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT; diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index 3232b182dd28..14bb97f1c339 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -117,6 +117,7 @@ struct nx_ctr_priv { }; struct nx_crypto_ctx { + spinlock_t lock; /* synchronize access to the context */ void *kmem; /* unaligned, kmalloc'd buffer */ size_t kmem_len; /* length of kmem */ struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */ -- cgit v1.2.3 From 9dc48034596f3ef47ec5eb820f4dde4129d83a56 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Wed, 14 Aug 2013 11:11:13 +0200 Subject: hwrng: tx4939 - simplify use of devm_ioremap_resource Remove unneeded error handling on the result of a call to platform_get_resource when the value is passed to devm_ioremap_resource. Move the call to platform_get_resource adjacent to the call to devm_ioremap_resource to make the connection between them more clear. A simplified version of the semantic patch that makes this change is as follows: (http://coccinelle.lip6.fr/) // @@ expression pdev,res,n,e,e1; expression ret != 0; identifier l; @@ - res = platform_get_resource(pdev, IORESOURCE_MEM, n); ... when != res - if (res == NULL) { ... \(goto l;\|return ret;\) } ... when != res + res = platform_get_resource(pdev, IORESOURCE_MEM, n); e = devm_ioremap_resource(e1, res); // Signed-off-by: Julia Lawall Signed-off-by: Herbert Xu --- drivers/char/hw_random/tx4939-rng.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c index 00593c847cf0..09c5fbea2b93 100644 --- a/drivers/char/hw_random/tx4939-rng.c +++ b/drivers/char/hw_random/tx4939-rng.c @@ -110,12 +110,10 @@ static int __init tx4939_rng_probe(struct platform_device *dev) struct resource *r; int i; - r = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!r) - return -EBUSY; rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL); if (!rngdev) return -ENOMEM; + r = platform_get_resource(dev, IORESOURCE_MEM, 0); rngdev->base = devm_ioremap_resource(&dev->dev, r); if (IS_ERR(rngdev->base)) return PTR_ERR(rngdev->base); -- cgit v1.2.3 From 2a128b4b74e559e585028d0205e9729271530637 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Wed, 14 Aug 2013 15:52:57 +0200 Subject: crypto: camellia-x86-64 - replace commas by semicolons and adjust code alignment Adjust alignment and replace commas by semicolons in automatically generated code. 
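The commas being replaced are uses of the C comma operator, which evaluates both operands left to right, so the generated code was functionally correct but read misleadingly as a single statement. An illustration using the same shape as the affected code (not a literal hunk from the patch):

	/* comma operator: both assignments execute, in order,
	 * but the pair looks like one statement */
	dw = (kw4 & subRL[8]) >> 32,
	kw4 ^= rol32(dw, 1);

	/* equivalent, and unambiguous, with a semicolon */
	dw = (kw4 & subRL[8]) >> 32;
	kw4 ^= rol32(dw, 1);
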
Signed-off-by: Julia Lawall Signed-off-by: Herbert Xu --- arch/x86/crypto/camellia_glue.c | 48 ++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c index fa4c1b9d9ea9..c171dcbf192d 100644 --- a/arch/x86/crypto/camellia_glue.c +++ b/arch/x86/crypto/camellia_glue.c @@ -828,8 +828,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max) subRL[1] ^= (subRL[1] & ~subRL[9]) << 32; /* modified for FLinv(kl2) */ - dw = (subRL[1] & subRL[9]) >> 32, - subRL[1] ^= rol32(dw, 1); + dw = (subRL[1] & subRL[9]) >> 32; + subRL[1] ^= rol32(dw, 1); /* round 8 */ subRL[11] ^= subRL[1]; @@ -840,8 +840,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max) subRL[1] ^= (subRL[1] & ~subRL[17]) << 32; /* modified for FLinv(kl4) */ - dw = (subRL[1] & subRL[17]) >> 32, - subRL[1] ^= rol32(dw, 1); + dw = (subRL[1] & subRL[17]) >> 32; + subRL[1] ^= rol32(dw, 1); /* round 14 */ subRL[19] ^= subRL[1]; @@ -859,8 +859,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max) } else { subRL[1] ^= (subRL[1] & ~subRL[25]) << 32; /* modified for FLinv(kl6) */ - dw = (subRL[1] & subRL[25]) >> 32, - subRL[1] ^= rol32(dw, 1); + dw = (subRL[1] & subRL[25]) >> 32; + subRL[1] ^= rol32(dw, 1); /* round 20 */ subRL[27] ^= subRL[1]; @@ -882,8 +882,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max) kw4 ^= (kw4 & ~subRL[24]) << 32; /* modified for FL(kl5) */ - dw = (kw4 & subRL[24]) >> 32, - kw4 ^= rol32(dw, 1); + dw = (kw4 & subRL[24]) >> 32; + kw4 ^= rol32(dw, 1); } /* round 17 */ @@ -895,8 +895,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max) kw4 ^= (kw4 & ~subRL[16]) << 32; /* modified for FL(kl3) */ - dw = (kw4 & subRL[16]) >> 32, - kw4 ^= rol32(dw, 1); + dw = (kw4 & subRL[16]) >> 32; + kw4 ^= rol32(dw, 1); /* round 11 */ subRL[14] ^= kw4; @@ -907,8 +907,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max) kw4 ^= (kw4 & ~subRL[8]) << 32; /* modified for FL(kl1) */ - dw = (kw4 & subRL[8]) >> 32, - kw4 ^= rol32(dw, 1); + dw = (kw4 & subRL[8]) >> 32; + kw4 ^= rol32(dw, 1); /* round 5 */ subRL[6] ^= kw4; @@ -928,8 +928,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max) SET_SUBKEY_LR(6, subRL[5] ^ subRL[7]); /* round 5 */ tl = (subRL[10] >> 32) ^ (subRL[10] & ~subRL[8]); - dw = tl & (subRL[8] >> 32), /* FL(kl1) */ - tr = subRL[10] ^ rol32(dw, 1); + dw = tl & (subRL[8] >> 32); /* FL(kl1) */ + tr = subRL[10] ^ rol32(dw, 1); tt = (tr | ((u64)tl << 32)); SET_SUBKEY_LR(7, subRL[6] ^ tt); /* round 6 */ @@ -937,8 +937,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max) SET_SUBKEY_LR(9, subRL[9]); /* FLinv(kl2) */ tl = (subRL[7] >> 32) ^ (subRL[7] & ~subRL[9]); - dw = tl & (subRL[9] >> 32), /* FLinv(kl2) */ - tr = subRL[7] ^ rol32(dw, 1); + dw = tl & (subRL[9] >> 32); /* FLinv(kl2) */ + tr = subRL[7] ^ rol32(dw, 1); tt = (tr | ((u64)tl << 32)); SET_SUBKEY_LR(10, subRL[11] ^ tt); /* round 7 */ @@ -948,8 +948,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max) SET_SUBKEY_LR(14, subRL[13] ^ subRL[15]); /* round 11 */ tl = (subRL[18] >> 32) ^ (subRL[18] & ~subRL[16]); - dw = tl & (subRL[16] >> 32), /* FL(kl3) */ - tr = subRL[18] ^ rol32(dw, 1); + dw = tl & (subRL[16] >> 32); /* FL(kl3) */ + tr = subRL[18] ^ rol32(dw, 1); tt = (tr | ((u64)tl << 32)); SET_SUBKEY_LR(15, subRL[14] ^ tt); /* round 12 */ @@ -957,8 +957,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, 
int max) SET_SUBKEY_LR(17, subRL[17]); /* FLinv(kl4) */ tl = (subRL[15] >> 32) ^ (subRL[15] & ~subRL[17]); - dw = tl & (subRL[17] >> 32), /* FLinv(kl4) */ - tr = subRL[15] ^ rol32(dw, 1); + dw = tl & (subRL[17] >> 32); /* FLinv(kl4) */ + tr = subRL[15] ^ rol32(dw, 1); tt = (tr | ((u64)tl << 32)); SET_SUBKEY_LR(18, subRL[19] ^ tt); /* round 13 */ @@ -972,8 +972,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max) SET_SUBKEY_LR(24, subRL[24] ^ subRL[23]); /* kw3 */ } else { tl = (subRL[26] >> 32) ^ (subRL[26] & ~subRL[24]); - dw = tl & (subRL[24] >> 32), /* FL(kl5) */ - tr = subRL[26] ^ rol32(dw, 1); + dw = tl & (subRL[24] >> 32); /* FL(kl5) */ + tr = subRL[26] ^ rol32(dw, 1); tt = (tr | ((u64)tl << 32)); SET_SUBKEY_LR(23, subRL[22] ^ tt); /* round 18 */ @@ -981,8 +981,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max) SET_SUBKEY_LR(25, subRL[25]); /* FLinv(kl6) */ tl = (subRL[23] >> 32) ^ (subRL[23] & ~subRL[25]); - dw = tl & (subRL[25] >> 32), /* FLinv(kl6) */ - tr = subRL[23] ^ rol32(dw, 1); + dw = tl & (subRL[25] >> 32); /* FLinv(kl6) */ + tr = subRL[23] ^ rol32(dw, 1); tt = (tr | ((u64)tl << 32)); SET_SUBKEY_LR(26, subRL[27] ^ tt); /* round 19 */ -- cgit v1.2.3 From 452ec0498f98304d4107842075062fe429ce7804 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Wed, 14 Aug 2013 15:52:58 +0200 Subject: crypto: camellia_generic - replace commas by semicolons and adjust code alignment Adjust alignment and replace commas by semicolons in automatically generated code. Signed-off-by: Julia Lawall Signed-off-by: Herbert Xu --- crypto/camellia_generic.c | 48 +++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index 75efa2052305..26bcd7a2d6b4 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -388,8 +388,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) /* round 6 */ subL[7] ^= subL[1]; subR[7] ^= subR[1]; subL[1] ^= subR[1] & ~subR[9]; - dw = subL[1] & subL[9], - subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */ + dw = subL[1] & subL[9]; + subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */ /* round 8 */ subL[11] ^= subL[1]; subR[11] ^= subR[1]; /* round 10 */ @@ -397,8 +397,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) /* round 12 */ subL[15] ^= subL[1]; subR[15] ^= subR[1]; subL[1] ^= subR[1] & ~subR[17]; - dw = subL[1] & subL[17], - subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */ + dw = subL[1] & subL[17]; + subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */ /* round 14 */ subL[19] ^= subL[1]; subR[19] ^= subR[1]; /* round 16 */ @@ -413,8 +413,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) kw4l = subL[25]; kw4r = subR[25]; } else { subL[1] ^= subR[1] & ~subR[25]; - dw = subL[1] & subL[25], - subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */ + dw = subL[1] & subL[25]; + subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */ /* round 20 */ subL[27] ^= subL[1]; subR[27] ^= subR[1]; /* round 22 */ @@ -433,8 +433,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) /* round 19 */ subL[26] ^= kw4l; subR[26] ^= kw4r; kw4l ^= kw4r & ~subR[24]; - dw = kw4l & subL[24], - kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */ + dw = kw4l & subL[24]; + kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */ } /* round 17 */ subL[22] ^= kw4l; subR[22] ^= kw4r; @@ -443,8 +443,8 @@ static void 
camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) /* round 13 */ subL[18] ^= kw4l; subR[18] ^= kw4r; kw4l ^= kw4r & ~subR[16]; - dw = kw4l & subL[16], - kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */ + dw = kw4l & subL[16]; + kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */ /* round 11 */ subL[14] ^= kw4l; subR[14] ^= kw4r; /* round 9 */ @@ -452,8 +452,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) /* round 7 */ subL[10] ^= kw4l; subR[10] ^= kw4r; kw4l ^= kw4r & ~subR[8]; - dw = kw4l & subL[8], - kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */ + dw = kw4l & subL[8]; + kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */ /* round 5 */ subL[6] ^= kw4l; subR[6] ^= kw4r; /* round 3 */ @@ -477,8 +477,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */ SUBKEY_R(6) = subR[5] ^ subR[7]; tl = subL[10] ^ (subR[10] & ~subR[8]); - dw = tl & subL[8], /* FL(kl1) */ - tr = subR[10] ^ rol32(dw, 1); + dw = tl & subL[8]; /* FL(kl1) */ + tr = subR[10] ^ rol32(dw, 1); SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ SUBKEY_R(7) = subR[6] ^ tr; SUBKEY_L(8) = subL[8]; /* FL(kl1) */ @@ -486,8 +486,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */ SUBKEY_R(9) = subR[9]; tl = subL[7] ^ (subR[7] & ~subR[9]); - dw = tl & subL[9], /* FLinv(kl2) */ - tr = subR[7] ^ rol32(dw, 1); + dw = tl & subL[9]; /* FLinv(kl2) */ + tr = subR[7] ^ rol32(dw, 1); SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ SUBKEY_R(10) = tr ^ subR[11]; SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ @@ -499,8 +499,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */ SUBKEY_R(14) = subR[13] ^ subR[15]; tl = subL[18] ^ (subR[18] & ~subR[16]); - dw = tl & subL[16], /* FL(kl3) */ - tr = subR[18] ^ rol32(dw, 1); + dw = tl & subL[16]; /* FL(kl3) */ + tr = subR[18] ^ rol32(dw, 1); SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ SUBKEY_R(15) = subR[14] ^ tr; SUBKEY_L(16) = subL[16]; /* FL(kl3) */ @@ -508,8 +508,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */ SUBKEY_R(17) = subR[17]; tl = subL[15] ^ (subR[15] & ~subR[17]); - dw = tl & subL[17], /* FLinv(kl4) */ - tr = subR[15] ^ rol32(dw, 1); + dw = tl & subL[17]; /* FLinv(kl4) */ + tr = subR[15] ^ rol32(dw, 1); SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ SUBKEY_R(18) = tr ^ subR[19]; SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ @@ -527,8 +527,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_R(24) = subR[24] ^ subR[23]; } else { tl = subL[26] ^ (subR[26] & ~subR[24]); - dw = tl & subL[24], /* FL(kl5) */ - tr = subR[26] ^ rol32(dw, 1); + dw = tl & subL[24]; /* FL(kl5) */ + tr = subR[26] ^ rol32(dw, 1); SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */ SUBKEY_R(23) = subR[22] ^ tr; SUBKEY_L(24) = subL[24]; /* FL(kl5) */ @@ -536,8 +536,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_L(25) = subL[25]; /* FLinv(kl6) */ SUBKEY_R(25) = subR[25]; tl = subL[23] ^ (subR[23] & ~subR[25]); - dw = tl & subL[25], /* FLinv(kl6) */ - tr = subR[23] ^ rol32(dw, 1); + dw = tl & subL[25]; /* FLinv(kl6) */ + tr = subR[23] ^ rol32(dw, 1); SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */ SUBKEY_R(26) = tr ^ subR[27]; SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ -- cgit v1.2.3 From 
edb6f29464afc65fc73767540b854abf63ae7144 Mon Sep 17 00:00:00 2001 From: John Haxby Date: Wed, 14 Aug 2013 16:23:18 +0100 Subject: crypto: xor - Check for osxsave as well as avx in crypto/xor The avx feature bit alone does not guarantee that AVX instructions can be executed: the osxsave bit must also be set, indicating that the OS has enabled saving of the extended (AVX) register state. Checking both avoids using AVX where it would fault. This affects xen pv guests with sufficiently old versions of xen and sufficiently new hardware. On such a system, a guest with a btrfs root won't even boot. Signed-off-by: John Haxby Signed-off-by: Herbert Xu --- arch/x86/include/asm/xor_avx.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h index 7ea79c5fa1f2..492b29802f57 100644 --- a/arch/x86/include/asm/xor_avx.h +++ b/arch/x86/include/asm/xor_avx.h @@ -167,12 +167,12 @@ static struct xor_block_template xor_block_avx = { #define AVX_XOR_SPEED \ do { \ - if (cpu_has_avx) \ + if (cpu_has_avx && cpu_has_osxsave) \ xor_speed(&xor_block_avx); \ } while (0) #define AVX_SELECT(FASTEST) \ - (cpu_has_avx ? &xor_block_avx : FASTEST) + (cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST) #else -- cgit v1.2.3 From 514df2816ffb176c76c344f7d1b83e79a490e4d0 Mon Sep 17 00:00:00 2001 From: Alex Porosanu Date: Wed, 14 Aug 2013 18:56:45 +0300 Subject: crypto: caam - replace xstr macro with __stringify The CAAM driver contains one macro (xstr) used for printing the line location in a file where a memdump is done. This patch replaces the xstr macro with the already existing __stringify macro, which performs the same function. Signed-off-by: Alex Porosanu Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 78 +++++++++++++++++++++--------------------- drivers/crypto/caam/caamhash.c | 68 ++++++++++++++++++------------------ drivers/crypto/caam/key_gen.c | 6 ++-- 3 files changed, 77 insertions(+), 75 deletions(-) diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 7a9052c44238..7c63b72ecd75 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -82,8 +82,6 @@ #ifdef DEBUG /* for print_hex_dumps with line references */ -#define xstr(s) str(s) -#define str(s) #s #define debug(format, arg...) printk(format, arg) #else #define debug(format, arg...)
@@ -283,7 +281,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) return -ENOMEM; } #ifdef DEBUG - print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -351,7 +349,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) return -ENOMEM; } #ifdef DEBUG - print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -434,7 +432,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) return -ENOMEM; } #ifdef DEBUG - print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -498,7 +496,7 @@ static int aead_setkey(struct crypto_aead *aead, keylen, enckeylen, authkeylen); printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", ctx->split_key_len, ctx->split_key_pad_len); - print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); #endif @@ -517,7 +515,7 @@ static int aead_setkey(struct crypto_aead *aead, return -ENOMEM; } #ifdef DEBUG - print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, ctx->split_key_pad_len + enckeylen, 1); #endif @@ -547,7 +545,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, u32 *desc; #ifdef DEBUG - print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); #endif @@ -596,7 +594,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, return -ENOMEM; } #ifdef DEBUG - print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, + "ablkcipher enc shdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -641,7 +640,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, } #ifdef DEBUG - print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, + "ablkcipher dec shdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -778,13 +778,13 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, aead_unmap(jrdev, edesc, req); #ifdef DEBUG - print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), req->assoclen , 1); - print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize, edesc->src_nents ? 100 : ivsize, 1); - print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), edesc->src_nents ? 
100 : req->cryptlen + ctx->authsize + 4, 1); @@ -812,10 +812,10 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, offsetof(struct aead_edesc, hw_desc)); #ifdef DEBUG - print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); - print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), req->cryptlen, 1); #endif @@ -835,7 +835,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, err = -EBADMSG; #ifdef DEBUG - print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)), sizeof(struct iphdr) + req->assoclen + @@ -843,7 +843,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, ctx->authsize + 36, 1); if (!err && edesc->sec4_sg_bytes) { struct scatterlist *sg = sg_last(req->src, edesc->src_nents); - print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), sg->length + ctx->authsize + 16, 1); } @@ -876,10 +876,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, } #ifdef DEBUG - print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, edesc->src_nents > 1 ? 100 : ivsize, 1); - print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), edesc->dst_nents > 1 ? 100 : req->nbytes, 1); #endif @@ -911,10 +911,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, } #ifdef DEBUG - print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, ivsize, 1); - print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), edesc->dst_nents > 1 ? 100 : req->nbytes, 1); #endif @@ -945,16 +945,16 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, #ifdef DEBUG debug("assoclen %d cryptlen %d authsize %d\n", req->assoclen, req->cryptlen, authsize); - print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), req->assoclen , 1); - print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->iv, edesc->src_nents ? 100 : ivsize, 1); - print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), edesc->src_nents ? 
100 : req->cryptlen, 1); - print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, desc_bytes(sh_desc), 1); #endif @@ -1023,15 +1023,15 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, #ifdef DEBUG debug("assoclen %d cryptlen %d authsize %d\n", req->assoclen, req->cryptlen, authsize); - print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), req->assoclen , 1); - print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); - print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), edesc->src_nents > 1 ? 100 : req->cryptlen, 1); - print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, desc_bytes(sh_desc), 1); #endif @@ -1084,10 +1084,10 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, int len, sec4_sg_index = 0; #ifdef DEBUG - print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, ivsize, 1); - print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), edesc->src_nents ? 100 : req->nbytes, 1); #endif @@ -1245,7 +1245,7 @@ static int aead_encrypt(struct aead_request *req) init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, all_contig, true); #ifdef DEBUG - print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, desc_bytes(edesc->hw_desc), 1); #endif @@ -1279,7 +1279,7 @@ static int aead_decrypt(struct aead_request *req) return PTR_ERR(edesc); #ifdef DEBUG - print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), req->cryptlen, 1); #endif @@ -1288,7 +1288,7 @@ static int aead_decrypt(struct aead_request *req) init_aead_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req, all_contig, false); #ifdef DEBUG - print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, desc_bytes(edesc->hw_desc), 1); #endif @@ -1435,7 +1435,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) return PTR_ERR(edesc); #ifdef DEBUG - print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), req->cryptlen, 1); #endif @@ -1444,7 +1444,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) init_aead_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma, edesc, req, contig); #ifdef DEBUG - print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, desc_bytes(edesc->hw_desc), 1); #endif @@ -1544,7 +1544,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request 
edesc->iv_dma = iv_dma; #ifdef DEBUG - print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, sec4_sg_bytes, 1); #endif @@ -1573,7 +1573,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req) init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, iv_contig); #ifdef DEBUG - print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, desc_bytes(edesc->hw_desc), 1); #endif @@ -1611,7 +1611,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req) ctx->sh_desc_dec_dma, edesc, req, iv_contig); desc = edesc->hw_desc; #ifdef DEBUG - print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, desc_bytes(edesc->hw_desc), 1); #endif diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 77ad2b68f8f3..aa65c6e0c430 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -89,8 +89,6 @@ #ifdef DEBUG /* for print_hex_dumps with line references */ -#define xstr(s) str(s) -#define str(s) #s #define debug(format, arg...) printk(format, arg) #else #define debug(format, arg...) @@ -329,7 +327,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) return -ENOMEM; } #ifdef DEBUG - print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, + "ahash update shdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -347,7 +346,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) return -ENOMEM; } #ifdef DEBUG - print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, + "ahash update first shdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -364,7 +364,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) return -ENOMEM; } #ifdef DEBUG - print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -382,7 +382,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) return -ENOMEM; } #ifdef DEBUG - print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -401,7 +401,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) return -ENOMEM; } #ifdef DEBUG - print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, + "ahash digest shdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -462,9 +463,9 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, LDST_SRCDST_BYTE_CONTEXT); #ifdef DEBUG - print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); - print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -477,7 +478,8 @@ static 
int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, wait_for_completion_interruptible(&result.completion); ret = result.err; #ifdef DEBUG - print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, + "digested key@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize, 1); #endif @@ -528,7 +530,7 @@ static int ahash_setkey(struct crypto_ahash *ahash, #ifdef DEBUG printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", ctx->split_key_len, ctx->split_key_pad_len); - print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); #endif @@ -543,7 +545,7 @@ static int ahash_setkey(struct crypto_ahash *ahash, return -ENOMEM; } #ifdef DEBUG - print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, ctx->split_key_pad_len, 1); #endif @@ -636,11 +638,11 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, kfree(edesc); #ifdef DEBUG - print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); if (req->result) - print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->result, digestsize, 1); #endif @@ -674,11 +676,11 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, kfree(edesc); #ifdef DEBUG - print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); if (req->result) - print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->result, digestsize, 1); #endif @@ -712,11 +714,11 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, kfree(edesc); #ifdef DEBUG - print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); if (req->result) - print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->result, digestsize, 1); #endif @@ -750,11 +752,11 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, kfree(edesc); #ifdef DEBUG - print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); if (req->result) - print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->result, digestsize, 1); #endif @@ -850,7 +852,7 @@ static int ahash_update_ctx(struct ahash_request *req) append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); #ifdef DEBUG - print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -869,9 +871,9 @@ static int ahash_update_ctx(struct ahash_request *req) *next_buflen = last_buflen; } #ifdef DEBUG - print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); - print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1); #endif @@ -935,7 +937,7 @@ static int ahash_final_ctx(struct ahash_request *req) digestsize); #ifdef DEBUG - print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -1014,7 +1016,7 @@ static int ahash_finup_ctx(struct ahash_request *req) digestsize); #ifdef DEBUG - print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -1084,7 +1086,7 @@ static int ahash_digest(struct ahash_request *req) digestsize); #ifdef DEBUG - print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -1138,7 +1140,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) edesc->src_nents = 0; #ifdef DEBUG - print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -1226,7 +1228,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); #ifdef DEBUG - print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -1248,9 +1250,9 @@ static int ahash_update_no_ctx(struct ahash_request *req) *next_buflen = 0; } #ifdef DEBUG - print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); - print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1); #endif @@ -1319,7 +1321,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) digestsize); #ifdef DEBUG - print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -1412,7 +1414,7 @@ static int ahash_update_first(struct ahash_request *req) map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); #ifdef DEBUG - print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -1436,7 +1438,7 @@ static int ahash_update_first(struct ahash_request *req) sg_copy(next_buf, req->src, req->nbytes); } #ifdef DEBUG - print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1); #endif diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index 87138d2adb5f..ea2e406610eb 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c @@ -95,9 +95,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); #ifdef DEBUG - print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, 
"ctx.key@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); - print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -110,7 +110,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, wait_for_completion_interruptible(&result.completion); ret = result.err; #ifdef DEBUG - print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", + print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key_out, split_key_pad_len, 1); #endif -- cgit v1.2.3 From d4d8edf88544e4df694d32594c3dde63f82be960 Mon Sep 17 00:00:00 2001 From: Alex Porosanu Date: Wed, 14 Aug 2013 18:56:46 +0300 Subject: crypto: caam - add option for enabling DEBUG mode This patch adds an option to the Kconfig file for SEC which enables the user to see the debug messages that are printed inside the SEC driver. Signed-off-by: Alex Porosanu Signed-off-by: Herbert Xu --- drivers/crypto/caam/Kconfig | 8 ++++++++ drivers/crypto/caam/Makefile | 3 +++ 2 files changed, 11 insertions(+) diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index b44091c47f75..ca89f6b84b06 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -98,3 +98,11 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API To compile this as a module, choose M here: the module will be called caamrng. + +config CRYPTO_DEV_FSL_CAAM_DEBUG + bool "Enable debug output in CAAM driver" + depends on CRYPTO_DEV_FSL_CAAM + default n + help + Selecting this will enable printing of various debug + information in the CAAM driver. diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile index b1eb44838db5..d56bd0ec65d8 100644 --- a/drivers/crypto/caam/Makefile +++ b/drivers/crypto/caam/Makefile @@ -1,6 +1,9 @@ # # Makefile for the CAAM backend and dependent components # +ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y) + EXTRA_CFLAGS := -DDEBUG +endif obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o -- cgit v1.2.3 From b4eba0ca341fb6ad0199ab3f2244aa7e0c4cb34d Mon Sep 17 00:00:00 2001 From: "jmlatten@linux.vnet.ibm.com" Date: Wed, 14 Aug 2013 17:17:57 -0500 Subject: crypto: nx - fix nx-aes-gcm verification This patch fixes a bug in the nx-aes-gcm implementation. Corrected the code so that the authtag is always verified after decrypting and not just when there is associated data included. Also, corrected the code to retrieve the input authtag from src instead of dst. 
Reviewed-by: Fionnuala Gunter Reviewed-by: Marcelo Cerri Signed-off-by: Joy Latten Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-gcm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index df90d03afc10..74feee10f943 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -246,11 +246,11 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) req->dst, nbytes, crypto_aead_authsize(crypto_aead_reqtfm(req)), SCATTERWALK_TO_SG); - } else if (req->assoclen) { + } else { u8 *itag = nx_ctx->priv.gcm.iauth_tag; u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; - scatterwalk_map_and_copy(itag, req->dst, nbytes, + scatterwalk_map_and_copy(itag, req->src, nbytes, crypto_aead_authsize(crypto_aead_reqtfm(req)), SCATTERWALK_FROM_SG); rc = memcmp(itag, otag, -- cgit v1.2.3 From 257aff515409f9455edff3a946344e71baf9e116 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sat, 17 Aug 2013 21:42:22 -0500 Subject: crypto: scatterwalk - Add support for calculating number of SG elements The crypto layer only passes nbytes to encrypt, but in the omap-aes driver we need to know the number of SG elements to pass to the dmaengine slave API. Add a function for this to the scatterwalk library. Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- crypto/scatterwalk.c | 22 ++++++++++++++++++++++ include/crypto/scatterwalk.h | 2 ++ 2 files changed, 24 insertions(+) diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 7281b8a93ad3..79ca2278c2a3 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -124,3 +124,25 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, scatterwalk_done(&walk, out, 0); } EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy); + +int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes) +{ + int offset = 0, n = 0; + + /* num_bytes is too small */ + if (num_bytes < sg->length) + return -1; + + do { + offset += sg->length; + n++; + sg = scatterwalk_sg_next(sg); + + /* num_bytes is too large */ + if (unlikely(!sg && (num_bytes < offset))) + return -1; + } while (sg && (num_bytes > offset)); + + return n; +} +EXPORT_SYMBOL_GPL(scatterwalk_bytes_sglen); diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 3744d2a642df..13621cc8cf4c 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -113,4 +113,6 @@ void scatterwalk_done(struct scatter_walk *walk, int out, int more); void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, unsigned int start, unsigned int nbytes, int out); +int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes); + #endif /* _CRYPTO_SCATTERWALK_H */ -- cgit v1.2.3 From 016af9b5c51e58ecda573f14dabe85a67363b20f Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sun, 18 Aug 2013 00:56:11 -0500 Subject: crypto: omap-aes - Add useful debug macros When DEBUG is enabled, these macros can be used to print variables in integer and hex format, and clearly display which registers, offsets and values are being read/written, including printing the names of the offsets and their values.
Using statement expression macros in read path as, Suggested-by: Joe Perches Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 5f7980586850..e5b2120a12b9 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -13,7 +13,9 @@ * */ -#define pr_fmt(fmt) "%s: " fmt, __func__ +#define pr_fmt(fmt) "%20s: " fmt, __func__ +#define prn(num) pr_debug(#num "=%d\n", num) +#define prx(num) pr_debug(#num "=%x\n", num) #include #include @@ -172,16 +174,36 @@ struct omap_aes_dev { static LIST_HEAD(dev_list); static DEFINE_SPINLOCK(list_lock); +#ifdef DEBUG +#define omap_aes_read(dd, offset) \ +({ \ + int _read_ret; \ + _read_ret = __raw_readl(dd->io_base + offset); \ + pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n", \ + offset, _read_ret); \ + _read_ret; \ +}) +#else static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset) { return __raw_readl(dd->io_base + offset); } +#endif +#ifdef DEBUG +#define omap_aes_write(dd, offset, value) \ + do { \ + pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n", \ + offset, value); \ + __raw_writel(value, dd->io_base + offset); \ + } while (0) +#else static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset, u32 value) { __raw_writel(value, dd->io_base + offset); } +#endif static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset, u32 value, u32 mask) -- cgit v1.2.3 From e77c756eca76077032db23d8d4b85dcd743742b4 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sat, 17 Aug 2013 21:42:24 -0500 Subject: crypto: omap-aes - Populate number of SG elements Crypto layer only passes nbytes but number of SG elements is needed for mapping or unmapping SGs at one time using dma_map* API and also needed to pass in for dmaengine prep function. We call function added to scatterwalk for this purpose in omap_aes_handle_queue to populate the values which are used later. Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index e5b2120a12b9..1cad12ef4f55 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -165,6 +165,8 @@ struct omap_aes_dev { void *buf_out; int dma_out; struct dma_chan *dma_lch_out; + int in_sg_len; + int out_sg_len; dma_addr_t dma_addr_out; const struct omap_aes_pdata *pdata; @@ -725,6 +727,10 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, dd->out_offset = 0; dd->out_sg = req->dst; + dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total); + dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total); + BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0); + rctx = ablkcipher_request_ctx(req); ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); rctx->mode &= FLAGS_MODE_MASK; -- cgit v1.2.3 From 4b645c9465065bb3f8fb717789e864aa6d675052 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sat, 17 Aug 2013 21:42:25 -0500 Subject: crypto: omap-aes - Simplify DMA usage by using direct SGs In early version of this driver, assumptions were made such as DMA layer requires contiguous buffers etc. Due to this, new buffers were allocated, mapped and used for DMA. These assumptions are no longer true and DMAEngine scatter-gather DMA doesn't have such requirements. 
We simply the DMA operations by directly using the scatter-gather buffers provided by the crypto layer instead of creating our own. Lot of logic that handled DMA'ing only X number of bytes of the total, or as much as fitted into a 3rd party buffer is removed and is no longer required. Also, good performance improvement of atleast ~20% seen with encrypting a buffer size of 8K (1800 ops/sec vs 1400 ops/sec). Improvement will be higher for much larger blocks though such benchmarking is left as an exercise for the reader. Also DMA usage is much more simplified and coherent with rest of the code. Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 147 ++++++++-------------------------------------- 1 file changed, 25 insertions(+), 122 deletions(-) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 1cad12ef4f55..114c55a59efe 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -475,22 +475,14 @@ static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf, } static int omap_aes_crypt_dma(struct crypto_tfm *tfm, - struct scatterlist *in_sg, struct scatterlist *out_sg) + struct scatterlist *in_sg, struct scatterlist *out_sg, + int in_sg_len, int out_sg_len) { struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); struct omap_aes_dev *dd = ctx->dd; struct dma_async_tx_descriptor *tx_in, *tx_out; struct dma_slave_config cfg; - dma_addr_t dma_addr_in = sg_dma_address(in_sg); - int ret, length = sg_dma_len(in_sg); - - pr_debug("len: %d\n", length); - - dd->dma_size = length; - - if (!(dd->flags & FLAGS_FAST)) - dma_sync_single_for_device(dd->dev, dma_addr_in, length, - DMA_TO_DEVICE); + int ret; memset(&cfg, 0, sizeof(cfg)); @@ -509,7 +501,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, return ret; } - tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1, + tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!tx_in) { @@ -528,7 +520,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, return ret; } - tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1, + tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!tx_out) { @@ -546,7 +538,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_async_issue_pending(dd->dma_lch_out); /* start DMA */ - dd->pdata->trigger(dd, length); + dd->pdata->trigger(dd, dd->total); return 0; } @@ -555,93 +547,28 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm( crypto_ablkcipher_reqtfm(dd->req)); - int err, fast = 0, in, out; - size_t count; - dma_addr_t addr_in, addr_out; - struct scatterlist *in_sg, *out_sg; - int len32; + int err; pr_debug("total: %d\n", dd->total); - if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) { - /* check for alignment */ - in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)); - out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)); - - fast = in && out; + err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + return -EINVAL; } - if (fast) { - count = min(dd->total, sg_dma_len(dd->in_sg)); - count = min(count, sg_dma_len(dd->out_sg)); - - if (count != dd->total) { - pr_err("request length != buffer length\n"); - return -EINVAL; - } - - pr_debug("fast\n"); - - err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); - if (!err) { - dev_err(dd->dev, 
"dma_map_sg() error\n"); - return -EINVAL; - } - - err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); - if (!err) { - dev_err(dd->dev, "dma_map_sg() error\n"); - dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); - return -EINVAL; - } - - addr_in = sg_dma_address(dd->in_sg); - addr_out = sg_dma_address(dd->out_sg); - - in_sg = dd->in_sg; - out_sg = dd->out_sg; - - dd->flags |= FLAGS_FAST; - - } else { - /* use cache buffers */ - count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in, - dd->buflen, dd->total, 0); - - len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN; - - /* - * The data going into the AES module has been copied - * to a local buffer and the data coming out will go - * into a local buffer so set up local SG entries for - * both. - */ - sg_init_table(&dd->in_sgl, 1); - dd->in_sgl.offset = dd->in_offset; - sg_dma_len(&dd->in_sgl) = len32; - sg_dma_address(&dd->in_sgl) = dd->dma_addr_in; - - sg_init_table(&dd->out_sgl, 1); - dd->out_sgl.offset = dd->out_offset; - sg_dma_len(&dd->out_sgl) = len32; - sg_dma_address(&dd->out_sgl) = dd->dma_addr_out; - - in_sg = &dd->in_sgl; - out_sg = &dd->out_sgl; - - addr_in = dd->dma_addr_in; - addr_out = dd->dma_addr_out; - - dd->flags &= ~FLAGS_FAST; - + err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + return -EINVAL; } - dd->total -= count; - - err = omap_aes_crypt_dma(tfm, in_sg, out_sg); + err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len, + dd->out_sg_len); if (err) { - dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); - dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); } return err; @@ -661,7 +588,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) { int err = 0; - size_t count; pr_debug("total: %d\n", dd->total); @@ -670,21 +596,8 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) dmaengine_terminate_all(dd->dma_lch_in); dmaengine_terminate_all(dd->dma_lch_out); - if (dd->flags & FLAGS_FAST) { - dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); - dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); - } else { - dma_sync_single_for_device(dd->dev, dd->dma_addr_out, - dd->dma_size, DMA_FROM_DEVICE); - - /* copy data */ - count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out, - dd->buflen, dd->dma_size, 1); - if (count != dd->dma_size) { - err = -EINVAL; - pr_err("not all data converted: %u\n", count); - } - } + dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE); return err; } @@ -754,21 +667,11 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, static void omap_aes_done_task(unsigned long data) { struct omap_aes_dev *dd = (struct omap_aes_dev *)data; - int err; - - pr_debug("enter\n"); - err = omap_aes_crypt_dma_stop(dd); - - err = dd->err ? : err; - - if (dd->total && !err) { - err = omap_aes_crypt_dma_start(dd); - if (!err) - return; /* DMA started. Not fininishing. 
*/ - } + pr_debug("enter done_task\n"); - omap_aes_finish_req(dd, err); + omap_aes_crypt_dma_stop(dd); + omap_aes_finish_req(dd, 0); omap_aes_handle_queue(dd, NULL); pr_debug("exit\n"); -- cgit v1.2.3 From 0a641712ef9459df5d8c19fc19e686887257b3e3 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sat, 17 Aug 2013 21:42:26 -0500 Subject: crypto: omap-aes - Sync SG before DMA operation Earlier functions that did a similar sync are replaced by the dma_sync_sg_* helpers, which can operate on an entire SG list. Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 114c55a59efe..4ed2a8c40394 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -484,6 +484,8 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, struct dma_slave_config cfg; int ret; + dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE); + memset(&cfg, 0, sizeof(cfg)); cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0); @@ -670,6 +672,8 @@ static void omap_aes_done_task(unsigned long data) pr_debug("enter done_task\n"); + dma_sync_sg_for_cpu(dd->dev, dd->in_sg, dd->in_sg_len, DMA_FROM_DEVICE); + omap_aes_crypt_dma_stop(dd); omap_aes_finish_req(dd, 0); omap_aes_handle_queue(dd, NULL); -- cgit v1.2.3 From 0c063a22d50606e5e405e954be2de3810372dc27 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sat, 17 Aug 2013 21:42:27 -0500 Subject: crypto: omap-aes - Remove previously used intermediate buffers Intermediate buffers were allocated, mapped and used for DMA. These are no longer required now that the SGs from the crypto layer are used directly, as of the previous commits in the series. Along with them, remove the now-unused SG copying logic and the associated fields in struct omap_aes_dev.
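With the bounce buffers gone, the whole data path condenses to mapping the crypto layer's own SG lists, handing them to dmaengine, and unmapping them on completion. A condensed sketch of that flow, pieced together from the hunks in this series with error handling trimmed:

	/* map the caller's scatterlists directly */
	err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);

	/* one slave_sg descriptor per direction, no intermediate copy */
	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, dd->in_sg,
					dd->in_sg_len, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, dd->out_sg,
					 dd->out_sg_len, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	dmaengine_submit(tx_in);
	dmaengine_submit(tx_out);
	dma_async_issue_pending(dd->dma_lch_in);
	dma_async_issue_pending(dd->dma_lch_out);

	/* later, from done_task, once the transfer has completed */
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);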
Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 90 ----------------------------------------------- 1 file changed, 90 deletions(-) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 4ed2a8c40394..81f0e848b8d1 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -150,25 +150,13 @@ struct omap_aes_dev { struct ablkcipher_request *req; size_t total; struct scatterlist *in_sg; - struct scatterlist in_sgl; - size_t in_offset; struct scatterlist *out_sg; - struct scatterlist out_sgl; - size_t out_offset; - - size_t buflen; - void *buf_in; - size_t dma_size; int dma_in; struct dma_chan *dma_lch_in; - dma_addr_t dma_addr_in; - void *buf_out; int dma_out; struct dma_chan *dma_lch_out; int in_sg_len; int out_sg_len; - dma_addr_t dma_addr_out; - const struct omap_aes_pdata *pdata; }; @@ -347,33 +335,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd) dd->dma_lch_out = NULL; dd->dma_lch_in = NULL; - dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); - dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); - dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE; - dd->buflen &= ~(AES_BLOCK_SIZE - 1); - - if (!dd->buf_in || !dd->buf_out) { - dev_err(dd->dev, "unable to alloc pages.\n"); - goto err_alloc; - } - - /* MAP here */ - dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen, - DMA_TO_DEVICE); - if (dma_mapping_error(dd->dev, dd->dma_addr_in)) { - dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); - err = -EINVAL; - goto err_map_in; - } - - dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen, - DMA_FROM_DEVICE); - if (dma_mapping_error(dd->dev, dd->dma_addr_out)) { - dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); - err = -EINVAL; - goto err_map_out; - } - dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); @@ -400,14 +361,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd) err_dma_out: dma_release_channel(dd->dma_lch_in); err_dma_in: - dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, - DMA_FROM_DEVICE); -err_map_out: - dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE); -err_map_in: - free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE); - free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE); -err_alloc: if (err) pr_err("error: %d\n", err); return err; @@ -417,11 +370,6 @@ static void omap_aes_dma_cleanup(struct omap_aes_dev *dd) { dma_release_channel(dd->dma_lch_out); dma_release_channel(dd->dma_lch_in); - dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, - DMA_FROM_DEVICE); - dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE); - free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE); - free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE); } static void sg_copy_buf(void *buf, struct scatterlist *sg, @@ -438,42 +386,6 @@ static void sg_copy_buf(void *buf, struct scatterlist *sg, scatterwalk_done(&walk, out, 0); } -static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf, - size_t buflen, size_t total, int out) -{ - unsigned int count, off = 0; - - while (buflen && total) { - count = min((*sg)->length - *offset, total); - count = min(count, buflen); - - if (!count) - return off; - - /* - * buflen and total are AES_BLOCK_SIZE size aligned, - * so count should be also aligned - */ - - sg_copy_buf(buf + off, *sg, *offset, count, out); - - off += count; - buflen -= count; - *offset += count; - total -= count; - - if (*offset == (*sg)->length) { - *sg = 
sg_next(*sg); - if (*sg) - *offset = 0; - else - total = 0; - } - } - - return off; -} - static int omap_aes_crypt_dma(struct crypto_tfm *tfm, struct scatterlist *in_sg, struct scatterlist *out_sg, int in_sg_len, int out_sg_len) @@ -637,9 +549,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, /* assign new request to device */ dd->req = req; dd->total = req->nbytes; - dd->in_offset = 0; dd->in_sg = req->src; - dd->out_offset = 0; dd->out_sg = req->dst; dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total); -- cgit v1.2.3 From 67216756ea8b9bfa738e0f0a263094f459ab1a12 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sat, 17 Aug 2013 21:42:28 -0500 Subject: crypto: omap-aes - Add IRQ info and helper macros Add IRQ information to pdata and helper macros. These are required for PIO-mode support. Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 81f0e848b8d1..68c44253137d 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -76,6 +76,10 @@ #define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04)) +#define AES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs) +#define AES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs) +#define AES_REG_IRQ_DATA_IN BIT(1) +#define AES_REG_IRQ_DATA_OUT BIT(2) #define DEFAULT_TIMEOUT (5*HZ) #define FLAGS_MODE_MASK 0x000f @@ -121,6 +125,8 @@ struct omap_aes_pdata { u32 data_ofs; u32 rev_ofs; u32 mask_ofs; + u32 irq_enable_ofs; + u32 irq_status_ofs; u32 dma_enable_in; u32 dma_enable_out; @@ -847,6 +853,8 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = { .data_ofs = 0x60, .rev_ofs = 0x80, .mask_ofs = 0x84, + .irq_status_ofs = 0x8c, + .irq_enable_ofs = 0x90, .dma_enable_in = BIT(5), .dma_enable_out = BIT(6), .major_mask = 0x0700, -- cgit v1.2.3 From 1bf95cca8f407cc0d6f21708fdbb17d1dd531bec Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sat, 17 Aug 2013 21:42:29 -0500 Subject: crypto: omap-aes - PIO mode: Add IRQ handler and walk SGs We add an IRQ handler that implements a state-machine for PIO-mode and data structures for walking the scatter-gather list. The IRQ handler is called in succession both when data is available to read or next data can be sent for processing. This process continues till the entire in/out SG lists have been walked. Once the SG-list has been completely walked, the IRQ handler schedules the done_task tasklet. Also add a useful macro that is used through out the IRQ code for a common pattern of calculating how much an SG list has been walked. This improves code readability and avoids checkpatch errors. Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 90 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 68c44253137d..9909f93255b4 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -40,6 +40,8 @@ #define DST_MAXBURST 4 #define DMA_MIN (DST_MAXBURST * sizeof(u32)) +#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset) + /* OMAP TRM gives bitfields as start:end, where start is the higher bit number. 
For example 7:0 */ #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) @@ -92,6 +94,8 @@ #define FLAGS_FAST BIT(5) #define FLAGS_BUSY BIT(6) +#define AES_BLOCK_WORDS (AES_BLOCK_SIZE >> 2) + struct omap_aes_ctx { struct omap_aes_dev *dd; @@ -157,6 +161,8 @@ struct omap_aes_dev { size_t total; struct scatterlist *in_sg; struct scatterlist *out_sg; + struct scatter_walk in_walk; + struct scatter_walk out_walk; int dma_in; struct dma_chan *dma_lch_in; int dma_out; @@ -863,6 +869,90 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = { .minor_shift = 0, }; +static irqreturn_t omap_aes_irq(int irq, void *dev_id) +{ + struct omap_aes_dev *dd = dev_id; + u32 status, i; + u32 *src, *dst; + + status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd)); + if (status & AES_REG_IRQ_DATA_IN) { + omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0); + + BUG_ON(!dd->in_sg); + + BUG_ON(_calc_walked(in) > dd->in_sg->length); + + src = sg_virt(dd->in_sg) + _calc_walked(in); + + for (i = 0; i < AES_BLOCK_WORDS; i++) { + omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src); + + scatterwalk_advance(&dd->in_walk, 4); + if (dd->in_sg->length == _calc_walked(in)) { + dd->in_sg = scatterwalk_sg_next(dd->in_sg); + if (dd->in_sg) { + scatterwalk_start(&dd->in_walk, + dd->in_sg); + src = sg_virt(dd->in_sg) + + _calc_walked(in); + } + } else { + src++; + } + } + + /* Clear IRQ status */ + status &= ~AES_REG_IRQ_DATA_IN; + omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status); + + /* Enable DATA_OUT interrupt */ + omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4); + + } else if (status & AES_REG_IRQ_DATA_OUT) { + omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0); + + BUG_ON(!dd->out_sg); + + BUG_ON(_calc_walked(out) > dd->out_sg->length); + + dst = sg_virt(dd->out_sg) + _calc_walked(out); + + for (i = 0; i < AES_BLOCK_WORDS; i++) { + *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i)); + scatterwalk_advance(&dd->out_walk, 4); + if (dd->out_sg->length == _calc_walked(out)) { + dd->out_sg = scatterwalk_sg_next(dd->out_sg); + if (dd->out_sg) { + scatterwalk_start(&dd->out_walk, + dd->out_sg); + dst = sg_virt(dd->out_sg) + + _calc_walked(out); + } + } else { + dst++; + } + } + + dd->total -= AES_BLOCK_SIZE; + + BUG_ON(dd->total < 0); + + /* Clear IRQ status */ + status &= ~AES_REG_IRQ_DATA_OUT; + omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status); + + if (!dd->total) + /* All bytes read! */ + tasklet_schedule(&dd->done_task); + else + /* Enable DATA_IN interrupt for next block */ + omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2); + } + + return IRQ_HANDLED; +} + static const struct of_device_id omap_aes_of_match[] = { { .compatible = "ti,omap2-aes", -- cgit v1.2.3 From 98837abc86ebd26c1518c91cc5e2a9344837e6a8 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sat, 17 Aug 2013 21:42:30 -0500 Subject: crypto: omap-aes - PIO mode: platform data for OMAP4/AM437x and trigger We initialize the scatter gather walk lists needed for PIO mode and avoid all DMA paths such as mapping/unmapping buffers by checking for the pio_only flag. 
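Two details in the PIO code are worth decoding: the raw IRQ-enable values map onto the bit definitions introduced above, and _calc_walked() is simply the walk's progress within the current SG entry. A gloss (the equivalences follow from the definitions in this series; the spelled-out register write is illustrative):

	/*
	 *   0x2 == AES_REG_IRQ_DATA_IN  (BIT(1)): interrupt when the IP
	 *          can accept the next input block
	 *   0x4 == AES_REG_IRQ_DATA_OUT (BIT(2)): interrupt when an
	 *          output block is ready to be read
	 *
	 *   _calc_walked(in) expands to
	 *       (dd->in_walk.offset - dd->in_sg->offset)
	 *   i.e. bytes already consumed from the current input SG entry;
	 *   when it reaches dd->in_sg->length the handler advances to the
	 *   next entry via scatterwalk_sg_next().
	 */
	omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), AES_REG_IRQ_DATA_IN);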
Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 43 ++++++++++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 9909f93255b4..685535e4c78d 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -169,6 +169,7 @@ struct omap_aes_dev { struct dma_chan *dma_lch_out; int in_sg_len; int out_sg_len; + int pio_only; const struct omap_aes_pdata *pdata; }; @@ -408,6 +409,16 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, struct dma_slave_config cfg; int ret; + if (dd->pio_only) { + scatterwalk_start(&dd->in_walk, dd->in_sg); + scatterwalk_start(&dd->out_walk, dd->out_sg); + + /* Enable DATAIN interrupt and let it take + care of the rest */ + omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2); + return 0; + } + dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE); memset(&cfg, 0, sizeof(cfg)); @@ -477,21 +488,25 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) pr_debug("total: %d\n", dd->total); - err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); - if (!err) { - dev_err(dd->dev, "dma_map_sg() error\n"); - return -EINVAL; - } + if (!dd->pio_only) { + err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, + DMA_TO_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + return -EINVAL; + } - err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE); - if (!err) { - dev_err(dd->dev, "dma_map_sg() error\n"); - return -EINVAL; + err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + return -EINVAL; + } } err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len, dd->out_sg_len); - if (err) { + if (err && !dd->pio_only) { dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE); @@ -594,9 +609,11 @@ static void omap_aes_done_task(unsigned long data) pr_debug("enter done_task\n"); - dma_sync_sg_for_cpu(dd->dev, dd->in_sg, dd->in_sg_len, DMA_FROM_DEVICE); - - omap_aes_crypt_dma_stop(dd); + if (!dd->pio_only) { + dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); + omap_aes_crypt_dma_stop(dd); + } omap_aes_finish_req(dd, 0); omap_aes_handle_queue(dd, NULL); -- cgit v1.2.3 From 1801ad9483b796271a985ab71cfe26f7bbeae6dd Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sat, 17 Aug 2013 21:42:31 -0500 Subject: crypto: omap-aes - Switch to PIO mode during probe In cases where requesting for DMA channels fails for some reason, or channel numbers are not provided in DT or platform data, we switch to PIO-only mode also checking if platform provides IRQ numbers and interrupt register offsets in DT and platform data. All dma-only paths are avoided in this mode. 
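The guard used in the probe hunk below can look odd on first read: it tests platform data, not hardware registers. A gloss (the OMAP2/3 observation is inferred from the pdata tables, so treat it as an assumption):

	/*
	 * AES_REG_IRQ_STATUS(dd) expands to dd->pdata->irq_status_ofs and
	 * AES_REG_IRQ_ENABLE(dd) to dd->pdata->irq_enable_ofs, so
	 *
	 *     if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd))
	 *
	 * reads: "DMA setup failed AND this SoC's platform data declares
	 * IRQ status/enable offsets". OMAP2/3 pdata leaves both offsets
	 * zero, so the PIO fallback is effectively limited to
	 * OMAP4/AM437x-class IP.
	 */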
Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 685535e4c78d..7a08a152838a 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1075,7 +1075,7 @@ static int omap_aes_probe(struct platform_device *pdev) struct omap_aes_dev *dd; struct crypto_alg *algp; struct resource res; - int err = -ENOMEM, i, j; + int err = -ENOMEM, i, j, irq = -1; u32 reg; @@ -1118,8 +1118,23 @@ static int omap_aes_probe(struct platform_device *pdev) tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd); err = omap_aes_dma_init(dd); - if (err) - goto err_dma; + if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) { + dd->pio_only = 1; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "can't get IRQ resource\n"); + goto err_irq; + } + + err = request_irq(irq, omap_aes_irq, 0, + dev_name(dev), dd); + if (err) { + dev_err(dev, "Unable to grab omap-aes IRQ\n"); + goto err_irq; + } + } + INIT_LIST_HEAD(&dd->list); spin_lock(&list_lock); @@ -1147,8 +1162,11 @@ err_algs: for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) crypto_unregister_alg( &dd->pdata->algs_info[i].algs_list[j]); - omap_aes_dma_cleanup(dd); -err_dma: + if (dd->pio_only) + free_irq(irq, dd); + else + omap_aes_dma_cleanup(dd); +err_irq: tasklet_kill(&dd->done_task); tasklet_kill(&dd->queue_task); pm_runtime_disable(dev); -- cgit v1.2.3 From 6242332ff2f3431c4fb6f4b21581f38f16569c13 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sat, 17 Aug 2013 21:42:32 -0500 Subject: crypto: omap-aes - Add support for cases of unaligned lengths For cases where the offset or length of any page in the input SG list is not aligned to AES_BLOCK_SIZE, we copy all the pages from the input SG list into a contiguous buffer and prepare a single-element SG list for this buffer, with its length set to the total bytes to crypt. This is required for cases such as an SG list with a total size of 16 bytes spread across 16 pages, each holding 1 byte. DMA using the direct buffers of such instances is not possible. For this purpose, we first detect the unaligned case, allocate enough pages to satisfy the request, and prepare the SG lists. We then copy data into the buffer, and copy data out of it on completion.
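One sizing detail deserves a worked example: get_order() converts a byte count into a page order, always rounding up to whole pages. Assuming 4 KiB pages (the values below are illustrative):

	pages = get_order(16);     /* == 0: even 16 bytes costs one page */
	pages = get_order(4096);   /* == 0: exactly one page             */
	pages = get_order(4097);   /* == 1: two pages, 8 KiB allocated   */
	pages = get_order(16384);  /* == 2: four pages                   */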
Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 86 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 83 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 7a08a152838a..2fd22ca6a58f 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -158,9 +158,23 @@ struct omap_aes_dev { struct tasklet_struct queue_task; struct ablkcipher_request *req; + + /* + * total is used by PIO mode for book keeping so introduce + * variable total_save as need it to calc page_order + */ size_t total; + size_t total_save; + struct scatterlist *in_sg; struct scatterlist *out_sg; + + /* Buffers for copying for unaligned cases */ + struct scatterlist in_sgl; + struct scatterlist out_sgl; + struct scatterlist *orig_out; + int sgs_copied; + struct scatter_walk in_walk; struct scatter_walk out_walk; int dma_in; @@ -537,12 +551,51 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) dmaengine_terminate_all(dd->dma_lch_in); dmaengine_terminate_all(dd->dma_lch_out); - dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); - dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE); - return err; } +int omap_aes_check_aligned(struct scatterlist *sg) +{ + while (sg) { + if (!IS_ALIGNED(sg->offset, 4)) + return -1; + if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE)) + return -1; + sg = sg_next(sg); + } + return 0; +} + +int omap_aes_copy_sgs(struct omap_aes_dev *dd) +{ + void *buf_in, *buf_out; + int pages; + + pages = get_order(dd->total); + + buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); + buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); + + if (!buf_in || !buf_out) { + pr_err("Couldn't allocated pages for unaligned cases.\n"); + return -1; + } + + dd->orig_out = dd->out_sg; + + sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0); + + sg_init_table(&dd->in_sgl, 1); + sg_set_buf(&dd->in_sgl, buf_in, dd->total); + dd->in_sg = &dd->in_sgl; + + sg_init_table(&dd->out_sgl, 1); + sg_set_buf(&dd->out_sgl, buf_out, dd->total); + dd->out_sg = &dd->out_sgl; + + return 0; +} + static int omap_aes_handle_queue(struct omap_aes_dev *dd, struct ablkcipher_request *req) { @@ -576,9 +629,19 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, /* assign new request to device */ dd->req = req; dd->total = req->nbytes; + dd->total_save = req->nbytes; dd->in_sg = req->src; dd->out_sg = req->dst; + if (omap_aes_check_aligned(dd->in_sg) || + omap_aes_check_aligned(dd->out_sg)) { + if (omap_aes_copy_sgs(dd)) + pr_err("Failed to copy SGs for unaligned cases\n"); + dd->sgs_copied = 1; + } else { + dd->sgs_copied = 0; + } + dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total); dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total); BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0); @@ -606,14 +669,31 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, static void omap_aes_done_task(unsigned long data) { struct omap_aes_dev *dd = (struct omap_aes_dev *)data; + void *buf_in, *buf_out; + int pages; pr_debug("enter done_task\n"); if (!dd->pio_only) { dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE); + dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); omap_aes_crypt_dma_stop(dd); } + + if (dd->sgs_copied) { + buf_in = sg_virt(&dd->in_sgl); + buf_out = sg_virt(&dd->out_sgl); + + sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1); + + pages = 
get_order(dd->total_save); + free_pages((unsigned long)buf_in, pages); + free_pages((unsigned long)buf_out, pages); + } + omap_aes_finish_req(dd, 0); omap_aes_handle_queue(dd, NULL); -- cgit v1.2.3 From 05007c10ff76a9d6d85c007fe3a7f531611fda5c Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sat, 17 Aug 2013 21:42:33 -0500 Subject: crypto: omap-aes - Convert kzalloc to devm_kzalloc Use devm_kzalloc instead of kzalloc. With this change, there is no need to call kfree in error/exit paths. Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 2fd22ca6a58f..1f3d816cc9e6 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1158,7 +1158,7 @@ static int omap_aes_probe(struct platform_device *pdev) int err = -ENOMEM, i, j, irq = -1; u32 reg; - dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL); + dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL); if (dd == NULL) { dev_err(dev, "unable to alloc data struct.\n"); goto err_data; @@ -1251,7 +1251,6 @@ err_irq: tasklet_kill(&dd->queue_task); pm_runtime_disable(dev); err_res: - kfree(dd); dd = NULL; err_data: dev_err(dev, "initialization failed.\n"); @@ -1279,7 +1278,6 @@ static int omap_aes_remove(struct platform_device *pdev) tasklet_kill(&dd->queue_task); omap_aes_dma_cleanup(dd); pm_runtime_disable(dd->dev); - kfree(dd); dd = NULL; return 0; -- cgit v1.2.3 From bce2a22885e657099d9f2cf82269bb4629ba744d Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sat, 17 Aug 2013 21:42:34 -0500 Subject: crypto: omap-aes - Convert request_irq to devm_request_irq Keeps request_irq exit/error code paths simpler. Suggested-by: Lokesh Vutla Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 1f3d816cc9e6..ce791c2f81f7 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1207,7 +1207,7 @@ static int omap_aes_probe(struct platform_device *pdev) goto err_irq; } - err = request_irq(irq, omap_aes_irq, 0, + err = devm_request_irq(dev, irq, omap_aes_irq, 0, dev_name(dev), dd); if (err) { dev_err(dev, "Unable to grab omap-aes IRQ\n"); @@ -1242,9 +1242,7 @@ err_algs: for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) crypto_unregister_alg( &dd->pdata->algs_info[i].algs_list[j]); - if (dd->pio_only) - free_irq(irq, dd); - else + if (!dd->pio_only) omap_aes_dma_cleanup(dd); err_irq: tasklet_kill(&dd->done_task); -- cgit v1.2.3 From 1bbf643705c3f85f3fa2325807afae260f755f9a Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sat, 17 Aug 2013 21:42:35 -0500 Subject: crypto: omap-aes - Kconfig: Add build support for AM437x For AM437x SoC, ARCH_OMAP2 and ARCH_OMAP3 is not enabled in the defconfig. We follow same thing as SHA driver, and add depends on ARCH_OMAP2PLUS so that the config is selectable for AES driver on AM437x SoC builds. 
Signed-off-by: Joel Fernandes Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 62fb673b28df..f4fd837bcb82 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -255,7 +255,7 @@ config CRYPTO_DEV_OMAP_SHAM config CRYPTO_DEV_OMAP_AES tristate "Support for OMAP AES hw engine" - depends on ARCH_OMAP2 || ARCH_OMAP3 + depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS select CRYPTO_AES select CRYPTO_BLKCIPHER2 help -- cgit v1.2.3 From 393e661d6167c1b7444704191ea1d01aa3447894 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 20 Aug 2013 11:51:41 +0300 Subject: crypto: sahara - checking the wrong variable There is a typo here. "dev->hw_link[]" is an array, not a pointer, so the check is nonsense. We should be checking recently allocated "dev->hw_link[0]" instead. Signed-off-by: Dan Carpenter Signed-off-by: Herbert Xu --- drivers/crypto/sahara.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index b076d8110785..d7bb8bac36e9 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -955,7 +955,7 @@ static int sahara_probe(struct platform_device *pdev) dev->hw_link[0] = dma_alloc_coherent(&pdev->dev, SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), &dev->hw_phys_link[0], GFP_KERNEL); - if (!dev->hw_link) { + if (!dev->hw_link[0]) { dev_err(&pdev->dev, "Could not allocate hw links\n"); err = -ENOMEM; goto err_link; -- cgit v1.2.3 From 5bc357037476bf8d4623ab4ef0d1640b947c4625 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 20 Aug 2013 11:54:48 +0300 Subject: crypto: tegra-aes - bitwise vs logical and The bug here is that: while (eng_busy & (!icq_empty) & dma_busy) is never true because it's using bitwise instead of logical ANDs. The other bitwise AND conditions work as intended but I changed them as well for consistency. 
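The difference is easy to see with concrete values; a small illustration (the field values are made up for the example):

	/*
	 * Suppose the busy flag is bit 2 and the command queue is
	 * currently non-empty:
	 *
	 *   eng_busy  = value & TEGRA_AES_ENGINE_BUSY_FIELD;   -> 0x4
	 *   icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;     -> 0x0
	 *
	 * Bitwise:  eng_busy & (!icq_empty) == 0x4 & 0x1 == 0
	 *           ('!' yields 0 or 1, which never overlaps bit 2,
	 *           so the wait loop exits immediately)
	 *
	 * Logical:  eng_busy && !icq_empty == true && true == 1
	 *           (tests truthiness, which is what was intended)
	 */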
Signed-off-by: Dan Carpenter Signed-off-by: Herbert Xu --- drivers/crypto/tegra-aes.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c index 85ea7525fa36..2d58da972ae2 100644 --- a/drivers/crypto/tegra-aes.c +++ b/drivers/crypto/tegra-aes.c @@ -275,7 +275,7 @@ static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr, value = aes_readl(dd, TEGRA_AES_INTR_STATUS); eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; - } while (eng_busy & (!icq_empty)); + } while (eng_busy && !icq_empty); aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR); } @@ -365,7 +365,7 @@ static int aes_set_key(struct tegra_aes_dev *dd) eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD; - } while (eng_busy & (!icq_empty) & dma_busy); + } while (eng_busy && !icq_empty && dma_busy); /* settable command to get key into internal registers */ value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT | @@ -379,7 +379,7 @@ static int aes_set_key(struct tegra_aes_dev *dd) value = aes_readl(dd, TEGRA_AES_INTR_STATUS); eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; - } while (eng_busy & (!icq_empty)); + } while (eng_busy && !icq_empty); return 0; } -- cgit v1.2.3 From b8411ccd613fc7c504104f56265bd640b6c42d8e Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Tue, 20 Aug 2013 20:32:34 +0530 Subject: crypto: omap-sham - Enable Polling mode if DMA fails For writing input buffer into DATA_IN register current driver has the following state machine: -> if input buffer < 9 : use fallback driver -> else if input buffer < block size : Copy input buffer into data_in regs -> else use dma transfer. In cases where requesting for DMA channels fails for some reason, or channel numbers are not provided in DT or platform data, probe also fails. Instead of returning from driver use cpu polling mode. In this mode processor polls on INPUT_READY bit and writes data into data_in regs when it equals 1. This operation is repeated until the length of message. Now the state machine looks like: -> if input buffer < 9 : use fallback driver -> else if input buffer < block size : Copy input buffer into data_in regs -> else if dma enabled: use dma transfer else use cpu polling mode. 
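Condensed into code, the dispatch policy reads roughly as follows (a paraphrase of the state machine above with simplified, illustrative helper names, not a literal excerpt from the driver):

	if (nbytes < 9)
		return use_fallback_driver();   /* too small for the IP */
	else if (nbytes < block_size)
		return copy_to_data_in_regs();  /* short: program registers */
	else if (!dd->polling_mode)
		return xmit_dma();              /* normal DMA path */
	else
		return xmit_cpu_polling();      /* no DMA channel: poll
						   INPUT_READY per block */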
Signed-off-by: Lokesh Vutla Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 61 +++++++++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 19 deletions(-) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index ae1ca8b2dfb2..0a2bd160849a 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -225,6 +225,7 @@ struct omap_sham_dev { unsigned int dma; struct dma_chan *dma_lch; struct tasklet_struct done_task; + u8 polling_mode; unsigned long flags; struct crypto_queue queue; @@ -510,7 +511,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, size_t length, int final) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - int count, len32; + int count, len32, bs32, offset = 0; const u32 *buffer = (const u32 *)buf; dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", @@ -522,18 +523,23 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, /* should be non-zero before next lines to disable clocks later */ ctx->digcnt += length; - if (dd->pdata->poll_irq(dd)) - return -ETIMEDOUT; - if (final) set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ set_bit(FLAGS_CPU, &dd->flags); len32 = DIV_ROUND_UP(length, sizeof(u32)); + bs32 = get_block_size(ctx) / sizeof(u32); + + while (len32) { + if (dd->pdata->poll_irq(dd)) + return -ETIMEDOUT; - for (count = 0; count < len32; count++) - omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[count]); + for (count = 0; count < min(len32, bs32); count++, offset++) + omap_sham_write(dd, SHA_REG_DIN(dd, count), + buffer[offset]); + len32 -= min(len32, bs32); + } return -EINPROGRESS; } @@ -774,13 +780,22 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) static int omap_sham_update_cpu(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - int bufcnt; + int bufcnt, final; + + if (!ctx->total) + return 0; omap_sham_append_sg(ctx); + + final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; + + dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n", + ctx->bufcnt, ctx->digcnt, final); + bufcnt = ctx->bufcnt; ctx->bufcnt = 0; - return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1); + return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final); } static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) @@ -903,8 +918,11 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) struct omap_sham_reqctx *ctx = ahash_request_ctx(req); int err = 0, use_dma = 1; - if (ctx->bufcnt <= DMA_MIN) - /* faster to handle last block with cpu */ + if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode) + /* + * faster to handle last block with cpu or + * use cpu when dma is not present. + */ use_dma = 0; if (use_dma) @@ -1056,6 +1074,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) static int omap_sham_update(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + struct omap_sham_dev *dd = ctx->dd; int bs = get_block_size(ctx); if (!req->nbytes) @@ -1074,10 +1093,12 @@ static int omap_sham_update(struct ahash_request *req) */ omap_sham_append_sg(ctx); return 0; - } else if (ctx->bufcnt + ctx->total <= bs) { + } else if ((ctx->bufcnt + ctx->total <= bs) || + dd->polling_mode) { /* - * faster to use CPU for short transfers - */ + * faster to use CPU for short transfers or + * use cpu when dma is not present. 
+ */ ctx->flags |= BIT(FLAGS_CPU); } } else if (ctx->bufcnt + ctx->total < ctx->buflen) { @@ -1589,8 +1610,12 @@ static void omap_sham_done_task(unsigned long data) } if (test_bit(FLAGS_CPU, &dd->flags)) { - if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) - goto finish; + if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { + /* hash or semi-hash ready */ + err = omap_sham_update_cpu(dd); + if (err != -EINPROGRESS) + goto finish; + } } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { omap_sham_update_dma_stop(dd); @@ -1910,10 +1935,8 @@ static int omap_sham_probe(struct platform_device *pdev) dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn, &dd->dma, dev, "rx"); if (!dd->dma_lch) { - dev_err(dev, "unable to obtain RX DMA engine channel %u\n", - dd->dma); - err = -ENXIO; - goto data_err; + dd->polling_mode = 1; + dev_dbg(dev, "using polling mode instead of dma\n"); } dd->flags |= dd->pdata->flags; -- cgit v1.2.3 From f5e4626097f865783177f265d7793995bd8a2a76 Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Tue, 20 Aug 2013 20:32:35 +0530 Subject: crypto: omap-sham - correct dma burst size Each cycle of SHA512 operates on 32 data words where as SHA256 operates on 16 data words. This needs to be updated while configuring DMA channels. Doing the same. Signed-off-by: Lokesh Vutla Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 0a2bd160849a..8bdde57f6bb1 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -46,9 +46,6 @@ #define MD5_DIGEST_SIZE 16 -#define DST_MAXBURST 16 -#define DMA_MIN (DST_MAXBURST * sizeof(u32)) - #define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04)) #define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04)) #define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs) @@ -558,7 +555,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); struct dma_async_tx_descriptor *tx; struct dma_slave_config cfg; - int len32, ret; + int len32, ret, dma_min = get_block_size(ctx); dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", ctx->digcnt, length, final); @@ -567,7 +564,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0); cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - cfg.dst_maxburst = DST_MAXBURST; + cfg.dst_maxburst = dma_min / DMA_SLAVE_BUSWIDTH_4_BYTES; ret = dmaengine_slave_config(dd->dma_lch, &cfg); if (ret) { @@ -575,7 +572,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, return ret; } - len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN; + len32 = DIV_ROUND_UP(length, dma_min) * dma_min; if (is_sg) { /* @@ -729,7 +726,7 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) * the dmaengine infrastructure will calculate that it needs * to transfer 0 frames which ultimately fails. 
*/ - if (ctx->total < (DST_MAXBURST * sizeof(u32))) + if (ctx->total < get_block_size(ctx)) return omap_sham_update_dma_slow(dd); dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", -- cgit v1.2.3 From a246968edcfcfe0503fa9bcd5a444e7a5b9986dc Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Tue, 20 Aug 2013 11:07:53 -0700 Subject: hwrng: omap - reorder OMAP TRNG driver code The newly added omap4 support in the driver was added without consideration for building older configs. When building omap1_defconfig, it resulted in: drivers/char/hw_random/omap-rng.c:190:12: warning: 'omap4_rng_init' defined but not used [-Wunused-function] drivers/char/hw_random/omap-rng.c:215:13: warning: 'omap4_rng_cleanup' defined but not used [-Wunused-function] drivers/char/hw_random/omap-rng.c:251:20: warning: 'omap4_rng_irq' defined but not used [-Wunused-function] Move the code around so it is grouped with its operations struct, which for the omap4 case means also under the #ifdef CONFIG_OF, where it needs to be. Signed-off-by: Olof Johansson Reviewed-by: Lokesh Vutla Signed-off-by: Herbert Xu --- drivers/char/hw_random/omap-rng.c | 108 +++++++++++++++++++------------------- 1 file changed, 54 insertions(+), 54 deletions(-) diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index f3f71425e7e2..9b89ff4881de 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -140,16 +140,6 @@ static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg, __raw_writel(val, priv->base + priv->pdata->regs[reg]); } -static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv) -{ - return omap_rng_read(priv, RNG_STATUS_REG) ? 0 : 1; -} - -static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv) -{ - return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY; -} - static int omap_rng_data_present(struct hwrng *rng, int wait) { struct omap_rng_dev *priv; @@ -187,6 +177,60 @@ static int omap_rng_data_read(struct hwrng *rng, u32 *data) return data_size; } +static int omap_rng_init(struct hwrng *rng) +{ + struct omap_rng_dev *priv; + + priv = (struct omap_rng_dev *)rng->priv; + return priv->pdata->init(priv); +} + +static void omap_rng_cleanup(struct hwrng *rng) +{ + struct omap_rng_dev *priv; + + priv = (struct omap_rng_dev *)rng->priv; + priv->pdata->cleanup(priv); +} + +static struct hwrng omap_rng_ops = { + .name = "omap", + .data_present = omap_rng_data_present, + .data_read = omap_rng_data_read, + .init = omap_rng_init, + .cleanup = omap_rng_cleanup, +}; + +static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv) +{ + return omap_rng_read(priv, RNG_STATUS_REG) ? 
0 : 1; +} + +static int omap2_rng_init(struct omap_rng_dev *priv) +{ + omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x1); + return 0; +} + +static void omap2_rng_cleanup(struct omap_rng_dev *priv) +{ + omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x0); +} + +static struct omap_rng_pdata omap2_rng_pdata = { + .regs = (u16 *)reg_map_omap2, + .data_size = OMAP2_RNG_OUTPUT_SIZE, + .data_present = omap2_rng_data_present, + .init = omap2_rng_init, + .cleanup = omap2_rng_cleanup, +}; + +#if defined(CONFIG_OF) +static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv) +{ + return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY; +} + static int omap4_rng_init(struct omap_rng_dev *priv) { u32 val; @@ -221,33 +265,6 @@ static void omap4_rng_cleanup(struct omap_rng_dev *priv) omap_rng_write(priv, RNG_CONFIG_REG, val); } -static int omap2_rng_init(struct omap_rng_dev *priv) -{ - omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x1); - return 0; -} - -static void omap2_rng_cleanup(struct omap_rng_dev *priv) -{ - omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x0); -} - -static int omap_rng_init(struct hwrng *rng) -{ - struct omap_rng_dev *priv; - - priv = (struct omap_rng_dev *)rng->priv; - return priv->pdata->init(priv); -} - -static void omap_rng_cleanup(struct hwrng *rng) -{ - struct omap_rng_dev *priv; - - priv = (struct omap_rng_dev *)rng->priv; - priv->pdata->cleanup(priv); -} - static irqreturn_t omap4_rng_irq(int irq, void *dev_id) { struct omap_rng_dev *priv = dev_id; @@ -275,23 +292,6 @@ static irqreturn_t omap4_rng_irq(int irq, void *dev_id) return IRQ_HANDLED; } -static struct hwrng omap_rng_ops = { - .name = "omap", - .data_present = omap_rng_data_present, - .data_read = omap_rng_data_read, - .init = omap_rng_init, - .cleanup = omap_rng_cleanup, -}; - -static struct omap_rng_pdata omap2_rng_pdata = { - .regs = (u16 *)reg_map_omap2, - .data_size = OMAP2_RNG_OUTPUT_SIZE, - .data_present = omap2_rng_data_present, - .init = omap2_rng_init, - .cleanup = omap2_rng_cleanup, -}; - -#if defined(CONFIG_OF) static struct omap_rng_pdata omap4_rng_pdata = { .regs = (u16 *)reg_map_omap4, .data_size = OMAP4_RNG_OUTPUT_SIZE, -- cgit v1.2.3 From 9c823f9f7e4b392921d0d8b251bec080d58f9077 Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Thu, 22 Aug 2013 06:43:37 +0000 Subject: padata - share code between CPU_ONLINE and CPU_DOWN_FAILED, same to CPU_DOWN_PREPARE and CPU_UP_CANCELED Share code between CPU_ONLINE and CPU_DOWN_FAILED, same to CPU_DOWN_PREPARE and CPU_UP_CANCELED. It will fix 2 bugs: "not check the return value of __padata_remove_cpu() and __padata_add_cpu()". "need add 'break' between CPU_UP_CANCELED and CPU_DOWN_FAILED". 
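Both bugs are instances of a classic switch-statement pattern; a miniature before/after (not the padata code itself, locking trimmed):

	/* before: missing break and dropped return values */
	switch (action) {
	case CPU_UP_CANCELED:
		__padata_remove_cpu(pinst, cpu);  /* error ignored */
		/* missing break: falls through into the next case! */
	case CPU_DOWN_FAILED:
		__padata_add_cpu(pinst, cpu);     /* error ignored */
	}

	/* after: shared labels, one body per action, errors propagated */
	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		err = __padata_add_cpu(pinst, cpu);
		if (err)
			return notifier_from_errno(err);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_UP_CANCELED:
		err = __padata_remove_cpu(pinst, cpu);
		if (err)
			return notifier_from_errno(err);
		break;
	}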
Signed-off-by: Chen Gang Acked-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/kernel/padata.c b/kernel/padata.c index 072f4ee4eb89..2f0037a86289 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -846,6 +846,8 @@ static int padata_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: + case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); @@ -857,6 +859,8 @@ static int padata_cpu_callback(struct notifier_block *nfb, case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: + case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); @@ -865,22 +869,6 @@ static int padata_cpu_callback(struct notifier_block *nfb, if (err) return notifier_from_errno(err); break; - - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - if (!pinst_has_cpu(pinst, cpu)) - break; - mutex_lock(&pinst->lock); - __padata_remove_cpu(pinst, cpu); - mutex_unlock(&pinst->lock); - - case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: - if (!pinst_has_cpu(pinst, cpu)) - break; - mutex_lock(&pinst->lock); - __padata_add_cpu(pinst, cpu); - mutex_unlock(&pinst->lock); } return NOTIFY_OK; -- cgit v1.2.3 From b8b4a4166e3401b7d8ea9deb8d64d875a468144c Mon Sep 17 00:00:00 2001 From: Richard Weinberger Date: Fri, 23 Aug 2013 13:12:33 +0200 Subject: padata - Register hotcpu notifier after initialization padata_cpu_callback() takes pinst->lock, to avoid taking an uninitialized lock, register the notifier after it's initialization. Signed-off-by: Richard Weinberger Acked-by: Steffen Klassert Signed-off-by: Herbert Xu --- kernel/padata.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/kernel/padata.c b/kernel/padata.c index 2f0037a86289..07af2c95dcfe 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -1074,18 +1074,18 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq, pinst->flags = 0; -#ifdef CONFIG_HOTPLUG_CPU - pinst->cpu_notifier.notifier_call = padata_cpu_callback; - pinst->cpu_notifier.priority = 0; - register_hotcpu_notifier(&pinst->cpu_notifier); -#endif - put_online_cpus(); BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); kobject_init(&pinst->kobj, &padata_attr_type); mutex_init(&pinst->lock); +#ifdef CONFIG_HOTPLUG_CPU + pinst->cpu_notifier.notifier_call = padata_cpu_callback; + pinst->cpu_notifier.priority = 0; + register_hotcpu_notifier(&pinst->cpu_notifier); +#endif + return pinst; err_free_masks: -- cgit v1.2.3 From a8fc391a15957e2f2871c4ea3f6e84c33095c374 Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Thu, 29 Aug 2013 11:36:31 -0300 Subject: crypto: nx - add offset to nx_build_sg_lists() This patch includes one more parameter to nx_build_sg_lists() to skip the given number of bytes from beginning of each sg list. This is needed in order to implement the fixes for the AES modes to make them able to process larger chunks of data. 
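The new parameter exists so callers can walk one large request in slices, pointing each hyper call at the next window of the same scatterlists. A sketch of the intended call pattern (max_chunk is a stand-in here; the per-mode patches that follow derive the real bound):

	unsigned int processed = 0, to_process;

	do {
		/* illustrative clamp; see the ECB/CBC fixes below */
		to_process = min_t(unsigned int, nbytes - processed,
				   max_chunk);

		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
				       processed /* new offset argument */,
				       iv);
		if (rc)
			break;

		/* ... nx_hcall_sync() for this slice ... */

		processed += to_process;
	} while (processed < nbytes);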
Reviewed-by: Joy Latten Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-cbc.c | 2 +- drivers/crypto/nx/nx-aes-ccm.c | 4 ++-- drivers/crypto/nx/nx-aes-ctr.c | 2 +- drivers/crypto/nx/nx-aes-ecb.c | 2 +- drivers/crypto/nx/nx-aes-gcm.c | 2 +- drivers/crypto/nx/nx.c | 9 +++++++-- drivers/crypto/nx/nx.h | 2 +- 7 files changed, 14 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c index 7c0237dae02d..a9e76c6c37d8 100644 --- a/drivers/crypto/nx/nx-aes-cbc.c +++ b/drivers/crypto/nx/nx-aes-cbc.c @@ -85,7 +85,7 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc, else NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; - rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, + rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, 0, csbcpb->cpb.aes_cbc.iv); if (rc) goto out; diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index 39d42245bc79..666a35b1181a 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -293,7 +293,7 @@ static int ccm_nx_decrypt(struct aead_request *req, if (rc) goto out; - rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, + rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, 0, csbcpb->cpb.aes_ccm.iv_or_ctr); if (rc) goto out; @@ -339,7 +339,7 @@ static int ccm_nx_encrypt(struct aead_request *req, if (rc) goto out; - rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, + rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, 0, csbcpb->cpb.aes_ccm.iv_or_ctr); if (rc) goto out; diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index 762611b883cb..80dee8d58659 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -98,7 +98,7 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc, goto out; } - rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, + rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, 0, csbcpb->cpb.aes_ctr.iv); if (rc) goto out; diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c index 77dbe084ba41..fe0d803ed233 100644 --- a/drivers/crypto/nx/nx-aes-ecb.c +++ b/drivers/crypto/nx/nx-aes-ecb.c @@ -85,7 +85,7 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc, else NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; - rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, NULL); + rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, 0, NULL); if (rc) goto out; diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 74feee10f943..c2d6f76e3677 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -226,7 +226,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; - rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes, + rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes, 0, csbcpb->cpb.aes_gcm.iv_or_cnt); if (rc) goto out; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index bdf4990f9758..5533fe31c90d 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -211,6 +211,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, * @dst: destination scatterlist * @src: source scatterlist * @nbytes: length of data described in the scatterlists + * @offset: number of bytes to fast-forward past at the beginning of + * scatterlists. 
* @iv: destination for the iv data, if the algorithm requires it * * This is common code shared by all the AES algorithms. It uses the block @@ -222,6 +224,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes, + unsigned int offset, u8 *iv) { struct nx_sg *nx_insg = nx_ctx->in_sg; @@ -230,8 +233,10 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, if (iv) memcpy(iv, desc->info, AES_BLOCK_SIZE); - nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes); - nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes); + nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, + offset, nbytes); + nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, + offset, nbytes); /* these lengths should be negative, which will indicate to phyp that * the input and output parameters are scatterlists, not linear diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index 14bb97f1c339..befda07ca1da 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -156,7 +156,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32); int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int, - u8 *); + unsigned int, u8 *); struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int, struct scatterlist *, unsigned int, unsigned int); -- cgit v1.2.3 From ab74175938c0cd819733e68f5848bb4c818ec7aa Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Thu, 29 Aug 2013 11:36:32 -0300 Subject: crypto: nx - fix limits to sg lists for AES-ECB This patch updates the nx-aes-ecb implementation to perform several hyper calls if needed in order to always respect the length limits for scatter/gather lists. Two different limits are considered: - "ibm,max-sg-len": maximum number of bytes of each scatter/gather list. - "ibm,max-sync-cop": - The total number of bytes that a scatter/gather list can hold. - The maximum number of elements that a scatter/gather list can have. 
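Taken together, the two limits bound each hyper call as sketched below: databytelen caps the bytes per operation, one page of nx_sg entries caps how much data the sg list itself can describe, and the result is rounded down to whole AES blocks. This restates the computation the following diffs add to each mode:

    max_sg_len = min_t(u32, nx_driver.of.max_sg_len / sizeof(struct nx_sg),
                       nx_ctx->ap->sglen);

    to_process = min_t(u64, nbytes - processed, nx_ctx->ap->databytelen);
    to_process = min_t(u64, to_process, NX_PAGE_SIZE * (max_sg_len - 1));
    to_process &= ~(AES_BLOCK_SIZE - 1);    /* whole blocks only */
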
Reviewed-by: Joy Latten Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-ecb.c | 48 ++++++++++++++++++++++++++---------------- 1 file changed, 30 insertions(+), 18 deletions(-) diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c index fe0d803ed233..85a8d23cf29d 100644 --- a/drivers/crypto/nx/nx-aes-ecb.c +++ b/drivers/crypto/nx/nx-aes-ecb.c @@ -71,37 +71,49 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc, struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; unsigned long irq_flags; + unsigned int processed = 0, to_process; + u32 max_sg_len; int rc; spin_lock_irqsave(&nx_ctx->lock, irq_flags); - if (nbytes > nx_ctx->ap->databytelen) { - rc = -EINVAL; - goto out; - } + max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; else NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; - rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, 0, NULL); - if (rc) - goto out; + do { + to_process = min_t(u64, nbytes - processed, + nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + to_process = to_process & ~(AES_BLOCK_SIZE - 1); - if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { - rc = -EINVAL; - goto out; - } + rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process, + processed, NULL); + if (rc) + goto out; + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); - rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, - desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); - if (rc) - goto out; + processed += to_process; + } while (processed < nbytes); - atomic_inc(&(nx_ctx->stats->aes_ops)); - atomic64_add(csbcpb->csb.processed_byte_count, - &(nx_ctx->stats->aes_bytes)); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; -- cgit v1.2.3 From 2d290f0240c682a5dddf6b9ba39460c82f9fdff1 Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Thu, 29 Aug 2013 11:36:33 -0300 Subject: crypto: nx - fix limits to sg lists for AES-CBC This patch updates the nx-aes-cbc implementation to perform several hyper calls if needed in order to always respect the length limits for scatter/gather lists. Two different limits are considered: - "ibm,max-sg-len": maximum number of bytes of each scatter/gather list. - "ibm,max-sync-cop": - The total number of bytes that a scatter/gather list can hold. - The maximum number of elements that a scatter/gather list can have. 
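For CBC the split also has to preserve the chaining value across hyper calls: the chunk loop added below copies the output CV back into desc->info after each call, and nx_build_sg_lists() feeds desc->info into the CPB IV at the top of the next iteration. In isolation:

    /* after a successful hyper call for one chunk */
    memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);

    /* next iteration: nx_build_sg_lists() copies desc->info back
     * into csbcpb->cpb.aes_cbc.iv, chaining the chunks together */
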
Reviewed-by: Joy Latten Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-cbc.c | 56 +++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c index a9e76c6c37d8..cc00b52306ba 100644 --- a/drivers/crypto/nx/nx-aes-cbc.c +++ b/drivers/crypto/nx/nx-aes-cbc.c @@ -71,39 +71,49 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc, struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; unsigned long irq_flags; + unsigned int processed = 0, to_process; + u32 max_sg_len; int rc; spin_lock_irqsave(&nx_ctx->lock, irq_flags); - if (nbytes > nx_ctx->ap->databytelen) { - rc = -EINVAL; - goto out; - } + max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; else NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; - rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, 0, - csbcpb->cpb.aes_cbc.iv); - if (rc) - goto out; - - if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { - rc = -EINVAL; - goto out; - } - - rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, - desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); - if (rc) - goto out; - - memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE); - atomic_inc(&(nx_ctx->stats->aes_ops)); - atomic64_add(csbcpb->csb.processed_byte_count, - &(nx_ctx->stats->aes_bytes)); + do { + to_process = min_t(u64, nbytes - processed, + nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + to_process = to_process & ~(AES_BLOCK_SIZE - 1); + + rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process, + processed, csbcpb->cpb.aes_cbc.iv); + if (rc) + goto out; + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE); + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + } while (processed < nbytes); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; -- cgit v1.2.3 From 884d981b04f3c00f61f4efaf9a93103e01260685 Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Thu, 29 Aug 2013 11:36:34 -0300 Subject: crypto: nx - fix limits to sg lists for AES-CTR This patch updates the nx-aes-ctr implementation to perform several hyper calls if needed in order to always respect the length limits for scatter/gather lists. Two different limits are considered: - "ibm,max-sg-len": maximum number of bytes of each scatter/gather list. - "ibm,max-sync-cop": - The total number of bytes that a scatter/gather list can hold. - The maximum number of elements that a scatter/gather list can have. 
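CTR keeps no chaining value, but the keystream position must survive the split: each AES block consumed advances the 128-bit big-endian counter by one, and the updated counter returned by one hyper call seeds the next. A software model of that bookkeeping, for illustration only (the hardware returns the updated counter in the CPB; this helper is not part of the driver):

    /* advance a 16-byte big-endian counter by 'blocks' block counts */
    static void ctr128_add(u8 ctr[16], u32 blocks)
    {
            int i;

            for (i = 15; i >= 0 && blocks; i--) {
                    u32 sum = ctr[i] + (blocks & 0xff);

                    ctr[i] = sum & 0xff;
                    blocks = (blocks >> 8) + (sum >> 8);
            }
    }
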
Reviewed-by: Joy Latten Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-ctr.c | 50 ++++++++++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index 80dee8d58659..a37d009dc75c 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -89,33 +89,45 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc, struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; unsigned long irq_flags; + unsigned int processed = 0, to_process; + u32 max_sg_len; int rc; spin_lock_irqsave(&nx_ctx->lock, irq_flags); - if (nbytes > nx_ctx->ap->databytelen) { - rc = -EINVAL; - goto out; - } + max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); - rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, 0, - csbcpb->cpb.aes_ctr.iv); - if (rc) - goto out; + do { + to_process = min_t(u64, nbytes - processed, + nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + to_process = to_process & ~(AES_BLOCK_SIZE - 1); - if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { - rc = -EINVAL; - goto out; - } + rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process, + processed, csbcpb->cpb.aes_ctr.iv); + if (rc) + goto out; + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE); - rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, - desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); - if (rc) - goto out; + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); - atomic_inc(&(nx_ctx->stats->aes_ops)); - atomic64_add(csbcpb->csb.processed_byte_count, - &(nx_ctx->stats->aes_bytes)); + processed += to_process; + } while (processed < nbytes); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; -- cgit v1.2.3 From 799804348d11763b84213156318bb92cb955bfb5 Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Thu, 29 Aug 2013 11:36:35 -0300 Subject: crypto: nx - fix limits to sg lists for AES-GCM This patch updates the nx-aes-gcm implementation to perform several hyper calls if needed in order to always respect the length limits for scatter/gather lists. Two different limits are considered: - "ibm,max-sg-len": maximum number of bytes of each scatter/gather list. - "ibm,max-sync-cop": - The total number of bytes that a scatter/gather list can hold. - The maximum number of elements that a scatter/gather list can have. 
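Because GCM's MAC spans the whole message, the chunk loop must also tell the co-processor where each hyper call sits in the sequence. The flag pattern used below (and in the other AEAD fixes in this series) reduces to:

    NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;     /* first call of the message */

    do {
            if ((to_process + processed) < nbytes)
                    NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;  /* more chunks follow */
            else
                    NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; /* final chunk */

            /* ... hyper call, then feed out_pat/out_s0 back in ... */

            NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;  /* every later call */
            processed += to_process;
    } while (processed < nbytes);
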
Reviewed-by: Joy Latten Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-gcm.c | 202 +++++++++++++++++++++++++++-------------- 1 file changed, 136 insertions(+), 66 deletions(-) diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index c2d6f76e3677..9e89bdf34487 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -125,37 +125,101 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, struct aead_request *req, u8 *out) { + int rc; struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; - int rc = -EINVAL; struct scatter_walk walk; struct nx_sg *nx_sg = nx_ctx->in_sg; + unsigned int nbytes = req->assoclen; + unsigned int processed = 0, to_process; + u32 max_sg_len; - if (req->assoclen > nx_ctx->ap->databytelen) - goto out; - - if (req->assoclen <= AES_BLOCK_SIZE) { + if (nbytes <= AES_BLOCK_SIZE) { scatterwalk_start(&walk, req->assoc); - scatterwalk_copychunks(out, &walk, req->assoclen, - SCATTERWALK_FROM_SG); + scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); - - rc = 0; - goto out; + return 0; } - nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0, - req->assoclen); - nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg); + NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION; - rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); - if (rc) - goto out; + /* page_limit: number of sg entries that fit on one page */ + max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); - atomic_inc(&(nx_ctx->stats->aes_ops)); - atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); + do { + /* + * to_process: the data chunk to process in this update. + * This value is bound by sg list limits. + */ + to_process = min_t(u64, nbytes - processed, + nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + + if ((to_process + processed) < nbytes) + NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE; + else + NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE; + + nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen, + req->assoc, processed, to_process); + nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) + * sizeof(struct nx_sg); + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + return rc; + + memcpy(csbcpb_aead->cpb.aes_gca.in_pat, + csbcpb_aead->cpb.aes_gca.out_pat, + AES_BLOCK_SIZE); + NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + } while (processed < nbytes); memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE); + + return rc; +} + +static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc, + int enc) +{ + int rc; + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + + /* For scenarios where the input message is zero length, AES CTR mode + * may be used. Set the source data to be a single block (16B) of all + * zeros, and set the input IV value to be the same as the GMAC IV + * value. 
- nx_wb 4.8.1.3 */ + char src[AES_BLOCK_SIZE] = {}; + struct scatterlist sg; + + desc->tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0); + if (IS_ERR(desc->tfm)) { + rc = -ENOMEM; + goto out; + } + + crypto_blkcipher_setkey(desc->tfm, csbcpb->cpb.aes_gcm.key, + NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 : + NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32); + + sg_init_one(&sg, src, AES_BLOCK_SIZE); + if (enc) + rc = crypto_blkcipher_encrypt_iv(desc, req->dst, &sg, + AES_BLOCK_SIZE); + else + rc = crypto_blkcipher_decrypt_iv(desc, req->dst, &sg, + AES_BLOCK_SIZE); + crypto_free_blkcipher(desc->tfm); + out: return rc; } @@ -166,79 +230,85 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct blkcipher_desc desc; unsigned int nbytes = req->cryptlen; + unsigned int processed = 0, to_process; unsigned long irq_flags; + u32 max_sg_len; int rc = -EINVAL; spin_lock_irqsave(&nx_ctx->lock, irq_flags); - if (nbytes > nx_ctx->ap->databytelen) - goto out; - desc.info = nx_ctx->priv.gcm.iv; /* initialize the counter */ *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; - /* For scenarios where the input message is zero length, AES CTR mode - * may be used. Set the source data to be a single block (16B) of all - * zeros, and set the input IV value to be the same as the GMAC IV - * value. - nx_wb 4.8.1.3 */ if (nbytes == 0) { - char src[AES_BLOCK_SIZE] = {}; - struct scatterlist sg; - - desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0); - if (IS_ERR(desc.tfm)) { - rc = -ENOMEM; - goto out; - } - - crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key, - NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 : - NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32); - - sg_init_one(&sg, src, AES_BLOCK_SIZE); - if (enc) - crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg, - AES_BLOCK_SIZE); - else - crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg, - AES_BLOCK_SIZE); - crypto_free_blkcipher(desc.tfm); - - rc = 0; + rc = gcm_empty(req, &desc, enc); goto out; } - desc.tfm = (struct crypto_blkcipher *)req->base.tfm; - + /* Process associated data */ csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8; - if (req->assoclen) { rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad); if (rc) goto out; } - if (enc) + /* Set flags for encryption */ + NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; + if (enc) { NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; - else + } else { + NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req)); + } - csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; + /* page_limit: number of sg entries that fit on one page */ + max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); + + do { + /* + * to_process: the data chunk to process in this update. + * This value is bound by sg list limits. 
+ */ + to_process = min_t(u64, nbytes - processed, + nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + + if ((to_process + processed) < nbytes) + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + else + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; - rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes, 0, - csbcpb->cpb.aes_gcm.iv_or_cnt); - if (rc) - goto out; + csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; + desc.tfm = (struct crypto_blkcipher *) req->base.tfm; + rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, + req->src, to_process, processed, + csbcpb->cpb.aes_gcm.iv_or_cnt); + if (rc) + goto out; - rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); - if (rc) - goto out; + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad, + csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_gcm.in_s0, + csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE); + + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); - atomic_inc(&(nx_ctx->stats->aes_ops)); - atomic64_add(csbcpb->csb.processed_byte_count, - &(nx_ctx->stats->aes_bytes)); + processed += to_process; + } while (processed < nbytes); if (enc) { /* copy out the auth tag */ -- cgit v1.2.3 From 9d6f1a82d3a81d603526980ef705b9ab39f997f3 Mon Sep 17 00:00:00 2001 From: Fionnuala Gunter Date: Thu, 29 Aug 2013 11:36:36 -0300 Subject: crypto: nx - fix limits to sg lists for AES-XCBC This patch updates the NX driver to perform several hyper calls when necessary so that the length limits of scatter/gather lists are respected. 
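XCBC adds one wrinkle to the same chunking scheme: the hardware rejects zero-byte operations and the MAC must be finalized on real data, so an update that lands exactly on a block boundary holds the last block back for the final() step. The boundary handling in the loop below comes down to this (limit bounds omitted for brevity):

    to_process = total & ~(AES_BLOCK_SIZE - 1);
    leftover = total - to_process;

    if (!leftover) {                        /* block-aligned update */
            to_process -= AES_BLOCK_SIZE;   /* keep one block back ... */
            leftover = AES_BLOCK_SIZE;      /* ... to finalize with later */
    }
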
Reviewed-by: Joy Latten Reviewed-by: Marcelo Cerri Signed-off-by: Fionnuala Gunter Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-xcbc.c | 113 +++++++++++++++++++++++----------------- 1 file changed, 66 insertions(+), 47 deletions(-) diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c index 658da0fd3e1f..1a5d9e372b4e 100644 --- a/drivers/crypto/nx/nx-aes-xcbc.c +++ b/drivers/crypto/nx/nx-aes-xcbc.c @@ -88,78 +88,97 @@ static int nx_xcbc_update(struct shash_desc *desc, struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg; - u32 to_process, leftover; + u32 to_process, leftover, total; + u32 max_sg_len; unsigned long irq_flags; int rc = 0; spin_lock_irqsave(&nx_ctx->lock, irq_flags); - if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { - /* we've hit the nx chip previously and we're updating again, - * so copy over the partial digest */ - memcpy(csbcpb->cpb.aes_xcbc.cv, - csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); - } + + total = sctx->count + len; /* 2 cases for total data len: * 1: <= AES_BLOCK_SIZE: copy into state, return 0 * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover */ - if (len + sctx->count <= AES_BLOCK_SIZE) { + if (total <= AES_BLOCK_SIZE) { memcpy(sctx->buffer + sctx->count, data, len); sctx->count += len; goto out; } - /* to_process: the AES_BLOCK_SIZE data chunk to process in this - * update */ - to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1); - leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1); - - /* the hardware will not accept a 0 byte operation for this algorithm - * and the operation MUST be finalized to be correct. So if we happen - * to get an update that falls on a block sized boundary, we must - * save off the last block to finalize with later. */ - if (!leftover) { - to_process -= AES_BLOCK_SIZE; - leftover = AES_BLOCK_SIZE; - } - - if (sctx->count) { - in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer, - sctx->count, nx_ctx->ap->sglen); - in_sg = nx_build_sg_list(in_sg, (u8 *)data, - to_process - sctx->count, - nx_ctx->ap->sglen); + in_sg = nx_ctx->in_sg; + max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); + + do { + + /* to_process: the AES_BLOCK_SIZE data chunk to process in this + * update */ + to_process = min_t(u64, total, nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + to_process = to_process & ~(AES_BLOCK_SIZE - 1); + leftover = total - to_process; + + /* the hardware will not accept a 0 byte operation for this + * algorithm and the operation MUST be finalized to be correct. + * So if we happen to get an update that falls on a block sized + * boundary, we must save off the last block to finalize with + * later. 
*/ + if (!leftover) { + to_process -= AES_BLOCK_SIZE; + leftover = AES_BLOCK_SIZE; + } + + if (sctx->count) { + in_sg = nx_build_sg_list(nx_ctx->in_sg, + (u8 *) sctx->buffer, + sctx->count, + max_sg_len); + } + in_sg = nx_build_sg_list(in_sg, + (u8 *) data, + to_process - sctx->count, + max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); - } else { - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process, - nx_ctx->ap->sglen); - nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * - sizeof(struct nx_sg); - } - NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + /* we've hit the nx chip previously and we're updating again, + * so copy over the partial digest */ + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { + memcpy(csbcpb->cpb.aes_xcbc.cv, + csbcpb->cpb.aes_xcbc.out_cv_mac, + AES_BLOCK_SIZE); + } + + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; - if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { - rc = -EINVAL; - goto out; - } + atomic_inc(&(nx_ctx->stats->aes_ops)); - rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, - desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); - if (rc) - goto out; + /* everything after the first update is continuation */ + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; - atomic_inc(&(nx_ctx->stats->aes_ops)); + total -= to_process; + data += to_process - sctx->count; + sctx->count = 0; + in_sg = nx_ctx->in_sg; + } while (leftover > AES_BLOCK_SIZE); /* copy the leftover back into the state struct */ - memcpy(sctx->buffer, data + len - leftover, leftover); + memcpy(sctx->buffer, data, leftover); sctx->count = leftover; - /* everything after the first update is continuation */ - NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; -- cgit v1.2.3 From 2b188b3b86005ca63eb851a1992f06b9a301f800 Mon Sep 17 00:00:00 2001 From: Fionnuala Gunter Date: Thu, 29 Aug 2013 11:36:37 -0300 Subject: crypto: nx - fix limits to sg lists for AES-CCM This patch updates the NX driver to perform several hyper calls when necessary so that the length limits of scatter/gather lists are respected. Reviewed-by: Marcelo Cerri Signed-off-by: Joy Latten Signed-off-by: Fionnuala Gunter Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-ccm.c | 283 ++++++++++++++++++++++++++++++----------- 1 file changed, 208 insertions(+), 75 deletions(-) diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index 666a35b1181a..5ecd4c2414aa 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -179,13 +179,26 @@ static int generate_pat(u8 *iv, struct nx_sg *nx_insg = nx_ctx->in_sg; struct nx_sg *nx_outsg = nx_ctx->out_sg; unsigned int iauth_len = 0; - struct vio_pfo_op *op = NULL; u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL; int rc; /* zero the ctr value */ memset(iv + 15 - iv[0], 0, iv[0] + 1); + /* page 78 of nx_wb.pdf has, + * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes + * in length. If a full message is used, the AES CCA implementation + * restricts the maximum AAD length to 2^32 -1 bytes. + * If partial messages are used, the implementation supports + * 2^64 -1 bytes maximum AAD length. + * + * However, in the cryptoapi's aead_request structure, + * assoclen is an unsigned int, thus it cannot hold a length + * value greater than 2^32 - 1. 
+ * Thus the AAD is further constrained by this and is never + * greater than 2^32. + */ + if (!req->assoclen) { b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; } else if (req->assoclen <= 14) { @@ -195,7 +208,46 @@ static int generate_pat(u8 *iv, b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; b1 = nx_ctx->priv.ccm.iauth_tag; iauth_len = req->assoclen; + } else if (req->assoclen <= 65280) { + /* if associated data is less than (2^16 - 2^8), we construct + * B1 differently and feed in the associated data to a CCA + * operation */ + b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0; + b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; + iauth_len = 14; + } else { + b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0; + b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; + iauth_len = 10; + } + + /* generate B0 */ + rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0); + if (rc) + return rc; + + /* generate B1: + * add control info for associated data + * RFC 3610 and NIST Special Publication 800-38C + */ + if (b1) { + memset(b1, 0, 16); + if (req->assoclen <= 65280) { + *(u16 *)b1 = (u16)req->assoclen; + scatterwalk_map_and_copy(b1 + 2, req->assoc, 0, + iauth_len, SCATTERWALK_FROM_SG); + } else { + *(u16 *)b1 = (u16)(0xfffe); + *(u32 *)&b1[2] = (u32)req->assoclen; + scatterwalk_map_and_copy(b1 + 6, req->assoc, 0, + iauth_len, SCATTERWALK_FROM_SG); + } + } + /* now copy any remaining AAD to scatterlist and call nx... */ + if (!req->assoclen) { + return rc; + } else if (req->assoclen <= 14) { nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen); nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16, nx_ctx->ap->sglen); @@ -210,56 +262,74 @@ static int generate_pat(u8 *iv, NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT; NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE; - op = &nx_ctx->op; result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac; - } else if (req->assoclen <= 65280) { - /* if associated data is less than (2^16 - 2^8), we construct - * B1 differently and feed in the associated data to a CCA - * operation */ - b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0; - b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; - iauth_len = 14; - /* remaining assoc data must have scatterlist built for it */ - nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, - req->assoc, iauth_len, - req->assoclen - iauth_len); - nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) * - sizeof(struct nx_sg); + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + return rc; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); - op = &nx_ctx->op_aead; - result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; } else { - /* if associated data is less than (2^32), we construct B1 - * differently yet again and feed in the associated data to a - * CCA operation */ - pr_err("associated data len is %u bytes (returning -EINVAL)\n", - req->assoclen); - rc = -EINVAL; - } + u32 max_sg_len; + unsigned int processed = 0, to_process; + + /* page_limit: number of sg entries that fit on one page */ + max_sg_len = min_t(u32, + nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); + + processed += iauth_len; + + do { + to_process = min_t(u32, req->assoclen - processed, + nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + + if ((to_process + processed) < req->assoclen) { + NX_CPB_FDM(nx_ctx->csbcpb_aead) |= + NX_FDM_INTERMEDIATE; + } else { + NX_CPB_FDM(nx_ctx->csbcpb_aead) &= + ~NX_FDM_INTERMEDIATE; + } + + nx_insg = 
nx_walk_and_build(nx_ctx->in_sg, + nx_ctx->ap->sglen, + req->assoc, processed, + to_process); + + nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) * + sizeof(struct nx_sg); - rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0); - if (rc) - goto done; + result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; - if (b1) { - memset(b1, 0, 16); - *(u16 *)b1 = (u16)req->assoclen; + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + return rc; - scatterwalk_map_and_copy(b1 + 2, req->assoc, 0, - iauth_len, SCATTERWALK_FROM_SG); + memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0, + nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0, + AES_BLOCK_SIZE); - rc = nx_hcall_sync(nx_ctx, op, - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); - if (rc) - goto done; + NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION; - atomic_inc(&(nx_ctx->stats->aes_ops)); - atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(req->assoclen, + &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + } while (processed < req->assoclen); - memcpy(out, result, AES_BLOCK_SIZE); + result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; } -done: + + memcpy(out, result, AES_BLOCK_SIZE); + return rc; } @@ -272,15 +342,12 @@ static int ccm_nx_decrypt(struct aead_request *req, unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); struct nx_ccm_priv *priv = &nx_ctx->priv.ccm; unsigned long irq_flags; + unsigned int processed = 0, to_process; + u32 max_sg_len; int rc = -1; spin_lock_irqsave(&nx_ctx->lock, irq_flags); - if (nbytes > nx_ctx->ap->databytelen) { - rc = -EINVAL; - goto out; - } - nbytes -= authsize; /* copy out the auth tag to compare with later */ @@ -293,22 +360,56 @@ static int ccm_nx_decrypt(struct aead_request *req, if (rc) goto out; - rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, 0, - csbcpb->cpb.aes_ccm.iv_or_ctr); - if (rc) - goto out; + /* page_limit: number of sg entries that fit on one page */ + max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); + + do { - NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; - NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_INTERMEDIATE; + /* to_process: the AES_BLOCK_SIZE data chunk to process in this + * update. This value is bound by sg list limits. + */ + to_process = min_t(u64, nbytes - processed, + nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); - rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + if ((to_process + processed) < nbytes) + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + else + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + + NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; + + rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, + to_process, processed, + csbcpb->cpb.aes_ccm.iv_or_ctr); + if (rc) + goto out; + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); - if (rc) - goto out; + if (rc) + goto out; + + /* for partial completion, copy following for next + * entry into loop... 
+ */ + memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0, + csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_ccm.in_s0, + csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE); + + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + + /* update stats */ + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); - atomic_inc(&(nx_ctx->stats->aes_ops)); - atomic64_add(csbcpb->csb.processed_byte_count, - &(nx_ctx->stats->aes_bytes)); + processed += to_process; + } while (processed < nbytes); rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, authsize) ? -EBADMSG : 0; @@ -325,41 +426,73 @@ static int ccm_nx_encrypt(struct aead_request *req, unsigned int nbytes = req->cryptlen; unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); unsigned long irq_flags; + unsigned int processed = 0, to_process; + u32 max_sg_len; int rc = -1; spin_lock_irqsave(&nx_ctx->lock, irq_flags); - if (nbytes > nx_ctx->ap->databytelen) { - rc = -EINVAL; - goto out; - } - rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, csbcpb->cpb.aes_ccm.in_pat_or_b0); if (rc) goto out; - rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, 0, - csbcpb->cpb.aes_ccm.iv_or_ctr); - if (rc) - goto out; + /* page_limit: number of sg entries that fit on one page */ + max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); + + do { + /* to process: the AES_BLOCK_SIZE data chunk to process in this + * update. This value is bound by sg list limits. + */ + to_process = min_t(u64, nbytes - processed, + nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + + if ((to_process + processed) < nbytes) + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + else + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + + NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; + + rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, + to_process, processed, + csbcpb->cpb.aes_ccm.iv_or_ctr); + if (rc) + goto out; - NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; - NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; - rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); - if (rc) - goto out; + /* for partial completion, copy following for next + * entry into loop... 
+ */ + memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0, + csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_ccm.in_s0, + csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE); - atomic_inc(&(nx_ctx->stats->aes_ops)); - atomic64_add(csbcpb->csb.processed_byte_count, - &(nx_ctx->stats->aes_bytes)); + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + + /* update stats */ + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + + } while (processed < nbytes); /* copy out the auth tag */ scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac, req->dst, nbytes, authsize, SCATTERWALK_TO_SG); + out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; -- cgit v1.2.3 From 41e3173daf4e2d2f2dcc48ae7ffc8d0c4f3ecec9 Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Thu, 29 Aug 2013 11:36:38 -0300 Subject: crypto: nx - fix XCBC for zero length messages The NX XCBC implementation doesn't support zero length messages and because of that NX is currently returning a hard-coded hash for zero length messages. However this approach is incorrect since the hash value also depends on which key is used. This patch removes the hard-coded hash and replace it with an implementation based on the RFC 3566 using ECB. Reviewed-by: Joy Latten Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-xcbc.c | 84 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 77 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c index 1a5d9e372b4e..03c4bf57d066 100644 --- a/drivers/crypto/nx/nx-aes-xcbc.c +++ b/drivers/crypto/nx/nx-aes-xcbc.c @@ -56,6 +56,77 @@ static int nx_xcbc_set_key(struct crypto_shash *desc, return 0; } +/* + * Based on RFC 3566, for a zero-length message: + * + * n = 1 + * K1 = E(K, 0x01010101010101010101010101010101) + * K3 = E(K, 0x03030303030303030303030303030303) + * E[0] = 0x00000000000000000000000000000000 + * M[1] = 0x80000000000000000000000000000000 (0 length message with padding) + * E[1] = (K1, M[1] ^ E[0] ^ K3) + * Tag = M[1] + */ +static int nx_xcbc_empty(struct shash_desc *desc, u8 *out) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct nx_sg *in_sg, *out_sg; + u8 keys[2][AES_BLOCK_SIZE]; + u8 key[32]; + int rc = 0; + + /* Change to ECB mode */ + csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; + memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE); + NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; + + /* K1 and K3 base patterns */ + memset(keys[0], 0x01, sizeof(keys[0])); + memset(keys[1], 0x03, sizeof(keys[1])); + + /* Generate K1 and K3 encrypting the patterns */ + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, sizeof(keys), + nx_ctx->ap->sglen); + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, sizeof(keys), + nx_ctx->ap->sglen); + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + atomic_inc(&(nx_ctx->stats->aes_ops)); + + /* XOr K3 with the padding for a 0 length message */ + keys[1][0] ^= 0x80; + + /* Encrypt the final result */ + memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE); + in_sg = 
nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], sizeof(keys[1]), + nx_ctx->ap->sglen); + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE, + nx_ctx->ap->sglen); + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + atomic_inc(&(nx_ctx->stats->aes_ops)); + +out: + /* Restore XCBC mode */ + csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC; + memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE); + NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; + + return rc; +} + static int nx_xcbc_init(struct shash_desc *desc) { struct xcbc_state *sctx = shash_desc_ctx(desc); @@ -201,13 +272,12 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out) memcpy(csbcpb->cpb.aes_xcbc.cv, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); } else if (sctx->count == 0) { - /* we've never seen an update, so this is a 0 byte op. The - * hardware cannot handle a 0 byte op, so just copy out the - * known 0 byte result. This is cheaper than allocating a - * software context to do a 0 byte op */ - u8 data[] = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c, - 0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 }; - memcpy(out, data, sizeof(data)); + /* + * we've never seen an update, so this is a 0 byte op. The + * hardware cannot handle a 0 byte op, so just ECB to + * generate the hash. + */ + rc = nx_xcbc_empty(desc, out); goto out; } -- cgit v1.2.3 From dec0ed6c1b2c8c2aa37c04feccaf4784764c95f1 Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Thu, 29 Aug 2013 11:36:39 -0300 Subject: crypto: nx - fix GCM for zero length messages The NX CGM implementation doesn't support zero length messages and the current implementation has two flaws: - When the input data length is zero, it ignores the associated data. - Even when both lengths are zero, it uses the Crypto API to encrypt a zeroed block using ctr(aes) and because of this it allocates a new transformation and sets the key for this new tfm. Both operations are intended to be used only in user context, while the cryptographic operations can be called in both user and softirq contexts. This patch replaces the nested Crypto API use and adds two special cases: - When input data and associated data lengths are zero: it uses NX ECB mode to emulate the encryption of a zeroed block using ctr(aes). - When input data is zero and associated data is available: it uses NX GMAC mode to calculate the associated data MAC. 
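The resulting control flow in gcm_aes_nx_crypt() is a three-way split, restated here for clarity (this mirrors the hunks below):

    if (nbytes == 0) {
            if (req->assoclen == 0)
                    rc = gcm_empty(req, &desc, enc);  /* ECB-emulated CTR */
            else
                    rc = gmac(req, &desc);            /* GMAC over the AAD */
            if (rc)
                    goto out;
            goto mac;       /* only the tag is left to handle */
    }
    /* otherwise: the regular chunked GCM path */
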
Reviewed-by: Joy Latten Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-gcm.c | 132 ++++++++++++++++++++++++++++++++++------- 1 file changed, 112 insertions(+), 20 deletions(-) diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 9e89bdf34487..025d9a8d5b19 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -187,40 +187,125 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, return rc; } +static int gmac(struct aead_request *req, struct blkcipher_desc *desc) +{ + int rc; + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct nx_sg *nx_sg; + unsigned int nbytes = req->assoclen; + unsigned int processed = 0, to_process; + u32 max_sg_len; + + /* Set GMAC mode */ + csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC; + + NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; + + /* page_limit: number of sg entries that fit on one page */ + max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), + nx_ctx->ap->sglen); + + /* Copy IV */ + memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE); + + do { + /* + * to_process: the data chunk to process in this update. + * This value is bound by sg list limits. + */ + to_process = min_t(u64, nbytes - processed, + nx_ctx->ap->databytelen); + to_process = min_t(u64, to_process, + NX_PAGE_SIZE * (max_sg_len - 1)); + + if ((to_process + processed) < nbytes) + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + else + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + + nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen, + req->assoc, processed, to_process); + nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg) + * sizeof(struct nx_sg); + + csbcpb->cpb.aes_gcm.bit_length_data = 0; + csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes; + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad, + csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE); + memcpy(csbcpb->cpb.aes_gcm.in_s0, + csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE); + + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); + + processed += to_process; + } while (processed < nbytes); + +out: + /* Restore GCM mode */ + csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; + return rc; +} + static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc, int enc) { int rc; struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + char out[AES_BLOCK_SIZE]; + struct nx_sg *in_sg, *out_sg; /* For scenarios where the input message is zero length, AES CTR mode * may be used. Set the source data to be a single block (16B) of all * zeros, and set the input IV value to be the same as the GMAC IV * value. - nx_wb 4.8.1.3 */ - char src[AES_BLOCK_SIZE] = {}; - struct scatterlist sg; - - desc->tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0); - if (IS_ERR(desc->tfm)) { - rc = -ENOMEM; - goto out; - } - - crypto_blkcipher_setkey(desc->tfm, csbcpb->cpb.aes_gcm.key, - NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 : - NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 
24 : 32); - sg_init_one(&sg, src, AES_BLOCK_SIZE); + /* Change to ECB mode */ + csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; + memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key, + sizeof(csbcpb->cpb.aes_ecb.key)); if (enc) - rc = crypto_blkcipher_encrypt_iv(desc, req->dst, &sg, - AES_BLOCK_SIZE); + NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; else - rc = crypto_blkcipher_decrypt_iv(desc, req->dst, &sg, - AES_BLOCK_SIZE); - crypto_free_blkcipher(desc->tfm); + NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; + + /* Encrypt the counter/IV */ + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info, + AES_BLOCK_SIZE, nx_ctx->ap->sglen); + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out), + nx_ctx->ap->sglen); + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + atomic_inc(&(nx_ctx->stats->aes_ops)); + + /* Copy out the auth tag */ + memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out, + crypto_aead_authsize(crypto_aead_reqtfm(req))); out: + /* Restore XCBC mode */ + csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; + + /* + * ECB key uses the same region that GCM AAD and counter, so it's safe + * to just fill it with zeroes. + */ + memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key)); + return rc; } @@ -242,8 +327,14 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; if (nbytes == 0) { - rc = gcm_empty(req, &desc, enc); - goto out; + if (req->assoclen == 0) + rc = gcm_empty(req, &desc, enc); + else + rc = gmac(req, &desc); + if (rc) + goto out; + else + goto mac; } /* Process associated data */ @@ -310,6 +401,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) processed += to_process; } while (processed < nbytes); +mac: if (enc) { /* copy out the auth tag */ scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac, -- cgit v1.2.3 From 069fa0453f9dc86fd97dd5b3f5fda4724ed5ff69 Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Thu, 29 Aug 2013 11:36:40 -0300 Subject: crypto: nx - fix SHA-2 for chunks bigger than block size Each call to the co-processor, with exception of the last call, needs to send data that is multiple of block size. As consequence, any remaining data is kept in the internal NX context. This patch fixes a bug in the driver that causes it to save incorrect data into the context when data is bigger than the block size. 
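The off-by-count is easiest to see with numbers. Suppose SHA-256 (64-byte blocks) has 10 bytes buffered in the context and update() is called with 200 more bytes:

    total      = 10 + 200;                  /* 210 */
    to_process = total & ~(64 - 1);         /* 192 */

    /* the hyper call consumed 10 buffered bytes + 182 bytes of data,
     * so the input pointer must advance by 182, not 192: */
    data += to_process - sctx->count;
    sctx->count = 0;

Advancing by the full to_process, as the old code did, skipped sctx->count bytes of input whenever the context held buffered data.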
Reviewed-by: Joy Latten Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-sha256.c | 2 +- drivers/crypto/nx/nx-sha512.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 6547a7104bf6..da0b24a7633f 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -129,7 +129,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; total -= to_process; - data += to_process; + data += to_process - sctx->count; sctx->count = 0; in_sg = nx_ctx->in_sg; } while (leftover >= SHA256_BLOCK_SIZE); diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index 236e6afeab10..4ae5b0f221d5 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -131,7 +131,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; total -= to_process; - data += to_process; + data += to_process - sctx->count[0]; sctx->count[0] = 0; in_sg = nx_ctx->in_sg; } while (leftover >= SHA512_BLOCK_SIZE); -- cgit v1.2.3 From 06e710bd5faa886e9b5d032d375761de28fcef54 Mon Sep 17 00:00:00 2001 From: Jan-Simon Möller Date: Thu, 29 Aug 2013 20:09:24 +0200 Subject: crypto: fcrypt - Fix bitoperation for compilation with clang MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: Fix bug in statement as pointed out by Herbert Xu. Kudos to pipacs. Author: PaX Team ML-Post: http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20120507/142707.html URL: http://llvm.linuxfoundation.org Merge: Jan-Simon Möller Description: Fix for warning: linux/crypto/fcrypt.c:143:47: warning: signed shift result (0x598000000) requires 36 bits to represent, but 'int' only has 32 bits [-Wshift-overflow] Z(0xef), Z(0x70), Z(0xcf), Z(0xc2), Z(0x2a), Z(0xb3), Z(0x61), Z(0xad), ^~~~~~~ linux/crypto/fcrypt.c:113:29: note: expanded from macro 'Z' ^ ~~ linux/include/uapi/linux/byteorder/little_endian.h:38:53: note: expanded from macro '__cpu_to_be32' ^ linux/include/uapi/linux/swab.h:116:21: note: expanded from macro '__swab32' ___constant_swab32(x) : \ ^ linux/include/uapi/linux/swab.h:18:12: note: expanded from macro '___constant_swab32' (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ ^ Solution - make sure we don't exceed the 32 bit range. 
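The arithmetic behind the fix (the corrected macro is quoted just below): for any byte x, truncating x << 27 to 32 bits keeps only the low 5 bits of x, so masking with 0x1f before shifting yields the identical table value while the shifted operand now fits in 32 bits instead of needing up to 36. A standalone model in plain C, with the byte-order wrapper omitted:

    #include <stdint.h>

    /* old form: x << 27 needs up to 36 bits for x <= 0xff,
     * triggering clang's -Wshift-overflow in 32-bit int math */
    static uint32_t z_rot(uint32_t x)
    {
            /* new form: same low-32-bit result, no overflow */
            return ((x & 0x1f) << 27) | (x >> 5);
    }
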
#define Z(x) cpu_to_be32(((x & 0x1f) << 27) | (x >> 5)) Signed-off-by: Jan-Simon Möller CC: pageexec@freemail.hu CC: llvmlinux@lists.linuxfoundation.org CC: behanw@converseincode.com CC: herbert@gondor.apana.org.au CC: davem@davemloft.net CC: linux-crypto@vger.kernel.org CC: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/fcrypt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c index 3b2cf569c684..021d7fec6bc8 100644 --- a/crypto/fcrypt.c +++ b/crypto/fcrypt.c @@ -110,7 +110,7 @@ static const __be32 sbox0[256] = { }; #undef Z -#define Z(x) cpu_to_be32((x << 27) | (x >> 5)) +#define Z(x) cpu_to_be32(((x & 0x1f) << 27) | (x >> 5)) static const __be32 sbox1[256] = { Z(0x77), Z(0x14), Z(0xa6), Z(0xfe), Z(0xb2), Z(0x5e), Z(0x8c), Z(0x3e), Z(0x67), Z(0x6c), Z(0xa1), Z(0x0d), Z(0xc2), Z(0xa2), Z(0xc1), Z(0x85), -- cgit v1.2.3 From ff6f83fc9d44db09997937c3475d525a6866fbb4 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sun, 1 Sep 2013 23:53:57 +0100 Subject: hwrng: via - Add MODULE_DEVICE_TABLE via-rng currently isn't auto-loaded if built as a module. Signed-off-by: Ben Hutchings Signed-off-by: Herbert Xu --- drivers/char/hw_random/via-rng.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index d0387a84eec1..e737772ad69a 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -220,5 +221,11 @@ static void __exit mod_exit(void) module_init(mod_init); module_exit(mod_exit); +static struct x86_cpu_id via_rng_cpu_id[] = { + X86_FEATURE_MATCH(X86_FEATURE_XSTORE), + {} +}; + MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock"); MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id); -- cgit v1.2.3 From 68411521cc6055edc6274e03ab3210a5893533ba Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 7 Sep 2013 12:56:26 +1000 Subject: Reinstate "crypto: crct10dif - Wrap crc_t10dif function all to use crypto transform framework" This patch reinstates commits 67822649d7305caf3dd50ed46c27b99c94eff996 39761214eefc6b070f29402aa1165f24d789b3f7 0b95a7f85718adcbba36407ef88bba0a7379ed03 31d939625a9a20b1badd2d4e6bf6fd39fa523405 2d31e518a42828df7877bca23a958627d60408bc Now that module softdeps are in the kernel we can use that to resolve the boot issue which cause the revert. 
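The mechanism that makes the reinstatement safe is the module soft dependency: a module can ask for another module to be loaded before (or after) itself on a best-effort basis, so an accelerated CRC provider can be pulled in early without a hard link-time dependency. For illustration only — the module name here is a stand-in, not necessarily the exact annotation this series relies on:

    /* request that the PCLMUL provider be loaded first, if available */
    MODULE_SOFTDEP("pre: crct10dif-pclmul");
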
Signed-off-by: Herbert Xu --- arch/x86/crypto/Makefile | 2 + arch/x86/crypto/crct10dif-pcl-asm_64.S | 643 ++++++++++++++++++++++++++++++++ arch/x86/crypto/crct10dif-pclmul_glue.c | 151 ++++++++ crypto/Kconfig | 19 + crypto/Makefile | 1 + crypto/crct10dif.c | 178 +++++++++ crypto/tcrypt.c | 8 + crypto/testmgr.c | 10 + crypto/testmgr.h | 33 ++ include/linux/crc-t10dif.h | 4 + lib/Kconfig | 2 + lib/crc-t10dif.c | 74 ++-- 12 files changed, 1082 insertions(+), 43 deletions(-) create mode 100644 arch/x86/crypto/crct10dif-pcl-asm_64.S create mode 100644 arch/x86/crypto/crct10dif-pclmul_glue.c create mode 100644 crypto/crct10dif.c diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 6c63c358a7e6..7d6ba9db1be9 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o +obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o # These modules require assembler to support AVX. ifeq ($(avx_supported),yes) @@ -81,3 +82,4 @@ crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o +crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S new file mode 100644 index 000000000000..35e97569d05f --- /dev/null +++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S @@ -0,0 +1,643 @@ +######################################################################## +# Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions +# +# Copyright (c) 2013, Intel Corporation +# +# Authors: +# Erdinc Ozturk +# Vinodh Gopal +# James Guilford +# Tim Chen +# +# This software is available to you under a choice of one of two +# licenses. You may choose to be licensed under the terms of the GNU +# General Public License (GPL) Version 2, available from the file +# COPYING in the main directory of this source tree, or the +# OpenIB.org BSD license below: +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# * Neither the name of the Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# +# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL INTEL CORPORATION OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +######################################################################## +# Function API: +# UINT16 crc_t10dif_pcl( +# UINT16 init_crc, //initial CRC value, 16 bits +# const unsigned char *buf, //buffer pointer to calculate CRC on +# UINT64 len //buffer length in bytes (64-bit data) +# ); +# +# Reference paper titled "Fast CRC Computation for Generic +# Polynomials Using PCLMULQDQ Instruction" +# URL: http://www.intel.com/content/dam/www/public/us/en/documents +# /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf +# +# + +#include + +.text + +#define arg1 %rdi +#define arg2 %rsi +#define arg3 %rdx + +#define arg1_low32 %edi + +ENTRY(crc_t10dif_pcl) +.align 16 + + # adjust the 16-bit initial_crc value, scale it to 32 bits + shl $16, arg1_low32 + + # Allocate Stack Space + mov %rsp, %rcx + sub $16*2, %rsp + # align stack to 16 byte boundary + and $~(0x10 - 1), %rsp + + # check if smaller than 256 + cmp $256, arg3 + + # for sizes less than 128, we can't fold 64B at a time... + jl _less_than_128 + + + # load the initial crc value + movd arg1_low32, %xmm10 # initial crc + + # crc value does not need to be byte-reflected, but it needs + # to be moved to the high part of the register. + # because data will be byte-reflected and will align with + # initial crc at correct place. + pslldq $12, %xmm10 + + movdqa SHUF_MASK(%rip), %xmm11 + # receive the initial 64B data, xor the initial crc value + movdqu 16*0(arg2), %xmm0 + movdqu 16*1(arg2), %xmm1 + movdqu 16*2(arg2), %xmm2 + movdqu 16*3(arg2), %xmm3 + movdqu 16*4(arg2), %xmm4 + movdqu 16*5(arg2), %xmm5 + movdqu 16*6(arg2), %xmm6 + movdqu 16*7(arg2), %xmm7 + + pshufb %xmm11, %xmm0 + # XOR the initial_crc value + pxor %xmm10, %xmm0 + pshufb %xmm11, %xmm1 + pshufb %xmm11, %xmm2 + pshufb %xmm11, %xmm3 + pshufb %xmm11, %xmm4 + pshufb %xmm11, %xmm5 + pshufb %xmm11, %xmm6 + pshufb %xmm11, %xmm7 + + movdqa rk3(%rip), %xmm10 #xmm10 has rk3 and rk4 + #imm value of pclmulqdq instruction + #will determine which constant to use + + ################################################################# + # we subtract 256 instead of 128 to save one instruction from the loop + sub $256, arg3 + + # at this section of the code, there is 64*x+y (0<=y<64) bytes of + # buffer. The _fold_64_B_loop will fold 64B at a time + # until we have 64+y Bytes of buffer + + + # fold 64B at a time. 
This section of the code folds 4 xmm + # registers in parallel +_fold_64_B_loop: + + # update the buffer pointer + add $128, arg2 # buf += 64# + + movdqu 16*0(arg2), %xmm9 + movdqu 16*1(arg2), %xmm12 + pshufb %xmm11, %xmm9 + pshufb %xmm11, %xmm12 + movdqa %xmm0, %xmm8 + movdqa %xmm1, %xmm13 + pclmulqdq $0x0 , %xmm10, %xmm0 + pclmulqdq $0x11, %xmm10, %xmm8 + pclmulqdq $0x0 , %xmm10, %xmm1 + pclmulqdq $0x11, %xmm10, %xmm13 + pxor %xmm9 , %xmm0 + xorps %xmm8 , %xmm0 + pxor %xmm12, %xmm1 + xorps %xmm13, %xmm1 + + movdqu 16*2(arg2), %xmm9 + movdqu 16*3(arg2), %xmm12 + pshufb %xmm11, %xmm9 + pshufb %xmm11, %xmm12 + movdqa %xmm2, %xmm8 + movdqa %xmm3, %xmm13 + pclmulqdq $0x0, %xmm10, %xmm2 + pclmulqdq $0x11, %xmm10, %xmm8 + pclmulqdq $0x0, %xmm10, %xmm3 + pclmulqdq $0x11, %xmm10, %xmm13 + pxor %xmm9 , %xmm2 + xorps %xmm8 , %xmm2 + pxor %xmm12, %xmm3 + xorps %xmm13, %xmm3 + + movdqu 16*4(arg2), %xmm9 + movdqu 16*5(arg2), %xmm12 + pshufb %xmm11, %xmm9 + pshufb %xmm11, %xmm12 + movdqa %xmm4, %xmm8 + movdqa %xmm5, %xmm13 + pclmulqdq $0x0, %xmm10, %xmm4 + pclmulqdq $0x11, %xmm10, %xmm8 + pclmulqdq $0x0, %xmm10, %xmm5 + pclmulqdq $0x11, %xmm10, %xmm13 + pxor %xmm9 , %xmm4 + xorps %xmm8 , %xmm4 + pxor %xmm12, %xmm5 + xorps %xmm13, %xmm5 + + movdqu 16*6(arg2), %xmm9 + movdqu 16*7(arg2), %xmm12 + pshufb %xmm11, %xmm9 + pshufb %xmm11, %xmm12 + movdqa %xmm6 , %xmm8 + movdqa %xmm7 , %xmm13 + pclmulqdq $0x0 , %xmm10, %xmm6 + pclmulqdq $0x11, %xmm10, %xmm8 + pclmulqdq $0x0 , %xmm10, %xmm7 + pclmulqdq $0x11, %xmm10, %xmm13 + pxor %xmm9 , %xmm6 + xorps %xmm8 , %xmm6 + pxor %xmm12, %xmm7 + xorps %xmm13, %xmm7 + + sub $128, arg3 + + # check if there is another 64B in the buffer to be able to fold + jge _fold_64_B_loop + ################################################################## + + + add $128, arg2 + # at this point, the buffer pointer is pointing at the last y Bytes + # of the buffer the 64B of folded data is in 4 of the xmm + # registers: xmm0, xmm1, xmm2, xmm3 + + + # fold the 8 xmm registers to 1 xmm register with different constants + + movdqa rk9(%rip), %xmm10 + movdqa %xmm0, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm0 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + xorps %xmm0, %xmm7 + + movdqa rk11(%rip), %xmm10 + movdqa %xmm1, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm1 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + xorps %xmm1, %xmm7 + + movdqa rk13(%rip), %xmm10 + movdqa %xmm2, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm2 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + pxor %xmm2, %xmm7 + + movdqa rk15(%rip), %xmm10 + movdqa %xmm3, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm3 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + xorps %xmm3, %xmm7 + + movdqa rk17(%rip), %xmm10 + movdqa %xmm4, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm4 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + pxor %xmm4, %xmm7 + + movdqa rk19(%rip), %xmm10 + movdqa %xmm5, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm5 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + xorps %xmm5, %xmm7 + + movdqa rk1(%rip), %xmm10 #xmm10 has rk1 and rk2 + #imm value of pclmulqdq instruction + #will determine which constant to use + movdqa %xmm6, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm6 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + pxor %xmm6, %xmm7 + + + # instead of 64, we add 48 to the loop counter to save 1 instruction + # from the loop instead of a cmp instruction, we use the negative + # flag with the jl instruction + add $128-16, arg3 + jl _final_reduction_for_128 + + # now we have 16+y bytes left to reduce. 
16 Bytes is in register xmm7 + # and the rest is in memory. We can fold 16 bytes at a time if y>=16 + # continue folding 16B at a time + +_16B_reduction_loop: + movdqa %xmm7, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm7 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + movdqu (arg2), %xmm0 + pshufb %xmm11, %xmm0 + pxor %xmm0 , %xmm7 + add $16, arg2 + sub $16, arg3 + # instead of a cmp instruction, we utilize the flags with the + # jge instruction equivalent of: cmp arg3, 16-16 + # check if there is any more 16B in the buffer to be able to fold + jge _16B_reduction_loop + + #now we have 16+z bytes left to reduce, where 0<= z < 16. + #first, we reduce the data in the xmm7 register + + +_final_reduction_for_128: + # check if any more data to fold. If not, compute the CRC of + # the final 128 bits + add $16, arg3 + je _128_done + + # here we are getting data that is less than 16 bytes. + # since we know that there was data before the pointer, we can + # offset the input pointer before the actual point, to receive + # exactly 16 bytes. after that the registers need to be adjusted. +_get_last_two_xmms: + movdqa %xmm7, %xmm2 + + movdqu -16(arg2, arg3), %xmm1 + pshufb %xmm11, %xmm1 + + # get rid of the extra data that was loaded before + # load the shift constant + lea pshufb_shf_table+16(%rip), %rax + sub arg3, %rax + movdqu (%rax), %xmm0 + + # shift xmm2 to the left by arg3 bytes + pshufb %xmm0, %xmm2 + + # shift xmm7 to the right by 16-arg3 bytes + pxor mask1(%rip), %xmm0 + pshufb %xmm0, %xmm7 + pblendvb %xmm2, %xmm1 #xmm0 is implicit + + # fold 16 Bytes + movdqa %xmm1, %xmm2 + movdqa %xmm7, %xmm8 + pclmulqdq $0x11, %xmm10, %xmm7 + pclmulqdq $0x0 , %xmm10, %xmm8 + pxor %xmm8, %xmm7 + pxor %xmm2, %xmm7 + +_128_done: + # compute crc of a 128-bit value + movdqa rk5(%rip), %xmm10 # rk5 and rk6 in xmm10 + movdqa %xmm7, %xmm0 + + #64b fold + pclmulqdq $0x1, %xmm10, %xmm7 + pslldq $8 , %xmm0 + pxor %xmm0, %xmm7 + + #32b fold + movdqa %xmm7, %xmm0 + + pand mask2(%rip), %xmm0 + + psrldq $12, %xmm7 + pclmulqdq $0x10, %xmm10, %xmm7 + pxor %xmm0, %xmm7 + + #barrett reduction +_barrett: + movdqa rk7(%rip), %xmm10 # rk7 and rk8 in xmm10 + movdqa %xmm7, %xmm0 + pclmulqdq $0x01, %xmm10, %xmm7 + pslldq $4, %xmm7 + pclmulqdq $0x11, %xmm10, %xmm7 + + pslldq $4, %xmm7 + pxor %xmm0, %xmm7 + pextrd $1, %xmm7, %eax + +_cleanup: + # scale the result back to 16 bits + shr $16, %eax + mov %rcx, %rsp + ret + +######################################################################## + +.align 16 +_less_than_128: + + # check if there is enough buffer to be able to fold 16B at a time + cmp $32, arg3 + jl _less_than_32 + movdqa SHUF_MASK(%rip), %xmm11 + + # now if there is, load the constants + movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10 + + movd arg1_low32, %xmm0 # get the initial crc value + pslldq $12, %xmm0 # align it to its correct place + movdqu (arg2), %xmm7 # load the plaintext + pshufb %xmm11, %xmm7 # byte-reflect the plaintext + pxor %xmm0, %xmm7 + + + # update the buffer pointer + add $16, arg2 + + # update the counter. subtract 32 instead of 16 to save one + # instruction from the loop + sub $32, arg3 + + jmp _16B_reduction_loop + + +.align 16 +_less_than_32: + # mov initial crc to the return value. this is necessary for + # zero-length buffers. 
+ mov arg1_low32, %eax + test arg3, arg3 + je _cleanup + + movdqa SHUF_MASK(%rip), %xmm11 + + movd arg1_low32, %xmm0 # get the initial crc value + pslldq $12, %xmm0 # align it to its correct place + + cmp $16, arg3 + je _exact_16_left + jl _less_than_16_left + + movdqu (arg2), %xmm7 # load the plaintext + pshufb %xmm11, %xmm7 # byte-reflect the plaintext + pxor %xmm0 , %xmm7 # xor the initial crc value + add $16, arg2 + sub $16, arg3 + movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10 + jmp _get_last_two_xmms + + +.align 16 +_less_than_16_left: + # use stack space to load data less than 16 bytes, zero-out + # the 16B in memory first. + + pxor %xmm1, %xmm1 + mov %rsp, %r11 + movdqa %xmm1, (%r11) + + cmp $4, arg3 + jl _only_less_than_4 + + # backup the counter value + mov arg3, %r9 + cmp $8, arg3 + jl _less_than_8_left + + # load 8 Bytes + mov (arg2), %rax + mov %rax, (%r11) + add $8, %r11 + sub $8, arg3 + add $8, arg2 +_less_than_8_left: + + cmp $4, arg3 + jl _less_than_4_left + + # load 4 Bytes + mov (arg2), %eax + mov %eax, (%r11) + add $4, %r11 + sub $4, arg3 + add $4, arg2 +_less_than_4_left: + + cmp $2, arg3 + jl _less_than_2_left + + # load 2 Bytes + mov (arg2), %ax + mov %ax, (%r11) + add $2, %r11 + sub $2, arg3 + add $2, arg2 +_less_than_2_left: + cmp $1, arg3 + jl _zero_left + + # load 1 Byte + mov (arg2), %al + mov %al, (%r11) +_zero_left: + movdqa (%rsp), %xmm7 + pshufb %xmm11, %xmm7 + pxor %xmm0 , %xmm7 # xor the initial crc value + + # shl r9, 4 + lea pshufb_shf_table+16(%rip), %rax + sub %r9, %rax + movdqu (%rax), %xmm0 + pxor mask1(%rip), %xmm0 + + pshufb %xmm0, %xmm7 + jmp _128_done + +.align 16 +_exact_16_left: + movdqu (arg2), %xmm7 + pshufb %xmm11, %xmm7 + pxor %xmm0 , %xmm7 # xor the initial crc value + + jmp _128_done + +_only_less_than_4: + cmp $3, arg3 + jl _only_less_than_3 + + # load 3 Bytes + mov (arg2), %al + mov %al, (%r11) + + mov 1(arg2), %al + mov %al, 1(%r11) + + mov 2(arg2), %al + mov %al, 2(%r11) + + movdqa (%rsp), %xmm7 + pshufb %xmm11, %xmm7 + pxor %xmm0 , %xmm7 # xor the initial crc value + + psrldq $5, %xmm7 + + jmp _barrett +_only_less_than_3: + cmp $2, arg3 + jl _only_less_than_2 + + # load 2 Bytes + mov (arg2), %al + mov %al, (%r11) + + mov 1(arg2), %al + mov %al, 1(%r11) + + movdqa (%rsp), %xmm7 + pshufb %xmm11, %xmm7 + pxor %xmm0 , %xmm7 # xor the initial crc value + + psrldq $6, %xmm7 + + jmp _barrett +_only_less_than_2: + + # load 1 Byte + mov (arg2), %al + mov %al, (%r11) + + movdqa (%rsp), %xmm7 + pshufb %xmm11, %xmm7 + pxor %xmm0 , %xmm7 # xor the initial crc value + + psrldq $7, %xmm7 + + jmp _barrett + +ENDPROC(crc_t10dif_pcl) + +.data + +# precomputed constants +# these constants are precomputed from the poly: +# 0x8bb70000 (0x8bb7 scaled to 32 bits) +.align 16 +# Q = 0x18BB70000 +# rk1 = 2^(32*3) mod Q << 32 +# rk2 = 2^(32*5) mod Q << 32 +# rk3 = 2^(32*15) mod Q << 32 +# rk4 = 2^(32*17) mod Q << 32 +# rk5 = 2^(32*3) mod Q << 32 +# rk6 = 2^(32*2) mod Q << 32 +# rk7 = floor(2^64/Q) +# rk8 = Q +rk1: +.quad 0x2d56000000000000 +rk2: +.quad 0x06df000000000000 +rk3: +.quad 0x9d9d000000000000 +rk4: +.quad 0x7cf5000000000000 +rk5: +.quad 0x2d56000000000000 +rk6: +.quad 0x1368000000000000 +rk7: +.quad 0x00000001f65a57f8 +rk8: +.quad 0x000000018bb70000 + +rk9: +.quad 0xceae000000000000 +rk10: +.quad 0xbfd6000000000000 +rk11: +.quad 0x1e16000000000000 +rk12: +.quad 0x713c000000000000 +rk13: +.quad 0xf7f9000000000000 +rk14: +.quad 0x80a6000000000000 +rk15: +.quad 0x044c000000000000 +rk16: +.quad 0xe658000000000000 +rk17: +.quad 0xad18000000000000 +rk18: 
+.quad 0xa497000000000000
+rk19:
+.quad 0x6ee3000000000000
+rk20:
+.quad 0xe7b5000000000000
+
+
+
+mask1:
+.octa 0x80808080808080808080808080808080
+mask2:
+.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF
+
+SHUF_MASK:
+.octa 0x000102030405060708090A0B0C0D0E0F
+
+pshufb_shf_table:
+# use these values for shift constants for the pshufb instruction
+# different alignments result in values as shown:
+#	DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
+#	DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-2) / shr2
+#	DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-3) / shr3
+#	DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
+#	DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
+#	DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
+#	DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9  (16-7) / shr7
+#	DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8  (16-8) / shr8
+#	DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7  (16-9) / shr9
+#	DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6  (16-10) / shr10
+#	DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5  (16-11) / shr11
+#	DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4  (16-12) / shr12
+#	DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3  (16-13) / shr13
+#	DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2  (16-14) / shr14
+#	DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1  (16-15) / shr15
+.octa 0x8f8e8d8c8b8a89888786858483828100
+.octa 0x000e0d0c0b0a09080706050403020100
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
new file mode 100644
index 000000000000..7845d7fd54c0
--- /dev/null
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -0,0 +1,151 @@
+/*
+ * Cryptographic API.
+ *
+ * T10 Data Integrity Field CRC16 Crypto Transform using PCLMULQDQ Instructions
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Author: Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc-t10dif.h>
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <asm/i387.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
+
+asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
+				size_t len);
+
+struct chksum_desc_ctx {
+	__u16 crc;
+};
+
+/*
+ * Steps through the buffer one byte at a time, calculating the
+ * crc using a table.
+ */
+
+static int chksum_init(struct shash_desc *desc)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	ctx->crc = 0;
+
+	return 0;
+}
+
+static int chksum_update(struct shash_desc *desc, const u8 *data,
+			 unsigned int length)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	if (irq_fpu_usable()) {
+		kernel_fpu_begin();
+		ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
+		kernel_fpu_end();
+	} else
+		ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
+	return 0;
+}
+
+static int chksum_final(struct shash_desc *desc, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	*(__u16 *)out = ctx->crc;
+	return 0;
+}
+
+static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
+			  u8 *out)
+{
+	if (irq_fpu_usable()) {
+		kernel_fpu_begin();
+		*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
+		kernel_fpu_end();
+	} else
+		*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+	return 0;
+}
+
+static int chksum_finup(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	return __chksum_finup(&ctx->crc, data, len, out);
+}
+
+static int chksum_digest(struct shash_desc *desc, const u8 *data,
+			 unsigned int length, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	return __chksum_finup(&ctx->crc, data, length, out);
+}
+
+static struct shash_alg alg = {
+	.digestsize	= CRC_T10DIF_DIGEST_SIZE,
+	.init		= chksum_init,
+	.update		= chksum_update,
+	.final		= chksum_final,
+	.finup		= chksum_finup,
+	.digest		= chksum_digest,
+	.descsize	= sizeof(struct chksum_desc_ctx),
+	.base		= {
+		.cra_name		= "crct10dif",
+		.cra_driver_name	= "crct10dif-pclmul",
+		.cra_priority		= 200,
+		.cra_blocksize		= CRC_T10DIF_BLOCK_SIZE,
+		.cra_module		= THIS_MODULE,
+	}
+};
+
+static const struct x86_cpu_id crct10dif_cpu_id[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id);
+
+static int __init crct10dif_intel_mod_init(void)
+{
+	if (!x86_match_cpu(crct10dif_cpu_id))
+		return -ENODEV;
+
+	return crypto_register_shash(&alg);
+}
+
+static void __exit crct10dif_intel_mod_fini(void)
+{
+	crypto_unregister_shash(&alg);
+}
+
+module_init(crct10dif_intel_mod_init);
+module_exit(crct10dif_intel_mod_fini);
+
+MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
+MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("crct10dif");
+MODULE_ALIAS("crct10dif-pclmul");
diff --git a/crypto/Kconfig b/crypto/Kconfig
index aca01164f002..69ce573f1224 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -376,6 +376,25 @@ config CRYPTO_CRC32_PCLMUL
 	  which will enable any routine to use the CRC-32-IEEE 802.3 checksum
 	  and gain better performance as compared with the table implementation.
 
+config CRYPTO_CRCT10DIF
+	tristate "CRCT10DIF algorithm"
+	select CRYPTO_HASH
+	help
+	  CRC T10 Data Integrity Field computation is being cast as
+	  a crypto transform.  This allows for faster CRC T10 DIF
+	  transforms to be used if they are available.
+
+config CRYPTO_CRCT10DIF_PCLMUL
+	tristate "CRCT10DIF PCLMULQDQ hardware acceleration"
+	depends on X86 && 64BIT && CRC_T10DIF
+	select CRYPTO_HASH
+	help
+	  For x86_64 processors with SSE4.2 and PCLMULQDQ supported,
+	  CRC T10 DIF PCLMULQDQ computation can be hardware accelerated
+	  using the PCLMULQDQ instruction.  This option will create the
+	  'crct10dif-pclmul' module, which is faster when computing the
+	  crct10dif checksum than the generic table implementation.
+
 config CRYPTO_GHASH
 	tristate "GHASH digest algorithm"
 	select CRYPTO_GF128MUL
diff --git a/crypto/Makefile b/crypto/Makefile
index 2ba0df2f908f..2d5ed08a239f 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
 obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
+obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
diff --git a/crypto/crct10dif.c b/crypto/crct10dif.c
new file mode 100644
index 000000000000..92aca96d6b98
--- /dev/null
+++ b/crypto/crct10dif.c
@@ -0,0 +1,178 @@
+/*
+ * Cryptographic API.
+ *
+ * T10 Data Integrity Field CRC16 Crypto Transform
+ *
+ * Copyright (c) 2007 Oracle Corporation.  All rights reserved.
+ * Written by Martin K. Petersen <martin.petersen@oracle.com>
+ * Copyright (C) 2013 Intel Corporation
+ * Author: Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc-t10dif.h>
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+struct chksum_desc_ctx {
+	__u16 crc;
+};
+
+/* Table generated using the following polynomial:
+ * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
+ * gt: 0x8bb7
+ */
+static const __u16 t10_dif_crc_table[256] = {
+	0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
+	0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
+	0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
+	0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
+	0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
+	0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
+	0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
+	0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
+	0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
+	0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
+	0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
+	0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
+	0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
+	0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
+	0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
+	0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
+	0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
+	0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
+	0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
+	0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
+	0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
+	0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
+	0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A,
0x628D, + 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, + 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, + 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, + 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, + 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, + 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, + 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, + 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, + 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 +}; + +__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len) +{ + unsigned int i; + + for (i = 0 ; i < len ; i++) + crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; + + return crc; +} +EXPORT_SYMBOL(crc_t10dif_generic); + +/* + * Steps through buffer one byte at at time, calculates reflected + * crc using table. + */ + +static int chksum_init(struct shash_desc *desc) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = 0; + + return 0; +} + +static int chksum_update(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = crc_t10dif_generic(ctx->crc, data, length); + return 0; +} + +static int chksum_final(struct shash_desc *desc, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + *(__u16 *)out = ctx->crc; + return 0; +} + +static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, + u8 *out) +{ + *(__u16 *)out = crc_t10dif_generic(*crcp, data, len); + return 0; +} + +static int chksum_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksum_finup(&ctx->crc, data, len, out); +} + +static int chksum_digest(struct shash_desc *desc, const u8 *data, + unsigned int length, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksum_finup(&ctx->crc, data, length, out); +} + +static struct shash_alg alg = { + .digestsize = CRC_T10DIF_DIGEST_SIZE, + .init = chksum_init, + .update = chksum_update, + .final = chksum_final, + .finup = chksum_finup, + .digest = chksum_digest, + .descsize = sizeof(struct chksum_desc_ctx), + .base = { + .cra_name = "crct10dif", + .cra_driver_name = "crct10dif-generic", + .cra_priority = 100, + .cra_blocksize = CRC_T10DIF_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static int __init crct10dif_mod_init(void) +{ + int ret; + + ret = crypto_register_shash(&alg); + return ret; +} + +static void __exit crct10dif_mod_fini(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(crct10dif_mod_init); +module_exit(crct10dif_mod_fini); + +MODULE_AUTHOR("Tim Chen "); +MODULE_DESCRIPTION("T10 DIF CRC calculation."); +MODULE_LICENSE("GPL"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 66d254ce0d11..25a5934f0e50 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1174,6 +1174,10 @@ static int do_test(int m) ret += tcrypt_test("ghash"); break; + case 47: + ret += tcrypt_test("crct10dif"); + break; + case 100: ret += tcrypt_test("hmac(md5)"); break; @@ -1498,6 +1502,10 @@ static int do_test(int m) test_hash_speed("crc32c", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; + case 320: + test_hash_speed("crct10dif", sec, generic_hash_speed_template); + if (mode > 300 && mode < 400) break; + case 399: break; diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 
b7bc2e70895a..e091ef6e1791 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -2045,6 +2045,16 @@ static const struct alg_test_desc alg_test_descs[] = { .count = CRC32C_TEST_VECTORS } } + }, { + .alg = "crct10dif", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { + .hash = { + .vecs = crct10dif_tv_template, + .count = CRCT10DIF_TEST_VECTORS + } + } }, { .alg = "cryptd(__driver-cbc-aes-aesni)", .test = alg_test_null, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 1e701bc075b9..7d44aa3d6b44 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -450,6 +450,39 @@ static struct hash_testvec rmd320_tv_template[] = { } }; +#define CRCT10DIF_TEST_VECTORS 3 +static struct hash_testvec crct10dif_tv_template[] = { + { + .plaintext = "abc", + .psize = 3, +#ifdef __LITTLE_ENDIAN + .digest = "\x3b\x44", +#else + .digest = "\x44\x3b", +#endif + }, { + .plaintext = "1234567890123456789012345678901234567890" + "123456789012345678901234567890123456789", + .psize = 79, +#ifdef __LITTLE_ENDIAN + .digest = "\x70\x4b", +#else + .digest = "\x4b\x70", +#endif + }, { + .plaintext = + "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd", + .psize = 56, +#ifdef __LITTLE_ENDIAN + .digest = "\xe3\x9c", +#else + .digest = "\x9c\xe3", +#endif + .np = 2, + .tap = { 28, 28 } + } +}; + /* * SHA1 test vectors from from FIPS PUB 180-1 * Long vector from CAVS 5.0 diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h index a9c96d865ee7..b3cb71f0d3b0 100644 --- a/include/linux/crc-t10dif.h +++ b/include/linux/crc-t10dif.h @@ -3,6 +3,10 @@ #include +#define CRC_T10DIF_DIGEST_SIZE 2 +#define CRC_T10DIF_BLOCK_SIZE 1 + +__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len); __u16 crc_t10dif(unsigned char const *, size_t); #endif diff --git a/lib/Kconfig b/lib/Kconfig index 71d9f81f6eed..35da51359d40 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -66,6 +66,8 @@ config CRC16 config CRC_T10DIF tristate "CRC calculation for the T10 Data Integrity Field" + select CRYPTO + select CRYPTO_CRCT10DIF help This option is only needed if a module that's not in the kernel tree needs to calculate CRC checks for use with the diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c index fbbd66ed86cd..43bc5b071f96 100644 --- a/lib/crc-t10dif.c +++ b/lib/crc-t10dif.c @@ -11,57 +11,45 @@ #include #include #include +#include +#include +#include -/* Table generated using the following polynomium: - * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 - * gt: 0x8bb7 - */ -static const __u16 t10_dif_crc_table[256] = { - 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, - 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, - 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, - 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, - 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, - 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, - 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, - 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, - 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, - 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, - 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, - 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, - 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, - 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, - 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 
0x80BF, - 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, - 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, - 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, - 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, - 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, - 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, - 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, - 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, - 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, - 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, - 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, - 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, - 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, - 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, - 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, - 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, - 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 -}; +static struct crypto_shash *crct10dif_tfm; __u16 crc_t10dif(const unsigned char *buffer, size_t len) { - __u16 crc = 0; - unsigned int i; + struct { + struct shash_desc shash; + char ctx[2]; + } desc; + int err; + + desc.shash.tfm = crct10dif_tfm; + desc.shash.flags = 0; + *(__u16 *)desc.ctx = 0; - for (i = 0 ; i < len ; i++) - crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; + err = crypto_shash_update(&desc.shash, buffer, len); + BUG_ON(err); - return crc; + return *(__u16 *)desc.ctx; } EXPORT_SYMBOL(crc_t10dif); +static int __init crc_t10dif_mod_init(void) +{ + crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); + return PTR_RET(crct10dif_tfm); +} + +static void __exit crc_t10dif_mod_fini(void) +{ + crypto_free_shash(crct10dif_tfm); +} + +module_init(crc_t10dif_mod_init); +module_exit(crc_t10dif_mod_fini); + MODULE_DESCRIPTION("T10 DIF CRC calculation"); MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: crct10dif"); -- cgit v1.2.3
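
A note on verifying the generic path: crc_t10dif_generic() above is a
plain byte-at-a-time, non-reflected CRC-16 over the 0x8bb7 polynomial,
so it is easy to check outside the kernel. The sketch below is not part
of the patch; it rebuilds the lookup table from the polynomial given in
crypto/crct10dif.c and checks the first testmgr.h vector ("abc", digest
"\x3b\x44" little-endian, i.e. 0x443b). In-kernel, the same vectors run
via the new tcrypt mode 47 added above.

/* t10dif_check.c - standalone sketch, not part of the patch */
#include <stdio.h>
#include <stdint.h>

static uint16_t table[256];

/* rebuild t10_dif_crc_table[] from the polynomial 0x8bb7 */
static void build_table(void)
{
	int b, i;

	for (b = 0; b < 256; b++) {
		uint16_t r = (uint16_t)(b << 8);

		for (i = 0; i < 8; i++)
			r = (r & 0x8000) ? (uint16_t)((r << 1) ^ 0x8bb7)
					 : (uint16_t)(r << 1);
		table[b] = r;
	}
}

/* same update step as crc_t10dif_generic() in the patch */
static uint16_t crc_t10dif_table(uint16_t crc, const unsigned char *buf,
				 size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		crc = (uint16_t)((crc << 8) ^
				 table[((crc >> 8) ^ buf[i]) & 0xff]);
	return crc;
}

int main(void)
{
	build_table();
	/* expect 0x443b, matching the "abc" vector in testmgr.h */
	printf("crc(\"abc\") = 0x%04x\n",
	       crc_t10dif_table(0, (const unsigned char *)"abc", 3));
	return 0;
}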
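
The rk1..rk20 fold multipliers in the .data section of
crct10dif-pcl-asm_64.S are, per the comments in that file, residues of
the form (x^(32*k) mod Q) << 32 with Q = 0x18BB70000 (0x8bb7 scaled to
32 bits). A minimal sketch of that derivation, assuming the comments'
formulas are exact; xn_mod_q() is an illustrative helper, not a kernel
function:

/* rk_consts.c - standalone sketch of the constant derivation */
#include <stdio.h>
#include <stdint.h>

/* x^n mod Q over GF(2), by shift-and-reduce; Q has degree 32 */
static uint64_t xn_mod_q(unsigned int n)
{
	const uint64_t Q = 0x18BB70000ULL;
	uint64_t r = 1;

	while (n--) {
		r <<= 1;
		if (r & (1ULL << 32))
			r ^= Q;		/* reduce when degree reaches 32 */
	}
	return r;
}

int main(void)
{
	/* expected per the .data section: rk1 = rk5 = 0x2d56000000000000,
	 * rk2 = 0x06df000000000000, rk6 = 0x1368000000000000 */
	printf("rk1 = 0x%016llx\n",
	       (unsigned long long)(xn_mod_q(32 * 3) << 32));
	printf("rk2 = 0x%016llx\n",
	       (unsigned long long)(xn_mod_q(32 * 5) << 32));
	printf("rk6 = 0x%016llx\n",
	       (unsigned long long)(xn_mod_q(32 * 2) << 32));
	return 0;
}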
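
Since lib/crc-t10dif.c keeps the crc_t10dif() prototype and only swaps
its backend for the "crct10dif" shash, existing callers are unchanged
and transparently pick up crct10dif-pclmul when that module is loaded.
A hedged sketch of a typical consumer follows; the tuple layout mirrors
the 8-byte T10 DIF integrity tuple used by the SCSI disk code, and the
names dif_tuple and dif_guard_ok are illustrative only:

/* illustrative only; modeled on the sd_dif guard-tag check */
#include <linux/types.h>
#include <linux/crc-t10dif.h>
#include <asm/byteorder.h>

struct dif_tuple {
	__be16 guard_tag;	/* CRC-T10DIF over the data block */
	__be16 app_tag;
	__be32 ref_tag;
};

static bool dif_guard_ok(const unsigned char *data, size_t blksz,
			 const struct dif_tuple *t)
{
	/* routed through the "crct10dif" shash as of this patch */
	__u16 crc = crc_t10dif(data, blksz);

	return t->guard_tag == cpu_to_be16(crc);
}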