Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig                         |    7
-rw-r--r--  drivers/crypto/Makefile                        |    1
-rw-r--r--  drivers/crypto/bcm/util.c                      |    2
-rw-r--r--  drivers/crypto/cavium/Makefile                 |    4
-rw-r--r--  drivers/crypto/cavium/zip/Makefile             |   11
-rw-r--r--  drivers/crypto/cavium/zip/common.h             |  202
-rw-r--r--  drivers/crypto/cavium/zip/zip_crypto.c         |  313
-rw-r--r--  drivers/crypto/cavium/zip/zip_crypto.h         |   79
-rw-r--r--  drivers/crypto/cavium/zip/zip_deflate.c        |  200
-rw-r--r--  drivers/crypto/cavium/zip/zip_deflate.h        |   62
-rw-r--r--  drivers/crypto/cavium/zip/zip_device.c         |  202
-rw-r--r--  drivers/crypto/cavium/zip/zip_device.h         |  108
-rw-r--r--  drivers/crypto/cavium/zip/zip_inflate.c        |  223
-rw-r--r--  drivers/crypto/cavium/zip/zip_inflate.h        |   62
-rw-r--r--  drivers/crypto/cavium/zip/zip_main.c           |  729
-rw-r--r--  drivers/crypto/cavium/zip/zip_main.h           |  121
-rw-r--r--  drivers/crypto/cavium/zip/zip_mem.c            |  114
-rw-r--r--  drivers/crypto/cavium/zip/zip_mem.h            |   78
-rw-r--r--  drivers/crypto/cavium/zip/zip_regs.h           | 1347
-rw-r--r--  drivers/crypto/mediatek/mtk-aes.c              |  421
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.c         |   15
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.h         |   56
-rw-r--r--  drivers/crypto/mediatek/mtk-sha.c              |  309
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c  |    2
24 files changed, 4234 insertions, 434 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 473d31288ad8..1a60626937e4 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -515,6 +515,13 @@ config CRYPTO_DEV_MXS_DCP
source "drivers/crypto/qat/Kconfig"
source "drivers/crypto/cavium/cpt/Kconfig"
+config CRYPTO_DEV_CAVIUM_ZIP
+ tristate "Cavium ZIP driver"
+ depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
+ ---help---
+ Select this option if you want to enable compression/decompression
+ acceleration on Cavium's ARM-based SoCs.
+
config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM
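With the Kconfig entry above, the driver is enabled through the usual CRYPTO_DEV_CAVIUM_ZIP symbol. A minimal config fragment, assuming a module build (the "depends on" line above already requires PCI and a 64-bit ARM64 or COMPILE_TEST kernel):

	CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m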
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 739609471169..41ca339e89d3 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
+obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index 0502f460dacd..430c5570ea87 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -312,7 +312,7 @@ int do_shash(unsigned char *name, unsigned char *result,
}
rc = crypto_shash_final(&sdesc->shash, result);
if (rc)
- pr_err("%s: Could not genereate %s hash", __func__, name);
+ pr_err("%s: Could not generate %s hash", __func__, name);
do_shash_err:
crypto_free_shash(hash);
diff --git a/drivers/crypto/cavium/Makefile b/drivers/crypto/cavium/Makefile
new file mode 100644
index 000000000000..641268b784be
--- /dev/null
+++ b/drivers/crypto/cavium/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for Cavium crypto device drivers
+#
+obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += zip/
diff --git a/drivers/crypto/cavium/zip/Makefile b/drivers/crypto/cavium/zip/Makefile
new file mode 100644
index 000000000000..b2f3baaff757
--- /dev/null
+++ b/drivers/crypto/cavium/zip/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Cavium's ZIP Driver.
+#
+
+obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += thunderx_zip.o
+thunderx_zip-y := zip_main.o \
+ zip_device.o \
+ zip_crypto.o \
+ zip_mem.o \
+ zip_deflate.o \
+ zip_inflate.o
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
new file mode 100644
index 000000000000..dc451e0a43c5
--- /dev/null
+++ b/drivers/crypto/cavium/zip/common.h
@@ -0,0 +1,202 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+/* Device specific zlib function definitions */
+#include "zip_device.h"
+
+/* ZIP device definitions */
+#include "zip_main.h"
+
+/* ZIP memory allocation/deallocation related definitions */
+#include "zip_mem.h"
+
+/* Device specific structure definitions */
+#include "zip_regs.h"
+
+#define ZIP_ERROR -1
+
+#define ZIP_FLUSH_FINISH 4
+
+#define RAW_FORMAT 0 /* for rawpipe */
+#define ZLIB_FORMAT 1 /* for zpipe */
+#define GZIP_FORMAT 2 /* for gzpipe */
+#define LZS_FORMAT 3 /* for lzspipe */
+
+/* Max number of ZIP devices supported */
+#define MAX_ZIP_DEVICES 2
+
+/* Configures the number of zip queues to be used */
+#define ZIP_NUM_QUEUES 2
+
+#define DYNAMIC_STOP_EXCESS 1024
+
+/* Maximum buffer sizes in direct mode */
+#define MAX_INPUT_BUFFER_SIZE (64 * 1024)
+#define MAX_OUTPUT_BUFFER_SIZE (64 * 1024)
+
+/**
+ * struct zip_operation - common data structure for comp and decomp operations
+ * @input: Next input byte is read from here
+ * @output: Next output byte is written here
+ * @ctx_addr: Inflate context buffer address
+ * @history: Pointer to the history buffer
+ * @input_len: Number of bytes available at @input (zlib's next_in)
+ * @input_total_len: Total number of input bytes read
+ * @output_len: Remaining free space at next_out
+ * @output_total_len: Total number of bytes output so far
+ * @csum: Checksum value of the uncompressed data
+ * @flush: Flush flag
+ * @format: Format (depends on stream's wrap)
+ * @speed: Speed depends on stream's level
+ * @ccode: Compression code (stream's strategy)
+ * @lzs_flag: Flag for LZS support
+ * @begin_file: Beginning-of-file indication for inflate
+ * @history_len: Size of the history data
+ * @end_file: End-of-file indication for inflate
+ * @compcode: Completion status of the ZIP invocation
+ * @bytes_read: Input bytes read in current instruction
+ * @bits_processed: Total bits processed for entire file
+ * @sizeofptr: To distinguish between ILP32 and LP64
+ * @sizeofzops: Optional, just for padding
+ *
+ * This structure is used to maintain the required meta data for the
+ * comp and decomp operations.
+ */
+struct zip_operation {
+ u8 *input;
+ u8 *output;
+ u64 ctx_addr;
+ u64 history;
+
+ u32 input_len;
+ u32 input_total_len;
+
+ u32 output_len;
+ u32 output_total_len;
+
+ u32 csum;
+ u32 flush;
+
+ u32 format;
+ u32 speed;
+ u32 ccode;
+ u32 lzs_flag;
+
+ u32 begin_file;
+ u32 history_len;
+
+ u32 end_file;
+ u32 compcode;
+ u32 bytes_read;
+ u32 bits_processed;
+
+ u32 sizeofptr;
+ u32 sizeofzops;
+};
+
+/* error messages */
+#define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
+ fmt "\n", __func__, __LINE__, ## args)
+
+#ifdef MSG_ENABLE
+/* Enable all messages */
+#define zip_msg(fmt, args...) pr_info("ZIP_MSG:" fmt "\n", ## args)
+#else
+#define zip_msg(fmt, args...)
+#endif
+
+#if defined(ZIP_DEBUG_ENABLE) && defined(MSG_ENABLE)
+
+#ifdef DEBUG_LEVEL
+
+#define FILE_NAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : \
+ strrchr(__FILE__, '\\') ? strrchr(__FILE__, '\\') + 1 : __FILE__)
+
+#if DEBUG_LEVEL >= 4
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
+ fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
+
+#elif DEBUG_LEVEL >= 3
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
+ fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
+
+#elif DEBUG_LEVEL >= 2
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s() : %d: " \
+ fmt "\n", __func__, __LINE__, ## args)
+
+#else
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
+
+#endif /* DEBUG_LEVEL >= 4 */
+
+#else
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
+
+#endif /* DEBUG_LEVEL */
+#else
+
+#define zip_dbg(fmt, args...)
+
+#endif /* ZIP_DEBUG_ENABLE && MSG_ENABLE */
+
+#endif
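The zip_msg()/zip_dbg() macros above are compiled out unless MSG_ENABLE (and, for zip_dbg(), ZIP_DEBUG_ENABLE plus an optional DEBUG_LEVEL) are defined; nothing in this patch defines them. A hypothetical way to switch them on during development would be extra compiler flags in the zip Makefile (illustrative only, not part of the patch):

	# Development-only: enable zip_msg() and verbose zip_dbg() output
	ccflags-y += -DMSG_ENABLE -DZIP_DEBUG_ENABLE -DDEBUG_LEVEL=4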
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
new file mode 100644
index 000000000000..8df4d26cf9d4
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -0,0 +1,313 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include "zip_crypto.h"
+
+static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
+ int lzs_flag)
+{
+ zip_ops->flush = ZIP_FLUSH_FINISH;
+
+ /* equivalent to level 6 of open-source zlib */
+ zip_ops->speed = 1;
+
+ if (!lzs_flag) {
+ zip_ops->ccode = 0; /* Auto Huffman */
+ zip_ops->lzs_flag = 0;
+ zip_ops->format = ZLIB_FORMAT;
+ } else {
+ zip_ops->ccode = 3; /* LZS Encoding */
+ zip_ops->lzs_flag = 1;
+ zip_ops->format = LZS_FORMAT;
+ }
+ zip_ops->begin_file = 1;
+ zip_ops->history_len = 0;
+ zip_ops->end_file = 1;
+ zip_ops->compcode = 0;
+ zip_ops->csum = 1; /* Adler checksum desired */
+}
+
+int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
+{
+ struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
+ struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
+
+ zip_static_init_zip_ops(comp_ctx, lzs_flag);
+ zip_static_init_zip_ops(decomp_ctx, lzs_flag);
+
+ comp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
+ if (!comp_ctx->input)
+ return -ENOMEM;
+
+ comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
+ if (!comp_ctx->output)
+ goto err_comp_input;
+
+ decomp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
+ if (!decomp_ctx->input)
+ goto err_comp_output;
+
+ decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
+ if (!decomp_ctx->output)
+ goto err_decomp_input;
+
+ return 0;
+
+err_decomp_input:
+ zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE);
+
+err_comp_output:
+ zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
+
+err_comp_input:
+ zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
+
+ return -ENOMEM;
+}
+
+void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
+{
+ struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
+ struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
+
+ zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
+ zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
+
+ zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE);
+ zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
+}
+
+int zip_compress(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen,
+ struct zip_kernel_ctx *zip_ctx)
+{
+ struct zip_operation *zip_ops = NULL;
+ struct zip_state zip_state;
+ struct zip_device *zip = NULL;
+ int ret;
+
+ if (!zip_ctx || !src || !dst || !dlen)
+ return -ENOMEM;
+
+ zip = zip_get_device(zip_get_node_id());
+ if (!zip)
+ return -ENODEV;
+
+ memset(&zip_state, 0, sizeof(struct zip_state));
+ zip_ops = &zip_ctx->zip_comp;
+
+ zip_ops->input_len = slen;
+ zip_ops->output_len = *dlen;
+ memcpy(zip_ops->input, src, slen);
+
+ ret = zip_deflate(zip_ops, &zip_state, zip);
+
+ if (!ret) {
+ *dlen = zip_ops->output_len;
+ memcpy(dst, zip_ops->output, *dlen);
+ }
+
+ return ret;
+}
+
+int zip_decompress(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen,
+ struct zip_kernel_ctx *zip_ctx)
+{
+ struct zip_operation *zip_ops = NULL;
+ struct zip_state zip_state;
+ struct zip_device *zip = NULL;
+ int ret;
+
+ if (!zip_ctx || !src || !dst || !dlen)
+ return -ENOMEM;
+
+ zip = zip_get_device(zip_get_node_id());
+ if (!zip)
+ return -ENODEV;
+
+ memset(&zip_state, 0, sizeof(struct zip_state));
+ zip_ops = &zip_ctx->zip_decomp;
+ memcpy(zip_ops->input, src, slen);
+
+ /* Workaround for a bug in zlib which sometimes needs an extra byte */
+ if (zip_ops->ccode != 3) /* Not LZS Encoding */
+ zip_ops->input[slen++] = 0;
+
+ zip_ops->input_len = slen;
+ zip_ops->output_len = *dlen;
+
+ ret = zip_inflate(zip_ops, &zip_state, zip);
+
+ if (!ret) {
+ *dlen = zip_ops->output_len;
+ memcpy(dst, zip_ops->output, *dlen);
+ }
+
+ return ret;
+}
+
+/* Legacy Compress framework start */
+int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+ ret = zip_ctx_init(zip_ctx, 0);
+
+ return ret;
+}
+
+int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+ ret = zip_ctx_init(zip_ctx, 1);
+
+ return ret;
+}
+
+void zip_free_comp_ctx(struct crypto_tfm *tfm)
+{
+ struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+ zip_ctx_exit(zip_ctx);
+}
+
+int zip_comp_compress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+ ret = zip_compress(src, slen, dst, dlen, zip_ctx);
+
+ return ret;
+}
+
+int zip_comp_decompress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+ ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
+
+ return ret;
+} /* Legacy compress framework end */
+
+/* SCOMP framework start */
+void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx;
+
+ zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
+ if (!zip_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ret = zip_ctx_init(zip_ctx, 0);
+
+ if (ret) {
+ kzfree(zip_ctx);
+ return ERR_PTR(ret);
+ }
+
+ return zip_ctx;
+}
+
+void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx;
+
+ zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
+ if (!zip_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ret = zip_ctx_init(zip_ctx, 1);
+
+ if (ret) {
+ kzfree(zip_ctx);
+ return ERR_PTR(ret);
+ }
+
+ return zip_ctx;
+}
+
+void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+ struct zip_kernel_ctx *zip_ctx = ctx;
+
+ zip_ctx_exit(zip_ctx);
+ kzfree(zip_ctx);
+}
+
+int zip_scomp_compress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx = ctx;
+
+ ret = zip_compress(src, slen, dst, dlen, zip_ctx);
+
+ return ret;
+}
+
+int zip_scomp_decompress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
+{
+ int ret;
+ struct zip_kernel_ctx *zip_ctx = ctx;
+
+ ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
+
+ return ret;
+} /* SCOMP framework end */
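The functions above are glue for two crypto-API personalities: the legacy crypto_comp interface and the newer scomp (synchronous compression) interface. The actual algorithm registration lives in zip_main.c; as a rough sketch of how the scomp half could be wired up, assuming the standard "deflate" algorithm name (the driver name and priority below are illustrative, not taken from this patch):

	static struct scomp_alg zip_scomp_deflate_sketch = {
		.alloc_ctx	= zip_alloc_scomp_ctx_deflate,
		.free_ctx	= zip_free_scomp_ctx,
		.compress	= zip_scomp_compress,
		.decompress	= zip_scomp_decompress,
		.base		= {
			.cra_name		= "deflate",
			.cra_driver_name	= "deflate-cavium", /* assumed */
			.cra_priority		= 300,              /* assumed */
			.cra_module		= THIS_MODULE,
		},
	};

	/* ... crypto_register_scomp(&zip_scomp_deflate_sketch); */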
diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h
new file mode 100644
index 000000000000..b59ddfcacd34
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_crypto.h
@@ -0,0 +1,79 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_CRYPTO_H__
+#define __ZIP_CRYPTO_H__
+
+#include <linux/crypto.h>
+#include <crypto/internal/scompress.h>
+#include "common.h"
+#include "zip_deflate.h"
+#include "zip_inflate.h"
+
+struct zip_kernel_ctx {
+ struct zip_operation zip_comp;
+ struct zip_operation zip_decomp;
+};
+
+int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm);
+int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm);
+void zip_free_comp_ctx(struct crypto_tfm *tfm);
+int zip_comp_compress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+int zip_comp_decompress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+
+void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm);
+void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm);
+void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *zip_ctx);
+int zip_scomp_compress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx);
+int zip_scomp_decompress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx);
+#endif
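None of these entry points are called directly by users; they are reached through the kernel crypto API once the algorithms are registered. A minimal, hypothetical round trip through the legacy interface, assuming the driver registers under the "deflate" name and the buffers respect the 64 KB direct-mode limits from common.h:

	struct crypto_comp *tfm = crypto_alloc_comp("deflate", 0, 0);
	unsigned int dlen = dst_size;
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	ret = crypto_comp_compress(tfm, src, slen, dst, &dlen);
	crypto_free_comp(tfm);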
diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
new file mode 100644
index 000000000000..9a944b8c1e29
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_deflate.c
@@ -0,0 +1,200 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "common.h"
+#include "zip_deflate.h"
+
+/* Prepares the deflate zip command */
+static int prepare_zip_command(struct zip_operation *zip_ops,
+ struct zip_state *s, union zip_inst_s *zip_cmd)
+{
+ union zip_zres_s *result_ptr = &s->result;
+
+ memset(zip_cmd, 0, sizeof(s->zip_cmd));
+ memset(result_ptr, 0, sizeof(s->result));
+
+ /* IWORD #0 */
+ /* History gather */
+ zip_cmd->s.hg = 0;
+ /* compression enable = 1 for deflate */
+ zip_cmd->s.ce = 1;
+ /* sf (sync flush) */
+ zip_cmd->s.sf = 1;
+ /* ef (end of file) */
+ if (zip_ops->flush == ZIP_FLUSH_FINISH) {
+ zip_cmd->s.ef = 1;
+ zip_cmd->s.sf = 0;
+ }
+
+ zip_cmd->s.cc = zip_ops->ccode;
+ /* ss (compression speed/storage) */
+ zip_cmd->s.ss = zip_ops->speed;
+
+ /* IWORD #1 */
+ /* adler checksum */
+ zip_cmd->s.adlercrc32 = zip_ops->csum;
+ zip_cmd->s.historylength = zip_ops->history_len;
+ zip_cmd->s.dg = 0;
+
+ /* IWORD # 6 and 7 - compression input/history pointer */
+ zip_cmd->s.inp_ptr_addr.s.addr = __pa(zip_ops->input);
+ zip_cmd->s.inp_ptr_ctl.s.length = (zip_ops->input_len +
+ zip_ops->history_len);
+ zip_cmd->s.ds = 0;
+
+ /* IWORD # 8 and 9 - Output pointer */
+ zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
+ zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
+ /* maximum number of output-stream bytes that can be written */
+ zip_cmd->s.totaloutputlength = zip_ops->output_len;
+
+ /* IWORD # 10 and 11 - Result pointer */
+ zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
+ /* Clearing completion code */
+ result_ptr->s.compcode = 0;
+
+ return 0;
+}
+
+/**
+ * zip_deflate - API to offload deflate operation to hardware
+ * @zip_ops: Pointer to zip operation structure
+ * @s: Pointer to the structure representing zip state
+ * @zip_dev: Pointer to zip device structure
+ *
+ * This function prepares the zip deflate command and submits it to the zip
+ * engine for processing.
+ *
+ * Return: 0 if successful or error code
+ */
+int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
+ struct zip_device *zip_dev)
+{
+ union zip_inst_s *zip_cmd = &s->zip_cmd;
+ union zip_zres_s *result_ptr = &s->result;
+ u32 queue;
+
+ /* Prepares zip command based on the input parameters */
+ prepare_zip_command(zip_ops, s, zip_cmd);
+
+ atomic64_add(zip_ops->input_len, &zip_dev->stats.comp_in_bytes);
+ /* Loads zip command into command queues and rings door bell */
+ queue = zip_load_instr(zip_cmd, zip_dev);
+
+ /* Stats update for compression requests submitted */
+ atomic64_inc(&zip_dev->stats.comp_req_submit);
+
+ while (!result_ptr->s.compcode)
+ continue;
+
+ /* Stats update for compression requests completed */
+ atomic64_inc(&zip_dev->stats.comp_req_complete);
+
+ zip_ops->compcode = result_ptr->s.compcode;
+ switch (zip_ops->compcode) {
+ case ZIP_CMD_NOTDONE:
+ zip_dbg("Zip instruction not yet completed");
+ return ZIP_ERROR;
+
+ case ZIP_CMD_SUCCESS:
+ zip_dbg("Zip instruction completed successfully");
+ zip_update_cmd_bufs(zip_dev, queue);
+ break;
+
+ case ZIP_CMD_DTRUNC:
+ zip_dbg("Output Truncate error");
+ /* Returning ZIP_ERROR to avoid copy to user */
+ return ZIP_ERROR;
+
+ default:
+ zip_err("Zip instruction failed. Code:%d", zip_ops->compcode);
+ return ZIP_ERROR;
+ }
+
+ /* Update the CRC depending on the format */
+ switch (zip_ops->format) {
+ case RAW_FORMAT:
+ zip_dbg("RAW Format: %d ", zip_ops->format);
+ /* Get checksum from engine, need to feed it again */
+ zip_ops->csum = result_ptr->s.adler32;
+ break;
+
+ case ZLIB_FORMAT:
+ zip_dbg("ZLIB Format: %d ", zip_ops->format);
+ zip_ops->csum = result_ptr->s.adler32;
+ break;
+
+ case GZIP_FORMAT:
+ zip_dbg("GZIP Format: %d ", zip_ops->format);
+ zip_ops->csum = result_ptr->s.crc32;
+ break;
+
+ case LZS_FORMAT:
+ zip_dbg("LZS Format: %d ", zip_ops->format);
+ break;
+
+ default:
+ zip_err("Unknown Format:%d\n", zip_ops->format);
+ }
+
+ atomic64_add(result_ptr->s.totalbyteswritten,
+ &zip_dev->stats.comp_out_bytes);
+
+ /* Update output_len */
+ if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
+ /* Dynamic stop && strm->output_len < zipconstants[onfsize] */
+ zip_err("output_len (%d) < total bytes written(%d)\n",
+ zip_ops->output_len, result_ptr->s.totalbyteswritten);
+ zip_ops->output_len = 0;
+
+ } else {
+ zip_ops->output_len = result_ptr->s.totalbyteswritten;
+ }
+
+ return 0;
+}
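Note that zip_deflate() above spins on result_ptr->s.compcode with no bound. A hedged sketch of what a bounded variant could look like (the helper and its timeout budget are hypothetical, not part of this patch; udelay() comes from the already-included linux/delay.h):

	/* Hypothetical: poll the completion code, but give up eventually */
	static int zip_poll_result(union zip_zres_s *result)
	{
		unsigned long timeout = 100000;	/* assumed budget, in usecs */

		while (!result->s.compcode) {
			if (!timeout--)
				return -ETIMEDOUT;
			udelay(1);
		}
		return 0;
	}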
diff --git a/drivers/crypto/cavium/zip/zip_deflate.h b/drivers/crypto/cavium/zip/zip_deflate.h
new file mode 100644
index 000000000000..1d32e76edc4d
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_deflate.h
@@ -0,0 +1,62 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_DEFLATE_H__
+#define __ZIP_DEFLATE_H__
+
+/**
+ * zip_deflate - API to offload deflate operation to hardware
+ * @zip_ops: Pointer to zip operation structure
+ * @s: Pointer to the structure representing zip state
+ * @zip_dev: Pointer to the structure representing zip device
+ *
+ * This function prepares the zip deflate command and submits it to the zip
+ * engine by ringing the doorbell.
+ *
+ * Return: 0 if successful or error code
+ */
+int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
+ struct zip_device *zip_dev);
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_device.c b/drivers/crypto/cavium/zip/zip_device.c
new file mode 100644
index 000000000000..ccf21fb91513
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_device.c
@@ -0,0 +1,202 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include "common.h"
+#include "zip_deflate.h"
+
+/**
+ * zip_cmd_queue_consumed - Calculates the space consumed in the command queue.
+ *
+ * @zip_dev: Pointer to zip device structure
+ * @queue: Queue number
+ *
+ * Return: Bytes consumed in the command queue buffer.
+ */
+static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue)
+{
+ return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *
+ sizeof(u64));
+}
+
+/**
+ * zip_load_instr - Submits the instruction into the ZIP command queue
+ * @instr: Pointer to the instruction to be submitted
+ * @zip_dev: Pointer to ZIP device structure to which the instruction is to
+ * be submitted
+ *
+ * This function copies the ZIP instruction to the command queue and rings the
+ * doorbell to notify the engine of the instruction submission. The command
+ * queue is maintained in a circular fashion. When there is space for exactly
+ * one instruction in the queue, next chunk pointer of the queue is made to
+ * point to the head of the queue, thus maintaining a circular queue.
+ *
+ * Return: Queue number to which the instruction was submitted
+ */
+u32 zip_load_instr(union zip_inst_s *instr,
+ struct zip_device *zip_dev)
+{
+ union zip_quex_doorbell dbell;
+ u32 queue = 0;
+ u32 consumed = 0;
+ u64 *ncb_ptr = NULL;
+ union zip_nptr_s ncp;
+
+ /*
+ * Distribute the instructions between the enabled queues based on
+ * the CPU id.
+ */
+ if (smp_processor_id() % 2 == 0)
+ queue = 0;
+ else
+ queue = 1;
+
+ zip_dbg("CPU Core: %d Queue number:%d", smp_processor_id(), queue);
+
+ /* Take cmd buffer lock */
+ spin_lock(&zip_dev->iq[queue].lock);
+
+ /*
+ * Command Queue implementation
+ * 1. If there is place for new instructions, push the cmd at sw_head.
+ * 2. If there is place for exactly one instruction, push the new cmd
+ * at the sw_head. Make sw_head point to the sw_tail to make it
+ * circular. Write sw_head's physical address to the "Next-Chunk
+ * Buffer Ptr" to make it cmd_hw_tail.
+ * 3. Ring the door bell.
+ */
+ zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
+ zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
+
+ consumed = zip_cmd_queue_consumed(zip_dev, queue);
+ /* Check if there is space to push just one cmd */
+ if ((consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)) {
+ zip_dbg("Cmd queue space available for single command");
+ /* Space for one cmd; push it and make the queue circular */
+ memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
+ sizeof(union zip_inst_s));
+ zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
+
+ /* Now, point the "Next-Chunk Buffer Ptr" to sw_head */
+ ncb_ptr = zip_dev->iq[queue].sw_head;
+
+ zip_dbg("ncb addr :0x%lx sw_head addr :0x%lx",
+ ncb_ptr, zip_dev->iq[queue].sw_head - 16);
+
+ /* Using Circular command queue */
+ zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail;
+ /* Mark this buffer for free */
+ zip_dev->iq[queue].free_flag = 1;
+
+ /* Write new chunk buffer address at "Next-Chunk Buffer Ptr" */
+ ncp.u_reg64 = 0ull;
+ ncp.s.addr = __pa(zip_dev->iq[queue].sw_head);
+ *ncb_ptr = ncp.u_reg64;
+ zip_dbg("*ncb_ptr :0x%lx sw_head[phys] :0x%lx",
+ *ncb_ptr, __pa(zip_dev->iq[queue].sw_head));
+
+ zip_dev->iq[queue].pend_cnt++;
+
+ } else {
+ zip_dbg("Enough space is available for commands");
+ /* Push this cmd to cmd queue buffer */
+ memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
+ sizeof(union zip_inst_s));
+ zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
+
+ zip_dev->iq[queue].pend_cnt++;
+ }
+ zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
+ zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
+ zip_dev->iq[queue].hw_tail);
+
+ zip_dbg(" Pushed the new cmd : pend_cnt : %d",
+ zip_dev->iq[queue].pend_cnt);
+
+ /* Ring the doorbell */
+ dbell.u_reg64 = 0ull;
+ dbell.s.dbell_cnt = 1;
+ zip_reg_write(dbell.u_reg64,
+ (zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue)));
+
+ /* Unlock cmd buffer lock */
+ spin_unlock(&zip_dev->iq[queue].lock);
+
+ return queue;
+}
+
+/**
+ * zip_update_cmd_bufs - Updates the queue statistics after posting the
+ * instruction
+ * @zip_dev: Pointer to zip device structure
+ * @queue: Queue number
+ */
+void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue)
+{
+ /* Take cmd buffer lock */
+ spin_lock(&zip_dev->iq[queue].lock);
+
+ /* Check if the previous buffer can be freed */
+ if (zip_dev->iq[queue].free_flag == 1) {
+ zip_dbg("Free flag. Free cmd buffer, adjust sw head and tail");
+ /* Reset the free flag */
+ zip_dev->iq[queue].free_flag = 0;
+
+ /* Point the hw_tail to start of the new chunk buffer */
+ zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head;
+ } else {
+ zip_dbg("Free flag not set. increment hw tail");
+ zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */
+ }
+
+ zip_dev->iq[queue].done_cnt++;
+ zip_dev->iq[queue].pend_cnt--;
+
+ zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
+ zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
+ zip_dev->iq[queue].hw_tail);
+ zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt);
+
+ spin_unlock(&zip_dev->iq[queue].lock);
+}
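The wrap test in zip_load_instr() is easier to read with the constants named: each instruction occupies 128 bytes (16 u64 words) and the final 8 bytes of a chunk are reserved for the next-chunk pointer. A small illustrative helper mirroring the "(consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)" check (hypothetical, for exposition only):

	/* True when exactly one 128-byte slot remains before the reserved
	 * next-chunk-pointer word at the end of the command-queue chunk.
	 */
	static bool zip_queue_on_last_slot(u64 *sw_head, u64 *sw_tail)
	{
		u32 consumed = (sw_head - sw_tail) * sizeof(u64);

		return consumed + sizeof(union zip_inst_s) ==
		       ZIP_CMD_QBUF_SIZE - sizeof(u64);
	}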
diff --git a/drivers/crypto/cavium/zip/zip_device.h b/drivers/crypto/cavium/zip/zip_device.h
new file mode 100644
index 000000000000..9e18b3b93d38
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_device.h
@@ -0,0 +1,108 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_DEVICE_H__
+#define __ZIP_DEVICE_H__
+
+#include <linux/types.h>
+#include "zip_main.h"
+
+struct sg_info {
+ /*
+ * Pointer to the input data when scatter_gather == 0 and
+ * pointer to the input gather list buffer when scatter_gather == 1
+ */
+ union zip_zptr_s *gather;
+
+ /*
+ * Pointer to the output data when scatter_gather == 0 and
+ * pointer to the output scatter list buffer when scatter_gather == 1
+ */
+ union zip_zptr_s *scatter;
+
+ /*
+ * Holds size of the output buffer pointed by scatter list
+ * when scatter_gather == 1
+ */
+ u64 scatter_buf_size;
+
+ /* for gather data */
+ u64 gather_enable;
+
+ /* for scatter data */
+ u64 scatter_enable;
+
+ /* Number of gather list pointers for gather data */
+ u32 gbuf_cnt;
+
+ /* Number of scatter list pointers for scatter data */
+ u32 sbuf_cnt;
+
+ /* Buffers allocation state */
+ u8 alloc_state;
+};
+
+/**
+ * struct zip_state - Structure representing the required information related
+ * to a command
+ * @zip_cmd: Pointer to zip instruction structure
+ * @result: Pointer to zip result structure
+ * @ctx: Context pointer for inflate
+ * @history: Decompression history pointer
+ * @sginfo: Scatter-gather info structure
+ */
+struct zip_state {
+ union zip_inst_s zip_cmd;
+ union zip_zres_s result;
+ union zip_zptr_s *ctx;
+ union zip_zptr_s *history;
+ struct sg_info sginfo;
+};
+
+#define ZIP_CONTEXT_SIZE 2048
+#define ZIP_INFLATE_HISTORY_SIZE 32768
+#define ZIP_DEFLATE_HISTORY_SIZE 32768
+
+#endif
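struct zip_state above is sized for on-stack use in the synchronous paths; zip_compress() and zip_decompress() in zip_crypto.c declare one per call and zero it before building the command:

	struct zip_state zip_state;

	memset(&zip_state, 0, sizeof(struct zip_state));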
diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
new file mode 100644
index 000000000000..50cbdd83dbf2
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_inflate.c
@@ -0,0 +1,223 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "common.h"
+#include "zip_inflate.h"
+
+static int prepare_inflate_zcmd(struct zip_operation *zip_ops,
+ struct zip_state *s, union zip_inst_s *zip_cmd)
+{
+ union zip_zres_s *result_ptr = &s->result;
+
+ memset(zip_cmd, 0, sizeof(s->zip_cmd));
+ memset(result_ptr, 0, sizeof(s->result));
+
+ /* IWORD#0 */
+
+ /* Decompression History Gather list - no gather list */
+ zip_cmd->s.hg = 0;
+ /* For decompression, CE must be 0x0. */
+ zip_cmd->s.ce = 0;
+ /* For decompression, SS must be 0x0. */
+ zip_cmd->s.ss = 0;
+ /* For decompression, SF should always be set. */
+ zip_cmd->s.sf = 1;
+
+ /* Begin File */
+ if (zip_ops->begin_file == 0)
+ zip_cmd->s.bf = 0;
+ else
+ zip_cmd->s.bf = 1;
+
+ zip_cmd->s.ef = 1;
+ /* 0: for Deflate decompression, 3: for LZS decompression */
+ zip_cmd->s.cc = zip_ops->ccode;
+
+ /* IWORD #1*/
+
+ /* adler checksum */
+ zip_cmd->s.adlercrc32 = zip_ops->csum;
+
+ /*
+ * HISTORYLENGTH must be 0x0 for any ZIP decompress operation.
+ * History data is added to a decompression operation via IWORD3.
+ */
+ zip_cmd->s.historylength = 0;
+ zip_cmd->s.ds = 0;
+
+ /* IWORD # 8 and 9 - Output pointer */
+ zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
+ zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
+
+ /* Maximum number of output-stream bytes that can be written */
+ zip_cmd->s.totaloutputlength = zip_ops->output_len;
+
+ zip_dbg("Data Direct Input case ");
+
+ /* IWORD # 6 and 7 - input pointer */
+ zip_cmd->s.dg = 0;
+ zip_cmd->s.inp_ptr_addr.s.addr = __pa((u8 *)zip_ops->input);
+ zip_cmd->s.inp_ptr_ctl.s.length = zip_ops->input_len;
+
+ /* IWORD # 10 and 11 - Result pointer */
+ zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
+
+ /* Clearing completion code */
+ result_ptr->s.compcode = 0;
+
+ /* Returning 0 for the time being. */
+ return 0;
+}
+
+/**
+ * zip_inflate - API to offload inflate operation to hardware
+ * @zip_ops: Pointer to zip operation structure
+ * @s: Pointer to the structure representing zip state
+ * @zip_dev: Pointer to zip device structure
+ *
+ * This function prepares the zip inflate command and submits it to the zip
+ * engine for processing.
+ *
+ * Return: 0 if successful or error code
+ */
+int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
+ struct zip_device *zip_dev)
+{
+ union zip_inst_s *zip_cmd = &s->zip_cmd;
+ union zip_zres_s *result_ptr = &s->result;
+ u32 queue;
+
+ /* Prepare inflate zip command */
+ prepare_inflate_zcmd(zip_ops, s, zip_cmd);
+
+ atomic64_add(zip_ops->input_len, &zip_dev->stats.decomp_in_bytes);
+
+ /* Load inflate command to zip queue and ring the doorbell */
+ queue = zip_load_instr(zip_cmd, zip_dev);
+
+ /* Decompression requests submitted stats update */
+ atomic64_inc(&zip_dev->stats.decomp_req_submit);
+
+ while (!result_ptr->s.compcode)
+ continue;
+
+ /* Decompression requests completed stats update */
+ atomic64_inc(&zip_dev->stats.decomp_req_complete);
+
+ zip_ops->compcode = result_ptr->s.compcode;
+ switch (zip_ops->compcode) {
+ case ZIP_CMD_NOTDONE:
+ zip_dbg("Zip Instruction not yet completed\n");
+ return ZIP_ERROR;
+
+ case ZIP_CMD_SUCCESS:
+ zip_dbg("Zip Instruction completed successfully\n");
+ break;
+
+ case ZIP_CMD_DYNAMIC_STOP:
+ zip_dbg(" Dynamic stop Initiated\n");
+ break;
+
+ default:
+ zip_dbg("Instruction failed. Code = %d\n", zip_ops->compcode);
+ atomic64_inc(&zip_dev->stats.decomp_bad_reqs);
+ zip_update_cmd_bufs(zip_dev, queue);
+ return ZIP_ERROR;
+ }
+
+ zip_update_cmd_bufs(zip_dev, queue);
+
+ if ((zip_ops->ccode == 3) && (zip_ops->flush == ZIP_FLUSH_FINISH) &&
+ (zip_ops->compcode != ZIP_CMD_DYNAMIC_STOP))
+ result_ptr->s.ef = 1;
+
+ zip_ops->csum = result_ptr->s.adler32;
+
+ atomic64_add(result_ptr->s.totalbyteswritten,
+ &zip_dev->stats.decomp_out_bytes);
+
+ if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
+ zip_err("output_len (%d) < total bytes written (%d)\n",
+ zip_ops->output_len, result_ptr->s.totalbyteswritten);
+ zip_ops->output_len = 0;
+ } else {
+ zip_ops->output_len = result_ptr->s.totalbyteswritten;
+ }
+
+ zip_ops->bytes_read = result_ptr->s.totalbytesread;
+ zip_ops->bits_processed = result_ptr->s.totalbitsprocessed;
+ zip_ops->end_file = result_ptr->s.ef;
+ if (zip_ops->end_file) {
+ switch (zip_ops->format) {
+ case RAW_FORMAT:
+ zip_dbg("RAW Format: %d ", zip_ops->format);
+ /* Get checksum from engine */
+ zip_ops->csum = result_ptr->s.adler32;
+ break;
+
+ case ZLIB_FORMAT:
+ zip_dbg("ZLIB Format: %d ", zip_ops->format);
+ zip_ops->csum = result_ptr->s.adler32;
+ break;
+
+ case GZIP_FORMAT:
+ zip_dbg("GZIP Format: %d ", zip_ops->format);
+ zip_ops->csum = result_ptr->s.crc32;
+ break;
+
+ case LZS_FORMAT:
+ zip_dbg("LZS Format: %d ", zip_ops->format);
+ break;
+
+ default:
+ zip_err("Format error:%d\n", zip_ops->format);
+ }
+ }
+
+ return 0;
+}
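On success, zip_inflate() reports end-of-stream through zip_ops->end_file and leaves the format-appropriate checksum (Adler-32 or CRC32) in zip_ops->csum. A hypothetical caller-side check inside the zip_crypto.c paths could look like:

	if (!ret && zip_ops->end_file)
		pr_debug("stream complete: csum 0x%x, %u bytes read\n",
			 zip_ops->csum, zip_ops->bytes_read);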
diff --git a/drivers/crypto/cavium/zip/zip_inflate.h b/drivers/crypto/cavium/zip/zip_inflate.h
new file mode 100644
index 000000000000..6b20f179978e
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_inflate.h
@@ -0,0 +1,62 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_INFLATE_H__
+#define __ZIP_INFLATE_H__
+
+/**
+ * zip_inflate - API to offload inflate operation to hardware
+ * @zip_ops: Pointer to zip operation structure
+ * @s: Pointer to the structure representing zip state
+ * @zip_dev: Pointer to the structure representing zip device
+ *
+ * This function prepares the zip inflate command and submits it to the zip
+ * engine for processing.
+ *
+ * Return: 0 if successful or error code
+ */
+int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
+ struct zip_device *zip_dev);
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
new file mode 100644
index 000000000000..0951e20b395b
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -0,0 +1,729 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include "common.h"
+#include "zip_crypto.h"
+
+#define DRV_NAME "ThunderX-ZIP"
+
+static struct zip_device *zip_dev[MAX_ZIP_DEVICES];
+
+static const struct pci_device_id zip_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
+ { 0, }
+};
+
+void zip_reg_write(u64 val, u64 __iomem *addr)
+{
+ writeq(val, addr);
+}
+
+u64 zip_reg_read(u64 __iomem *addr)
+{
+ return readq(addr);
+}
+
+/*
+ * Allocates a new ZIP device structure
+ * Returns a zip_device pointer, or NULL if no free slot is available or
+ * memory allocation fails
+ */
+static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
+{
+ struct zip_device *zip = NULL;
+ int idx;
+
+ for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
+ if (!zip_dev[idx])
+ break;
+ }
+
+	/* Ensure the index is within the limit */
+ if (idx < MAX_ZIP_DEVICES)
+ zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
+
+ if (!zip)
+ return NULL;
+
+ zip_dev[idx] = zip;
+ zip->index = idx;
+ return zip;
+}
+
+/**
+ * zip_get_device - Get ZIP device based on node id of cpu
+ *
+ * @node: Node id of the current cpu
+ * Return: Pointer to Zip device structure
+ */
+struct zip_device *zip_get_device(int node)
+{
+ if ((node < MAX_ZIP_DEVICES) && (node >= 0))
+ return zip_dev[node];
+
+ zip_err("ZIP device not found for node id %d\n", node);
+ return NULL;
+}
+
+/**
+ * zip_get_node_id - Get the node id of the current cpu
+ *
+ * Return: Node id of the current cpu
+ */
+int zip_get_node_id(void)
+{
+ return cpu_to_node(smp_processor_id());
+}
+
+/* Initializes the ZIP h/w sub-system */
+static int zip_init_hw(struct zip_device *zip)
+{
+ union zip_cmd_ctl cmd_ctl;
+ union zip_constants constants;
+ union zip_que_ena que_ena;
+ union zip_quex_map que_map;
+ union zip_que_pri que_pri;
+
+ union zip_quex_sbuf_addr que_sbuf_addr;
+ union zip_quex_sbuf_ctl que_sbuf_ctl;
+
+ int q = 0;
+
+ /* Enable the ZIP Engine(Core) Clock */
+ cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
+ cmd_ctl.s.forceclk = 1;
+ zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));
+
+ zip_msg("ZIP_CMD_CTL : 0x%016llx",
+ zip_reg_read(zip->reg_base + ZIP_CMD_CTL));
+
+ constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
+ zip->depth = constants.s.depth;
+ zip->onfsize = constants.s.onfsize;
+ zip->ctxsize = constants.s.ctxsize;
+
+ zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
+ zip->depth, zip->onfsize, zip->ctxsize);
+
+ /*
+ * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
+ * have the correct buffer pointer and size configured for each
+ * instruction queue.
+ */
+ for (q = 0; q < ZIP_NUM_QUEUES; q++) {
+ que_sbuf_ctl.u_reg64 = 0ull;
+ que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
+ que_sbuf_ctl.s.inst_be = 0;
+ que_sbuf_ctl.s.stream_id = 0;
+ zip_reg_write(que_sbuf_ctl.u_reg64,
+ (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
+
+ zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
+ zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
+ }
+
+ for (q = 0; q < ZIP_NUM_QUEUES; q++) {
+ memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));
+
+ spin_lock_init(&zip->iq[q].lock);
+
+ if (zip_cmd_qbuf_alloc(zip, q)) {
+ while (q != 0) {
+ q--;
+ zip_cmd_qbuf_free(zip, q);
+ }
+ return -ENOMEM;
+ }
+
+ /* Initialize tail ptr to head */
+ zip->iq[q].sw_tail = zip->iq[q].sw_head;
+ zip->iq[q].hw_tail = zip->iq[q].sw_head;
+
+ /* Write the physical addr to register */
+ que_sbuf_addr.u_reg64 = 0ull;
+ que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
+ ZIP_128B_ALIGN);
+
+ zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
+ (u64)que_sbuf_addr.s.ptr);
+
+ zip_reg_write(que_sbuf_addr.u_reg64,
+ (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
+
+ zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
+ zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
+
+		zip_dbg("sw_head : %p sw_tail : %p hw_tail : %p",
+			zip->iq[q].sw_head, zip->iq[q].sw_tail,
+			zip->iq[q].hw_tail);
+		zip_dbg("sw_head phy addr : 0x%llx", que_sbuf_addr.s.ptr);
+ }
+
+ /*
+ * Queue-to-ZIP core mapping
+ * If a queue is not mapped to a particular core, it is equivalent to
+ * the ZIP core being disabled.
+ */
+ que_ena.u_reg64 = 0x0ull;
+ /* Enabling queues based on ZIP_NUM_QUEUES */
+ for (q = 0; q < ZIP_NUM_QUEUES; q++)
+ que_ena.s.ena |= (0x1 << q);
+ zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));
+
+ zip_msg("QUE_ENA : 0x%016llx",
+ zip_reg_read(zip->reg_base + ZIP_QUE_ENA));
+
+ for (q = 0; q < ZIP_NUM_QUEUES; q++) {
+ que_map.u_reg64 = 0ull;
+ /* Mapping each queue to two ZIP cores */
+ que_map.s.zce = 0x3;
+ zip_reg_write(que_map.u_reg64,
+ (zip->reg_base + ZIP_QUEX_MAP(q)));
+
+ zip_msg("QUE_MAP(%d) : 0x%016llx", q,
+ zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
+ }
+
+ que_pri.u_reg64 = 0ull;
+ for (q = 0; q < ZIP_NUM_QUEUES; q++)
+ que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
+ zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));
+
+ zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));
+
+ return 0;
+}
+
+static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct zip_device *zip = NULL;
+ int err;
+
+ zip = zip_alloc_device(pdev);
+ if (!zip)
+ return -ENOMEM;
+
+ dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
+ pdev->vendor, pdev->device, dev_to_node(dev));
+
+ pci_set_drvdata(pdev, zip);
+ zip->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device");
+ goto err_free_device;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get 48-bit DMA for allocations\n");
+ goto err_release_regions;
+ }
+
+ /* MAP configuration registers */
+ zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
+ if (!zip->reg_base) {
+ dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Initialize ZIP Hardware */
+ err = zip_init_hw(zip);
+ if (err)
+ goto err_release_regions;
+
+ return 0;
+
+err_release_regions:
+ if (zip->reg_base)
+ iounmap(zip->reg_base);
+ pci_release_regions(pdev);
+
+err_disable_device:
+ pci_disable_device(pdev);
+
+err_free_device:
+ pci_set_drvdata(pdev, NULL);
+
+ /* Remove zip_dev from zip_device list, free the zip_device memory */
+ zip_dev[zip->index] = NULL;
+ devm_kfree(dev, zip);
+
+ return err;
+}
+
+static void zip_remove(struct pci_dev *pdev)
+{
+ struct zip_device *zip = pci_get_drvdata(pdev);
+ union zip_cmd_ctl cmd_ctl;
+ int q = 0;
+
+ if (!zip)
+ return;
+
+ if (zip->reg_base) {
+ cmd_ctl.u_reg64 = 0x0ull;
+ cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
+ zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
+ iounmap(zip->reg_base);
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ /*
+ * Free Command Queue buffers. This free should be called for all
+ * the enabled Queues.
+ */
+ for (q = 0; q < ZIP_NUM_QUEUES; q++)
+ zip_cmd_qbuf_free(zip, q);
+
+ pci_set_drvdata(pdev, NULL);
+ /* remove zip device from zip device list */
+ zip_dev[zip->index] = NULL;
+}
+
+/* PCI Sub-System Interface */
+static struct pci_driver zip_driver = {
+ .name = DRV_NAME,
+ .id_table = zip_id_table,
+ .probe = zip_probe,
+ .remove = zip_remove,
+};
+
+/* Kernel Crypto Subsystem Interface */
+
+static struct crypto_alg zip_comp_deflate = {
+ .cra_name = "deflate",
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct zip_kernel_ctx),
+ .cra_priority = 300,
+ .cra_module = THIS_MODULE,
+ .cra_init = zip_alloc_comp_ctx_deflate,
+ .cra_exit = zip_free_comp_ctx,
+ .cra_u = { .compress = {
+ .coa_compress = zip_comp_compress,
+ .coa_decompress = zip_comp_decompress
+ } }
+};
+
+static struct crypto_alg zip_comp_lzs = {
+ .cra_name = "lzs",
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct zip_kernel_ctx),
+ .cra_priority = 300,
+ .cra_module = THIS_MODULE,
+ .cra_init = zip_alloc_comp_ctx_lzs,
+ .cra_exit = zip_free_comp_ctx,
+ .cra_u = { .compress = {
+ .coa_compress = zip_comp_compress,
+ .coa_decompress = zip_comp_decompress
+ } }
+};
+
+static struct scomp_alg zip_scomp_deflate = {
+ .alloc_ctx = zip_alloc_scomp_ctx_deflate,
+ .free_ctx = zip_free_scomp_ctx,
+ .compress = zip_scomp_compress,
+ .decompress = zip_scomp_decompress,
+ .base = {
+ .cra_name = "deflate",
+ .cra_driver_name = "deflate-scomp",
+ .cra_module = THIS_MODULE,
+ .cra_priority = 300,
+ }
+};
+
+static struct scomp_alg zip_scomp_lzs = {
+ .alloc_ctx = zip_alloc_scomp_ctx_lzs,
+ .free_ctx = zip_free_scomp_ctx,
+ .compress = zip_scomp_compress,
+ .decompress = zip_scomp_decompress,
+ .base = {
+ .cra_name = "lzs",
+ .cra_driver_name = "lzs-scomp",
+ .cra_module = THIS_MODULE,
+ .cra_priority = 300,
+ }
+};
+
+static int zip_register_compression_device(void)
+{
+ int ret;
+
+ ret = crypto_register_alg(&zip_comp_deflate);
+ if (ret < 0) {
+ zip_err("Deflate algorithm registration failed\n");
+ return ret;
+ }
+
+ ret = crypto_register_alg(&zip_comp_lzs);
+ if (ret < 0) {
+ zip_err("LZS algorithm registration failed\n");
+ goto err_unregister_alg_deflate;
+ }
+
+ ret = crypto_register_scomp(&zip_scomp_deflate);
+ if (ret < 0) {
+ zip_err("Deflate scomp algorithm registration failed\n");
+ goto err_unregister_alg_lzs;
+ }
+
+ ret = crypto_register_scomp(&zip_scomp_lzs);
+ if (ret < 0) {
+ zip_err("LZS scomp algorithm registration failed\n");
+ goto err_unregister_scomp_deflate;
+ }
+
+ return ret;
+
+err_unregister_scomp_deflate:
+ crypto_unregister_scomp(&zip_scomp_deflate);
+err_unregister_alg_lzs:
+ crypto_unregister_alg(&zip_comp_lzs);
+err_unregister_alg_deflate:
+ crypto_unregister_alg(&zip_comp_deflate);
+
+ return ret;
+}
+
+static void zip_unregister_compression_device(void)
+{
+ crypto_unregister_alg(&zip_comp_deflate);
+ crypto_unregister_alg(&zip_comp_lzs);
+ crypto_unregister_scomp(&zip_scomp_deflate);
+ crypto_unregister_scomp(&zip_scomp_lzs);
+}
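+
+/*
+ * Consumer-side sketch (illustrative only; zip_comp_example() is a
+ * hypothetical function): once the "deflate" algorithm above is registered,
+ * any kernel user can reach the ZIP engine through the generic compression
+ * API.
+ */
+static int __maybe_unused zip_comp_example(const u8 *src, unsigned int slen,
+					   u8 *dst, unsigned int *dlen)
+{
+	struct crypto_comp *tfm;
+	int ret;
+
+	tfm = crypto_alloc_comp("deflate", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
+	crypto_free_comp(tfm);
+	return ret;
+}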
+
+/*
+ * debugfs functions
+ */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+/* Displays ZIP device statistics */
+static int zip_show_stats(struct seq_file *s, void *unused)
+{
+ u64 val = 0ull;
+ u64 avg_chunk = 0ull, avg_cr = 0ull;
+ u32 q = 0;
+
+ int index = 0;
+ struct zip_device *zip;
+ struct zip_stats *st;
+
+ for (index = 0; index < MAX_ZIP_DEVICES; index++) {
+ if (zip_dev[index]) {
+ zip = zip_dev[index];
+ st = &zip->stats;
+
+ /* Get all the pending requests */
+ for (q = 0; q < ZIP_NUM_QUEUES; q++) {
+ val = zip_reg_read((zip->reg_base +
+ ZIP_DBG_COREX_STA(q)));
+ val = (val >> 32);
+ val = val & 0xffffff;
+ atomic64_add(val, &st->pending_req);
+ }
+
+			/* Avoid division by zero when nothing completed */
+			if (atomic64_read(&st->comp_req_complete))
+				avg_chunk = (atomic64_read(&st->comp_in_bytes) /
+					     atomic64_read(&st->comp_req_complete));
+
+			if (atomic64_read(&st->comp_out_bytes))
+				avg_cr = (atomic64_read(&st->comp_in_bytes) /
+					  atomic64_read(&st->comp_out_bytes));
+ seq_printf(s, " ZIP Device %d Stats\n"
+ "-----------------------------------\n"
+ "Comp Req Submitted : \t%ld\n"
+ "Comp Req Completed : \t%ld\n"
+ "Compress In Bytes : \t%ld\n"
+ "Compressed Out Bytes : \t%ld\n"
+ "Average Chunk size : \t%llu\n"
+ "Average Compression ratio : \t%llu\n"
+ "Decomp Req Submitted : \t%ld\n"
+ "Decomp Req Completed : \t%ld\n"
+ "Decompress In Bytes : \t%ld\n"
+ "Decompressed Out Bytes : \t%ld\n"
+ "Decompress Bad requests : \t%ld\n"
+ "Pending Req : \t%ld\n"
+ "---------------------------------\n",
+ index,
+ atomic64_read(&st->comp_req_submit),
+ atomic64_read(&st->comp_req_complete),
+ atomic64_read(&st->comp_in_bytes),
+ atomic64_read(&st->comp_out_bytes),
+ avg_chunk,
+ avg_cr,
+ atomic64_read(&st->decomp_req_submit),
+ atomic64_read(&st->decomp_req_complete),
+ atomic64_read(&st->decomp_in_bytes),
+ atomic64_read(&st->decomp_out_bytes),
+ atomic64_read(&st->decomp_bad_reqs),
+ atomic64_read(&st->pending_req));
+
+ /* Reset pending requests count */
+ atomic64_set(&st->pending_req, 0);
+ }
+ }
+ return 0;
+}
+
+/* Clears stats data */
+static int zip_clear_stats(struct seq_file *s, void *unused)
+{
+ int index = 0;
+
+ for (index = 0; index < MAX_ZIP_DEVICES; index++) {
+ if (zip_dev[index]) {
+			memset(&zip_dev[index]->stats, 0,
+			       sizeof(struct zip_stats));
+ seq_printf(s, "Cleared stats for zip %d\n", index);
+ }
+ }
+
+ return 0;
+}
+
+static struct zip_registers zipregs[64] = {
+ {"ZIP_CMD_CTL ", 0x0000ull},
+ {"ZIP_THROTTLE ", 0x0010ull},
+ {"ZIP_CONSTANTS ", 0x00A0ull},
+ {"ZIP_QUE0_MAP ", 0x1400ull},
+ {"ZIP_QUE1_MAP ", 0x1408ull},
+ {"ZIP_QUE_ENA ", 0x0500ull},
+ {"ZIP_QUE_PRI ", 0x0508ull},
+ {"ZIP_QUE0_DONE ", 0x2000ull},
+ {"ZIP_QUE1_DONE ", 0x2008ull},
+ {"ZIP_QUE0_DOORBELL ", 0x4000ull},
+ {"ZIP_QUE1_DOORBELL ", 0x4008ull},
+ {"ZIP_QUE0_SBUF_ADDR ", 0x1000ull},
+ {"ZIP_QUE1_SBUF_ADDR ", 0x1008ull},
+ {"ZIP_QUE0_SBUF_CTL ", 0x1200ull},
+ {"ZIP_QUE1_SBUF_CTL ", 0x1208ull},
+ { NULL, 0}
+};
+
+/* Prints registers' contents */
+static int zip_print_regs(struct seq_file *s, void *unused)
+{
+ u64 val = 0;
+ int i = 0, index = 0;
+
+ for (index = 0; index < MAX_ZIP_DEVICES; index++) {
+ if (zip_dev[index]) {
+ seq_printf(s, "--------------------------------\n"
+ " ZIP Device %d Registers\n"
+ "--------------------------------\n",
+ index);
+
+ i = 0;
+
+ while (zipregs[i].reg_name) {
+ val = zip_reg_read((zip_dev[index]->reg_base +
+ zipregs[i].reg_offset));
+ seq_printf(s, "%s: 0x%016llx\n",
+ zipregs[i].reg_name, val);
+ i++;
+ }
+ }
+ }
+ return 0;
+}
+
+static int zip_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, zip_show_stats, NULL);
+}
+
+static const struct file_operations zip_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = zip_stats_open,
+ .read = seq_read,
+	.release = single_release,
+};
+
+static int zip_clear_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, zip_clear_stats, NULL);
+}
+
+static const struct file_operations zip_clear_fops = {
+ .owner = THIS_MODULE,
+ .open = zip_clear_open,
+ .read = seq_read,
+	.release = single_release,
+};
+
+static int zip_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, zip_print_regs, NULL);
+}
+
+static const struct file_operations zip_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = zip_regs_open,
+ .read = seq_read,
+	.release = single_release,
+};
+
+/* Root directory for thunderx_zip debugfs entry */
+static struct dentry *zip_debugfs_root;
+
+static int __init zip_debugfs_init(void)
+{
+ struct dentry *zip_stats, *zip_clear, *zip_regs;
+
+ if (!debugfs_initialized())
+ return -ENODEV;
+
+ zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
+ if (!zip_debugfs_root)
+ return -ENOMEM;
+
+ /* Creating files for entries inside thunderx_zip directory */
+ zip_stats = debugfs_create_file("zip_stats", 0444,
+ zip_debugfs_root,
+ NULL, &zip_stats_fops);
+ if (!zip_stats)
+ goto failed_to_create;
+
+ zip_clear = debugfs_create_file("zip_clear", 0444,
+ zip_debugfs_root,
+ NULL, &zip_clear_fops);
+ if (!zip_clear)
+ goto failed_to_create;
+
+ zip_regs = debugfs_create_file("zip_regs", 0444,
+ zip_debugfs_root,
+ NULL, &zip_regs_fops);
+ if (!zip_regs)
+ goto failed_to_create;
+
+ return 0;
+
+failed_to_create:
+ debugfs_remove_recursive(zip_debugfs_root);
+ return -ENOENT;
+}
+
+static void __exit zip_debugfs_exit(void)
+{
+ debugfs_remove_recursive(zip_debugfs_root);
+}
+
+#else
+static int __init zip_debugfs_init(void)
+{
+ return 0;
+}
+
+static void __exit zip_debugfs_exit(void) { }
+
+#endif
+/* debugfs - end */
+
+static int __init zip_init_module(void)
+{
+ int ret;
+
+ zip_msg("%s\n", DRV_NAME);
+
+ ret = pci_register_driver(&zip_driver);
+ if (ret < 0) {
+ zip_err("ZIP: pci_register_driver() failed\n");
+ return ret;
+ }
+
+ /* Register with the Kernel Crypto Interface */
+ ret = zip_register_compression_device();
+ if (ret < 0) {
+ zip_err("ZIP: Kernel Crypto Registration failed\n");
+ goto err_pci_unregister;
+ }
+
+ /* comp-decomp statistics are handled with debugfs interface */
+ ret = zip_debugfs_init();
+ if (ret < 0) {
+ zip_err("ZIP: debugfs initialization failed\n");
+ goto err_crypto_unregister;
+ }
+
+ return ret;
+
+err_crypto_unregister:
+ zip_unregister_compression_device();
+
+err_pci_unregister:
+ pci_unregister_driver(&zip_driver);
+ return ret;
+}
+
+static void __exit zip_cleanup_module(void)
+{
+ zip_debugfs_exit();
+
+ /* Unregister from the kernel crypto interface */
+ zip_unregister_compression_device();
+
+ /* Unregister this driver for pci zip devices */
+ pci_unregister_driver(&zip_driver);
+}
+
+module_init(zip_init_module);
+module_exit(zip_cleanup_module);
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, zip_id_table);
diff --git a/drivers/crypto/cavium/zip/zip_main.h b/drivers/crypto/cavium/zip/zip_main.h
new file mode 100644
index 000000000000..64e051f60784
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_main.h
@@ -0,0 +1,121 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_MAIN_H__
+#define __ZIP_MAIN_H__
+
+#include "zip_device.h"
+#include "zip_regs.h"
+
+/* PCI device IDs */
+#define PCI_DEVICE_ID_THUNDERX_ZIP 0xA01A
+
+/* ZIP device BARs */
+#define PCI_CFG_ZIP_PF_BAR0 0 /* Base addr for normal regs */
+
+/* Maximum available zip queues */
+#define ZIP_MAX_NUM_QUEUES 8
+
+#define ZIP_128B_ALIGN 7
+
+/* Command queue buffer size */
+#define ZIP_CMD_QBUF_SIZE (8064 + 8)
+
+struct zip_registers {
+ char *reg_name;
+ u64 reg_offset;
+};
+
+/* ZIP Compression - Decompression stats */
+struct zip_stats {
+ atomic64_t comp_req_submit;
+ atomic64_t comp_req_complete;
+ atomic64_t decomp_req_submit;
+ atomic64_t decomp_req_complete;
+ atomic64_t pending_req;
+ atomic64_t comp_in_bytes;
+ atomic64_t comp_out_bytes;
+ atomic64_t decomp_in_bytes;
+ atomic64_t decomp_out_bytes;
+ atomic64_t decomp_bad_reqs;
+};
+
+/* ZIP Instruction Queue */
+struct zip_iq {
+ u64 *sw_head;
+ u64 *sw_tail;
+ u64 *hw_tail;
+ u64 done_cnt;
+ u64 pend_cnt;
+ u64 free_flag;
+
+ /* ZIP IQ lock */
+ spinlock_t lock;
+};
+
+/* ZIP Device */
+struct zip_device {
+ u32 index;
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+
+ /* Different ZIP Constants */
+ u64 depth;
+ u64 onfsize;
+ u64 ctxsize;
+
+ struct zip_iq iq[ZIP_MAX_NUM_QUEUES];
+ struct zip_stats stats;
+};
+
+/* Prototypes */
+struct zip_device *zip_get_device(int node_id);
+int zip_get_node_id(void);
+void zip_reg_write(u64 val, u64 __iomem *addr);
+u64 zip_reg_read(u64 __iomem *addr);
+void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue);
+u32 zip_load_instr(union zip_inst_s *instr, struct zip_device *zip_dev);
+
+#endif /* __ZIP_MAIN_H__ */
diff --git a/drivers/crypto/cavium/zip/zip_mem.c b/drivers/crypto/cavium/zip/zip_mem.c
new file mode 100644
index 000000000000..b3e0843a9169
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_mem.c
@@ -0,0 +1,114 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#include "common.h"
+
+/**
+ * zip_cmd_qbuf_alloc - Allocates a cmd buffer for ZIP Instruction Queue
+ * @zip: Pointer to zip device structure
+ * @q: Queue number to allocate buffer to
+ * Return: 0 if successful, -ENOMEM otherwise
+ */
+int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
+{
+ zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
+ get_order(ZIP_CMD_QBUF_SIZE));
+
+ if (!zip->iq[q].sw_head)
+ return -ENOMEM;
+
+ memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE);
+
+ zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head);
+ return 0;
+}
+
+/**
+ * zip_cmd_qbuf_free - Frees the cmd Queue buffer
+ * @zip: Pointer to zip device structure
+ * @q: Queue number to free buffer of
+ */
+void zip_cmd_qbuf_free(struct zip_device *zip, int q)
+{
+	zip_dbg("Freeing cmd_qbuf %p\n", zip->iq[q].sw_tail);
+
+ free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE));
+}
+
+/**
+ * zip_data_buf_alloc - Allocates memory for a data buffer
+ * @size: Size of the buffer to allocate
+ * Return: Pointer to the allocated buffer, or NULL on failure
+ */
+u8 *zip_data_buf_alloc(u64 size)
+{
+ u8 *ptr;
+
+ ptr = (u8 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
+ get_order(size));
+
+ if (!ptr)
+ return NULL;
+
+ memset(ptr, 0, size);
+
+ zip_dbg("Data buffer allocation success\n");
+ return ptr;
+}
+
+/**
+ * zip_data_buf_free - Frees the memory of a data buffer
+ * @ptr: Pointer to the buffer
+ * @size: Buffer size
+ */
+void zip_data_buf_free(u8 *ptr, u64 size)
+{
+	zip_dbg("Freeing data buffer %p\n", ptr);
+
+ free_pages((u64)ptr, get_order(size));
+}
diff --git a/drivers/crypto/cavium/zip/zip_mem.h b/drivers/crypto/cavium/zip/zip_mem.h
new file mode 100644
index 000000000000..f8f2f08c4a5c
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_mem.h
@@ -0,0 +1,78 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_MEM_H__
+#define __ZIP_MEM_H__
+
+/**
+ * zip_cmd_qbuf_free - Frees the cmd Queue buffer
+ * @zip: Pointer to zip device structure
+ * @q: Queue number to free the buffer of
+ */
+void zip_cmd_qbuf_free(struct zip_device *zip, int q);
+
+/**
+ * zip_cmd_qbuf_alloc - Allocates a Chunk/cmd buffer for ZIP Inst(cmd) Queue
+ * @zip: Pointer to zip device structure
+ * @q: Queue number to allocate buffer to
+ * Return: 0 if successful, -ENOMEM otherwise
+ */
+int zip_cmd_qbuf_alloc(struct zip_device *zip, int q);
+
+/**
+ * zip_data_buf_alloc - Allocates memory for a data buffer
+ * @size: Size of the buffer to allocate
+ * Return: Pointer to the allocated buffer, or NULL on failure
+ */
+u8 *zip_data_buf_alloc(u64 size);
+
+/**
+ * zip_data_buf_free - Frees the memory of a data buffer
+ * @ptr: Pointer to the buffer
+ * @size: Buffer size
+ */
+void zip_data_buf_free(u8 *ptr, u64 size);
+
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_regs.h b/drivers/crypto/cavium/zip/zip_regs.h
new file mode 100644
index 000000000000..d0be682305c1
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_regs.h
@@ -0,0 +1,1347 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License: Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Inc. nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_REGS_H__
+#define __ZIP_REGS_H__
+
+/*
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium ZIP.
+ */
+
+#include <linux/kern_levels.h>
+
+/* ZIP invocation result completion status codes */
+#define ZIP_CMD_NOTDONE 0x0
+
+/* Successful completion. */
+#define ZIP_CMD_SUCCESS 0x1
+
+/* Output truncated */
+#define ZIP_CMD_DTRUNC 0x2
+
+/* Dynamic Stop */
+#define ZIP_CMD_DYNAMIC_STOP 0x3
+
+/* Uncompress ran out of input data when IWORD0[EF] was set */
+#define ZIP_CMD_ITRUNC 0x4
+
+/* Uncompress found the reserved block type 3 */
+#define ZIP_CMD_RBLOCK 0x5
+
+/*
+ * Uncompress found LEN != ~NLEN in a stored (uncompressed) block in the
+ * input.
+ */
+#define ZIP_CMD_NLEN 0x6
+
+/* Uncompress found a bad code in the main Huffman codes. */
+#define ZIP_CMD_BADCODE 0x7
+
+/* Uncompress found a bad code in the 19 Huffman codes encoding lengths. */
+#define ZIP_CMD_BADCODE2 0x8
+
+/* Compress found a zero-length input. */
+#define ZIP_CMD_ZERO_LEN 0x9
+
+/* The compress or decompress encountered an internal parity error. */
+#define ZIP_CMD_PARITY 0xA
+
+/*
+ * Uncompress found a string identifier that precedes the uncompressed data and
+ * decompression history.
+ */
+#define ZIP_CMD_FATAL 0xB
+
+/**
+ * enum zip_int_vec_e - Enumerates the ZIP MSI-X interrupt vectors.
+ */
+enum zip_int_vec_e {
+ ZIP_INT_VEC_E_ECCE = 0x10,
+ ZIP_INT_VEC_E_FIFE = 0x11,
+ ZIP_INT_VEC_E_QUE0_DONE = 0x0,
+ ZIP_INT_VEC_E_QUE0_ERR = 0x8,
+ ZIP_INT_VEC_E_QUE1_DONE = 0x1,
+ ZIP_INT_VEC_E_QUE1_ERR = 0x9,
+ ZIP_INT_VEC_E_QUE2_DONE = 0x2,
+ ZIP_INT_VEC_E_QUE2_ERR = 0xa,
+ ZIP_INT_VEC_E_QUE3_DONE = 0x3,
+ ZIP_INT_VEC_E_QUE3_ERR = 0xb,
+ ZIP_INT_VEC_E_QUE4_DONE = 0x4,
+ ZIP_INT_VEC_E_QUE4_ERR = 0xc,
+ ZIP_INT_VEC_E_QUE5_DONE = 0x5,
+ ZIP_INT_VEC_E_QUE5_ERR = 0xd,
+ ZIP_INT_VEC_E_QUE6_DONE = 0x6,
+ ZIP_INT_VEC_E_QUE6_ERR = 0xe,
+ ZIP_INT_VEC_E_QUE7_DONE = 0x7,
+ ZIP_INT_VEC_E_QUE7_ERR = 0xf,
+ ZIP_INT_VEC_E_ENUM_LAST = 0x12,
+};
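+
+/*
+ * Vector-selection sketch (illustrative only; zip_quex_done_vec() is a
+ * hypothetical helper): the QUE(0..7)_DONE vectors are contiguous, so the
+ * DONEINT vector number for queue @q follows directly from the enumeration
+ * above.
+ */
+static inline int zip_quex_done_vec(int q)
+{
+	return ZIP_INT_VEC_E_QUE0_DONE + q;	/* 0x0 .. 0x7 for q = 0..7 */
+}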
+
+/**
+ * union zip_zptr_addr_s - ZIP Generic Pointer Structure for ADDR.
+ *
+ * It is the generic format of pointers in ZIP_INST_S.
+ */
+union zip_zptr_addr_s {
+ u64 u_reg64;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_49_63 : 15;
+ u64 addr : 49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 addr : 49;
+ u64 reserved_49_63 : 15;
+#endif
+ } s;
+
+};
+
+/**
+ * union zip_zptr_ctl_s - ZIP Generic Pointer Structure for CTL.
+ *
+ * It is the generic format of pointers in ZIP_INST_S.
+ */
+union zip_zptr_ctl_s {
+ u64 u_reg64;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_112_127 : 16;
+ u64 length : 16;
+ u64 reserved_67_95 : 29;
+ u64 fw : 1;
+ u64 nc : 1;
+ u64 data_be : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 data_be : 1;
+ u64 nc : 1;
+ u64 fw : 1;
+ u64 reserved_67_95 : 29;
+ u64 length : 16;
+ u64 reserved_112_127 : 16;
+#endif
+ } s;
+};
+
+/**
+ * union zip_inst_s - ZIP Instruction Structure.
+ * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15 within
+ * the structure).
+ */
+union zip_inst_s {
+ u64 u_reg64[16];
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 doneint : 1;
+ u64 reserved_56_62 : 7;
+ u64 totaloutputlength : 24;
+ u64 reserved_27_31 : 5;
+ u64 exn : 3;
+ u64 reserved_23_23 : 1;
+ u64 exbits : 7;
+ u64 reserved_12_15 : 4;
+ u64 sf : 1;
+ u64 ss : 2;
+ u64 cc : 2;
+ u64 ef : 1;
+ u64 bf : 1;
+ u64 ce : 1;
+ u64 reserved_3_3 : 1;
+ u64 ds : 1;
+ u64 dg : 1;
+ u64 hg : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 hg : 1;
+ u64 dg : 1;
+ u64 ds : 1;
+ u64 reserved_3_3 : 1;
+ u64 ce : 1;
+ u64 bf : 1;
+ u64 ef : 1;
+ u64 cc : 2;
+ u64 ss : 2;
+ u64 sf : 1;
+ u64 reserved_12_15 : 4;
+ u64 exbits : 7;
+ u64 reserved_23_23 : 1;
+ u64 exn : 3;
+ u64 reserved_27_31 : 5;
+ u64 totaloutputlength : 24;
+ u64 reserved_56_62 : 7;
+ u64 doneint : 1;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 historylength : 16;
+ u64 reserved_96_111 : 16;
+ u64 adlercrc32 : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 adlercrc32 : 32;
+ u64 reserved_96_111 : 16;
+ u64 historylength : 16;
+#endif
+ union zip_zptr_addr_s ctx_ptr_addr;
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+ union zip_zptr_addr_s his_ptr_addr;
+ union zip_zptr_ctl_s his_ptr_ctl;
+ union zip_zptr_addr_s inp_ptr_addr;
+ union zip_zptr_ctl_s inp_ptr_ctl;
+ union zip_zptr_addr_s out_ptr_addr;
+ union zip_zptr_ctl_s out_ptr_ctl;
+ union zip_zptr_addr_s res_ptr_addr;
+ union zip_zptr_ctl_s res_ptr_ctl;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_817_831 : 15;
+ u64 wq_ptr : 49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 wq_ptr : 49;
+ u64 reserved_817_831 : 15;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_882_895 : 14;
+ u64 tt : 2;
+ u64 reserved_874_879 : 6;
+ u64 grp : 10;
+ u64 tag : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tag : 32;
+ u64 grp : 10;
+ u64 reserved_874_879 : 6;
+ u64 tt : 2;
+ u64 reserved_882_895 : 14;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_896_959 : 64;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reserved_896_959 : 64;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_960_1023 : 64;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reserved_960_1023 : 64;
+#endif
+ } s;
+};
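+
+/*
+ * Fill-in sketch (illustrative only; zip_fill_inst_example() is a
+ * hypothetical helper, and the remaining IWORD0 control bits are left at
+ * their reset values): shows how a submission path might populate the
+ * IWORDs above for a single-buffer operation.
+ */
+static inline void zip_fill_inst_example(union zip_inst_s *inst,
+					 u64 in_phys, u32 in_len,
+					 u64 out_phys, u32 out_len)
+{
+	memset(inst, 0, sizeof(*inst));
+
+	inst->s.bf = 1;			/* Beginning of file */
+	inst->s.ef = 1;			/* All input data is present */
+	inst->s.totaloutputlength = out_len;
+
+	inst->s.inp_ptr_addr.s.addr = in_phys;
+	inst->s.inp_ptr_ctl.s.length = in_len;
+	inst->s.out_ptr_addr.s.addr = out_phys;
+	inst->s.out_ptr_ctl.s.length = out_len;
+}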
+
+/**
+ * union zip_nptr_s - ZIP Instruction Next-Chunk-Buffer Pointer (NPTR)
+ * Structure
+ *
+ * ZIP_NPTR structure is used to chain all the zip instruction buffers
+ * together. ZIP instruction buffers are managed (allocated and released) by
+ * the software.
+ */
+union zip_nptr_s {
+ u64 u_reg64;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_49_63 : 15;
+ u64 addr : 49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 addr : 49;
+ u64 reserved_49_63 : 15;
+#endif
+ } s;
+};
+
+/**
+ * union zip_zptr_s - ZIP Generic Pointer Structure.
+ *
+ * It is the generic format of pointers in ZIP_INST_S.
+ */
+union zip_zptr_s {
+ u64 u_reg64[2];
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_49_63 : 15;
+ u64 addr : 49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 addr : 49;
+ u64 reserved_49_63 : 15;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_112_127 : 16;
+ u64 length : 16;
+ u64 reserved_67_95 : 29;
+ u64 fw : 1;
+ u64 nc : 1;
+ u64 data_be : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 data_be : 1;
+ u64 nc : 1;
+ u64 fw : 1;
+ u64 reserved_67_95 : 29;
+ u64 length : 16;
+ u64 reserved_112_127 : 16;
+#endif
+ } s;
+};
+
+/**
+ * union zip_zres_s - ZIP Result Structure
+ *
+ * The ZIP coprocessor writes the result structure after it completes the
+ * invocation. The result structure is exactly 24 bytes, and each invocation of
+ * the ZIP coprocessor produces exactly one result structure.
+ */
+union zip_zres_s {
+ u64 u_reg64[3];
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 crc32 : 32;
+ u64 adler32 : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 adler32 : 32;
+ u64 crc32 : 32;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 totalbyteswritten : 32;
+ u64 totalbytesread : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 totalbytesread : 32;
+ u64 totalbyteswritten : 32;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 totalbitsprocessed : 32;
+ u64 doneint : 1;
+ u64 reserved_155_158 : 4;
+ u64 exn : 3;
+ u64 reserved_151_151 : 1;
+ u64 exbits : 7;
+ u64 reserved_137_143 : 7;
+ u64 ef : 1;
+
+ volatile u64 compcode : 8;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+
+ volatile u64 compcode : 8;
+ u64 ef : 1;
+ u64 reserved_137_143 : 7;
+ u64 exbits : 7;
+ u64 reserved_151_151 : 1;
+ u64 exn : 3;
+ u64 reserved_155_158 : 4;
+ u64 doneint : 1;
+ u64 totalbitsprocessed : 32;
+#endif
+ } s;
+};
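+
+/*
+ * Completion-poll sketch (illustrative only; zip_poll_result_example() is a
+ * hypothetical helper): the coprocessor writes the result structure when
+ * the invocation completes, so completion can be detected by spinning on
+ * the volatile COMPCODE field until it leaves the NOTDONE state.
+ */
+static inline int zip_poll_result_example(union zip_zres_s *result)
+{
+	while (result->s.compcode == ZIP_CMD_NOTDONE)
+		cpu_relax();
+
+	return (result->s.compcode == ZIP_CMD_SUCCESS) ? 0 : -EIO;
+}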
+
+/**
+ * union zip_cmd_ctl - Structure representing the register that controls
+ * clock and reset.
+ */
+union zip_cmd_ctl {
+ u64 u_reg64;
+ struct zip_cmd_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_2_63 : 62;
+ u64 forceclk : 1;
+ u64 reset : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reset : 1;
+ u64 forceclk : 1;
+ u64 reserved_2_63 : 62;
+#endif
+ } s;
+};
+
+#define ZIP_CMD_CTL 0x0ull
+
+/**
+ * union zip_constants - Data structure representing the register that contains
+ * all of the current implementation-related parameters of the zip core in this
+ * chip.
+ */
+union zip_constants {
+ u64 u_reg64;
+ struct zip_constants_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 nexec : 8;
+ u64 reserved_49_55 : 7;
+ u64 syncflush_capable : 1;
+ u64 depth : 16;
+ u64 onfsize : 12;
+ u64 ctxsize : 12;
+ u64 reserved_1_7 : 7;
+ u64 disabled : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 disabled : 1;
+ u64 reserved_1_7 : 7;
+ u64 ctxsize : 12;
+ u64 onfsize : 12;
+ u64 depth : 16;
+ u64 syncflush_capable : 1;
+ u64 reserved_49_55 : 7;
+ u64 nexec : 8;
+#endif
+ } s;
+};
+
+#define ZIP_CONSTANTS 0x00A0ull
+
+/**
+ * union zip_corex_bist_status - Represents registers which have the BIST
+ * status of memories in zip cores.
+ *
+ * Each bit is the BIST result of an individual memory
+ * (per bit, 0 = pass and 1 = fail).
+ */
+union zip_corex_bist_status {
+ u64 u_reg64;
+ struct zip_corex_bist_status_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_53_63 : 11;
+ u64 bstatus : 53;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 bstatus : 53;
+ u64 reserved_53_63 : 11;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_COREX_BIST_STATUS(u64 param1)
+{
+	if (param1 <= 1)
+ return 0x0520ull + (param1 & 1) * 0x8ull;
+ pr_err("ZIP_COREX_BIST_STATUS: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_ctl_bist_status - Represents register that has the BIST status of
+ * memories in ZIP_CTL (instruction buffer, G/S pointer FIFO, input data
+ * buffer, output data buffers).
+ *
+ * Each bit is the BIST result of an individual memory
+ * (per bit, 0 = pass and 1 = fail).
+ */
+union zip_ctl_bist_status {
+ u64 u_reg64;
+ struct zip_ctl_bist_status_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_9_63 : 55;
+ u64 bstatus : 9;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 bstatus : 9;
+ u64 reserved_9_63 : 55;
+#endif
+ } s;
+};
+
+#define ZIP_CTL_BIST_STATUS 0x0510ull
+
+/**
+ * union zip_ctl_cfg - Represents the register that controls the behavior of
+ * the ZIP DMA engines.
+ *
+ * It is recommended to keep default values for normal operation. Changing the
+ * values of the fields may be useful for diagnostics.
+ */
+union zip_ctl_cfg {
+ u64 u_reg64;
+ struct zip_ctl_cfg_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_52_63 : 12;
+ u64 ildf : 4;
+ u64 reserved_36_47 : 12;
+ u64 drtf : 4;
+ u64 reserved_27_31 : 5;
+ u64 stcf : 3;
+ u64 reserved_19_23 : 5;
+ u64 ldf : 3;
+ u64 reserved_2_15 : 14;
+ u64 busy : 1;
+ u64 reserved_0_0 : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reserved_0_0 : 1;
+ u64 busy : 1;
+ u64 reserved_2_15 : 14;
+ u64 ldf : 3;
+ u64 reserved_19_23 : 5;
+ u64 stcf : 3;
+ u64 reserved_27_31 : 5;
+ u64 drtf : 4;
+ u64 reserved_36_47 : 12;
+ u64 ildf : 4;
+ u64 reserved_52_63 : 12;
+#endif
+ } s;
+};
+
+#define ZIP_CTL_CFG 0x0560ull
+
+/**
+ * union zip_dbg_corex_inst - Represents the registers that reflect the status
+ * of the current instruction that the ZIP core is executing or has executed.
+ *
+ * These registers are only for debug use.
+ */
+union zip_dbg_corex_inst {
+ u64 u_reg64;
+ struct zip_dbg_corex_inst_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 busy : 1;
+ u64 reserved_35_62 : 28;
+ u64 qid : 3;
+ u64 iid : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 iid : 32;
+ u64 qid : 3;
+ u64 reserved_35_62 : 28;
+ u64 busy : 1;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_DBG_COREX_INST(u64 param1)
+{
+	if (param1 <= 1)
+ return 0x0640ull + (param1 & 1) * 0x8ull;
+ pr_err("ZIP_DBG_COREX_INST: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_dbg_corex_sta - Represents registers that reflect the status of
+ * the zip cores.
+ *
+ * They are for debug use only.
+ */
+union zip_dbg_corex_sta {
+ u64 u_reg64;
+ struct zip_dbg_corex_sta_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 busy : 1;
+ u64 reserved_37_62 : 26;
+ u64 ist : 5;
+ u64 nie : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 nie : 32;
+ u64 ist : 5;
+ u64 reserved_37_62 : 26;
+ u64 busy : 1;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_DBG_COREX_STA(u64 param1)
+{
+	if (param1 <= 1)
+ return 0x0680ull + (param1 & 1) * 0x8ull;
+ pr_err("ZIP_DBG_COREX_STA: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_dbg_quex_sta - Represents registers that reflect the status of
+ * the zip instruction queues.
+ *
+ * They are for debug use only.
+ */
+union zip_dbg_quex_sta {
+ u64 u_reg64;
+ struct zip_dbg_quex_sta_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 busy : 1;
+ u64 reserved_56_62 : 7;
+ u64 rqwc : 24;
+ u64 nii : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 nii : 32;
+ u64 rqwc : 24;
+ u64 reserved_56_62 : 7;
+ u64 busy : 1;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_DBG_QUEX_STA(u64 param1)
+{
+	if (param1 <= 7)
+ return 0x1800ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_DBG_QUEX_STA: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_ecc_ctl - Represents the register that enables ECC for each
+ * individual internal memory that requires ECC.
+ *
+ * For debug purpose, it can also flip one or two bits in the ECC data.
+ */
+union zip_ecc_ctl {
+ u64 u_reg64;
+ struct zip_ecc_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_19_63 : 45;
+ u64 vmem_cdis : 1;
+ u64 vmem_fs : 2;
+ u64 reserved_15_15 : 1;
+ u64 idf1_cdis : 1;
+ u64 idf1_fs : 2;
+ u64 reserved_11_11 : 1;
+ u64 idf0_cdis : 1;
+ u64 idf0_fs : 2;
+ u64 reserved_7_7 : 1;
+ u64 gspf_cdis : 1;
+ u64 gspf_fs : 2;
+ u64 reserved_3_3 : 1;
+ u64 iqf_cdis : 1;
+ u64 iqf_fs : 2;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 iqf_fs : 2;
+ u64 iqf_cdis : 1;
+ u64 reserved_3_3 : 1;
+ u64 gspf_fs : 2;
+ u64 gspf_cdis : 1;
+ u64 reserved_7_7 : 1;
+ u64 idf0_fs : 2;
+ u64 idf0_cdis : 1;
+ u64 reserved_11_11 : 1;
+ u64 idf1_fs : 2;
+ u64 idf1_cdis : 1;
+ u64 reserved_15_15 : 1;
+ u64 vmem_fs : 2;
+ u64 vmem_cdis : 1;
+ u64 reserved_19_63 : 45;
+#endif
+ } s;
+};
+
+#define ZIP_ECC_CTL 0x0568ull
+
+/* NCB - zip_ecce_ena_w1c */
+union zip_ecce_ena_w1c {
+ u64 u_reg64;
+ struct zip_ecce_ena_w1c_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_37_63 : 27;
+ u64 dbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 sbe : 5;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 sbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 dbe : 5;
+ u64 reserved_37_63 : 27;
+#endif
+ } s;
+};
+
+#define ZIP_ECCE_ENA_W1C 0x0598ull
+
+/* NCB - zip_ecce_ena_w1s */
+union zip_ecce_ena_w1s {
+ u64 u_reg64;
+ struct zip_ecce_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_37_63 : 27;
+ u64 dbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 sbe : 5;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 sbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 dbe : 5;
+ u64 reserved_37_63 : 27;
+#endif
+ } s;
+};
+
+#define ZIP_ECCE_ENA_W1S 0x0590ull
+
+/**
+ * union zip_ecce_int - Represents the register that contains the status of the
+ * ECC interrupt sources.
+ */
+union zip_ecce_int {
+ u64 u_reg64;
+ struct zip_ecce_int_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_37_63 : 27;
+ u64 dbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 sbe : 5;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 sbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 dbe : 5;
+ u64 reserved_37_63 : 27;
+#endif
+ } s;
+};
+
+#define ZIP_ECCE_INT 0x0580ull
+
+/* NCB - zip_ecce_int_w1s */
+union zip_ecce_int_w1s {
+ u64 u_reg64;
+ struct zip_ecce_int_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_37_63 : 27;
+ u64 dbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 sbe : 5;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 sbe : 5;
+ u64 reserved_5_31 : 27;
+ u64 dbe : 5;
+ u64 reserved_37_63 : 27;
+#endif
+ } s;
+};
+
+#define ZIP_ECCE_INT_W1S 0x0588ull
+
+/* NCB - zip_fife_ena_w1c */
+union zip_fife_ena_w1c {
+ u64 u_reg64;
+ struct zip_fife_ena_w1c_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_42_63 : 22;
+ u64 asserts : 42;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 asserts : 42;
+ u64 reserved_42_63 : 22;
+#endif
+ } s;
+};
+
+#define ZIP_FIFE_ENA_W1C 0x0090ull
+
+/* NCB - zip_fife_ena_w1s */
+union zip_fife_ena_w1s {
+ u64 u_reg64;
+ struct zip_fife_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_42_63 : 22;
+ u64 asserts : 42;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 asserts : 42;
+ u64 reserved_42_63 : 22;
+#endif
+ } s;
+};
+
+#define ZIP_FIFE_ENA_W1S 0x0088ull
+
+/* NCB - zip_fife_int */
+union zip_fife_int {
+ u64 u_reg64;
+ struct zip_fife_int_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_42_63 : 22;
+ u64 asserts : 42;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 asserts : 42;
+ u64 reserved_42_63 : 22;
+#endif
+ } s;
+};
+
+#define ZIP_FIFE_INT 0x0078ull
+
+/* NCB - zip_fife_int_w1s */
+union zip_fife_int_w1s {
+ u64 u_reg64;
+ struct zip_fife_int_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_42_63 : 22;
+ u64 asserts : 42;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 asserts : 42;
+ u64 reserved_42_63 : 22;
+#endif
+ } s;
+};
+
+#define ZIP_FIFE_INT_W1S 0x0080ull
+
+/**
+ * union zip_msix_pbax - Represents the register that is the MSI-X PBA table
+ *
+ * The bit number is indexed by the ZIP_INT_VEC_E enumeration.
+ */
+union zip_msix_pbax {
+ u64 u_reg64;
+ struct zip_msix_pbax_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 pend : 64;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 pend : 64;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_MSIX_PBAX(u64 param1)
+{
+	if (param1 == 0)
+ return 0x0000838000FF0000ull;
+ pr_err("ZIP_MSIX_PBAX: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_msix_vecx_addr - Represents the register that is the MSI-X vector
+ * table, indexed by the ZIP_INT_VEC_E enumeration.
+ */
+union zip_msix_vecx_addr {
+ u64 u_reg64;
+ struct zip_msix_vecx_addr_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_49_63 : 15;
+ u64 addr : 47;
+ u64 reserved_1_1 : 1;
+ u64 secvec : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 secvec : 1;
+ u64 reserved_1_1 : 1;
+ u64 addr : 47;
+ u64 reserved_49_63 : 15;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_MSIX_VECX_ADDR(u64 param1)
+{
+	if (param1 <= 17)
+ return 0x0000838000F00000ull + (param1 & 31) * 0x10ull;
+ pr_err("ZIP_MSIX_VECX_ADDR: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_msix_vecx_ctl - Represents the register that is the MSI-X vector
+ * table, indexed by the ZIP_INT_VEC_E enumeration.
+ */
+union zip_msix_vecx_ctl {
+ u64 u_reg64;
+ struct zip_msix_vecx_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_33_63 : 31;
+ u64 mask : 1;
+ u64 reserved_20_31 : 12;
+ u64 data : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 data : 20;
+ u64 reserved_20_31 : 12;
+ u64 mask : 1;
+ u64 reserved_33_63 : 31;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_MSIX_VECX_CTL(u64 param1)
+{
+	if (param1 <= 17)
+ return 0x0000838000F00008ull + (param1 & 31) * 0x10ull;
+ pr_err("ZIP_MSIX_VECX_CTL: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_done - Represents the registers that contain the per-queue
+ * instruction done count.
+ */
+union zip_quex_done {
+ u64 u_reg64;
+ struct zip_quex_done_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_20_63 : 44;
+ u64 done : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 done : 20;
+ u64 reserved_20_63 : 44;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_DONE(u64 param1)
+{
+	if (param1 <= 7)
+ return 0x2000ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_DONE: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_done_ack - Represents the registers that, when written,
+ * decrement the per-queue instruction done count.
+ */
+union zip_quex_done_ack {
+ u64 u_reg64;
+ struct zip_quex_done_ack_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_20_63 : 44;
+ u64 done_ack : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 done_ack : 20;
+ u64 reserved_20_63 : 44;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_DONE_ACK(u64 param1)
+{
+	if (param1 <= 7)
+ return 0x2200ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_DONE_ACK: %llu\n", param1);
+ return 0;
+}
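+
+/*
+ * Done/ack sketch (illustrative only; zip_ack_done_example() is a
+ * hypothetical helper, @reg_base stands in for the mapped BAR0, and
+ * zip_reg_read()/zip_reg_write() are declared in zip_main.h): a DONEINT
+ * handler would read the done count and write the same value back to the
+ * ACK register so the hardware decrements its counter.
+ */
+static inline void zip_ack_done_example(void __iomem *reg_base, u64 queue)
+{
+	union zip_quex_done done;
+	union zip_quex_done_ack ack;
+
+	done.u_reg64 = zip_reg_read(reg_base + ZIP_QUEX_DONE(queue));
+
+	ack.u_reg64 = 0ull;
+	ack.s.done_ack = done.s.done;
+	zip_reg_write(ack.u_reg64, reg_base + ZIP_QUEX_DONE_ACK(queue));
+}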
+
+/**
+ * union zip_quex_done_ena_w1c - Represents the register that, when written
+ * 1, disables the DONEINT interrupt for the queue.
+ */
+union zip_quex_done_ena_w1c {
+ u64 u_reg64;
+ struct zip_quex_done_ena_w1c_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_1_63 : 63;
+ u64 done_ena : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 done_ena : 1;
+ u64 reserved_1_63 : 63;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_DONE_ENA_W1C(u64 param1)
+{
+	if (param1 <= 7)
+ return 0x2600ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_DONE_ENA_W1C: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_done_ena_w1s - Represents the register that, when written
+ * 1, enables the DONEINT interrupt for the queue.
+ */
+union zip_quex_done_ena_w1s {
+ u64 u_reg64;
+ struct zip_quex_done_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_1_63 : 63;
+ u64 done_ena : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 done_ena : 1;
+ u64 reserved_1_63 : 63;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_DONE_ENA_W1S(u64 param1)
+{
+	if (param1 <= 7)
+ return 0x2400ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_DONE_ENA_W1S: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_done_wait - Represents the register that specifies the
+ * per-queue interrupt coalescing settings.
+ */
+union zip_quex_done_wait {
+ u64 u_reg64;
+ struct zip_quex_done_wait_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_48_63 : 16;
+ u64 time_wait : 16;
+ u64 reserved_20_31 : 12;
+ u64 num_wait : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 num_wait : 20;
+ u64 reserved_20_31 : 12;
+ u64 time_wait : 16;
+ u64 reserved_48_63 : 16;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_DONE_WAIT(u64 param1)
+{
+	if (param1 <= 7)
+ return 0x2800ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_DONE_WAIT: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_doorbell - Represents doorbell registers for the ZIP
+ * instruction queues.
+ */
+union zip_quex_doorbell {
+ u64 u_reg64;
+ struct zip_quex_doorbell_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_20_63 : 44;
+ u64 dbell_cnt : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 dbell_cnt : 20;
+ u64 reserved_20_63 : 44;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_DOORBELL(u64 param1)
+{
+	if (param1 <= 7)
+ return 0x4000ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_DOORBELL: %llu\n", param1);
+ return 0;
+}
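+
+/*
+ * Doorbell sketch (illustrative only; zip_ring_doorbell_example() is a
+ * hypothetical helper, and DBELL_CNT is assumed here to take a count of
+ * newly added instructions): after new instructions have been copied into
+ * a queue's command buffer, the queue is kicked through its doorbell.
+ */
+static inline void zip_ring_doorbell_example(void __iomem *reg_base,
+					     u64 queue, u32 ninstr)
+{
+	union zip_quex_doorbell dbell;
+
+	dbell.u_reg64 = 0ull;
+	dbell.s.dbell_cnt = ninstr;
+	zip_reg_write(dbell.u_reg64, reg_base + ZIP_QUEX_DOORBELL(queue));
+}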
+
+union zip_quex_err_ena_w1c {
+ u64 u_reg64;
+ struct zip_quex_err_ena_w1c_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_5_63 : 59;
+ u64 mdbe : 1;
+ u64 nwrp : 1;
+ u64 nrrp : 1;
+ u64 irde : 1;
+ u64 dovf : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 dovf : 1;
+ u64 irde : 1;
+ u64 nrrp : 1;
+ u64 nwrp : 1;
+ u64 mdbe : 1;
+ u64 reserved_5_63 : 59;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_ERR_ENA_W1C(u64 param1)
+{
+	if (param1 <= 7)
+ return 0x3600ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_ERR_ENA_W1C: %llu\n", param1);
+ return 0;
+}
+
+union zip_quex_err_ena_w1s {
+ u64 u_reg64;
+ struct zip_quex_err_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_5_63 : 59;
+ u64 mdbe : 1;
+ u64 nwrp : 1;
+ u64 nrrp : 1;
+ u64 irde : 1;
+ u64 dovf : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 dovf : 1;
+ u64 irde : 1;
+ u64 nrrp : 1;
+ u64 nwrp : 1;
+ u64 mdbe : 1;
+ u64 reserved_5_63 : 59;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_ERR_ENA_W1S(u64 param1)
+{
+	if (param1 <= 7)
+ return 0x3400ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_ERR_ENA_W1S: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_err_int - Represents registers that contain the per-queue
+ * error interrupts.
+ */
+union zip_quex_err_int {
+ u64 u_reg64;
+ struct zip_quex_err_int_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_5_63 : 59;
+ u64 mdbe : 1;
+ u64 nwrp : 1;
+ u64 nrrp : 1;
+ u64 irde : 1;
+ u64 dovf : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 dovf : 1;
+ u64 irde : 1;
+ u64 nrrp : 1;
+ u64 nwrp : 1;
+ u64 mdbe : 1;
+ u64 reserved_5_63 : 59;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_ERR_INT(u64 param1)
+{
+	if (param1 <= 7)
+ return 0x3000ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_ERR_INT: %llu\n", param1);
+ return 0;
+}
+
+/* NCB - zip_que#_err_int_w1s */
+union zip_quex_err_int_w1s {
+ u64 u_reg64;
+ struct zip_quex_err_int_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_5_63 : 59;
+ u64 mdbe : 1;
+ u64 nwrp : 1;
+ u64 nrrp : 1;
+ u64 irde : 1;
+ u64 dovf : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 dovf : 1;
+ u64 irde : 1;
+ u64 nrrp : 1;
+ u64 nwrp : 1;
+ u64 mdbe : 1;
+ u64 reserved_5_63 : 59;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_ERR_INT_W1S(u64 param1)
+{
+	if (param1 <= 7)
+ return 0x3200ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_ERR_INT_W1S: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_gcfg - Represents the registers that reflect the status of
+ * the zip instruction queues; debug use only.
+ */
+union zip_quex_gcfg {
+ u64 u_reg64;
+ struct zip_quex_gcfg_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_4_63 : 60;
+ u64 iqb_ldwb : 1;
+ u64 cbw_sty : 1;
+ u64 l2ld_cmd : 2;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 l2ld_cmd : 2;
+ u64 cbw_sty : 1;
+ u64 iqb_ldwb : 1;
+ u64 reserved_4_63 : 60;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_GCFG(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x1A00ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_GCFG: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_map - Represents the registers that control how each
+ * instruction queue maps to zip cores.
+ */
+union zip_quex_map {
+ u64 u_reg64;
+ struct zip_quex_map_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_2_63 : 62;
+ u64 zce : 2;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 zce : 2;
+ u64 reserved_2_63 : 62;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_MAP(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x1400ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_MAP: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_sbuf_addr - Represents the registers that set the buffer
+ * parameters for the instruction queues.
+ *
+ * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
+ * this register to effectively reset the command buffer state machine.
+ * These registers must be programmed after SW programs the corresponding
+ * ZIP_QUE(0..7)_SBUF_CTL.
+ */
+union zip_quex_sbuf_addr {
+ u64 u_reg64;
+ struct zip_quex_sbuf_addr_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_49_63 : 15;
+ u64 ptr : 42;
+ u64 off : 7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 off : 7;
+ u64 ptr : 42;
+ u64 reserved_49_63 : 15;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_SBUF_ADDR(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x1000ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_SBUF_ADDR: %llu\n", param1);
+ return 0;
+}
+
+/**
+ * union zip_quex_sbuf_ctl - Represents the registers that set the buffer
+ * parameters for the instruction queues.
+ *
+ * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
+ * this register to effectively reset the command buffer state machine.
+ * These registers must be programmed before SW programs the corresponding
+ * ZIP_QUE(0..7)_SBUF_ADDR.
+ */
+union zip_quex_sbuf_ctl {
+ u64 u_reg64;
+ struct zip_quex_sbuf_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_45_63 : 19;
+ u64 size : 13;
+ u64 inst_be : 1;
+ u64 reserved_24_30 : 7;
+ u64 stream_id : 8;
+ u64 reserved_12_15 : 4;
+ u64 aura : 12;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 aura : 12;
+ u64 reserved_12_15 : 4;
+ u64 stream_id : 8;
+ u64 reserved_24_30 : 7;
+ u64 inst_be : 1;
+ u64 size : 13;
+ u64 reserved_45_63 : 19;
+#endif
+ } s;
+};
+
+static inline u64 ZIP_QUEX_SBUF_CTL(u64 param1)
+{
+ if (param1 <= 7)
+ return 0x1200ull + (param1 & 7) * 0x8ull;
+ pr_err("ZIP_QUEX_SBUF_CTL: %llu\n", param1);
+ return 0;
+}
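The comment blocks above fix a programming order: ZIP_QUE(x)_SBUF_CTL before ZIP_QUE(x)_SBUF_ADDR. A sketch of that order with illustrative names ("iq_dma" for the instruction buffer's DMA address); per the field layout, ptr holds bits <48:7> of a 128-byte-aligned address:

static void zip_setup_sbuf(void __iomem *base, u64 q,
			   dma_addr_t iq_dma, u32 size_words)
{
	union zip_quex_sbuf_ctl ctl;
	union zip_quex_sbuf_addr addr;

	/* CTL first: command buffer size (plus endianness, stream id, ...) */
	ctl.u_reg64 = 0ull;
	ctl.s.size = size_words;
	writeq(ctl.u_reg64, base + ZIP_QUEX_SBUF_CTL(q));

	/* Then ADDR: upper bits of the 128-byte-aligned buffer address */
	addr.u_reg64 = 0ull;
	addr.s.ptr = (u64)iq_dma >> 7;
	writeq(addr.u_reg64, base + ZIP_QUEX_SBUF_ADDR(q));
}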
+
+/**
+ * union zip_que_ena - Represents queue enable register
+ *
+ * If a queue is disabled, ZIP_CTL stops fetching instructions from the queue.
+ */
+union zip_que_ena {
+ u64 u_reg64;
+ struct zip_que_ena_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_8_63 : 56;
+ u64 ena : 8;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 ena : 8;
+ u64 reserved_8_63 : 56;
+#endif
+ } s;
+};
+
+#define ZIP_QUE_ENA 0x0500ull
+
+/**
+ * union zip_que_pri - Represents the register that defines the priority
+ * between instruction queues.
+ */
+union zip_que_pri {
+ u64 u_reg64;
+ struct zip_que_pri_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_8_63 : 56;
+ u64 pri : 8;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 pri : 8;
+ u64 reserved_8_63 : 56;
+#endif
+ } s;
+};
+
+#define ZIP_QUE_PRI 0x0508ull
+
+/**
+ * union zip_throttle - Represents the register that controls the maximum
+ * number of in-flight X2I data fetch transactions.
+ *
+ * Writing 0 to this register causes the ZIP module to temporarily suspend NCB
+ * accesses; it is not recommended for normal operation, but may be useful for
+ * diagnostics.
+ */
+union zip_throttle {
+ u64 u_reg64;
+ struct zip_throttle_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_6_63 : 58;
+ u64 ld_infl : 6;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 ld_infl : 6;
+ u64 reserved_6_63 : 58;
+#endif
+ } s;
+};
+
+#define ZIP_THROTTLE 0x0010ull
+
+#endif /* _CSRS_ZIP__ */
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 3a47cdb8f0c8..9e845e866dec 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -19,13 +19,10 @@
#define AES_BUF_ORDER 2
#define AES_BUF_SIZE ((PAGE_SIZE << AES_BUF_ORDER) \
& ~(AES_BLOCK_SIZE - 1))
+#define AES_MAX_STATE_BUF_SIZE SIZE_IN_WORDS(AES_KEYSIZE_256 + \
+ AES_BLOCK_SIZE * 2)
+#define AES_MAX_CT_SIZE 6
-/* AES command token size */
-#define AES_CT_SIZE_ECB 2
-#define AES_CT_SIZE_CBC 3
-#define AES_CT_SIZE_CTR 3
-#define AES_CT_SIZE_GCM_OUT 5
-#define AES_CT_SIZE_GCM_IN 6
#define AES_CT_CTRL_HDR cpu_to_le32(0x00220000)
/* AES-CBC/ECB/CTR command token */
@@ -50,6 +47,8 @@
#define AES_TFM_128BITS cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS cpu_to_le32(0xf << 16)
+#define AES_TFM_GHASH_DIGEST cpu_to_le32(0x2 << 21)
+#define AES_TFM_GHASH cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC cpu_to_le32(0x1 << 0)
@@ -59,10 +58,9 @@
#define AES_TFM_FULL_IV cpu_to_le32(0xf << 5) /* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17)
-#define AES_TFM_GHASH_DIG cpu_to_le32(0x2 << 21)
-#define AES_TFM_GHASH cpu_to_le32(0x4 << 23)
/* AES flags */
+#define AES_FLAGS_CIPHER_MSK GENMASK(2, 0)
#define AES_FLAGS_ECB BIT(0)
#define AES_FLAGS_CBC BIT(1)
#define AES_FLAGS_CTR BIT(2)
@@ -70,19 +68,15 @@
#define AES_FLAGS_ENCRYPT BIT(4)
#define AES_FLAGS_BUSY BIT(5)
+#define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26))
+
/**
- * Command token(CT) is a set of hardware instructions that
- * are used to control engine's processing flow of AES.
- *
- * Transform information(TFM) is used to define AES state and
- * contains all keys and initial vectors.
- *
- * The engine requires CT and TFM to do:
- * - Commands decoding and control of the engine's data path.
- * - Coordinating hardware data fetch and store operations.
- * - Result token construction and output.
+ * mtk_aes_info - hardware information of AES
+ * @cmd: command token, the hardware instructions.
+ * @tfm: transform state of the cipher algorithm.
+ * @state: buffer holding the keys and initial vectors.
*
- * Memory map of GCM's TFM:
+ * Memory layout of GCM buffer:
* /-----------\
* | AES KEY | 128/196/256 bits
* |-----------|
@@ -90,14 +84,16 @@
* |-----------|
* | IVs | 4 * 4 bytes
* \-----------/
+ *
+ * The engine requires all of this information to handle:
+ * - Command decoding and control of the engine's data path.
+ * - Coordinating hardware data fetch and store operations.
+ * - Result token construction and output.
*/
-struct mtk_aes_ct {
- __le32 cmd[AES_CT_SIZE_GCM_IN];
-};
-
-struct mtk_aes_tfm {
- __le32 ctrl[2];
- __le32 state[SIZE_IN_WORDS(AES_KEYSIZE_256 + AES_BLOCK_SIZE * 2)];
+struct mtk_aes_info {
+ __le32 cmd[AES_MAX_CT_SIZE];
+ __le32 tfm[2];
+ __le32 state[AES_MAX_STATE_BUF_SIZE];
};
struct mtk_aes_reqctx {
@@ -107,11 +103,12 @@ struct mtk_aes_reqctx {
struct mtk_aes_base_ctx {
struct mtk_cryp *cryp;
u32 keylen;
+ __le32 keymode;
+
mtk_aes_fn start;
- struct mtk_aes_ct ct;
+ struct mtk_aes_info info;
dma_addr_t ct_dma;
- struct mtk_aes_tfm tfm;
dma_addr_t tfm_dma;
__le32 ct_hdr;
@@ -248,6 +245,33 @@ static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
sg->length += dma->remainder;
}
+static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
+{
+ int i;
+
+ for (i = 0; i < SIZE_IN_WORDS(size); i++)
+ dst[i] = cpu_to_le32(src[i]);
+}
+
+static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
+{
+ int i;
+
+ for (i = 0; i < SIZE_IN_WORDS(size); i++)
+ dst[i] = cpu_to_be32(src[i]);
+}
+
+static inline int mtk_aes_complete(struct mtk_cryp *cryp,
+ struct mtk_aes_rec *aes,
+ int err)
+{
+ aes->flags &= ~AES_FLAGS_BUSY;
+ aes->areq->complete(aes->areq, err);
+ /* Handle new request */
+ tasklet_schedule(&aes->queue_task);
+ return err;
+}
+
/*
* Write descriptors for processing. This will configure the engine, load
* the transform information and then start the packet processing.
@@ -262,7 +286,7 @@ static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
/* Write command descriptors */
for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
- cmd = ring->cmd_base + ring->cmd_pos;
+ cmd = ring->cmd_next;
cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
cmd->buf = cpu_to_le32(sg_dma_address(ssg));
@@ -274,25 +298,30 @@ static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
}
- if (++ring->cmd_pos == MTK_DESC_NUM)
- ring->cmd_pos = 0;
+ /* Advance the ring position and wrap at the boundary */
+ if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
+ ring->cmd_next = ring->cmd_base;
}
cmd->hdr |= MTK_DESC_LAST;
/* Prepare result descriptors */
for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
- res = ring->res_base + ring->res_pos;
+ res = ring->res_next;
res->hdr = MTK_DESC_BUF_LEN(dsg->length);
res->buf = cpu_to_le32(sg_dma_address(dsg));
if (nents == 0)
res->hdr |= MTK_DESC_FIRST;
- if (++ring->res_pos == MTK_DESC_NUM)
- ring->res_pos = 0;
+ /* Advance the ring position and wrap at the boundary */
+ if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
+ ring->res_next = ring->res_base;
}
res->hdr |= MTK_DESC_LAST;
+ /* Keep a pointer to the last result descriptor for status checks */
+ ring->res_prev = res;
+
/* Prepare enough space for authenticated tag */
if (aes->flags & AES_FLAGS_GCM)
res->hdr += AES_BLOCK_SIZE;
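The switch from cmd_pos/res_pos indices to cmd_next/res_next pointers makes the wraparound explicit and saves an index-to-address computation per descriptor. The pattern in isolation (helper name is illustrative, not from the patch):

static struct mtk_desc *mtk_ring_next_cmd(struct mtk_ring *ring)
{
	struct mtk_desc *cur = ring->cmd_next;

	/* Advance; wrap to the base after the last of MTK_DESC_NUM slots */
	if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
		ring->cmd_next = ring->cmd_base;
	return cur;
}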
@@ -313,9 +342,7 @@ static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
struct mtk_aes_base_ctx *ctx = aes->ctx;
- dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
- DMA_TO_DEVICE);
- dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
+ dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
DMA_TO_DEVICE);
if (aes->src.sg == aes->dst.sg) {
@@ -346,16 +373,14 @@ static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
struct mtk_aes_base_ctx *ctx = aes->ctx;
+ struct mtk_aes_info *info = &ctx->info;
- ctx->ct_dma = dma_map_single(cryp->dev, &ctx->ct, sizeof(ctx->ct),
+ ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
- return -EINVAL;
+ goto exit;
- ctx->tfm_dma = dma_map_single(cryp->dev, &ctx->tfm, sizeof(ctx->tfm),
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(cryp->dev, ctx->tfm_dma)))
- goto tfm_map_err;
+ ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);
if (aes->src.sg == aes->dst.sg) {
aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
@@ -382,13 +407,9 @@ static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
return mtk_aes_xmit(cryp, aes);
sg_map_err:
- dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
- DMA_TO_DEVICE);
-tfm_map_err:
- dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
- DMA_TO_DEVICE);
-
- return -EINVAL;
+ dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
+exit:
+ return mtk_aes_complete(cryp, aes, -EINVAL);
}
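Because cmd[] is the first member of struct mtk_aes_info, the single dma_map_single() above covers the command token and the transform record at once, and tfm_dma falls out by offset: one mapping and one unmapping per request instead of two. The same computation, written with offsetof() for clarity (a sketch, not the patch's wording):

ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
	return -EINVAL;
/* sizeof(info->cmd) == offsetof(struct mtk_aes_info, tfm) */
ctx->tfm_dma = ctx->ct_dma + offsetof(struct mtk_aes_info, tfm);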
/* Initialize transform information of CBC/ECB/CTR mode */
@@ -397,50 +418,43 @@ static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
{
struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
struct mtk_aes_base_ctx *ctx = aes->ctx;
+ struct mtk_aes_info *info = &ctx->info;
+ u32 cnt = 0;
ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
- ctx->ct.cmd[0] = AES_CMD0 | cpu_to_le32(len);
- ctx->ct.cmd[1] = AES_CMD1;
+ info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
+ info->cmd[cnt++] = AES_CMD1;
+ info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
if (aes->flags & AES_FLAGS_ENCRYPT)
- ctx->tfm.ctrl[0] = AES_TFM_BASIC_OUT;
+ info->tfm[0] |= AES_TFM_BASIC_OUT;
else
- ctx->tfm.ctrl[0] = AES_TFM_BASIC_IN;
+ info->tfm[0] |= AES_TFM_BASIC_IN;
- if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128))
- ctx->tfm.ctrl[0] |= AES_TFM_128BITS;
- else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256))
- ctx->tfm.ctrl[0] |= AES_TFM_256BITS;
- else
- ctx->tfm.ctrl[0] |= AES_TFM_192BITS;
-
- if (aes->flags & AES_FLAGS_CBC) {
- const u32 *iv = (const u32 *)req->info;
- u32 *iv_state = ctx->tfm.state + ctx->keylen;
- int i;
-
- ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen +
- SIZE_IN_WORDS(AES_BLOCK_SIZE));
- ctx->tfm.ctrl[1] = AES_TFM_CBC | AES_TFM_FULL_IV;
-
- for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
- iv_state[i] = cpu_to_le32(iv[i]);
-
- ctx->ct.cmd[2] = AES_CMD2;
- ctx->ct_size = AES_CT_SIZE_CBC;
- } else if (aes->flags & AES_FLAGS_ECB) {
- ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen);
- ctx->tfm.ctrl[1] = AES_TFM_ECB;
-
- ctx->ct_size = AES_CT_SIZE_ECB;
- } else if (aes->flags & AES_FLAGS_CTR) {
- ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen +
- SIZE_IN_WORDS(AES_BLOCK_SIZE));
- ctx->tfm.ctrl[1] = AES_TFM_CTR_LOAD | AES_TFM_FULL_IV;
-
- ctx->ct.cmd[2] = AES_CMD2;
- ctx->ct_size = AES_CT_SIZE_CTR;
+ switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
+ case AES_FLAGS_CBC:
+ info->tfm[1] = AES_TFM_CBC;
+ break;
+ case AES_FLAGS_ECB:
+ info->tfm[1] = AES_TFM_ECB;
+ goto ecb;
+ case AES_FLAGS_CTR:
+ info->tfm[1] = AES_TFM_CTR_LOAD;
+ goto ctr;
+
+ default:
+ /* Should not happen... */
+ return;
}
+
+ mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
+ AES_BLOCK_SIZE);
+ctr:
+ info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
+ info->tfm[1] |= AES_TFM_FULL_IV;
+ info->cmd[cnt++] = AES_CMD2;
+ecb:
+ ctx->ct_size = cnt;
}
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
@@ -465,7 +479,7 @@ static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
padlen = mtk_aes_padlen(len);
if (len + padlen > AES_BUF_SIZE)
- return -ENOMEM;
+ return mtk_aes_complete(cryp, aes, -ENOMEM);
if (!src_aligned) {
sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
@@ -525,13 +539,10 @@ static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
return ctx->start(cryp, aes);
}
-static int mtk_aes_complete(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
+static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
+ struct mtk_aes_rec *aes)
{
- aes->flags &= ~AES_FLAGS_BUSY;
- aes->areq->complete(aes->areq, 0);
-
- /* Handle new request */
- return mtk_aes_handle_queue(cryp, aes->id, NULL);
+ return mtk_aes_complete(cryp, aes, 0);
}
static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
@@ -540,7 +551,7 @@ static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
mtk_aes_set_mode(aes, rctx);
- aes->resume = mtk_aes_complete;
+ aes->resume = mtk_aes_transfer_complete;
return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
}
@@ -557,15 +568,14 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
struct scatterlist *src, *dst;
- int i;
- u32 start, end, ctr, blocks, *iv_state;
+ u32 start, end, ctr, blocks;
size_t datalen;
bool fragmented = false;
/* Check for transfer completion. */
cctx->offset += aes->total;
if (cctx->offset >= req->nbytes)
- return mtk_aes_complete(cryp, aes);
+ return mtk_aes_transfer_complete(cryp, aes);
/* Compute data length. */
datalen = req->nbytes - cctx->offset;
@@ -587,9 +597,8 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));
/* Write IVs into transform state buffer. */
- iv_state = ctx->tfm.state + ctx->keylen;
- for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
- iv_state[i] = cpu_to_le32(cctx->iv[i]);
+ mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
+ AES_BLOCK_SIZE);
if (unlikely(fragmented)) {
/*
@@ -599,7 +608,6 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
cctx->iv[3] = cpu_to_be32(ctr);
crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
}
- aes->resume = mtk_aes_ctr_transfer;
return mtk_aes_dma(cryp, aes, src, dst, datalen);
}
@@ -615,6 +623,7 @@ static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
cctx->offset = 0;
aes->total = 0;
+ aes->resume = mtk_aes_ctr_transfer;
return mtk_aes_ctr_transfer(cryp, aes);
}
@@ -624,21 +633,25 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
const u8 *key, u32 keylen)
{
struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- const u32 *aes_key = (const u32 *)key;
- u32 *key_state = ctx->tfm.state;
- int i;
- if (keylen != AES_KEYSIZE_128 &&
- keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->keymode = AES_TFM_128BITS;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->keymode = AES_TFM_192BITS;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->keymode = AES_TFM_256BITS;
+ break;
+
+ default:
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->keylen = SIZE_IN_WORDS(keylen);
-
- for (i = 0; i < ctx->keylen; i++)
- key_state[i] = cpu_to_le32(aes_key[i]);
+ mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
return 0;
}
@@ -789,6 +802,19 @@ mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}
+/*
+ * The engine verifies and compares the tag automatically, so we just
+ * need to check the returned status stored in the result descriptor.
+ */
+static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
+ struct mtk_aes_rec *aes)
+{
+ u32 status = cryp->ring[aes->id]->res_prev->ct;
+
+ return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
+ -EBADMSG : 0);
+}
+
/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
struct mtk_aes_rec *aes,
@@ -797,45 +823,35 @@ static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
struct aead_request *req = aead_request_cast(aes->areq);
struct mtk_aes_base_ctx *ctx = aes->ctx;
struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
- const u32 *iv = (const u32 *)req->iv;
- u32 *iv_state = ctx->tfm.state + ctx->keylen +
- SIZE_IN_WORDS(AES_BLOCK_SIZE);
+ struct mtk_aes_info *info = &ctx->info;
u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
- int i;
+ u32 cnt = 0;
ctx->ct_hdr = AES_CT_CTRL_HDR | len;
- ctx->ct.cmd[0] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
- ctx->ct.cmd[1] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
- ctx->ct.cmd[2] = AES_GCM_CMD2;
- ctx->ct.cmd[3] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);
+ info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
+ info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
+ info->cmd[cnt++] = AES_GCM_CMD2;
+ info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);
if (aes->flags & AES_FLAGS_ENCRYPT) {
- ctx->ct.cmd[4] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
- ctx->ct_size = AES_CT_SIZE_GCM_OUT;
- ctx->tfm.ctrl[0] = AES_TFM_GCM_OUT;
+ info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
+ info->tfm[0] = AES_TFM_GCM_OUT;
} else {
- ctx->ct.cmd[4] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
- ctx->ct.cmd[5] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
- ctx->ct_size = AES_CT_SIZE_GCM_IN;
- ctx->tfm.ctrl[0] = AES_TFM_GCM_IN;
+ info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
+ info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
+ info->tfm[0] = AES_TFM_GCM_IN;
}
+ ctx->ct_size = cnt;
- if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128))
- ctx->tfm.ctrl[0] |= AES_TFM_128BITS;
- else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256))
- ctx->tfm.ctrl[0] |= AES_TFM_256BITS;
- else
- ctx->tfm.ctrl[0] |= AES_TFM_192BITS;
+ info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
+ ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
+ ctx->keymode;
+ info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
+ AES_TFM_ENC_HASH;
- ctx->tfm.ctrl[0] |= AES_TFM_GHASH_DIG | AES_TFM_GHASH |
- AES_TFM_SIZE(ctx->keylen + SIZE_IN_WORDS(
- AES_BLOCK_SIZE + ivsize));
- ctx->tfm.ctrl[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE |
- AES_TFM_3IV | AES_TFM_ENC_HASH;
-
- for (i = 0; i < SIZE_IN_WORDS(ivsize); i++)
- iv_state[i] = cpu_to_le32(iv[i]);
+ mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
+ AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
@@ -856,7 +872,7 @@ static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
if (!src_aligned || !dst_aligned) {
if (aes->total > AES_BUF_SIZE)
- return -ENOMEM;
+ return mtk_aes_complete(cryp, aes, -ENOMEM);
if (!src_aligned) {
sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
@@ -892,6 +908,8 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
if (aes->flags & AES_FLAGS_ENCRYPT) {
u32 tag[4];
+
+ aes->resume = mtk_aes_transfer_complete;
/* Compute total process length. */
aes->total = len + gctx->authsize;
/* Compute text length. */
@@ -899,10 +917,10 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
/* Hardware will append authenticated tag to output buffer */
scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
} else {
+ aes->resume = mtk_aes_gcm_tag_verify;
aes->total = len;
gctx->textlen = req->cryptlen - gctx->authsize;
}
- aes->resume = mtk_aes_complete;
return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}
@@ -915,7 +933,7 @@ static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
rctx->mode = AES_FLAGS_GCM | mode;
return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
- &req->base);
+ &req->base);
}
static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err)
@@ -949,24 +967,26 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
struct scatterlist sg[1];
struct skcipher_request req;
} *data;
- const u32 *aes_key;
- u32 *key_state, *hash_state;
- int err, i;
+ int err;
- if (keylen != AES_KEYSIZE_256 &&
- keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_128) {
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->keymode = AES_TFM_128BITS;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->keymode = AES_TFM_192BITS;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->keymode = AES_TFM_256BITS;
+ break;
+
+ default:
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- key_state = ctx->tfm.state;
- aes_key = (u32 *)key;
ctx->keylen = SIZE_IN_WORDS(keylen);
- for (i = 0; i < ctx->keylen; i++)
- ctx->tfm.state[i] = cpu_to_le32(aes_key[i]);
-
/* Same as crypto_gcm_setkey() from crypto/gcm.c */
crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
@@ -1001,10 +1021,11 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (err)
goto out;
- hash_state = key_state + ctx->keylen;
-
- for (i = 0; i < 4; i++)
- hash_state[i] = cpu_to_be32(data->hash[i]);
+ /* Write key into state buffer */
+ mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
+ /* Write key(H) into state buffer */
+ mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
+ AES_BLOCK_SIZE);
out:
kzfree(data);
return err;
@@ -1092,58 +1113,36 @@ static struct aead_alg aes_gcm_alg = {
},
};
-static void mtk_aes_enc_task(unsigned long data)
+static void mtk_aes_queue_task(unsigned long data)
{
- struct mtk_cryp *cryp = (struct mtk_cryp *)data;
- struct mtk_aes_rec *aes = cryp->aes[0];
+ struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
- mtk_aes_unmap(cryp, aes);
- aes->resume(cryp, aes);
+ mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}
-static void mtk_aes_dec_task(unsigned long data)
+static void mtk_aes_done_task(unsigned long data)
{
- struct mtk_cryp *cryp = (struct mtk_cryp *)data;
- struct mtk_aes_rec *aes = cryp->aes[1];
+ struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
+ struct mtk_cryp *cryp = aes->cryp;
mtk_aes_unmap(cryp, aes);
aes->resume(cryp, aes);
}
-static irqreturn_t mtk_aes_enc_irq(int irq, void *dev_id)
+static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
- struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
- struct mtk_aes_rec *aes = cryp->aes[0];
- u32 val = mtk_aes_read(cryp, RDR_STAT(RING0));
+ struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
+ struct mtk_cryp *cryp = aes->cryp;
+ u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));
- mtk_aes_write(cryp, RDR_STAT(RING0), val);
+ mtk_aes_write(cryp, RDR_STAT(aes->id), val);
if (likely(AES_FLAGS_BUSY & aes->flags)) {
- mtk_aes_write(cryp, RDR_PROC_COUNT(RING0), MTK_CNT_RST);
- mtk_aes_write(cryp, RDR_THRESH(RING0),
+ mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
+ mtk_aes_write(cryp, RDR_THRESH(aes->id),
MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
- tasklet_schedule(&aes->task);
- } else {
- dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
- }
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mtk_aes_dec_irq(int irq, void *dev_id)
-{
- struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
- struct mtk_aes_rec *aes = cryp->aes[1];
- u32 val = mtk_aes_read(cryp, RDR_STAT(RING1));
-
- mtk_aes_write(cryp, RDR_STAT(RING1), val);
-
- if (likely(AES_FLAGS_BUSY & aes->flags)) {
- mtk_aes_write(cryp, RDR_PROC_COUNT(RING1), MTK_CNT_RST);
- mtk_aes_write(cryp, RDR_THRESH(RING1),
- MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
-
- tasklet_schedule(&aes->task);
+ tasklet_schedule(&aes->done_task);
} else {
dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
}
@@ -1171,14 +1170,20 @@ static int mtk_aes_record_init(struct mtk_cryp *cryp)
if (!aes[i]->buf)
goto err_cleanup;
- aes[i]->id = i;
+ aes[i]->cryp = cryp;
spin_lock_init(&aes[i]->lock);
crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
+
+ tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
+ (unsigned long)aes[i]);
+ tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
+ (unsigned long)aes[i]);
}
- tasklet_init(&aes[0]->task, mtk_aes_enc_task, (unsigned long)cryp);
- tasklet_init(&aes[1]->task, mtk_aes_dec_task, (unsigned long)cryp);
+ /* Link to ring0 and ring1 respectively */
+ aes[0]->id = MTK_RING0;
+ aes[1]->id = MTK_RING1;
return 0;
@@ -1196,7 +1201,9 @@ static void mtk_aes_record_free(struct mtk_cryp *cryp)
int i;
for (i = 0; i < MTK_REC_NUM; i++) {
- tasklet_kill(&cryp->aes[i]->task);
+ tasklet_kill(&cryp->aes[i]->done_task);
+ tasklet_kill(&cryp->aes[i]->queue_task);
+
free_page((unsigned long)cryp->aes[i]->buf);
kfree(cryp->aes[i]);
}
@@ -1246,25 +1253,23 @@ int mtk_cipher_alg_register(struct mtk_cryp *cryp)
if (ret)
goto err_record;
- /* Ring0 is use by encryption record */
- ret = devm_request_irq(cryp->dev, cryp->irq[RING0], mtk_aes_enc_irq,
- IRQF_TRIGGER_LOW, "mtk-aes", cryp);
+ ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
+ 0, "mtk-aes", cryp->aes[0]);
if (ret) {
- dev_err(cryp->dev, "unable to request AES encryption irq.\n");
+ dev_err(cryp->dev, "unable to request AES irq.\n");
goto err_res;
}
- /* Ring1 is use by decryption record */
- ret = devm_request_irq(cryp->dev, cryp->irq[RING1], mtk_aes_dec_irq,
- IRQF_TRIGGER_LOW, "mtk-aes", cryp);
+ ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
+ 0, "mtk-aes", cryp->aes[1]);
if (ret) {
- dev_err(cryp->dev, "unable to request AES decryption irq.\n");
+ dev_err(cryp->dev, "unable to request AES irq.\n");
goto err_res;
}
/* Enable ring0 and ring1 interrupt */
- mtk_aes_write(cryp, AIC_ENABLE_SET(RING0), MTK_IRQ_RDR0);
- mtk_aes_write(cryp, AIC_ENABLE_SET(RING1), MTK_IRQ_RDR1);
+ mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
+ mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);
spin_lock(&mtk_aes.lock);
list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index a9c713d4c733..b6ecc288b540 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -334,7 +334,7 @@ static int mtk_packet_engine_setup(struct mtk_cryp *cryp)
/* Enable the 4 rings for the packet engines. */
mtk_desc_ring_link(cryp, 0xf);
- for (i = 0; i < RING_MAX; i++) {
+ for (i = 0; i < MTK_RING_MAX; i++) {
mtk_cmd_desc_ring_setup(cryp, i, &cap);
mtk_res_desc_ring_setup(cryp, i, &cap);
}
@@ -359,7 +359,7 @@ static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw)
{
u32 val;
- if (hw == RING_MAX)
+ if (hw == MTK_RING_MAX)
val = readl(cryp->base + AIC_G_VERSION);
else
val = readl(cryp->base + AIC_VERSION(hw));
@@ -368,7 +368,7 @@ static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw)
if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12)
return -ENXIO;
- if (hw == RING_MAX)
+ if (hw == MTK_RING_MAX)
val = readl(cryp->base + AIC_G_OPTIONS);
else
val = readl(cryp->base + AIC_OPTIONS(hw));
@@ -389,7 +389,7 @@ static int mtk_aic_init(struct mtk_cryp *cryp, int hw)
return err;
/* Disable all interrupts and set initial configuration */
- if (hw == RING_MAX) {
+ if (hw == MTK_RING_MAX) {
writel(0, cryp->base + AIC_G_ENABLE_CTRL);
writel(0, cryp->base + AIC_G_POL_CTRL);
writel(0, cryp->base + AIC_G_TYPE_CTRL);
@@ -431,7 +431,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
{
int i;
- for (i = 0; i < RING_MAX; i++) {
+ for (i = 0; i < MTK_RING_MAX; i++) {
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
cryp->ring[i]->res_base,
cryp->ring[i]->res_dma);
@@ -447,7 +447,7 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
struct mtk_ring **ring = cryp->ring;
int i, err = ENOMEM;
- for (i = 0; i < RING_MAX; i++) {
+ for (i = 0; i < MTK_RING_MAX; i++) {
ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
if (!ring[i])
goto err_cleanup;
@@ -465,6 +465,9 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
GFP_KERNEL);
if (!ring[i]->res_base)
goto err_cleanup;
+
+ ring[i]->cmd_next = ring[i]->cmd_base;
+ ring[i]->res_next = ring[i]->res_base;
}
return 0;
diff --git a/drivers/crypto/mediatek/mtk-platform.h b/drivers/crypto/mediatek/mtk-platform.h
index ed6d8717f7f4..303c152dc931 100644
--- a/drivers/crypto/mediatek/mtk-platform.h
+++ b/drivers/crypto/mediatek/mtk-platform.h
@@ -38,14 +38,14 @@
* Ring 2/3 are used by SHA.
*/
enum {
- RING0 = 0,
- RING1,
- RING2,
- RING3,
- RING_MAX,
+ MTK_RING0,
+ MTK_RING1,
+ MTK_RING2,
+ MTK_RING3,
+ MTK_RING_MAX
};
-#define MTK_REC_NUM (RING_MAX / 2)
+#define MTK_REC_NUM (MTK_RING_MAX / 2)
#define MTK_IRQ_NUM 5
/**
@@ -84,11 +84,12 @@ struct mtk_desc {
/**
* struct mtk_ring - Descriptor ring
* @cmd_base: pointer to command descriptor ring base
+ * @cmd_next: pointer to the next command descriptor
* @cmd_dma: DMA address of command descriptor ring
- * @cmd_pos: current position in the command descriptor ring
* @res_base: pointer to result descriptor ring base
+ * @res_next: pointer to the next result descriptor
+ * @res_prev: pointer to the previous result descriptor
* @res_dma: DMA address of result descriptor ring
- * @res_pos: current position in the result descriptor ring
*
* A descriptor ring is a circular buffer that is used to manage
* one or more descriptors. There are two types of descriptor rings;
@@ -96,11 +97,12 @@ struct mtk_desc {
*/
struct mtk_ring {
struct mtk_desc *cmd_base;
+ struct mtk_desc *cmd_next;
dma_addr_t cmd_dma;
- u32 cmd_pos;
struct mtk_desc *res_base;
+ struct mtk_desc *res_next;
+ struct mtk_desc *res_prev;
dma_addr_t res_dma;
- u32 res_pos;
};
/**
@@ -125,9 +127,11 @@ typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
/**
* struct mtk_aes_rec - AES operation record
+ * @cryp: pointer to Cryptographic device
* @queue: crypto request queue
* @areq: pointer to async request
- * @task: the tasklet is use in AES interrupt
+ * @done_task: the tasklet used by the AES interrupt handler
+ * @queue_task: the tasklet used to dequeue pending requests
* @ctx: pointer to current context
* @src: the structure that holds source sg list info
* @dst: the structure that holds destination sg list info
@@ -136,16 +140,18 @@ typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
* @resume: pointer to resume function
* @total: request buffer length
* @buf: pointer to page buffer
- * @id: record identification
+ * @id: the ring used by this record
* @flags: it's describing AES operation state
* @lock: the async queue lock
*
* Structure used to record AES execution state.
*/
struct mtk_aes_rec {
+ struct mtk_cryp *cryp;
struct crypto_queue queue;
struct crypto_async_request *areq;
- struct tasklet_struct task;
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
struct mtk_aes_base_ctx *ctx;
struct mtk_aes_dma src;
struct mtk_aes_dma dst;
@@ -166,19 +172,23 @@ struct mtk_aes_rec {
/**
* struct mtk_sha_rec - SHA operation record
+ * @cryp: pointer to Cryptographic device
* @queue: crypto request queue
* @req: pointer to ahash request
- * @task: the tasklet is use in SHA interrupt
- * @id: record identification
+ * @done_task: the tasklet used by the SHA interrupt handler
+ * @queue_task: the tasklet used to dequeue pending requests
+ * @id: the ring used by this record
* @flags: it's describing SHA operation state
- * @lock: the ablkcipher queue lock
+ * @lock: the async queue lock
*
* Structure used to record SHA execution state.
*/
struct mtk_sha_rec {
+ struct mtk_cryp *cryp;
struct crypto_queue queue;
struct ahash_request *req;
- struct tasklet_struct task;
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
u8 id;
unsigned long flags;
@@ -193,13 +203,11 @@ struct mtk_sha_rec {
* @clk_ethif: pointer to ethif clock
* @clk_cryp: pointer to crypto clock
* @irq: global system and rings IRQ
- * @ring: pointer to execution state of AES
- * @aes: pointer to execution state of SHA
- * @sha: each execution record map to a ring
+ * @ring: pointer to descriptor rings
+ * @aes: pointer to operation record of AES
+ * @sha: pointer to operation record of SHA
* @aes_list: device list of AES
* @sha_list: device list of SHA
- * @tmp: pointer to temporary buffer for internal use
- * @tmp_dma: DMA address of temporary buffer
* @rec: it's used to select SHA record for tfm
*
* Structure storing cryptographic device information.
@@ -211,15 +219,13 @@ struct mtk_cryp {
struct clk *clk_cryp;
int irq[MTK_IRQ_NUM];
- struct mtk_ring *ring[RING_MAX];
+ struct mtk_ring *ring[MTK_RING_MAX];
struct mtk_aes_rec *aes[MTK_REC_NUM];
struct mtk_sha_rec *sha[MTK_REC_NUM];
struct list_head aes_list;
struct list_head sha_list;
- void *tmp;
- dma_addr_t tmp_dma;
bool rec;
};
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index 55e3805fba07..2226f12d1c7a 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -17,13 +17,13 @@
#define SHA_ALIGN_MSK (sizeof(u32) - 1)
#define SHA_QUEUE_SIZE 512
-#define SHA_TMP_BUF_SIZE 512
#define SHA_BUF_SIZE ((u32)PAGE_SIZE)
#define SHA_OP_UPDATE 1
#define SHA_OP_FINAL 2
#define SHA_DATA_LEN_MSK cpu_to_le32(GENMASK(16, 0))
+#define SHA_MAX_DIGEST_BUF_SIZE 32
/* SHA command token */
#define SHA_CT_SIZE 5
@@ -34,7 +34,6 @@
/* SHA transform information */
#define SHA_TFM_HASH cpu_to_le32(0x2 << 0)
-#define SHA_TFM_INNER_DIG cpu_to_le32(0x1 << 21)
#define SHA_TFM_SIZE(x) cpu_to_le32((x) << 8)
#define SHA_TFM_START cpu_to_le32(0x1 << 4)
#define SHA_TFM_CONTINUE cpu_to_le32(0x1 << 5)
@@ -61,31 +60,17 @@
#define SHA_FLAGS_PAD BIT(10)
/**
- * mtk_sha_ct is a set of hardware instructions(command token)
- * that are used to control engine's processing flow of SHA,
- * and it contains the first two words of transform state.
+ * mtk_sha_info - hardware information of SHA
+ * @ctrl: the first two words of the transform state
+ * @cmd: command token, the hardware instructions
+ * @tfm: transform state of the hash algorithm
+ * @digest: buffer storing the result digest produced by the engine
+ *
*/
-struct mtk_sha_ct {
+struct mtk_sha_info {
__le32 ctrl[2];
__le32 cmd[3];
-};
-
-/**
- * mtk_sha_tfm is used to define SHA transform state
- * and store result digest that produced by engine.
- */
-struct mtk_sha_tfm {
- __le32 ctrl[2];
- __le32 digest[SIZE_IN_WORDS(SHA512_DIGEST_SIZE)];
-};
-
-/**
- * mtk_sha_info consists of command token and transform state
- * of SHA, its role is similar to mtk_aes_info.
- */
-struct mtk_sha_info {
- struct mtk_sha_ct ct;
- struct mtk_sha_tfm tfm;
+ __le32 tfm[2];
+ __le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
};
struct mtk_sha_reqctx {
@@ -94,7 +79,6 @@ struct mtk_sha_reqctx {
unsigned long op;
u64 digcnt;
- bool start;
size_t bufcnt;
dma_addr_t dma_addr;
@@ -153,6 +137,21 @@ static inline void mtk_sha_write(struct mtk_cryp *cryp,
writel_relaxed(value, cryp->base + offset);
}
+static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
+ struct mtk_desc **cmd_curr,
+ struct mtk_desc **res_curr,
+ int *count)
+{
+ *cmd_curr = ring->cmd_next++;
+ *res_curr = ring->res_next++;
+ (*count)++;
+
+ if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
+ ring->cmd_next = ring->cmd_base;
+ ring->res_next = ring->res_base;
+ }
+}
+
static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
{
struct mtk_cryp *cryp = NULL;
@@ -251,7 +250,9 @@ static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
bits[1] = cpu_to_be64(size << 3);
bits[0] = cpu_to_be64(size >> 61);
- if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
+ switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
+ case SHA_FLAGS_SHA384:
+ case SHA_FLAGS_SHA512:
index = ctx->bufcnt & 0x7f;
padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
*(ctx->buffer + ctx->bufcnt) = 0x80;
@@ -259,7 +260,9 @@ static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
ctx->bufcnt += padlen + 16;
ctx->flags |= SHA_FLAGS_PAD;
- } else {
+ break;
+
+ default:
index = ctx->bufcnt & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
*(ctx->buffer + ctx->bufcnt) = 0x80;
@@ -267,36 +270,35 @@ static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
ctx->bufcnt += padlen + 8;
ctx->flags |= SHA_FLAGS_PAD;
+ break;
}
}
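The padlen arithmetic reserves room for the 0x80 terminator plus zero padding so that the big-endian length field (8 bytes here, 16 in the SHA-384/512 branch) lands exactly at a block boundary. Worked numbers for the default (64-byte block) branch:

u32 index  = bufcnt & 0x3f;			/* offset within the block */
u32 padlen = (index < 56) ? (56 - index)	/* pad fits this block */
			  : ((64 + 56) - index); /* spills into one more */

/* bufcnt = 20: padlen = 36, and 20 + 36 + 8 = 64  -> one block  */
/* bufcnt = 60: padlen = 60, and 60 + 60 + 8 = 128 -> two blocks */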
/* Initialize basic transform information of SHA */
static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
{
- struct mtk_sha_ct *ct = &ctx->info.ct;
- struct mtk_sha_tfm *tfm = &ctx->info.tfm;
+ struct mtk_sha_info *info = &ctx->info;
ctx->ct_hdr = SHA_CT_CTRL_HDR;
ctx->ct_size = SHA_CT_SIZE;
- tfm->ctrl[0] = SHA_TFM_HASH | SHA_TFM_INNER_DIG |
- SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));
+ info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));
switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
case SHA_FLAGS_SHA1:
- tfm->ctrl[0] |= SHA_TFM_SHA1;
+ info->tfm[0] |= SHA_TFM_SHA1;
break;
case SHA_FLAGS_SHA224:
- tfm->ctrl[0] |= SHA_TFM_SHA224;
+ info->tfm[0] |= SHA_TFM_SHA224;
break;
case SHA_FLAGS_SHA256:
- tfm->ctrl[0] |= SHA_TFM_SHA256;
+ info->tfm[0] |= SHA_TFM_SHA256;
break;
case SHA_FLAGS_SHA384:
- tfm->ctrl[0] |= SHA_TFM_SHA384;
+ info->tfm[0] |= SHA_TFM_SHA384;
break;
case SHA_FLAGS_SHA512:
- tfm->ctrl[0] |= SHA_TFM_SHA512;
+ info->tfm[0] |= SHA_TFM_SHA512;
break;
default:
@@ -304,13 +306,13 @@ static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
return;
}
- tfm->ctrl[1] = SHA_TFM_HASH_STORE;
- ct->ctrl[0] = tfm->ctrl[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
- ct->ctrl[1] = tfm->ctrl[1];
+ info->tfm[1] = SHA_TFM_HASH_STORE;
+ info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
+ info->ctrl[1] = info->tfm[1];
- ct->cmd[0] = SHA_CMD0;
- ct->cmd[1] = SHA_CMD1;
- ct->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
+ info->cmd[0] = SHA_CMD0;
+ info->cmd[1] = SHA_CMD1;
+ info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
}
/*
@@ -319,23 +321,21 @@ static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
*/
static int mtk_sha_info_update(struct mtk_cryp *cryp,
struct mtk_sha_rec *sha,
- size_t len)
+ size_t len1, size_t len2)
{
struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
struct mtk_sha_info *info = &ctx->info;
- struct mtk_sha_ct *ct = &info->ct;
-
- if (ctx->start)
- ctx->start = false;
- else
- ct->ctrl[0] &= ~SHA_TFM_START;
ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
- ctx->ct_hdr |= cpu_to_le32(len);
- ct->cmd[0] &= ~SHA_DATA_LEN_MSK;
- ct->cmd[0] |= cpu_to_le32(len);
+ ctx->ct_hdr |= cpu_to_le32(len1 + len2);
+ info->cmd[0] &= ~SHA_DATA_LEN_MSK;
+ info->cmd[0] |= cpu_to_le32(len1 + len2);
+
+ /* Set SHA_TFM_START only on the first iteration */
+ if (ctx->digcnt)
+ info->ctrl[0] &= ~SHA_TFM_START;
- ctx->digcnt += len;
+ ctx->digcnt += len1;
ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
DMA_BIDIRECTIONAL);
@@ -343,7 +343,8 @@ static int mtk_sha_info_update(struct mtk_cryp *cryp,
dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
return -EINVAL;
}
- ctx->tfm_dma = ctx->ct_dma + sizeof(*ct);
+
+ ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);
return 0;
}
@@ -408,7 +409,6 @@ static int mtk_sha_init(struct ahash_request *req)
ctx->bufcnt = 0;
ctx->digcnt = 0;
ctx->buffer = tctx->buf;
- ctx->start = true;
if (tctx->flags & SHA_FLAGS_HMAC) {
struct mtk_sha_hmac_ctx *bctx = tctx->base;
@@ -422,89 +422,39 @@ static int mtk_sha_init(struct ahash_request *req)
}
static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
- dma_addr_t addr, size_t len)
+ dma_addr_t addr1, size_t len1,
+ dma_addr_t addr2, size_t len2)
{
struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
struct mtk_ring *ring = cryp->ring[sha->id];
- struct mtk_desc *cmd = ring->cmd_base + ring->cmd_pos;
- struct mtk_desc *res = ring->res_base + ring->res_pos;
- int err;
-
- err = mtk_sha_info_update(cryp, sha, len);
- if (err)
- return err;
-
- /* Fill in the command/result descriptors */
- res->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len);
- res->buf = cpu_to_le32(cryp->tmp_dma);
-
- cmd->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len) |
- MTK_DESC_CT_LEN(ctx->ct_size);
-
- cmd->buf = cpu_to_le32(addr);
- cmd->ct = cpu_to_le32(ctx->ct_dma);
- cmd->ct_hdr = ctx->ct_hdr;
- cmd->tfm = cpu_to_le32(ctx->tfm_dma);
-
- if (++ring->cmd_pos == MTK_DESC_NUM)
- ring->cmd_pos = 0;
-
- ring->res_pos = ring->cmd_pos;
- /*
- * Make sure that all changes to the DMA ring are done before we
- * start engine.
- */
- wmb();
- /* Start DMA transfer */
- mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(1));
- mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(1));
-
- return -EINPROGRESS;
-}
-
-static int mtk_sha_xmit2(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha,
- struct mtk_sha_reqctx *ctx,
- size_t len1, size_t len2)
-{
- struct mtk_ring *ring = cryp->ring[sha->id];
- struct mtk_desc *cmd = ring->cmd_base + ring->cmd_pos;
- struct mtk_desc *res = ring->res_base + ring->res_pos;
- int err;
+ struct mtk_desc *cmd, *res;
+ int err, count = 0;
- err = mtk_sha_info_update(cryp, sha, len1 + len2);
+ err = mtk_sha_info_update(cryp, sha, len1, len2);
if (err)
return err;
/* Fill in the command/result descriptors */
- res->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST;
- res->buf = cpu_to_le32(cryp->tmp_dma);
+ mtk_sha_ring_shift(ring, &cmd, &res, &count);
- cmd->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST |
+ res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
+ cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
MTK_DESC_CT_LEN(ctx->ct_size);
- cmd->buf = cpu_to_le32(sg_dma_address(ctx->sg));
+ cmd->buf = cpu_to_le32(addr1);
cmd->ct = cpu_to_le32(ctx->ct_dma);
cmd->ct_hdr = ctx->ct_hdr;
cmd->tfm = cpu_to_le32(ctx->tfm_dma);
- if (++ring->cmd_pos == MTK_DESC_NUM)
- ring->cmd_pos = 0;
-
- ring->res_pos = ring->cmd_pos;
-
- cmd = ring->cmd_base + ring->cmd_pos;
- res = ring->res_base + ring->res_pos;
-
- res->hdr = MTK_DESC_BUF_LEN(len2) | MTK_DESC_LAST;
- res->buf = cpu_to_le32(cryp->tmp_dma);
+ if (len2) {
+ mtk_sha_ring_shift(ring, &cmd, &res, &count);
- cmd->hdr = MTK_DESC_BUF_LEN(len2) | MTK_DESC_LAST;
- cmd->buf = cpu_to_le32(ctx->dma_addr);
-
- if (++ring->cmd_pos == MTK_DESC_NUM)
- ring->cmd_pos = 0;
+ res->hdr = MTK_DESC_BUF_LEN(len2);
+ cmd->hdr = MTK_DESC_BUF_LEN(len2);
+ cmd->buf = cpu_to_le32(addr2);
+ }
- ring->res_pos = ring->cmd_pos;
+ cmd->hdr |= MTK_DESC_LAST;
+ res->hdr |= MTK_DESC_LAST;
/*
* Make sure that all changes to the DMA ring are done before we
@@ -512,8 +462,8 @@ static int mtk_sha_xmit2(struct mtk_cryp *cryp,
*/
wmb();
/* Start DMA transfer */
- mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(2));
- mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(2));
+ mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
+ mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
return -EINPROGRESS;
}
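With mtk_sha_xmit2() folded in, the two submission shapes become two call forms of the same helper (both taken verbatim from the hunks below):

/* one segment: the linear bounce buffer only */
mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);

/* two segments: sg data first, then the buffered tail */
mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), len, ctx->dma_addr, count);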
@@ -532,7 +482,7 @@ static int mtk_sha_dma_map(struct mtk_cryp *cryp,
ctx->flags &= ~SHA_FLAGS_SG;
- return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count);
+ return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
}
static int mtk_sha_update_slow(struct mtk_cryp *cryp,
@@ -625,7 +575,8 @@ static int mtk_sha_update_start(struct mtk_cryp *cryp,
if (len == 0) {
ctx->flags &= ~SHA_FLAGS_SG;
- return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count);
+ return mtk_sha_xmit(cryp, sha, ctx->dma_addr,
+ count, 0, 0);
} else {
ctx->sg = sg;
@@ -635,7 +586,8 @@ static int mtk_sha_update_start(struct mtk_cryp *cryp,
}
ctx->flags |= SHA_FLAGS_SG;
- return mtk_sha_xmit2(cryp, sha, ctx, len, count);
+ return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
+ len, ctx->dma_addr, count);
}
}
@@ -646,7 +598,8 @@ static int mtk_sha_update_start(struct mtk_cryp *cryp,
ctx->flags |= SHA_FLAGS_SG;
- return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), len);
+ return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
+ len, 0, 0);
}
static int mtk_sha_final_req(struct mtk_cryp *cryp,
@@ -668,7 +621,7 @@ static int mtk_sha_final_req(struct mtk_cryp *cryp,
static int mtk_sha_finish(struct ahash_request *req)
{
struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
- u32 *digest = ctx->info.tfm.digest;
+ __le32 *digest = ctx->info.digest;
u32 *result = (u32 *)req->result;
int i;
@@ -694,7 +647,7 @@ static void mtk_sha_finish_req(struct mtk_cryp *cryp,
sha->req->base.complete(&sha->req->base, err);
/* Handle new request */
- mtk_sha_handle_queue(cryp, sha->id - RING2, NULL);
+ tasklet_schedule(&sha->queue_task);
}
static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
@@ -1216,60 +1169,38 @@ static struct ahash_alg algs_sha384_sha512[] = {
},
};
-static void mtk_sha_task0(unsigned long data)
+static void mtk_sha_queue_task(unsigned long data)
{
- struct mtk_cryp *cryp = (struct mtk_cryp *)data;
- struct mtk_sha_rec *sha = cryp->sha[0];
+ struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
- mtk_sha_unmap(cryp, sha);
- mtk_sha_complete(cryp, sha);
+ mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
}
-static void mtk_sha_task1(unsigned long data)
+static void mtk_sha_done_task(unsigned long data)
{
- struct mtk_cryp *cryp = (struct mtk_cryp *)data;
- struct mtk_sha_rec *sha = cryp->sha[1];
+ struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
+ struct mtk_cryp *cryp = sha->cryp;
mtk_sha_unmap(cryp, sha);
mtk_sha_complete(cryp, sha);
}
-static irqreturn_t mtk_sha_ring2_irq(int irq, void *dev_id)
+static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
{
- struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
- struct mtk_sha_rec *sha = cryp->sha[0];
- u32 val = mtk_sha_read(cryp, RDR_STAT(RING2));
+ struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
+ struct mtk_cryp *cryp = sha->cryp;
+ u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));
- mtk_sha_write(cryp, RDR_STAT(RING2), val);
+ mtk_sha_write(cryp, RDR_STAT(sha->id), val);
if (likely((SHA_FLAGS_BUSY & sha->flags))) {
- mtk_sha_write(cryp, RDR_PROC_COUNT(RING2), MTK_CNT_RST);
- mtk_sha_write(cryp, RDR_THRESH(RING2),
+ mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
+ mtk_sha_write(cryp, RDR_THRESH(sha->id),
MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
- tasklet_schedule(&sha->task);
+ tasklet_schedule(&sha->done_task);
} else {
- dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
- }
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mtk_sha_ring3_irq(int irq, void *dev_id)
-{
- struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
- struct mtk_sha_rec *sha = cryp->sha[1];
- u32 val = mtk_sha_read(cryp, RDR_STAT(RING3));
-
- mtk_sha_write(cryp, RDR_STAT(RING3), val);
-
- if (likely((SHA_FLAGS_BUSY & sha->flags))) {
- mtk_sha_write(cryp, RDR_PROC_COUNT(RING3), MTK_CNT_RST);
- mtk_sha_write(cryp, RDR_THRESH(RING3),
- MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
-
- tasklet_schedule(&sha->task);
- } else {
- dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
+ dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
}
return IRQ_HANDLED;
}
@@ -1288,14 +1219,20 @@ static int mtk_sha_record_init(struct mtk_cryp *cryp)
if (!sha[i])
goto err_cleanup;
- sha[i]->id = i + RING2;
+ sha[i]->cryp = cryp;
spin_lock_init(&sha[i]->lock);
crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);
+
+ tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
+ (unsigned long)sha[i]);
+ tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
+ (unsigned long)sha[i]);
}
- tasklet_init(&sha[0]->task, mtk_sha_task0, (unsigned long)cryp);
- tasklet_init(&sha[1]->task, mtk_sha_task1, (unsigned long)cryp);
+ /* Link to ring2 and ring3 respectively */
+ sha[0]->id = MTK_RING2;
+ sha[1]->id = MTK_RING3;
cryp->rec = 1;
@@ -1312,7 +1249,9 @@ static void mtk_sha_record_free(struct mtk_cryp *cryp)
int i;
for (i = 0; i < MTK_REC_NUM; i++) {
- tasklet_kill(&cryp->sha[i]->task);
+ tasklet_kill(&cryp->sha[i]->done_task);
+ tasklet_kill(&cryp->sha[i]->queue_task);
+
kfree(cryp->sha[i]);
}
}
@@ -1368,35 +1307,23 @@ int mtk_hash_alg_register(struct mtk_cryp *cryp)
if (err)
goto err_record;
- /* Ring2 is use by SHA record0 */
- err = devm_request_irq(cryp->dev, cryp->irq[RING2],
- mtk_sha_ring2_irq, IRQF_TRIGGER_LOW,
- "mtk-sha", cryp);
+ err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
+ 0, "mtk-sha", cryp->sha[0]);
if (err) {
dev_err(cryp->dev, "unable to request sha irq0.\n");
goto err_res;
}
- /* Ring3 is use by SHA record1 */
- err = devm_request_irq(cryp->dev, cryp->irq[RING3],
- mtk_sha_ring3_irq, IRQF_TRIGGER_LOW,
- "mtk-sha", cryp);
+ err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
+ 0, "mtk-sha", cryp->sha[1]);
if (err) {
dev_err(cryp->dev, "unable to request sha irq1.\n");
goto err_res;
}
/* Enable ring2 and ring3 interrupt for hash */
- mtk_sha_write(cryp, AIC_ENABLE_SET(RING2), MTK_IRQ_RDR2);
- mtk_sha_write(cryp, AIC_ENABLE_SET(RING3), MTK_IRQ_RDR3);
-
- cryp->tmp = dma_alloc_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
- &cryp->tmp_dma, GFP_KERNEL);
- if (!cryp->tmp) {
- dev_err(cryp->dev, "unable to allocate tmp buffer.\n");
- err = -EINVAL;
- goto err_res;
- }
+ mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
+ mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);
spin_lock(&mtk_sha.lock);
list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
@@ -1412,8 +1339,6 @@ err_algs:
spin_lock(&mtk_sha.lock);
list_del(&cryp->sha_list);
spin_unlock(&mtk_sha.lock);
- dma_free_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
- cryp->tmp, cryp->tmp_dma);
err_res:
mtk_sha_record_free(cryp);
err_record:
@@ -1429,7 +1354,5 @@ void mtk_hash_alg_release(struct mtk_cryp *cryp)
spin_unlock(&mtk_sha.lock);
mtk_sha_unregister_algs();
- dma_free_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
- cryp->tmp, cryp->tmp_dma);
mtk_sha_record_free(cryp);
}
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 0d35dca2e925..2aab80bc241f 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -491,7 +491,7 @@ static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
ctx->g2 = false;
}
-static int qat_dh_set_secret(struct crypto_kpp *tfm, void *buf,
+static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);