Diffstat (limited to 'crypto/crypto_engine.c')
-rw-r--r--	crypto/crypto_engine.c	186
1 file changed, 148 insertions(+), 38 deletions(-)
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 795b6f9412ba..bfb92ace2c91 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -15,13 +15,11 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <crypto/engine.h>
+#include <crypto/internal/hash.h>
 #include "internal.h"
 
 #define CRYPTO_ENGINE_MAX_QLEN 10
 
-void crypto_finalize_request(struct crypto_engine *engine,
-			     struct ablkcipher_request *req, int err);
-
 /**
  * crypto_pump_requests - dequeue one request from engine queue to process
  * @engine: the hardware engine
@@ -35,10 +33,11 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 				 bool in_kthread)
 {
 	struct crypto_async_request *async_req, *backlog;
-	struct ablkcipher_request *req;
+	struct ahash_request *hreq;
+	struct ablkcipher_request *breq;
 	unsigned long flags;
 	bool was_busy = false;
-	int ret;
+	int ret, rtype;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
@@ -83,9 +82,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 	if (!async_req)
 		goto out;
 
-	req = ablkcipher_request_cast(async_req);
-
-	engine->cur_req = req;
+	engine->cur_req = async_req;
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
@@ -96,6 +93,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
+	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
 	/* Until here we get the request need to be encrypted successfully */
 	if (!was_busy && engine->prepare_crypt_hardware) {
 		ret = engine->prepare_crypt_hardware(engine);
@@ -105,24 +103,55 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 		}
 	}
 
-	if (engine->prepare_request) {
-		ret = engine->prepare_request(engine, engine->cur_req);
+	switch (rtype) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		hreq = ahash_request_cast(engine->cur_req);
+		if (engine->prepare_hash_request) {
+			ret = engine->prepare_hash_request(engine, hreq);
+			if (ret) {
+				pr_err("failed to prepare request: %d\n", ret);
+				goto req_err;
+			}
+			engine->cur_req_prepared = true;
+		}
+		ret = engine->hash_one_request(engine, hreq);
 		if (ret) {
-			pr_err("failed to prepare request: %d\n", ret);
+			pr_err("failed to hash one request from queue\n");
 			goto req_err;
 		}
-		engine->cur_req_prepared = true;
-	}
-
-	ret = engine->crypt_one_request(engine, engine->cur_req);
-	if (ret) {
-		pr_err("failed to crypt one request from queue\n");
-		goto req_err;
+		return;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		breq = ablkcipher_request_cast(engine->cur_req);
+		if (engine->prepare_cipher_request) {
+			ret = engine->prepare_cipher_request(engine, breq);
+			if (ret) {
+				pr_err("failed to prepare request: %d\n", ret);
+				goto req_err;
+			}
+			engine->cur_req_prepared = true;
+		}
+		ret = engine->cipher_one_request(engine, breq);
+		if (ret) {
+			pr_err("failed to cipher one request from queue\n");
+			goto req_err;
+		}
+		return;
+	default:
+		pr_err("failed to prepare request of unknown type\n");
+		return;
 	}
-	return;
 
 req_err:
-	crypto_finalize_request(engine, engine->cur_req, ret);
+	switch (rtype) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		hreq = ahash_request_cast(engine->cur_req);
+		crypto_finalize_hash_request(engine, hreq, ret);
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		breq = ablkcipher_request_cast(engine->cur_req);
+		crypto_finalize_cipher_request(engine, breq, ret);
+		break;
+	}
 	return;
 
 out:
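For context on how a driver plugs into this dispatch: the callbacks used above (prepare_cipher_request, cipher_one_request, prepare_hash_request, hash_one_request and their unprepare counterparts) are fields of struct crypto_engine that a driver fills in after crypto_engine_alloc_init(). A minimal sketch, assuming a hypothetical myhw driver (all myhw_* names and the device struct are illustrative only, not part of this patch):

#include <linux/device.h>
#include <crypto/engine.h>
#include <crypto/internal/hash.h>

/* Hypothetical driver state; only the engine pointer matters here. */
struct myhw_dev {
	struct crypto_engine *engine;
};

/* Runs in the engine kthread for each dequeued cipher request; the
 * driver kicks the hardware and reports completion later through
 * crypto_finalize_cipher_request(). */
static int myhw_cipher_one_request(struct crypto_engine *engine,
				   struct ablkcipher_request *req)
{
	/* ... program registers/DMA, enable the completion IRQ ... */
	return 0;
}

/* Same contract for hash requests, finalized with
 * crypto_finalize_hash_request(). */
static int myhw_hash_one_request(struct crypto_engine *engine,
				 struct ahash_request *req)
{
	/* ... feed the hash hardware ... */
	return 0;
}

static int myhw_engine_init(struct myhw_dev *dd, struct device *dev)
{
	dd->engine = crypto_engine_alloc_init(dev, true);
	if (!dd->engine)
		return -ENOMEM;

	dd->engine->cipher_one_request = myhw_cipher_one_request;
	dd->engine->hash_one_request = myhw_hash_one_request;
	/* The prepare/unprepare hooks are optional and may stay NULL. */

	return crypto_engine_start(dd->engine);
}

A request of a type the driver did not register for falls into the default: branch above, so a driver should only transfer request types it has callbacks for.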
@@ -138,12 +167,14 @@ static void crypto_pump_work(struct kthread_work *work)
 }
 
 /**
- * crypto_transfer_request - transfer the new request into the engine queue
+ * crypto_transfer_cipher_request - transfer the new request into the
+ * engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
-int crypto_transfer_request(struct crypto_engine *engine,
-			    struct ablkcipher_request *req, bool need_pump)
+int crypto_transfer_cipher_request(struct crypto_engine *engine,
+				   struct ablkcipher_request *req,
+				   bool need_pump)
 {
 	unsigned long flags;
 	int ret;
@@ -163,46 +194,125 @@ int crypto_transfer_request(struct crypto_engine *engine,
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(crypto_transfer_request);
+EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
+
+/**
+ * crypto_transfer_cipher_request_to_engine - transfer one request into the
+ * engine queue
+ * @engine: the hardware engine
+ * @req: the request that needs to be listed into the engine queue
+ */
+int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
+					     struct ablkcipher_request *req)
+{
+	return crypto_transfer_cipher_request(engine, req, true);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
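Drivers are expected to call this variant from their ablkcipher .encrypt/.decrypt handlers so every request is serialized through the engine queue. A hedged sketch (struct myhw_ctx and its dd back-pointer are hypothetical per-tfm driver context, not part of this API):

struct myhw_ctx {
	struct myhw_dev *dd;	/* hypothetical back-pointer to the device */
};

static int myhw_ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct myhw_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* Enqueue and kick the pump; the engine kthread later hands the
	 * request to cipher_one_request(). */
	return crypto_transfer_cipher_request_to_engine(ctx->dd->engine, req);
}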
+
+/**
+ * crypto_transfer_hash_request - transfer the new request into the
+ * engine queue
+ * @engine: the hardware engine
+ * @req: the request that needs to be listed into the engine queue
+ */
+int crypto_transfer_hash_request(struct crypto_engine *engine,
+				 struct ahash_request *req, bool need_pump)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+
+	if (!engine->running) {
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+		return -ESHUTDOWN;
+	}
+
+	ret = ahash_enqueue_request(&engine->queue, req);
+
+	if (!engine->busy && need_pump)
+		queue_kthread_work(&engine->kworker, &engine->pump_requests);
+
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);
 
 /**
- * crypto_transfer_request_to_engine - transfer one request to list into the
- * engine queue
+ * crypto_transfer_hash_request_to_engine - transfer one request into the
+ * engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
-int crypto_transfer_request_to_engine(struct crypto_engine *engine,
-				      struct ablkcipher_request *req)
+int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
+					   struct ahash_request *req)
 {
-	return crypto_transfer_request(engine, req, true);
+	return crypto_transfer_hash_request(engine, req, true);
 }
-EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
+EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
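The hash side mirrors the cipher side: an ahash .digest (or .update/.final) handler enqueues through the same engine queue, and the pump dispatches on crypto_tfm_alg_type() as shown earlier. A sketch under the same hypothetical myhw_ctx assumption:

static int myhw_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct myhw_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Hash and cipher requests share one queue; type-based dispatch
	 * happens in crypto_pump_requests(). */
	return crypto_transfer_hash_request_to_engine(ctx->dd->engine, req);
}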
 
 /**
- * crypto_finalize_request - finalize one request if the request is done
+ * crypto_finalize_cipher_request - finalize one request if the request is done
  * @engine: the hardware engine
  * @req: the request need to be finalized
  * @err: error number
  */
-void crypto_finalize_request(struct crypto_engine *engine,
-			     struct ablkcipher_request *req, int err)
+void crypto_finalize_cipher_request(struct crypto_engine *engine,
+				    struct ablkcipher_request *req, int err)
 {
 	unsigned long flags;
 	bool finalize_cur_req = false;
 	int ret;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
-	if (engine->cur_req == req)
+	if (engine->cur_req == &req->base)
 		finalize_cur_req = true;
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
 	if (finalize_cur_req) {
-		if (engine->cur_req_prepared && engine->unprepare_request) {
-			ret = engine->unprepare_request(engine, req);
+		if (engine->cur_req_prepared &&
+		    engine->unprepare_cipher_request) {
+			ret = engine->unprepare_cipher_request(engine, req);
 			if (ret)
 				pr_err("failed to unprepare request\n");
 		}
+		spin_lock_irqsave(&engine->queue_lock, flags);
+		engine->cur_req = NULL;
+		engine->cur_req_prepared = false;
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+	}
+
+	req->base.complete(&req->base, err);
+	queue_kthread_work(&engine->kworker, &engine->pump_requests);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
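This is meant to be called from the driver's completion path (IRQ handler or tasklet) once the hardware finishes the request started in cipher_one_request(). Note the &req->base comparison above: cur_req now stores the type-erased crypto_async_request, so either request type can be matched against it. A sketch, assuming the hypothetical driver stashed the in-flight request in a dd->cur_breq field of its own:

/* Hypothetical completion handler. */
static void myhw_cipher_done(struct myhw_dev *dd, int err)
{
	/* Runs the optional unprepare_cipher_request() hook, clears
	 * cur_req under the queue lock, completes the request and
	 * re-kicks the pump for the next queued request. */
	crypto_finalize_cipher_request(dd->engine, dd->cur_breq, err);
}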
+
+/**
+ * crypto_finalize_hash_request - finalize one request if the request is done
+ * @engine: the hardware engine
+ * @req: the request that needs to be finalized
+ * @err: error number
+ */
+void crypto_finalize_hash_request(struct crypto_engine *engine,
+				  struct ahash_request *req, int err)
+{
+	unsigned long flags;
+	bool finalize_cur_req = false;
+	int ret;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+	if (engine->cur_req == &req->base)
+		finalize_cur_req = true;
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+	if (finalize_cur_req) {
+		if (engine->cur_req_prepared &&
+		    engine->unprepare_hash_request) {
+			ret = engine->unprepare_hash_request(engine, req);
+			if (ret)
+				pr_err("failed to unprepare request\n");
+		}
 		spin_lock_irqsave(&engine->queue_lock, flags);
 		engine->cur_req = NULL;
 		engine->cur_req_prepared = false;
@@ -213,7 +323,7 @@ void crypto_finalize_request(struct crypto_engine *engine,
 	queue_kthread_work(&engine->kworker, &engine->pump_requests);
 }
-EXPORT_SYMBOL_GPL(crypto_finalize_request);
+EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
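The hash finalizer behaves identically to the cipher one apart from running unprepare_hash_request() instead, so a driver's hash completion path simply calls crypto_finalize_hash_request(engine, req, err) with the ahash_request it was processing. Keeping the two finalizers separate is what tells the engine which unprepare hook applies, since cur_req only holds the base crypto_async_request.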
 
 /**
  * crypto_engine_start - start the hardware engine
@@ -250,7 +360,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_start);
 int crypto_engine_stop(struct crypto_engine *engine)
 {
 	unsigned long flags;
-	unsigned limit = 500;
+	unsigned int limit = 500;
 	int ret = 0;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
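Beyond the hunk above, crypto_engine_stop() polls the queue length and busy flag under the queue lock, sleeping briefly between checks and giving up after limit iterations, so a teardown path can rely on it to drain in-flight work. A hedged removal sketch (myhw_remove() and the platform-driver wrapping are hypothetical; crypto_engine_exit() is the existing counterpart to crypto_engine_alloc_init() and calls crypto_engine_stop() internally):

#include <linux/platform_device.h>
#include <crypto/engine.h>

static int myhw_remove(struct platform_device *pdev)
{
	struct myhw_dev *dd = platform_get_drvdata(pdev);

	/* Waits for the engine queue to drain (bounded by limit above),
	 * then tears down the engine's kthread worker. */
	crypto_engine_exit(dd->engine);
	return 0;
}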