Diffstat (limited to 'drivers/nvme/host/auth.c')
-rw-r--r--  drivers/nvme/host/auth.c | 258
 1 file changed, 136 insertions(+), 122 deletions(-)
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index c8a6db7c4498..bb0abbe4491c 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -13,6 +13,10 @@
#include "fabrics.h"
#include <linux/nvme-auth.h>
+#define CHAP_BUF_SIZE 4096
+static struct kmem_cache *nvme_chap_buf_cache;
+static mempool_t *nvme_chap_buf_pool;
+
struct nvme_dhchap_queue_context {
struct list_head entry;
struct work_struct auth_work;
@@ -20,7 +24,6 @@ struct nvme_dhchap_queue_context {
struct crypto_shash *shash_tfm;
struct crypto_kpp *dh_tfm;
void *buf;
- size_t buf_size;
int qid;
int error;
u32 s1;
@@ -47,6 +50,12 @@ struct nvme_dhchap_queue_context {
#define nvme_auth_queue_from_qid(ctrl, qid) \
(qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
+static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
+ ctrl->opts->nr_poll_queues + 1;
+}
+
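One context is needed for every queue that may authenticate. As a worked example: a controller connected with nr_io_queues=4 and no dedicated write or poll queues gets 4 + 0 + 0 + 1 = 5 contexts, the trailing +1 covering the admin queue (qid 0).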
static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
void *data, size_t data_len, bool auth_send)
{
@@ -112,7 +121,7 @@ static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
- if (chap->buf_size < size) {
+ if (size > CHAP_BUF_SIZE) {
chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
return -EINVAL;
}
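With the negotiation buffer now a fixed 4k mempool element, each payload check simply asks whether the size the payload needs exceeds CHAP_BUF_SIZE; the same pattern recurs below for the challenge, reply, and success1 payloads.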
@@ -147,7 +156,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
const char *hmac_name, *kpp_name;
- if (chap->buf_size < size) {
+ if (size > CHAP_BUF_SIZE) {
chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
return NVME_SC_INVALID_FIELD;
}
@@ -197,12 +206,6 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
return NVME_SC_AUTH_REQUIRED;
}
- /* Reset host response if the hash had been changed */
- if (chap->hash_id != data->hashid) {
- kfree(chap->host_response);
- chap->host_response = NULL;
- }
-
chap->hash_id = data->hashid;
chap->hash_len = data->hl;
dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
@@ -219,14 +222,6 @@ select_kpp:
return NVME_SC_AUTH_REQUIRED;
}
- /* Clear host and controller key to avoid accidental reuse */
- kfree_sensitive(chap->host_key);
- chap->host_key = NULL;
- chap->host_key_len = 0;
- kfree_sensitive(chap->ctrl_key);
- chap->ctrl_key = NULL;
- chap->ctrl_key_len = 0;
-
if (chap->dhgroup_id == data->dhgid &&
(data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
dev_dbg(ctrl->device,
@@ -302,7 +297,7 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
if (chap->host_key_len)
size += chap->host_key_len;
- if (chap->buf_size < size) {
+ if (size > CHAP_BUF_SIZE) {
chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
return -EINVAL;
}
@@ -344,10 +339,10 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
struct nvmf_auth_dhchap_success1_data *data = chap->buf;
size_t size = sizeof(*data);
- if (ctrl->ctrl_key)
+ if (chap->ctrl_key)
size += chap->hash_len;
- if (chap->buf_size < size) {
+ if (size > CHAP_BUF_SIZE) {
chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
return NVME_SC_INVALID_FIELD;
}
@@ -521,6 +516,7 @@ static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
ret = PTR_ERR(ctrl_response);
return ret;
}
+
ret = crypto_shash_setkey(chap->shash_tfm,
ctrl_response, ctrl->ctrl_key->len);
if (ret) {
@@ -621,9 +617,6 @@ static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
if (ret) {
dev_dbg(ctrl->device,
"failed to generate public key, error %d\n", ret);
- kfree(chap->host_key);
- chap->host_key = NULL;
- chap->host_key_len = 0;
chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
return ret;
}
@@ -643,9 +636,6 @@ gen_sesskey:
if (ret) {
dev_dbg(ctrl->device,
"failed to generate shared secret, error %d\n", ret);
- kfree_sensitive(chap->sess_key);
- chap->sess_key = NULL;
- chap->sess_key_len = 0;
chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
return ret;
}
@@ -654,7 +644,7 @@ gen_sesskey:
return 0;
}
-static void __nvme_auth_reset(struct nvme_dhchap_queue_context *chap)
+static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{
kfree_sensitive(chap->host_response);
chap->host_response = NULL;
@@ -674,24 +664,20 @@ static void __nvme_auth_reset(struct nvme_dhchap_queue_context *chap)
chap->transaction = 0;
memset(chap->c1, 0, sizeof(chap->c1));
memset(chap->c2, 0, sizeof(chap->c2));
+ mempool_free(chap->buf, nvme_chap_buf_pool);
+ chap->buf = NULL;
}
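Handing chap->buf back to the pool on reset means the 4k element is held only while a negotiation is in flight; nvme_auth_wait() below invokes this reset after harvesting chap->error, wiping the sensitive material at the same time.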
-static void __nvme_auth_free(struct nvme_dhchap_queue_context *chap)
+static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
{
- __nvme_auth_reset(chap);
+ nvme_auth_reset_dhchap(chap);
if (chap->shash_tfm)
crypto_free_shash(chap->shash_tfm);
if (chap->dh_tfm)
crypto_free_kpp(chap->dh_tfm);
- kfree_sensitive(chap->ctrl_key);
- kfree_sensitive(chap->host_key);
- kfree_sensitive(chap->sess_key);
- kfree_sensitive(chap->host_response);
- kfree(chap->buf);
- kfree(chap);
}
-static void __nvme_auth_work(struct work_struct *work)
+static void nvme_queue_auth_work(struct work_struct *work)
{
struct nvme_dhchap_queue_context *chap =
container_of(work, struct nvme_dhchap_queue_context, auth_work);
@@ -699,6 +685,16 @@ static void __nvme_auth_work(struct work_struct *work)
size_t tl;
int ret = 0;
+ /*
+ * Allocate a large enough buffer for the entire negotiation:
+ * 4k is enough even for ffdhe8192.
+ */
+ chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
+ if (!chap->buf) {
+ chap->error = -ENOMEM;
+ return;
+ }
+
chap->transaction = ctrl->transaction++;
/* DH-HMAC-CHAP Step 1: send negotiate */
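The buffer follows the usual mempool lifecycle: take an element when the worker starts, return it when the context is reset. A minimal sketch of the pattern, with hypothetical names standing in for nvme_chap_buf_pool and the worker:

    #include <linux/mempool.h>
    #include <linux/workqueue.h>

    /* Assumes a slab-backed pool created at module init (see nvme_init_auth below). */
    static mempool_t *example_buf_pool;

    static void example_auth_work(struct work_struct *work)
    {
    	/*
    	 * GFP_KERNEL may sleep; under memory pressure the allocation
    	 * falls back to the pool's pre-allocated reserve rather than
    	 * failing outright.
    	 */
    	void *buf = mempool_alloc(example_buf_pool, GFP_KERNEL);

    	if (!buf)
    		return;
    	/* ... run the entire multi-step exchange out of buf ... */
    	mempool_free(buf, example_buf_pool);	/* replenish the reserve */
    }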
@@ -720,8 +716,9 @@ static void __nvme_auth_work(struct work_struct *work)
dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
__func__, chap->qid);
- memset(chap->buf, 0, chap->buf_size);
- ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
+ memset(chap->buf, 0, CHAP_BUF_SIZE);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
+ false);
if (ret) {
dev_warn(ctrl->device,
"qid %d failed to receive challenge, %s %d\n",
@@ -757,11 +754,14 @@ static void __nvme_auth_work(struct work_struct *work)
dev_dbg(ctrl->device, "%s: qid %d host response\n",
__func__, chap->qid);
+ mutex_lock(&ctrl->dhchap_auth_mutex);
ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
if (ret) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
chap->error = ret;
goto fail2;
}
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
/* DH-HMAC-CHAP Step 3: send reply */
dev_dbg(ctrl->device, "%s: qid %d send reply\n",
@@ -783,8 +783,9 @@ static void __nvme_auth_work(struct work_struct *work)
dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
__func__, chap->qid);
- memset(chap->buf, 0, chap->buf_size);
- ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
+ memset(chap->buf, 0, CHAP_BUF_SIZE);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
+ false);
if (ret) {
dev_warn(ctrl->device,
"qid %d failed to receive success1, %s %d\n",
@@ -801,16 +802,19 @@ static void __nvme_auth_work(struct work_struct *work)
return;
}
+ mutex_lock(&ctrl->dhchap_auth_mutex);
if (ctrl->ctrl_key) {
dev_dbg(ctrl->device,
"%s: qid %d controller response\n",
__func__, chap->qid);
ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
if (ret) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
chap->error = ret;
goto fail2;
}
}
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
ret = nvme_auth_process_dhchap_success1(ctrl, chap);
if (ret) {
@@ -819,7 +823,7 @@ static void __nvme_auth_work(struct work_struct *work)
goto fail2;
}
- if (ctrl->ctrl_key) {
+ if (chap->ctrl_key) {
/* DH-HMAC-CHAP Step 5: send success2 */
dev_dbg(ctrl->device, "%s: qid %d send success2\n",
__func__, chap->qid);
@@ -860,42 +864,8 @@ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
return -ENOKEY;
}
- mutex_lock(&ctrl->dhchap_auth_mutex);
- /* Check if the context is already queued */
- list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
- WARN_ON(!chap->buf);
- if (chap->qid == qid) {
- dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
- mutex_unlock(&ctrl->dhchap_auth_mutex);
- flush_work(&chap->auth_work);
- __nvme_auth_reset(chap);
- queue_work(nvme_wq, &chap->auth_work);
- return 0;
- }
- }
- chap = kzalloc(sizeof(*chap), GFP_KERNEL);
- if (!chap) {
- mutex_unlock(&ctrl->dhchap_auth_mutex);
- return -ENOMEM;
- }
- chap->qid = (qid == NVME_QID_ANY) ? 0 : qid;
- chap->ctrl = ctrl;
-
- /*
- * Allocate a large enough buffer for the entire negotiation:
- * 4k should be enough to ffdhe8192.
- */
- chap->buf_size = 4096;
- chap->buf = kzalloc(chap->buf_size, GFP_KERNEL);
- if (!chap->buf) {
- mutex_unlock(&ctrl->dhchap_auth_mutex);
- kfree(chap);
- return -ENOMEM;
- }
-
- INIT_WORK(&chap->auth_work, __nvme_auth_work);
- list_add(&chap->entry, &ctrl->dhchap_auth_list);
- mutex_unlock(&ctrl->dhchap_auth_mutex);
+ chap = &ctrl->dhchap_ctxs[qid];
+ cancel_work_sync(&chap->auth_work);
queue_work(nvme_wq, &chap->auth_work);
return 0;
}
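cancel_work_sync() both dequeues a not-yet-started worker and waits out a running one, so the context can be re-queued with no stale negotiation still touching it.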
@@ -906,40 +876,28 @@ int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
struct nvme_dhchap_queue_context *chap;
int ret;
- mutex_lock(&ctrl->dhchap_auth_mutex);
- list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
- if (chap->qid != qid)
- continue;
- mutex_unlock(&ctrl->dhchap_auth_mutex);
- flush_work(&chap->auth_work);
- ret = chap->error;
- return ret;
- }
- mutex_unlock(&ctrl->dhchap_auth_mutex);
- return -ENXIO;
+ chap = &ctrl->dhchap_ctxs[qid];
+ flush_work(&chap->auth_work);
+ ret = chap->error;
+ /* clear sensitive info */
+ nvme_auth_reset_dhchap(chap);
+ return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_wait);
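nvme_auth_negotiate() starts the per-queue worker and nvme_auth_wait() reaps it, so a transport drives authentication as an asynchronous start/wait pair. A hedged caller sketch (the surrounding connect logic is an assumption, not part of this file):

    /* Sketch: how a fabrics transport might drive one queue's authentication. */
    static int example_authenticate_queue(struct nvme_ctrl *ctrl, int qid)
    {
    	int ret;

    	ret = nvme_auth_negotiate(ctrl, qid);	/* queues nvme_queue_auth_work() */
    	if (ret) {
    		dev_warn(ctrl->device,
    			 "qid %d: failed to start authentication\n", qid);
    		return ret;
    	}
    	/* Flushes the worker, harvests chap->error, resets the context. */
    	return nvme_auth_wait(ctrl, qid);
    }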
-void nvme_auth_reset(struct nvme_ctrl *ctrl)
-{
- struct nvme_dhchap_queue_context *chap;
-
- mutex_lock(&ctrl->dhchap_auth_mutex);
- list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
- mutex_unlock(&ctrl->dhchap_auth_mutex);
- flush_work(&chap->auth_work);
- __nvme_auth_reset(chap);
- }
- mutex_unlock(&ctrl->dhchap_auth_mutex);
-}
-EXPORT_SYMBOL_GPL(nvme_auth_reset);
-
-static void nvme_dhchap_auth_work(struct work_struct *work)
+static void nvme_ctrl_auth_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl =
container_of(work, struct nvme_ctrl, dhchap_auth_work);
int ret, q;
+ /*
+ * If the ctrl is not connected, bail out as reconnect will handle
+ * authentication.
+ */
+ if (ctrl->state != NVME_CTRL_LIVE)
+ return;
+
/* Authenticate admin queue first */
ret = nvme_auth_negotiate(ctrl, 0);
if (ret) {
@@ -968,43 +926,75 @@ static void nvme_dhchap_auth_work(struct work_struct *work)
* Failure is a soft-state; credentials remain valid until
* the controller terminates the connection.
*/
+ for (q = 1; q < ctrl->queue_count; q++) {
+ ret = nvme_auth_wait(ctrl, q);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid %d: authentication failed\n", q);
+ }
}
-void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
+int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
- INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
- INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work);
+ struct nvme_dhchap_queue_context *chap;
+ int i, ret;
+
mutex_init(&ctrl->dhchap_auth_mutex);
+ INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
if (!ctrl->opts)
- return;
- nvme_auth_generate_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
- nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret, &ctrl->ctrl_key);
+ return 0;
+ ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
+ &ctrl->host_key);
+ if (ret)
+ return ret;
+ ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
+ &ctrl->ctrl_key);
+ if (ret)
+ goto err_free_dhchap_secret;
+
+ if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
+ return ret;
+
+ ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
+ sizeof(*chap), GFP_KERNEL);
+ if (!ctrl->dhchap_ctxs) {
+ ret = -ENOMEM;
+ goto err_free_dhchap_ctrl_secret;
+ }
+
+ for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
+ chap = &ctrl->dhchap_ctxs[i];
+ chap->qid = i;
+ chap->ctrl = ctrl;
+ INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
+ }
+
+ return 0;
+err_free_dhchap_ctrl_secret:
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+err_free_dhchap_secret:
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl)
{
- struct nvme_dhchap_queue_context *chap = NULL, *tmp;
-
cancel_work_sync(&ctrl->dhchap_auth_work);
- mutex_lock(&ctrl->dhchap_auth_mutex);
- list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry)
- cancel_work_sync(&chap->auth_work);
- mutex_unlock(&ctrl->dhchap_auth_mutex);
}
EXPORT_SYMBOL_GPL(nvme_auth_stop);
void nvme_auth_free(struct nvme_ctrl *ctrl)
{
- struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+ int i;
- mutex_lock(&ctrl->dhchap_auth_mutex);
- list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
- list_del_init(&chap->entry);
- flush_work(&chap->auth_work);
- __nvme_auth_free(chap);
+ if (ctrl->dhchap_ctxs) {
+ for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
+ nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
+ kfree(ctrl->dhchap_ctxs);
}
- mutex_unlock(&ctrl->dhchap_auth_mutex);
if (ctrl->host_key) {
nvme_auth_free_key(ctrl->host_key);
ctrl->host_key = NULL;
@@ -1015,3 +1005,27 @@ void nvme_auth_free(struct nvme_ctrl *ctrl)
}
}
EXPORT_SYMBOL_GPL(nvme_auth_free);
+
+int __init nvme_init_auth(void)
+{
+ nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
+ CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!nvme_chap_buf_cache)
+ return -ENOMEM;
+
+ nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
+ mempool_free_slab, nvme_chap_buf_cache);
+ if (!nvme_chap_buf_pool)
+ goto err_destroy_chap_buf_cache;
+
+ return 0;
+err_destroy_chap_buf_cache:
+ kmem_cache_destroy(nvme_chap_buf_cache);
+ return -ENOMEM;
+}
+
+void __exit nvme_exit_auth(void)
+{
+ mempool_destroy(nvme_chap_buf_pool);
+ kmem_cache_destroy(nvme_chap_buf_cache);
+}