Diffstat (limited to 'drivers/tee')
-rw-r--r--  drivers/tee/optee/Makefile        |   1
-rw-r--r--  drivers/tee/optee/call.c          | 220
-rw-r--r--  drivers/tee/optee/core.c          | 163
-rw-r--r--  drivers/tee/optee/optee_msg.h     |  38
-rw-r--r--  drivers/tee/optee/optee_private.h |  72
-rw-r--r--  drivers/tee/optee/optee_smc.h     |   7
-rw-r--r--  drivers/tee/optee/rpc.c           |  81
-rw-r--r--  drivers/tee/optee/shm_pool.c      |  75
-rw-r--r--  drivers/tee/optee/shm_pool.h      |  23
-rw-r--r--  drivers/tee/optee/supp.c          | 375
-rw-r--r--  drivers/tee/tee_core.c            | 113
-rw-r--r--  drivers/tee/tee_private.h         |  60
-rw-r--r--  drivers/tee/tee_shm.c             | 230
-rw-r--r--  drivers/tee/tee_shm_pool.c        | 165
14 files changed, 1216 insertions(+), 407 deletions(-)
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index d526fb88d9c5..48d262ae2f04 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -4,3 +4,4 @@ optee-objs += core.o
optee-objs += call.o
optee-objs += rpc.o
optee-objs += supp.o
+optee-objs += shm_pool.o
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index f7b7b404c990..a5afbe6dee68 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
@@ -135,6 +136,7 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_call_waiter w;
struct optee_rpc_param param = { };
+ struct optee_call_ctx call_ctx = { };
u32 ret;
param.a0 = OPTEE_SMC_CALL_WITH_ARG;
@@ -159,13 +161,14 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
param.a1 = res.a1;
param.a2 = res.a2;
param.a3 = res.a3;
- optee_handle_rpc(ctx, &param);
+ optee_handle_rpc(ctx, &param, &call_ctx);
} else {
ret = res.a0;
break;
}
}
+ optee_rpc_finalize_call(&call_ctx);
/*
* We're done with our thread in secure world, if there's any
* thread waiters wake up one.
@@ -442,3 +445,218 @@ void optee_disable_shm_cache(struct optee *optee)
}
optee_cq_wait_final(&optee->call_queue, &w);
}
+
+#define PAGELIST_ENTRIES_PER_PAGE \
+ ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
+
+/**
+ * optee_fill_pages_list() - write a list of user pages into the given
+ * shared buffer.
+ *
+ * @dst: page-aligned buffer where the list of pages will be stored
+ * @pages: array of pages that represents the shared buffer
+ * @num_pages: number of entries in @pages
+ * @page_offset: offset of the user buffer from the page start
+ *
+ * @dst should be big enough to hold the list of user page addresses and
+ * the links to the next pages of the buffer
+ */
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+ size_t page_offset)
+{
+ int n = 0;
+ phys_addr_t optee_page;
+ /*
+ * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
+ * for details.
+ */
+ struct {
+ u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
+ u64 next_page_data;
+ } *pages_data;
+
+ /*
+	 * Currently OP-TEE uses a 4k page size and it does not look
+	 * like this will change in the future. On the other hand, there
+	 * are no known ARM architectures with a page size < 4k.
+	 * Thus the following build assert looks redundant. But the code
+	 * below heavily relies on this assumption, so it is better to be
+	 * safe than sorry.
+ */
+ BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ pages_data = (void *)dst;
+ /*
+	 * If a Linux page is bigger than 4k and the user buffer offset is
+	 * larger than 4k/8k/12k/etc., this skips the first 4k pages,
+	 * because they carry no data of value to OP-TEE.
+ */
+ optee_page = page_to_phys(*pages) +
+ round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ while (true) {
+ pages_data->pages_list[n++] = optee_page;
+
+ if (n == PAGELIST_ENTRIES_PER_PAGE) {
+ pages_data->next_page_data =
+ virt_to_phys(pages_data + 1);
+ pages_data++;
+ n = 0;
+ }
+
+ optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+ if (!(optee_page & ~PAGE_MASK)) {
+ if (!--num_pages)
+ break;
+ pages++;
+ optee_page = page_to_phys(*pages);
+ }
+ }
+}
+
+/*
+ * The final entry in each pagelist page is a pointer to the next
+ * pagelist page.
+ */
+static size_t get_pages_list_size(size_t num_entries)
+{
+ int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+ return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
+
+u64 *optee_allocate_pages_list(size_t num_entries)
+{
+ return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
+}
+
+void optee_free_pages_list(void *list, size_t num_entries)
+{
+ free_pages_exact(list, get_pages_list_size(num_entries));
+}
+
+static bool is_normal_memory(pgprot_t p)
+{
+#if defined(CONFIG_ARM)
+ return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
+#elif defined(CONFIG_ARM64)
+ return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
+#else
+#error "Unsupported architecture"
+#endif
+}
+
+static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
+{
+ while (vma && is_normal_memory(vma->vm_page_prot)) {
+ if (vma->vm_end >= end)
+ return 0;
+ vma = vma->vm_next;
+ }
+
+ return -EINVAL;
+}
+
+static int check_mem_type(unsigned long start, size_t num_pages)
+{
+ struct mm_struct *mm = current->mm;
+ int rc;
+
+ down_read(&mm->mmap_sem);
+ rc = __check_mem_type(find_vma(mm, start),
+ start + num_pages * PAGE_SIZE);
+ up_read(&mm->mmap_sem);
+
+ return rc;
+}
+
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ struct tee_shm *shm_arg = NULL;
+ struct optee_msg_arg *msg_arg;
+ u64 *pages_list;
+ phys_addr_t msg_parg;
+ int rc;
+
+ if (!num_pages)
+ return -EINVAL;
+
+ rc = check_mem_type(start, num_pages);
+ if (rc)
+ return rc;
+
+ pages_list = optee_allocate_pages_list(num_pages);
+ if (!pages_list)
+ return -ENOMEM;
+
+ shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+ if (IS_ERR(shm_arg)) {
+ rc = PTR_ERR(shm_arg);
+ goto out;
+ }
+
+ optee_fill_pages_list(pages_list, pages, num_pages,
+ tee_shm_get_page_offset(shm));
+
+ msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
+ msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
+ msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
+ /*
+	 * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
+	 * we store the buffer offset from the 4k page, as described in the
+	 * OP-TEE ABI.
+ */
+ msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+
+ if (optee_do_call_with_arg(ctx, msg_parg) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+
+ tee_shm_free(shm_arg);
+out:
+ optee_free_pages_list(pages_list, num_pages);
+ return rc;
+}
+
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+ struct tee_shm *shm_arg;
+ struct optee_msg_arg *msg_arg;
+ phys_addr_t msg_parg;
+ int rc = 0;
+
+ shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+ if (IS_ERR(shm_arg))
+ return PTR_ERR(shm_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
+
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
+
+ if (optee_do_call_with_arg(ctx, msg_parg) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+ tee_shm_free(shm_arg);
+ return rc;
+}
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ /*
+ * We don't want to register supplicant memory in OP-TEE.
+	 * Instead, information about it will be passed in the RPC code.
+ */
+ return check_mem_type(start, num_pages);
+}
+
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
+{
+ return 0;
+}
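
To make the page-list geometry concrete: with OPTEE_MSG_NONCONTIG_PAGE_SIZE at 4096 and 64-bit entries, each list page holds 511 page addresses plus one link to the next list page, which is exactly what get_pages_list_size() above computes. Below is a minimal stand-alone sketch of that sizing math; pages_list_size() is an illustrative name, not part of the patch.

	#include <stdint.h>
	#include <stdio.h>

	#define NONCONTIG_PAGE_SIZE	4096UL
	#define ENTRIES_PER_PAGE	((NONCONTIG_PAGE_SIZE / sizeof(uint64_t)) - 1)

	/* Bytes needed for a list describing num_entries 4k pages. */
	static size_t pages_list_size(size_t num_entries)
	{
		size_t pages = (num_entries + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

		return pages * NONCONTIG_PAGE_SIZE;
	}

	int main(void)
	{
		printf("%zu\n", pages_list_size(511));	/* 4096: fits one list page */
		printf("%zu\n", pages_list_size(512));	/* 8192: needs a second page */
		return 0;
	}

The registration call then passes virt_to_phys() of the first list page in tmem.buf_ptr, with the user buffer's offset into its first 4k page OR'ed into the 12 least significant bits.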
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index edb6e4e9ef3a..e9843c53fe31 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -28,6 +28,7 @@
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"
+#include "shm_pool.h"
#define DRIVER_NAME "optee"
@@ -97,6 +98,25 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
return rc;
}
break;
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ p->u.memref.size = mp->u.rmem.size;
+ shm = (struct tee_shm *)(unsigned long)
+ mp->u.rmem.shm_ref;
+
+ if (!shm) {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ break;
+ }
+ p->u.memref.shm_offs = mp->u.rmem.offs;
+ p->u.memref.shm = shm;
+
+ break;
+
default:
return -EINVAL;
}
@@ -104,6 +124,46 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
return 0;
}
+static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ int rc;
+ phys_addr_t pa;
+
+ mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.tmem.size = p->u.memref.size;
+
+ if (!p->u.memref.shm) {
+ mp->u.tmem.buf_ptr = 0;
+ return 0;
+ }
+
+ rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
+ if (rc)
+ return rc;
+
+ mp->u.tmem.buf_ptr = pa;
+ mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+ OPTEE_MSG_ATTR_CACHE_SHIFT;
+
+ return 0;
+}
+
+static int to_msg_param_reg_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.rmem.size = p->u.memref.size;
+ mp->u.rmem.offs = p->u.memref.shm_offs;
+ return 0;
+}
+
/**
* optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
* @msg_params: OPTEE_MSG parameters
@@ -116,7 +176,6 @@ int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
{
int rc;
size_t n;
- phys_addr_t pa;
for (n = 0; n < num_params; n++) {
const struct tee_param *p = params + n;
@@ -139,22 +198,12 @@ int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
- mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT +
- p->attr -
- TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
- mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
- mp->u.tmem.size = p->u.memref.size;
- if (!p->u.memref.shm) {
- mp->u.tmem.buf_ptr = 0;
- break;
- }
- rc = tee_shm_get_pa(p->u.memref.shm,
- p->u.memref.shm_offs, &pa);
+ if (tee_shm_is_registered(p->u.memref.shm))
+ rc = to_msg_param_reg_mem(mp, p);
+ else
+ rc = to_msg_param_tmp_mem(mp, p);
if (rc)
return rc;
- mp->u.tmem.buf_ptr = pa;
- mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
- OPTEE_MSG_ATTR_CACHE_SHIFT;
break;
default:
return -EINVAL;
@@ -171,6 +220,10 @@ static void optee_get_version(struct tee_device *teedev,
.impl_caps = TEE_OPTEE_CAP_TZ,
.gen_caps = TEE_GEN_CAP_GP,
};
+ struct optee *optee = tee_get_drvdata(teedev);
+
+ if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ v.gen_caps |= TEE_GEN_CAP_REG_MEM;
*vers = v;
}
@@ -187,12 +240,12 @@ static int optee_open(struct tee_context *ctx)
if (teedev == optee->supp_teedev) {
bool busy = true;
- mutex_lock(&optee->supp.ctx_mutex);
+ mutex_lock(&optee->supp.mutex);
if (!optee->supp.ctx) {
busy = false;
optee->supp.ctx = ctx;
}
- mutex_unlock(&optee->supp.ctx_mutex);
+ mutex_unlock(&optee->supp.mutex);
if (busy) {
kfree(ctxdata);
return -EBUSY;
@@ -252,11 +305,8 @@ static void optee_release(struct tee_context *ctx)
ctx->data = NULL;
- if (teedev == optee->supp_teedev) {
- mutex_lock(&optee->supp.ctx_mutex);
- optee->supp.ctx = NULL;
- mutex_unlock(&optee->supp.ctx_mutex);
- }
+ if (teedev == optee->supp_teedev)
+ optee_supp_release(&optee->supp);
}
static const struct tee_driver_ops optee_ops = {
@@ -267,6 +317,8 @@ static const struct tee_driver_ops optee_ops = {
.close_session = optee_close_session,
.invoke_func = optee_invoke_func,
.cancel_req = optee_cancel_req,
+ .shm_register = optee_shm_register,
+ .shm_unregister = optee_shm_unregister,
};
static const struct tee_desc optee_desc = {
@@ -281,6 +333,8 @@ static const struct tee_driver_ops optee_supp_ops = {
.release = optee_release,
.supp_recv = optee_supp_recv,
.supp_send = optee_supp_send,
+ .shm_register = optee_shm_register_supp,
+ .shm_unregister = optee_shm_unregister_supp,
};
static const struct tee_desc optee_supp_desc = {
@@ -345,21 +399,22 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
}
static struct tee_shm_pool *
-optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
+ u32 sec_caps)
{
union {
struct arm_smccc_res smccc;
struct optee_smc_get_shm_config_result result;
} res;
- struct tee_shm_pool *pool;
unsigned long vaddr;
phys_addr_t paddr;
size_t size;
phys_addr_t begin;
phys_addr_t end;
void *va;
- struct tee_shm_pool_mem_info priv_info;
- struct tee_shm_pool_mem_info dmabuf_info;
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
if (res.result.status != OPTEE_SMC_RETURN_OK) {
@@ -389,22 +444,49 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
}
vaddr = (unsigned long)va;
- priv_info.vaddr = vaddr;
- priv_info.paddr = paddr;
- priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
- dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
- dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
- dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-
- pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
- if (IS_ERR(pool)) {
- memunmap(va);
- goto out;
+ /*
+	 * If OP-TEE can work with unregistered SHM, we will use our own
+	 * pool for private shm.
+ */
+ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
+ rc = optee_shm_pool_alloc_pages();
+ if (IS_ERR(rc))
+ goto err_memunmap;
+ priv_mgr = rc;
+ } else {
+ const size_t sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+
+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
+ 3 /* 8 bytes aligned */);
+ if (IS_ERR(rc))
+ goto err_memunmap;
+ priv_mgr = rc;
+
+ vaddr += sz;
+ paddr += sz;
+ size -= sz;
}
+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
+ if (IS_ERR(rc))
+ goto err_free_priv_mgr;
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc))
+ goto err_free_dmabuf_mgr;
+
*memremaped_shm = va;
-out:
- return pool;
+
+ return rc;
+
+err_free_dmabuf_mgr:
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+ tee_shm_pool_mgr_destroy(priv_mgr);
+err_memunmap:
+ memunmap(va);
+ return rc;
}
/* Simple wrapper functions to be able to use a function pointer */
@@ -482,7 +564,7 @@ static struct optee *optee_probe(struct device_node *np)
if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
return ERR_PTR(-EINVAL);
- pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+ pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps);
if (IS_ERR(pool))
return (void *)pool;
@@ -493,6 +575,7 @@ static struct optee *optee_probe(struct device_node *np)
}
optee->invoke_fn = invoke_fn;
+ optee->sec_caps = sec_caps;
teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
if (IS_ERR(teedev)) {
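
Because optee_get_version() now advertises TEE_GEN_CAP_REG_MEM whenever secure world reports OPTEE_SMC_SEC_CAP_DYNAMIC_SHM, user space can probe for the feature before attempting to register memory. A minimal sketch, assuming the TEE_GEN_CAP_REG_MEM flag added to include/uapi/linux/tee.h elsewhere in this series:

	#include <sys/ioctl.h>
	#include <linux/tee.h>

	/* Returns non-zero if the device behind tee_fd supports registered SHM. */
	static int supports_reg_mem(int tee_fd)
	{
		struct tee_ioctl_version_data vers;

		if (ioctl(tee_fd, TEE_IOC_VERSION, &vers))
			return 0;
		return !!(vers.gen_caps & TEE_GEN_CAP_REG_MEM);
	}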
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index dd7a06ee0462..30504901be80 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -67,11 +67,32 @@
#define OPTEE_MSG_ATTR_META BIT(8)
/*
- * The temporary shared memory object is not physically contigous and this
- * temp memref is followed by another fragment until the last temp memref
- * that doesn't have this bit set.
+ * Pointer to a list of pages used to register a user-defined SHM buffer.
+ * Used with OPTEE_MSG_ATTR_TYPE_TMEM_*.
+ * buf_ptr should point to the beginning of the buffer. The buffer will
+ * contain a list of page addresses, stored as 64-bit values, from which
+ * OP-TEE core can reconstruct a contiguous buffer.
+ * The last entry on a page should point to the next page of the buffer.
+ * Every entry in the buffer should point to the beginning of a 4k page
+ * (the 12 least significant bits must be equal to zero).
+ *
+ * The 12 least significant bits of optee_msg_param.u.tmem.buf_ptr should
+ * hold the page offset of the user buffer.
+ *
+ * So, entries should be placed like members of this structure:
+ *
+ * struct page_data {
+ *	uint64_t pages_array[OPTEE_MSG_NONCONTIG_PAGE_SIZE/sizeof(uint64_t) - 1];
+ *	uint64_t next_page_data;
+ * };
+ *
+ * The structure is designed to exactly fit the page size
+ * OPTEE_MSG_NONCONTIG_PAGE_SIZE, which is a standard 4KB page.
+ *
+ * The size of 4KB is chosen because this is the smallest page size for ARM
+ * architectures. If the REE uses larger pages, it should split them into
+ * 4KB ones.
*/
-#define OPTEE_MSG_ATTR_FRAGMENT BIT(9)
+#define OPTEE_MSG_ATTR_NONCONTIG BIT(9)
/*
* Memory attributes for caching passed with temp memrefs. The actual value
@@ -94,6 +115,11 @@
#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005
#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006
+/*
+ * Page size used in non-contiguous buffer entries
+ */
+#define OPTEE_MSG_NONCONTIG_PAGE_SIZE 4096
+
/**
* struct optee_msg_param_tmem - temporary memory reference parameter
* @buf_ptr: Address of the buffer
@@ -145,8 +171,8 @@ struct optee_msg_param_value {
*
* @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
* the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
- * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and
- * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem.
+ * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
* OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
*/
struct optee_msg_param {
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index c374cd594314..35e79386c556 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -53,36 +53,24 @@ struct optee_wait_queue {
* @ctx the context of current connected supplicant.
* if !NULL the supplicant device is available for use,
* else busy
- * @ctx_mutex: held while accessing @ctx
- * @func: supplicant function id to call
- * @ret: call return value
- * @num_params: number of elements in @param
- * @param: parameters for @func
- * @req_posted: if true, a request has been posted to the supplicant
- * @supp_next_send: if true, next step is for supplicant to send response
- * @thrd_mutex: held by the thread doing a request to supplicant
- * @supp_mutex: held by supplicant while operating on this struct
- * @data_to_supp: supplicant is waiting on this for next request
- * @data_from_supp: requesting thread is waiting on this to get the result
+ * @mutex: held while accessing content of this struct
+ * @req_id: current request id if supplicant is doing synchronous
+ * communication, else -1
+ * @reqs:		queued requests not yet retrieved by the supplicant
+ * @idr: IDR holding all requests currently being processed
+ * by supplicant
+ * @reqs_c: completion used by supplicant when waiting for a
+ * request to be queued.
*/
struct optee_supp {
+ /* Serializes access to this struct */
+ struct mutex mutex;
struct tee_context *ctx;
- /* Serializes access of ctx */
- struct mutex ctx_mutex;
-
- u32 func;
- u32 ret;
- size_t num_params;
- struct tee_param *param;
-
- bool req_posted;
- bool supp_next_send;
- /* Serializes access to this struct for requesting thread */
- struct mutex thrd_mutex;
- /* Serializes access to this struct for supplicant threads */
- struct mutex supp_mutex;
- struct completion data_to_supp;
- struct completion data_from_supp;
+
+ int req_id;
+ struct list_head reqs;
+ struct idr idr;
+ struct completion reqs_c;
};
/**
@@ -96,6 +84,8 @@ struct optee_supp {
* @supp: supplicant synchronization struct for RPC to supplicant
* @pool: shared memory pool
* @memremaped_shm virtual address of memory in shared memory pool
+ * @sec_caps: secure world capabilities defined by
+ * OPTEE_SMC_SEC_CAP_* in optee_smc.h
*/
struct optee {
struct tee_device *supp_teedev;
@@ -106,6 +96,7 @@ struct optee {
struct optee_supp supp;
struct tee_shm_pool *pool;
void *memremaped_shm;
+ u32 sec_caps;
};
struct optee_session {
@@ -130,7 +121,16 @@ struct optee_rpc_param {
u32 a7;
};
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param);
+/* Holds context that is preserved during one STD call */
+struct optee_call_ctx {
+	/* information about the pages list used in the last allocation */
+ void *pages_list;
+ size_t num_entries;
+};
+
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
+ struct optee_call_ctx *call_ctx);
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx);
void optee_wait_queue_init(struct optee_wait_queue *wq);
void optee_wait_queue_exit(struct optee_wait_queue *wq);
@@ -142,6 +142,7 @@ int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
void optee_supp_init(struct optee_supp *supp);
void optee_supp_uninit(struct optee_supp *supp);
+void optee_supp_release(struct optee_supp *supp);
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
struct tee_param *param);
@@ -160,11 +161,26 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
void optee_enable_shm_cache(struct optee *optee);
void optee_disable_shm_cache(struct optee *optee);
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start);
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm);
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start);
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm);
+
int optee_from_msg_param(struct tee_param *params, size_t num_params,
const struct optee_msg_param *msg_params);
int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
const struct tee_param *params);
+u64 *optee_allocate_pages_list(size_t num_entries);
+void optee_free_pages_list(void *array, size_t num_entries);
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+ size_t page_offset);
+
/*
* Small helpers
*/
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index 069c8e1429de..7cd327243ada 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -222,6 +222,13 @@ struct optee_smc_get_shm_config_result {
#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0)
/* Secure world can communicate via previously unregistered shared memory */
#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1)
+
+/*
+ * Secure world supports the "register/unregister shared memory" commands
+ * and accepts command buffers located anywhere in non-secure RAM
+ */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM BIT(2)
+
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index cef417f4f4d2..41aea12e2bcc 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -192,15 +192,16 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
if (ret)
return ERR_PTR(-ENOMEM);
- mutex_lock(&optee->supp.ctx_mutex);
+ mutex_lock(&optee->supp.mutex);
/* Increases count as secure world doesn't have a reference */
shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
- mutex_unlock(&optee->supp.ctx_mutex);
+ mutex_unlock(&optee->supp.mutex);
return shm;
}
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
- struct optee_msg_arg *arg)
+ struct optee_msg_arg *arg,
+ struct optee_call_ctx *call_ctx)
{
phys_addr_t pa;
struct tee_shm *shm;
@@ -245,10 +246,49 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
goto bad;
}
- arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
- arg->params[0].u.tmem.buf_ptr = pa;
- arg->params[0].u.tmem.size = sz;
- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+ sz = tee_shm_get_size(shm);
+
+ if (tee_shm_is_registered(shm)) {
+ struct page **pages;
+ u64 *pages_list;
+ size_t page_num;
+
+ pages = tee_shm_get_pages(shm, &page_num);
+ if (!pages || !page_num) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
+ pages_list = optee_allocate_pages_list(page_num);
+ if (!pages_list) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
+ call_ctx->pages_list = pages_list;
+ call_ctx->num_entries = page_num;
+
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ /*
+		 * In the least significant bits of u.tmem.buf_ptr we store
+		 * the buffer offset from the 4k page, as described in the
+		 * OP-TEE ABI.
+ */
+ arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) &
+ (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+ arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+
+ optee_fill_pages_list(pages_list, pages, page_num,
+ tee_shm_get_page_offset(shm));
+ } else {
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+ arg->params[0].u.tmem.buf_ptr = pa;
+ arg->params[0].u.tmem.size = sz;
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+ }
+
arg->ret = TEEC_SUCCESS;
return;
bad:
@@ -307,8 +347,24 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
arg->ret = TEEC_SUCCESS;
}
+static void free_pages_list(struct optee_call_ctx *call_ctx)
+{
+ if (call_ctx->pages_list) {
+ optee_free_pages_list(call_ctx->pages_list,
+ call_ctx->num_entries);
+ call_ctx->pages_list = NULL;
+ call_ctx->num_entries = 0;
+ }
+}
+
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
+{
+ free_pages_list(call_ctx);
+}
+
static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
- struct tee_shm *shm)
+ struct tee_shm *shm,
+ struct optee_call_ctx *call_ctx)
{
struct optee_msg_arg *arg;
@@ -329,7 +385,8 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
handle_rpc_func_cmd_wait(arg);
break;
case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
- handle_rpc_func_cmd_shm_alloc(ctx, arg);
+ free_pages_list(call_ctx);
+ handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
break;
case OPTEE_MSG_RPC_CMD_SHM_FREE:
handle_rpc_func_cmd_shm_free(ctx, arg);
@@ -343,10 +400,12 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
* optee_handle_rpc() - handle RPC from secure world
* @ctx: context doing the RPC
* @param: value of registers for the RPC
+ * @call_ctx: call context. Preserved during one OP-TEE invocation
*
* Result of RPC is written back into @param.
*/
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param)
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
+ struct optee_call_ctx *call_ctx)
{
struct tee_device *teedev = ctx->teedev;
struct optee *optee = tee_get_drvdata(teedev);
@@ -381,7 +440,7 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param)
break;
case OPTEE_SMC_RPC_FUNC_CMD:
shm = reg_pair_to_ptr(param->a1, param->a2);
- handle_rpc_func_cmd(ctx, optee, shm);
+ handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
break;
default:
pr_warn("Unknown RPC func 0x%x\n",
diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c
new file mode 100644
index 000000000000..49397813fff1
--- /dev/null
+++ b/drivers/tee/optee/shm_pool.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2017, EPAM Systems
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+#include "shm_pool.h"
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm, size_t size)
+{
+ unsigned int order = get_order(size);
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return -ENOMEM;
+
+ shm->kaddr = page_address(page);
+ shm->paddr = page_to_phys(page);
+ shm->size = PAGE_SIZE << order;
+
+ return 0;
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm)
+{
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+/**
+ * optee_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used when OP-TEE supports dynamic SHM. In this case
+ * command buffers and such are allocated from the kernel's own memory.
+ */
+struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
+{
+ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->ops = &pool_ops;
+
+ return mgr;
+}
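
The manager deliberately carries no private state: every tee_shm gets its own alloc_pages() allocation, so destroying the manager only has to free the manager struct itself. A condensed sketch of how it combines with a reserved-memory manager into a complete pool, mirroring optee_config_shm_memremap() in core.c (error handling elided; make_dyn_pool() is an illustrative name):

	static struct tee_shm_pool *make_dyn_pool(unsigned long vaddr,
						  phys_addr_t paddr, size_t size)
	{
		struct tee_shm_pool_mgr *priv_mgr = optee_shm_pool_alloc_pages();
		struct tee_shm_pool_mgr *dmabuf_mgr =
			tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size,
						       PAGE_SHIFT);

		return tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	}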
diff --git a/drivers/tee/optee/shm_pool.h b/drivers/tee/optee/shm_pool.h
new file mode 100644
index 000000000000..4e753c3bf7ec
--- /dev/null
+++ b/drivers/tee/optee/shm_pool.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef SHM_POOL_H
+#define SHM_POOL_H
+
+#include <linux/tee_drv.h>
+
+struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void);
+
+#endif
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index b4ea0678a436..df35fc01fd3e 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -16,21 +16,61 @@
#include <linux/uaccess.h>
#include "optee_private.h"
+struct optee_supp_req {
+ struct list_head link;
+
+ bool busy;
+ u32 func;
+ u32 ret;
+ size_t num_params;
+ struct tee_param *param;
+
+ struct completion c;
+};
+
void optee_supp_init(struct optee_supp *supp)
{
memset(supp, 0, sizeof(*supp));
- mutex_init(&supp->ctx_mutex);
- mutex_init(&supp->thrd_mutex);
- mutex_init(&supp->supp_mutex);
- init_completion(&supp->data_to_supp);
- init_completion(&supp->data_from_supp);
+ mutex_init(&supp->mutex);
+ init_completion(&supp->reqs_c);
+ idr_init(&supp->idr);
+ INIT_LIST_HEAD(&supp->reqs);
+ supp->req_id = -1;
}
void optee_supp_uninit(struct optee_supp *supp)
{
- mutex_destroy(&supp->ctx_mutex);
- mutex_destroy(&supp->thrd_mutex);
- mutex_destroy(&supp->supp_mutex);
+ mutex_destroy(&supp->mutex);
+ idr_destroy(&supp->idr);
+}
+
+void optee_supp_release(struct optee_supp *supp)
+{
+ int id;
+ struct optee_supp_req *req;
+ struct optee_supp_req *req_tmp;
+
+ mutex_lock(&supp->mutex);
+
+	/* Abort all requests retrieved by the supplicant */
+ idr_for_each_entry(&supp->idr, req, id) {
+ req->busy = false;
+ idr_remove(&supp->idr, id);
+ req->ret = TEEC_ERROR_COMMUNICATION;
+ complete(&req->c);
+ }
+
+ /* Abort all queued requests */
+ list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
+ list_del(&req->link);
+ req->ret = TEEC_ERROR_COMMUNICATION;
+ complete(&req->c);
+ }
+
+ supp->ctx = NULL;
+ supp->req_id = -1;
+
+ mutex_unlock(&supp->mutex);
}
/**
@@ -44,53 +84,42 @@ void optee_supp_uninit(struct optee_supp *supp)
*/
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct tee_param *param)
+
{
- bool interruptable;
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
+ bool interruptable;
u32 ret;
- /*
- * Other threads blocks here until we've copied our answer from
- * supplicant.
- */
- while (mutex_lock_interruptible(&supp->thrd_mutex)) {
- /* See comment below on when the RPC can be interrupted. */
- mutex_lock(&supp->ctx_mutex);
- interruptable = !supp->ctx;
- mutex_unlock(&supp->ctx_mutex);
- if (interruptable)
- return TEEC_ERROR_COMMUNICATION;
- }
+ if (!req)
+ return TEEC_ERROR_OUT_OF_MEMORY;
- /*
- * We have exclusive access now since the supplicant at this
- * point is either doing a
- * wait_for_completion_interruptible(&supp->data_to_supp) or is in
- * userspace still about to do the ioctl() to enter
- * optee_supp_recv() below.
- */
+ init_completion(&req->c);
+ req->func = func;
+ req->num_params = num_params;
+ req->param = param;
- supp->func = func;
- supp->num_params = num_params;
- supp->param = param;
- supp->req_posted = true;
+ /* Insert the request in the request list */
+ mutex_lock(&supp->mutex);
+ list_add_tail(&req->link, &supp->reqs);
+ mutex_unlock(&supp->mutex);
- /* Let supplicant get the data */
- complete(&supp->data_to_supp);
+ /* Tell an eventual waiter there's a new request */
+ complete(&supp->reqs_c);
/*
* Wait for supplicant to process and return result, once we've
- * returned from wait_for_completion(data_from_supp) we have
+ * returned from wait_for_completion(&req->c) successfully we have
* exclusive access again.
*/
- while (wait_for_completion_interruptible(&supp->data_from_supp)) {
- mutex_lock(&supp->ctx_mutex);
+ while (wait_for_completion_interruptible(&req->c)) {
+ mutex_lock(&supp->mutex);
interruptable = !supp->ctx;
if (interruptable) {
/*
* There's no supplicant available and since the
- * supp->ctx_mutex currently is held none can
+ * supp->mutex currently is held none can
* become available until the mutex released
* again.
*
@@ -101,24 +130,91 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
* will serve all requests in a timely manner and
* interrupting then wouldn't make sense.
*/
- supp->ret = TEEC_ERROR_COMMUNICATION;
- init_completion(&supp->data_to_supp);
+ interruptable = !req->busy;
+ if (!req->busy)
+ list_del(&req->link);
}
- mutex_unlock(&supp->ctx_mutex);
- if (interruptable)
+ mutex_unlock(&supp->mutex);
+
+ if (interruptable) {
+ req->ret = TEEC_ERROR_COMMUNICATION;
break;
+ }
}
- ret = supp->ret;
- supp->param = NULL;
- supp->req_posted = false;
-
- /* We're done, let someone else talk to the supplicant now. */
- mutex_unlock(&supp->thrd_mutex);
+ ret = req->ret;
+ kfree(req);
return ret;
}
+static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
+ int num_params, int *id)
+{
+ struct optee_supp_req *req;
+
+ if (supp->req_id != -1) {
+ /*
+		 * Supplicant should not mix synchronous and asynchronous
+ * requests.
+ */
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (list_empty(&supp->reqs))
+ return NULL;
+
+ req = list_first_entry(&supp->reqs, struct optee_supp_req, link);
+
+ if (num_params < req->num_params) {
+ /* Not enough room for parameters */
+ return ERR_PTR(-EINVAL);
+ }
+
+ *id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
+ if (*id < 0)
+ return ERR_PTR(-ENOMEM);
+
+ list_del(&req->link);
+ req->busy = true;
+
+ return req;
+}
+
+static int supp_check_recv_params(size_t num_params, struct tee_param *params,
+ size_t *num_meta)
+{
+ size_t n;
+
+ if (!num_params)
+ return -EINVAL;
+
+ /*
+	 * If there are memrefs we need to decrease their reference counts,
+	 * as they were increased earlier, and we'll refuse to accept any
+	 * below.
+ */
+ for (n = 0; n < num_params; n++)
+ if (tee_param_is_memref(params + n) && params[n].u.memref.shm)
+ tee_shm_put(params[n].u.memref.shm);
+
+ /*
+ * We only expect parameters as TEE_IOCTL_PARAM_ATTR_TYPE_NONE with
+ * or without the TEE_IOCTL_PARAM_ATTR_META bit set.
+ */
+ for (n = 0; n < num_params; n++)
+ if (params[n].attr &&
+ params[n].attr != TEE_IOCTL_PARAM_ATTR_META)
+ return -EINVAL;
+
+ /* At most we'll need one meta parameter so no need to check for more */
+ if (params->attr == TEE_IOCTL_PARAM_ATTR_META)
+ *num_meta = 1;
+ else
+ *num_meta = 0;
+
+ return 0;
+}
+
/**
* optee_supp_recv() - receive request for supplicant
* @ctx: context receiving the request
@@ -135,65 +231,99 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
struct tee_device *teedev = ctx->teedev;
struct optee *optee = tee_get_drvdata(teedev);
struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req = NULL;
+ int id;
+ size_t num_meta;
int rc;
- /*
- * In case two threads in one supplicant is calling this function
- * simultaneously we need to protect the data with a mutex which
- * we'll release before returning.
- */
- mutex_lock(&supp->supp_mutex);
+ rc = supp_check_recv_params(*num_params, param, &num_meta);
+ if (rc)
+ return rc;
+
+ while (true) {
+ mutex_lock(&supp->mutex);
+ req = supp_pop_entry(supp, *num_params - num_meta, &id);
+ mutex_unlock(&supp->mutex);
+
+ if (req) {
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ break;
+ }
- if (supp->supp_next_send) {
/*
- * optee_supp_recv() has been called again without
- * a optee_supp_send() in between. Supplicant has
- * probably been restarted before it was able to
- * write back last result. Abort last request and
- * wait for a new.
+ * If we didn't get a request we'll block in
+ * wait_for_completion() to avoid needless spinning.
+ *
+		 * This is where the supplicant will be hanging most of
+		 * the time, so let's make this interruptible so we
+		 * can easily restart the supplicant if needed.
*/
- if (supp->req_posted) {
- supp->ret = TEEC_ERROR_COMMUNICATION;
- supp->supp_next_send = false;
- complete(&supp->data_from_supp);
- }
+ if (wait_for_completion_interruptible(&supp->reqs_c))
+ return -ERESTARTSYS;
}
- /*
- * This is where supplicant will be hanging most of the
- * time, let's make this interruptable so we can easily
- * restart supplicant if needed.
- */
- if (wait_for_completion_interruptible(&supp->data_to_supp)) {
- rc = -ERESTARTSYS;
- goto out;
+ if (num_meta) {
+ /*
+		 * tee-supplicant supports meta parameters -> requests can be
+		 * processed asynchronously.
+ */
+ param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+ TEE_IOCTL_PARAM_ATTR_META;
+ param->u.value.a = id;
+ param->u.value.b = 0;
+ param->u.value.c = 0;
+ } else {
+ mutex_lock(&supp->mutex);
+ supp->req_id = id;
+ mutex_unlock(&supp->mutex);
}
- /* We have exlusive access to the data */
+ *func = req->func;
+ *num_params = req->num_params + num_meta;
+ memcpy(param + num_meta, req->param,
+ sizeof(struct tee_param) * req->num_params);
- if (*num_params < supp->num_params) {
- /*
- * Not enough room for parameters, tell supplicant
- * it failed and abort last request.
- */
- supp->ret = TEEC_ERROR_COMMUNICATION;
- rc = -EINVAL;
- complete(&supp->data_from_supp);
- goto out;
+ return 0;
+}
+
+static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
+ size_t num_params,
+ struct tee_param *param,
+ size_t *num_meta)
+{
+ struct optee_supp_req *req;
+ int id;
+ size_t nm;
+ const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+ TEE_IOCTL_PARAM_ATTR_META;
+
+ if (!num_params)
+ return ERR_PTR(-EINVAL);
+
+ if (supp->req_id == -1) {
+ if (param->attr != attr)
+ return ERR_PTR(-EINVAL);
+ id = param->u.value.a;
+ nm = 1;
+ } else {
+ id = supp->req_id;
+ nm = 0;
}
- *func = supp->func;
- *num_params = supp->num_params;
- memcpy(param, supp->param,
- sizeof(struct tee_param) * supp->num_params);
+ req = idr_find(&supp->idr, id);
+ if (!req)
+ return ERR_PTR(-ENOENT);
- /* Allow optee_supp_send() below to do its work */
- supp->supp_next_send = true;
+ if ((num_params - nm) != req->num_params)
+ return ERR_PTR(-EINVAL);
- rc = 0;
-out:
- mutex_unlock(&supp->supp_mutex);
- return rc;
+ req->busy = false;
+ idr_remove(&supp->idr, id);
+ supp->req_id = -1;
+ *num_meta = nm;
+
+ return req;
}
/**
@@ -211,63 +341,42 @@ int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
struct tee_device *teedev = ctx->teedev;
struct optee *optee = tee_get_drvdata(teedev);
struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req;
size_t n;
- int rc = 0;
+ size_t num_meta;
- /*
- * We still have exclusive access to the data since that's how we
- * left it when returning from optee_supp_read().
- */
-
- /* See comment on mutex in optee_supp_read() above */
- mutex_lock(&supp->supp_mutex);
-
- if (!supp->supp_next_send) {
- /*
- * Something strange is going on, supplicant shouldn't
- * enter optee_supp_send() in this state
- */
- rc = -ENOENT;
- goto out;
- }
+ mutex_lock(&supp->mutex);
+ req = supp_pop_req(supp, num_params, param, &num_meta);
+ mutex_unlock(&supp->mutex);
- if (num_params != supp->num_params) {
- /*
- * Something is wrong, let supplicant restart. Next call to
- * optee_supp_recv() will give an error to the requesting
- * thread and release it.
- */
- rc = -EINVAL;
- goto out;
+ if (IS_ERR(req)) {
+ /* Something is wrong, let supplicant restart. */
+ return PTR_ERR(req);
}
/* Update out and in/out parameters */
- for (n = 0; n < num_params; n++) {
- struct tee_param *p = supp->param + n;
+ for (n = 0; n < req->num_params; n++) {
+ struct tee_param *p = req->param + n;
- switch (p->attr) {
+ switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
- p->u.value.a = param[n].u.value.a;
- p->u.value.b = param[n].u.value.b;
- p->u.value.c = param[n].u.value.c;
+ p->u.value.a = param[n + num_meta].u.value.a;
+ p->u.value.b = param[n + num_meta].u.value.b;
+ p->u.value.c = param[n + num_meta].u.value.c;
break;
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
- p->u.memref.size = param[n].u.memref.size;
+ p->u.memref.size = param[n + num_meta].u.memref.size;
break;
default:
break;
}
}
- supp->ret = ret;
-
- /* Allow optee_supp_recv() above to do its work */
- supp->supp_next_send = false;
+ req->ret = ret;
/* Let the requesting thread continue */
- complete(&supp->data_from_supp);
-out:
- mutex_unlock(&supp->supp_mutex);
- return rc;
+ complete(&req->c);
+
+ return 0;
}
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 58a5009eacc3..6c4b200a4560 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -54,6 +54,7 @@ static int tee_open(struct inode *inode, struct file *filp)
goto err;
}
+ kref_init(&ctx->refcount);
ctx->teedev = teedev;
INIT_LIST_HEAD(&ctx->list_shm);
filp->private_data = ctx;
@@ -68,19 +69,40 @@ err:
return rc;
}
-static int tee_release(struct inode *inode, struct file *filp)
+void teedev_ctx_get(struct tee_context *ctx)
{
- struct tee_context *ctx = filp->private_data;
- struct tee_device *teedev = ctx->teedev;
- struct tee_shm *shm;
+ if (ctx->releasing)
+ return;
+ kref_get(&ctx->refcount);
+}
+
+static void teedev_ctx_release(struct kref *ref)
+{
+ struct tee_context *ctx = container_of(ref, struct tee_context,
+ refcount);
+ ctx->releasing = true;
ctx->teedev->desc->ops->release(ctx);
- mutex_lock(&ctx->teedev->mutex);
- list_for_each_entry(shm, &ctx->list_shm, link)
- shm->ctx = NULL;
- mutex_unlock(&ctx->teedev->mutex);
kfree(ctx);
- tee_device_put(teedev);
+}
+
+void teedev_ctx_put(struct tee_context *ctx)
+{
+ if (ctx->releasing)
+ return;
+
+ kref_put(&ctx->refcount, teedev_ctx_release);
+}
+
+static void teedev_close_context(struct tee_context *ctx)
+{
+ tee_device_put(ctx->teedev);
+ teedev_ctx_put(ctx);
+}
+
+static int tee_release(struct inode *inode, struct file *filp)
+{
+ teedev_close_context(filp->private_data);
return 0;
}
@@ -114,8 +136,6 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx,
if (data.flags)
return -EINVAL;
- data.id = -1;
-
shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
if (IS_ERR(shm))
return PTR_ERR(shm);
@@ -138,6 +158,43 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx,
return ret;
}
+static int
+tee_ioctl_shm_register(struct tee_context *ctx,
+ struct tee_ioctl_shm_register_data __user *udata)
+{
+ long ret;
+ struct tee_ioctl_shm_register_data data;
+ struct tee_shm *shm;
+
+ if (copy_from_user(&data, udata, sizeof(data)))
+ return -EFAULT;
+
+ /* Currently no input flags are supported */
+ if (data.flags)
+ return -EINVAL;
+
+ shm = tee_shm_register(ctx, data.addr, data.length,
+ TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ data.id = shm->id;
+ data.flags = shm->flags;
+ data.length = shm->size;
+
+ if (copy_to_user(udata, &data, sizeof(data)))
+ ret = -EFAULT;
+ else
+ ret = tee_shm_get_fd(shm);
+ /*
+	 * When user space closes the file descriptor the shared memory
+	 * will be freed; if tee_shm_get_fd() failed, it is freed
+	 * immediately.
+ */
+ tee_shm_put(shm);
+ return ret;
+}
+
static int params_from_user(struct tee_context *ctx, struct tee_param *params,
size_t num_params,
struct tee_ioctl_param __user *uparams)
@@ -152,11 +209,11 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
return -EFAULT;
/* All unused attribute bits has to be zero */
- if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
+ if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
return -EINVAL;
params[n].attr = ip.attr;
- switch (ip.attr) {
+ switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
break;
@@ -221,18 +278,6 @@ static int params_to_user(struct tee_ioctl_param __user *uparams,
return 0;
}
-static bool param_is_memref(struct tee_param *param)
-{
- switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
- return true;
- default:
- return false;
- }
-}
-
static int tee_ioctl_open_session(struct tee_context *ctx,
struct tee_ioctl_buf_data __user *ubuf)
{
@@ -296,7 +341,7 @@ out:
if (params) {
/* Decrease ref count for all valid shared memory pointers */
for (n = 0; n < arg.num_params; n++)
- if (param_is_memref(params + n) &&
+ if (tee_param_is_memref(params + n) &&
params[n].u.memref.shm)
tee_shm_put(params[n].u.memref.shm);
kfree(params);
@@ -358,7 +403,7 @@ out:
if (params) {
/* Decrease ref count for all valid shared memory pointers */
for (n = 0; n < arg.num_params; n++)
- if (param_is_memref(params + n) &&
+ if (tee_param_is_memref(params + n) &&
params[n].u.memref.shm)
tee_shm_put(params[n].u.memref.shm);
kfree(params);
@@ -406,8 +451,8 @@ static int params_to_supp(struct tee_context *ctx,
struct tee_ioctl_param ip;
struct tee_param *p = params + n;
- ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK;
- switch (p->attr) {
+ ip.attr = p->attr;
+ switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
ip.a = p->u.value.a;
@@ -471,6 +516,10 @@ static int tee_ioctl_supp_recv(struct tee_context *ctx,
if (!params)
return -ENOMEM;
+ rc = params_from_user(ctx, params, num_params, uarg->params);
+ if (rc)
+ goto out;
+
rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
if (rc)
goto out;
@@ -500,11 +549,11 @@ static int params_from_supp(struct tee_param *params, size_t num_params,
return -EFAULT;
/* All unused attribute bits has to be zero */
- if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
+ if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
return -EINVAL;
p->attr = ip.attr;
- switch (ip.attr) {
+ switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
/* Only out and in/out values can be updated */
@@ -586,6 +635,8 @@ static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return tee_ioctl_version(ctx, uarg);
case TEE_IOC_SHM_ALLOC:
return tee_ioctl_shm_alloc(ctx, uarg);
+ case TEE_IOC_SHM_REGISTER:
+ return tee_ioctl_shm_register(ctx, uarg);
case TEE_IOC_OPEN_SESSION:
return tee_ioctl_open_session(ctx, uarg);
case TEE_IOC_INVOKE:
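
End to end, registering user memory takes a single ioctl, and the returned file descriptor is the dma-buf that keeps the registration alive. A minimal user-space sketch, assuming a /dev/tee0 node and the TEE_IOC_SHM_REGISTER definitions added to include/uapi/linux/tee.h elsewhere in this series:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/tee.h>

	int main(void)
	{
		struct tee_ioctl_shm_register_data data = { 0 };
		void *buf = malloc(4 * 4096);
		int fd, shm_fd;

		fd = open("/dev/tee0", O_RDWR);
		if (fd < 0 || !buf)
			return 1;

		data.addr = (uintptr_t)buf;	/* any page-backed user memory */
		data.length = 4 * 4096;
		data.flags = 0;			/* no input flags are defined yet */

		shm_fd = ioctl(fd, TEE_IOC_SHM_REGISTER, &data);
		if (shm_fd < 0)
			return 1;

		printf("registered shm id %d, length %llu\n", data.id,
		       (unsigned long long)data.length);
		close(shm_fd);			/* drops the registration */
		close(fd);
		return 0;
	}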
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
index 21cb6be8bce9..85d99d621603 100644
--- a/drivers/tee/tee_private.h
+++ b/drivers/tee/tee_private.h
@@ -21,68 +21,15 @@
#include <linux/mutex.h>
#include <linux/types.h>
-struct tee_device;
-
-/**
- * struct tee_shm - shared memory object
- * @teedev: device used to allocate the object
- * @ctx: context using the object, if NULL the context is gone
- * @link link element
- * @paddr: physical address of the shared memory
- * @kaddr: virtual address of the shared memory
- * @size: size of shared memory
- * @dmabuf: dmabuf used to for exporting to user space
- * @flags: defined by TEE_SHM_* in tee_drv.h
- * @id: unique id of a shared memory object on this device
- */
-struct tee_shm {
- struct tee_device *teedev;
- struct tee_context *ctx;
- struct list_head link;
- phys_addr_t paddr;
- void *kaddr;
- size_t size;
- struct dma_buf *dmabuf;
- u32 flags;
- int id;
-};
-
-struct tee_shm_pool_mgr;
-
-/**
- * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
- * @alloc: called when allocating shared memory
- * @free: called when freeing shared memory
- */
-struct tee_shm_pool_mgr_ops {
- int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
- size_t size);
- void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
-};
-
-/**
- * struct tee_shm_pool_mgr - shared memory manager
- * @ops: operations
- * @private_data: private data for the shared memory manager
- */
-struct tee_shm_pool_mgr {
- const struct tee_shm_pool_mgr_ops *ops;
- void *private_data;
-};
-
/**
* struct tee_shm_pool - shared memory pool
* @private_mgr: pool manager for shared memory only between kernel
* and secure world
* @dma_buf_mgr: pool manager for shared memory exported to user space
- * @destroy: called when destroying the pool
- * @private_data: private data for the pool
*/
struct tee_shm_pool {
- struct tee_shm_pool_mgr private_mgr;
- struct tee_shm_pool_mgr dma_buf_mgr;
- void (*destroy)(struct tee_shm_pool *pool);
- void *private_data;
+ struct tee_shm_pool_mgr *private_mgr;
+ struct tee_shm_pool_mgr *dma_buf_mgr;
};
#define TEE_DEVICE_FLAG_REGISTERED 0x1
@@ -126,4 +73,7 @@ int tee_shm_get_fd(struct tee_shm *shm);
bool tee_device_get(struct tee_device *teedev);
void tee_device_put(struct tee_device *teedev);
+void teedev_ctx_get(struct tee_context *ctx);
+void teedev_ctx_put(struct tee_context *ctx);
+
#endif /*TEE_PRIVATE_H*/
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 4bc7956cefc4..556960a1bab3 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -23,7 +23,6 @@
static void tee_shm_release(struct tee_shm *shm)
{
struct tee_device *teedev = shm->teedev;
- struct tee_shm_pool_mgr *poolm;
mutex_lock(&teedev->mutex);
idr_remove(&teedev->idr, shm->id);
@@ -31,12 +30,32 @@ static void tee_shm_release(struct tee_shm *shm)
list_del(&shm->link);
mutex_unlock(&teedev->mutex);
- if (shm->flags & TEE_SHM_DMA_BUF)
- poolm = &teedev->pool->dma_buf_mgr;
- else
- poolm = &teedev->pool->private_mgr;
+ if (shm->flags & TEE_SHM_POOL) {
+ struct tee_shm_pool_mgr *poolm;
+
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ poolm = teedev->pool->dma_buf_mgr;
+ else
+ poolm = teedev->pool->private_mgr;
+
+ poolm->ops->free(poolm, shm);
+ } else if (shm->flags & TEE_SHM_REGISTER) {
+ size_t n;
+ int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
+
+ if (rc)
+ dev_err(teedev->dev.parent,
+ "unregister shm %p failed: %d", shm, rc);
+
+ for (n = 0; n < shm->num_pages; n++)
+ put_page(shm->pages[n]);
+
+ kfree(shm->pages);
+ }
+
+ if (shm->ctx)
+ teedev_ctx_put(shm->ctx);
- poolm->ops->free(poolm, shm);
kfree(shm);
tee_device_put(teedev);
@@ -76,6 +95,10 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
struct tee_shm *shm = dmabuf->priv;
size_t size = vma->vm_end - vma->vm_start;
+	/* Refuse to share shared memory provided by the application */
+ if (shm->flags & TEE_SHM_REGISTER)
+ return -EINVAL;
+
return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
size, vma->vm_page_prot);
}
@@ -89,26 +112,20 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
.mmap = tee_shm_op_mmap,
};
-/**
- * tee_shm_alloc() - Allocate shared memory
- * @ctx: Context that allocates the shared memory
- * @size: Requested size of shared memory
- * @flags: Flags setting properties for the requested shared memory.
- *
- * Memory allocated as global shared memory is automatically freed when the
- * TEE file pointer is closed. The @flags field uses the bits defined by
- * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
- * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
- * associated with a dma-buf handle, else driver private memory.
- */
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
+ struct tee_device *teedev,
+ size_t size, u32 flags)
{
- struct tee_device *teedev = ctx->teedev;
struct tee_shm_pool_mgr *poolm = NULL;
struct tee_shm *shm;
void *ret;
int rc;
+ if (ctx && ctx->teedev != teedev) {
+ dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
+ return ERR_PTR(-EINVAL);
+ }
+
if (!(flags & TEE_SHM_MAPPED)) {
dev_err(teedev->dev.parent,
"only mapped allocations supported\n");
@@ -135,13 +152,13 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
goto err_dev_put;
}
- shm->flags = flags;
+ shm->flags = flags | TEE_SHM_POOL;
shm->teedev = teedev;
shm->ctx = ctx;
if (flags & TEE_SHM_DMA_BUF)
- poolm = &teedev->pool->dma_buf_mgr;
+ poolm = teedev->pool->dma_buf_mgr;
else
- poolm = &teedev->pool->private_mgr;
+ poolm = teedev->pool->private_mgr;
rc = poolm->ops->alloc(poolm, shm, size);
if (rc) {
@@ -171,9 +188,13 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
goto err_rem;
}
}
- mutex_lock(&teedev->mutex);
- list_add_tail(&shm->link, &ctx->list_shm);
- mutex_unlock(&teedev->mutex);
+
+ if (ctx) {
+ teedev_ctx_get(ctx);
+ mutex_lock(&teedev->mutex);
+ list_add_tail(&shm->link, &ctx->list_shm);
+ mutex_unlock(&teedev->mutex);
+ }
return shm;
err_rem:
@@ -188,8 +209,145 @@ err_dev_put:
tee_device_put(teedev);
return ret;
}
+
+/**
+ * tee_shm_alloc() - Allocate shared memory
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ * @flags: Flags setting properties for the requested shared memory.
+ *
+ * Memory allocated as global shared memory is automatically freed when the
+ * TEE file pointer is closed. The @flags field uses the bits defined by
+ * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
+ * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
+ * associated with a dma-buf handle, else driver private memory.
+ */
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+{
+ return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
+}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
+struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
+{
+ return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
+}
+EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);
+
+struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
+ size_t length, u32 flags)
+{
+ struct tee_device *teedev = ctx->teedev;
+ const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
+ struct tee_shm *shm;
+ void *ret;
+ int rc;
+ int num_pages;
+ unsigned long start;
+
+ if (flags != req_flags)
+ return ERR_PTR(-ENOTSUPP);
+
+ if (!tee_device_get(teedev))
+ return ERR_PTR(-EINVAL);
+
+ if (!teedev->desc->ops->shm_register ||
+ !teedev->desc->ops->shm_unregister) {
+ tee_device_put(teedev);
+ return ERR_PTR(-ENOTSUPP);
+ }
+
+ teedev_ctx_get(ctx);
+
+ shm = kzalloc(sizeof(*shm), GFP_KERNEL);
+ if (!shm) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ shm->flags = flags | TEE_SHM_REGISTER;
+ shm->teedev = teedev;
+ shm->ctx = ctx;
+ shm->id = -1;
+ start = rounddown(addr, PAGE_SIZE);
+ shm->offset = addr - start;
+ shm->size = length;
+ num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
+ shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
+ if (!shm->pages) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
+ if (rc > 0)
+ shm->num_pages = rc;
+ if (rc != num_pages) {
+ if (rc >= 0)
+ rc = -ENOMEM;
+ ret = ERR_PTR(rc);
+ goto err;
+ }
+
+ mutex_lock(&teedev->mutex);
+ shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+ mutex_unlock(&teedev->mutex);
+
+ if (shm->id < 0) {
+ ret = ERR_PTR(shm->id);
+ goto err;
+ }
+
+ rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
+ shm->num_pages, start);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto err;
+ }
+
+ if (flags & TEE_SHM_DMA_BUF) {
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &tee_shm_dma_buf_ops;
+ exp_info.size = shm->size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = shm;
+
+ shm->dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(shm->dmabuf)) {
+ ret = ERR_CAST(shm->dmabuf);
+ teedev->desc->ops->shm_unregister(ctx, shm);
+ goto err;
+ }
+ }
+
+ mutex_lock(&teedev->mutex);
+ list_add_tail(&shm->link, &ctx->list_shm);
+ mutex_unlock(&teedev->mutex);
+
+ return shm;
+err:
+ if (shm) {
+ size_t n;
+
+ if (shm->id >= 0) {
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, shm->id);
+ mutex_unlock(&teedev->mutex);
+ }
+ if (shm->pages) {
+ for (n = 0; n < shm->num_pages; n++)
+ put_page(shm->pages[n]);
+ kfree(shm->pages);
+ }
+ }
+ kfree(shm);
+ teedev_ctx_put(ctx);
+ tee_device_put(teedev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tee_shm_register);
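A hedged sketch of the registration path (example_register is a
hypothetical wrapper): the range is rounded to page boundaries, pinned
with get_user_pages_fast() and exported as a dma-buf, so the pages stay
valid until the last reference is dropped:

static struct tee_shm *example_register(struct tee_context *ctx,
					void __user *uaddr, size_t len)
{
	/* Only this flag combination is currently accepted. */
	return tee_shm_register(ctx, (unsigned long)uaddr, len,
				TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
}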
+
/**
* tee_shm_get_fd() - Increase reference count and return file descriptor
* @shm: Shared memory handle
@@ -197,10 +355,9 @@ EXPORT_SYMBOL_GPL(tee_shm_alloc);
*/
int tee_shm_get_fd(struct tee_shm *shm)
{
- u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF;
int fd;
- if ((shm->flags & req_flags) != req_flags)
+ if (!(shm->flags & TEE_SHM_DMA_BUF))
return -EINVAL;
fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
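With the relaxed check a dma-buf fd can now be handed out for any shm
carrying TEE_SHM_DMA_BUF, including registered user memory that has no
kernel mapping. A minimal sketch (example_export_fd is hypothetical):

static int example_export_fd(struct tee_shm *shm)
{
	int fd = tee_shm_get_fd(shm);

	if (fd < 0)
		return fd;
	/* The fd holds a dma-buf reference of its own; closing it drops it. */
	return fd;
}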
@@ -238,6 +395,8 @@ EXPORT_SYMBOL_GPL(tee_shm_free);
*/
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
+ if (!(shm->flags & TEE_SHM_MAPPED))
+ return -EINVAL;
/* Check that we're in the range of the shm */
if ((char *)va < (char *)shm->kaddr)
return -EINVAL;
@@ -258,6 +417,8 @@ EXPORT_SYMBOL_GPL(tee_shm_va2pa);
*/
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
+ if (!(shm->flags & TEE_SHM_MAPPED))
+ return -EINVAL;
/* Check that we're in the range of the shm */
if (pa < shm->paddr)
return -EINVAL;
@@ -284,6 +445,8 @@ EXPORT_SYMBOL_GPL(tee_shm_pa2va);
*/
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
+ if (!(shm->flags & TEE_SHM_MAPPED))
+ return ERR_PTR(-EINVAL);
if (offs >= shm->size)
return ERR_PTR(-EINVAL);
return (char *)shm->kaddr + offs;
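A sketch of the effect of the new guards (the helper is hypothetical):
registered user memory has no kaddr, so tee_shm_get_va() and the
va2pa/pa2va helpers now fail cleanly with -EINVAL instead of computing
addresses from a NULL kernel mapping:

static bool example_has_kernel_va(struct tee_shm *shm)
{
	return !IS_ERR(tee_shm_get_va(shm, 0));
}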
@@ -336,17 +499,6 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
/**
- * tee_shm_get_id() - Get id of a shared memory object
- * @shm: Shared memory handle
- * @returns id
- */
-int tee_shm_get_id(struct tee_shm *shm)
-{
- return shm->id;
-}
-EXPORT_SYMBOL_GPL(tee_shm_get_id);
-
-/**
* tee_shm_put() - Decrease reference count on a shared memory handle
* @shm: Shared memory handle
*/
diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c
index fb4f8522a526..e6d4b9e4a864 100644
--- a/drivers/tee/tee_shm_pool.c
+++ b/drivers/tee/tee_shm_pool.c
@@ -44,49 +44,18 @@ static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
shm->kaddr = NULL;
}
+static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ gen_pool_destroy(poolm->private_data);
+ kfree(poolm);
+}
+
static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
.alloc = pool_op_gen_alloc,
.free = pool_op_gen_free,
+ .destroy_poolmgr = pool_op_gen_destroy_poolmgr,
};
-static void pool_res_mem_destroy(struct tee_shm_pool *pool)
-{
- gen_pool_destroy(pool->private_mgr.private_data);
- gen_pool_destroy(pool->dma_buf_mgr.private_data);
-}
-
-static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr,
- struct tee_shm_pool_mem_info *info,
- int min_alloc_order)
-{
- size_t page_mask = PAGE_SIZE - 1;
- struct gen_pool *genpool = NULL;
- int rc;
-
- /*
- * Start and end must be page aligned
- */
- if ((info->vaddr & page_mask) || (info->paddr & page_mask) ||
- (info->size & page_mask))
- return -EINVAL;
-
- genpool = gen_pool_create(min_alloc_order, -1);
- if (!genpool)
- return -ENOMEM;
-
- gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
- rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size,
- -1);
- if (rc) {
- gen_pool_destroy(genpool);
- return rc;
- }
-
- mgr->private_data = genpool;
- mgr->ops = &pool_ops_generic;
- return 0;
-}
-
/**
* tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
* memory range
@@ -104,42 +73,109 @@ struct tee_shm_pool *
tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
struct tee_shm_pool_mem_info *dmabuf_info)
{
- struct tee_shm_pool *pool = NULL;
- int ret;
-
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!pool) {
- ret = -ENOMEM;
- goto err;
- }
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
/*
* Create the pool for driver private shared memory
*/
- ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info,
- 3 /* 8 byte aligned */);
- if (ret)
- goto err;
+ rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
+ priv_info->size,
+ 3 /* 8 byte aligned */);
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
/*
* Create the pool for dma_buf shared memory
*/
- ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info,
- PAGE_SHIFT);
- if (ret)
+ rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
+ dmabuf_info->paddr,
+ dmabuf_info->size, PAGE_SHIFT);
+ if (IS_ERR(rc))
+ goto err_free_priv_mgr;
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc))
+ goto err_free_dmabuf_mgr;
+
+ return rc;
+
+err_free_dmabuf_mgr:
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+ tee_shm_pool_mgr_destroy(priv_mgr);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
+
+struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
+ phys_addr_t paddr,
+ size_t size,
+ int min_alloc_order)
+{
+ const size_t page_mask = PAGE_SIZE - 1;
+ struct tee_shm_pool_mgr *mgr;
+ int rc;
+
+ /* Start and end must be page aligned */
+ if (vaddr & page_mask || paddr & page_mask || size & page_mask)
+ return ERR_PTR(-EINVAL);
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->private_data = gen_pool_create(min_alloc_order, -1);
+ if (!mgr->private_data) {
+ rc = -ENOMEM;
goto err;
+ }
- pool->destroy = pool_res_mem_destroy;
- return pool;
+ gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
+ rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
+ if (rc) {
+ gen_pool_destroy(mgr->private_data);
+ goto err;
+ }
+
+ mgr->ops = &pool_ops_generic;
+
+ return mgr;
err:
- if (ret == -ENOMEM)
- pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__);
- if (pool && pool->private_mgr.private_data)
- gen_pool_destroy(pool->private_mgr.private_data);
- kfree(pool);
- return ERR_PTR(ret);
+ kfree(mgr);
+
+ return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
+EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);
+
+static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
+{
+ return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
+ mgr->ops->destroy_poolmgr;
+}
+
+struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
+ struct tee_shm_pool_mgr *dmabuf_mgr)
+{
+ struct tee_shm_pool *pool;
+
+ if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
+ return ERR_PTR(-EINVAL);
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->private_mgr = priv_mgr;
+ pool->dma_buf_mgr = dmabuf_mgr;
+
+ return pool;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);
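To illustrate why the managers were split out: a driver can now pair the
generic reserved-memory manager with an allocator of its own by filling
in all three ops. Everything named example_* below is a hypothetical
sketch, not part of this patch:

static int example_op_alloc(struct tee_shm_pool_mgr *poolm,
			    struct tee_shm *shm, size_t size)
{
	shm->kaddr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!shm->kaddr)
		return -ENOMEM;
	shm->paddr = virt_to_phys(shm->kaddr);
	shm->size = size;
	return 0;
}

static void example_op_free(struct tee_shm_pool_mgr *poolm,
			    struct tee_shm *shm)
{
	free_pages_exact(shm->kaddr, shm->size);
	shm->kaddr = NULL;
}

static void example_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	kfree(poolm);
}

/* check_mgr_ops() insists on all three ops being present. */
static const struct tee_shm_pool_mgr_ops example_pool_ops = {
	.alloc = example_op_alloc,
	.free = example_op_free,
	.destroy_poolmgr = example_op_destroy_poolmgr,
};

A manager using these ops is passed to tee_shm_pool_alloc() together
with, for instance, one returned by tee_shm_pool_mgr_alloc_res_mem().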
/**
* tee_shm_pool_free() - Free a shared memory pool
@@ -150,7 +186,10 @@ EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
*/
void tee_shm_pool_free(struct tee_shm_pool *pool)
{
- pool->destroy(pool);
+ if (pool->private_mgr)
+ tee_shm_pool_mgr_destroy(pool->private_mgr);
+ if (pool->dma_buf_mgr)
+ tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
kfree(pool);
}
EXPORT_SYMBOL_GPL(tee_shm_pool_free);