-rw-r--r-- drivers/misc/genwqe/card_base.h  |  7
-rw-r--r-- drivers/misc/genwqe/card_dev.c   |  6
-rw-r--r-- drivers/misc/genwqe/card_utils.c | 43
3 files changed, 38 insertions(+), 18 deletions(-)
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index 5813b5f25006..3743c87f8ab9 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -182,6 +182,7 @@ struct dma_mapping {
struct list_head card_list; /* list of usr_maps for card */
struct list_head pin_list; /* list of pinned memory for dev */
+ int write; /* writable map? useful in unmapping */
};
static inline void genwqe_mapping_init(struct dma_mapping *m,
@@ -189,6 +190,7 @@ static inline void genwqe_mapping_init(struct dma_mapping *m,
{
memset(m, 0, sizeof(*m));
m->type = type;
+ m->write = 1; /* Assume the maps we create are R/W */
}
/**
@@ -347,6 +349,7 @@ enum genwqe_requ_state {
* @user_size: size of user-space memory area
* @page: buffer for partial pages if needed
* @page_dma_addr: dma address partial pages
+ * @write: should we write it back to userspace?
*/
struct genwqe_sgl {
dma_addr_t sgl_dma_addr;
@@ -356,6 +359,8 @@ struct genwqe_sgl {
void __user *user_addr; /* user-space base-address */
size_t user_size; /* size of memory area */
+ int write;
+
unsigned long nr_pages;
unsigned long fpage_offs;
size_t fpage_size;
@@ -369,7 +374,7 @@ struct genwqe_sgl {
};
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
- void __user *user_addr, size_t user_size);
+ void __user *user_addr, size_t user_size, int write);
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
dma_addr_t *dma_list);
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index dd4617764f14..3ecfa35457e0 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -942,6 +942,10 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
genwqe_mapping_init(m,
GENWQE_MAPPING_SGL_TEMP);
+
+ if (ats_flags == ATS_TYPE_SGL_RD)
+ m->write = 0;
+
rc = genwqe_user_vmap(cd, m, (void *)u_addr,
u_size, req);
if (rc != 0)
@@ -954,7 +958,7 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
/* create genwqe style scatter gather list */
rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
(void __user *)u_addr,
- u_size);
+ u_size, m->write);
if (rc != 0)
goto err_out;
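The hunks above introduce the write flag: genwqe_mapping_init() defaults every mapping to read/write, ddcb_cmd_fixups() clears the flag when the ATS entry marks the SGL as read-only (ATS_TYPE_SGL_RD), and the flag is then forwarded into genwqe_alloc_sync_sgl(). A minimal standalone sketch of that pattern in plain user-space C follows; all demo_* names and the two ATS stand-in values are invented for illustration and this is not the driver's code.

#include <stdio.h>
#include <string.h>

enum demo_ats_type { DEMO_ATS_SGL_RD, DEMO_ATS_SGL_RDWR }; /* stand-ins for the real ATS codes */

struct demo_mapping {
	int write;               /* writable map? decides copy-back later */
};

static void demo_mapping_init(struct demo_mapping *m)
{
	memset(m, 0, sizeof(*m));
	m->write = 1;            /* assume R/W, as genwqe_mapping_init() now does */
}

/* Mirrors the shape of the fixup path: create the mapping read/write,
 * demote it if the device will only read the SGL, and hand the flag on
 * to whatever builds the scatter-gather list. */
static int demo_prepare_sgl(enum demo_ats_type ats, struct demo_mapping *m)
{
	demo_mapping_init(m);
	if (ats == DEMO_ATS_SGL_RD)
		m->write = 0;
	return m->write;         /* the real code passes this to genwqe_alloc_sync_sgl() */
}

int main(void)
{
	struct demo_mapping m;

	printf("SGL_RD   -> write=%d\n", demo_prepare_sgl(DEMO_ATS_SGL_RD, &m));
	printf("SGL_RDWR -> write=%d\n", demo_prepare_sgl(DEMO_ATS_SGL_RDWR, &m));
	return 0;
}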
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 147b83011b58..5c0d917636f7 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -296,7 +296,7 @@ static int genwqe_sgl_size(int num_pages)
* from user-space into the cached pages.
*/
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
- void __user *user_addr, size_t user_size)
+ void __user *user_addr, size_t user_size, int write)
{
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
@@ -312,6 +312,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
sgl->user_addr = user_addr;
sgl->user_size = user_size;
+ sgl->write = write;
sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
if (get_order(sgl->sgl_size) > MAX_ORDER) {
@@ -476,14 +477,20 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
int rc = 0;
+ size_t offset;
+ unsigned long res;
struct pci_dev *pci_dev = cd->pci_dev;
if (sgl->fpage) {
- if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs,
- sgl->fpage_size)) {
- dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n",
- __func__);
- rc = -EFAULT;
+ if (sgl->write) {
+ res = copy_to_user(sgl->user_addr,
+ sgl->fpage + sgl->fpage_offs, sgl->fpage_size);
+ if (res) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: copying fpage! (res=%lu)\n",
+ __func__, res);
+ rc = -EFAULT;
+ }
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
sgl->fpage_dma_addr);
@@ -491,12 +498,16 @@ int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
sgl->fpage_dma_addr = 0;
}
if (sgl->lpage) {
- if (copy_to_user(sgl->user_addr + sgl->user_size -
- sgl->lpage_size, sgl->lpage,
- sgl->lpage_size)) {
- dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n",
- __func__);
- rc = -EFAULT;
+ if (sgl->write) {
+ offset = sgl->user_size - sgl->lpage_size;
+ res = copy_to_user(sgl->user_addr + offset, sgl->lpage,
+ sgl->lpage_size);
+ if (res) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: copying lpage! (res=%lu)\n",
+ __func__, res);
+ rc = -EFAULT;
+ }
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
sgl->lpage_dma_addr);
@@ -599,14 +610,14 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
/* pin user pages in memory */
rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
m->nr_pages,
- 1, /* write by caller */
+ m->write, /* readable/writable */
m->page_list); /* ptrs to pages */
if (rc < 0)
goto fail_get_user_pages;
/* assumption: get_user_pages can be killed by signals. */
if (rc < m->nr_pages) {
- free_user_pages(m->page_list, rc, 0);
+ free_user_pages(m->page_list, rc, m->write);
rc = -EFAULT;
goto fail_get_user_pages;
}
@@ -618,7 +629,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
return 0;
fail_free_user_pages:
- free_user_pages(m->page_list, m->nr_pages, 0);
+ free_user_pages(m->page_list, m->nr_pages, m->write);
fail_get_user_pages:
kfree(m->page_list);
@@ -651,7 +662,7 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
if (m->page_list) {
- free_user_pages(m->page_list, m->nr_pages, 1);
+ free_user_pages(m->page_list, m->nr_pages, m->write);
kfree(m->page_list);
m->page_list = NULL;
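On the card_utils.c side, the copy-back of the two bounce pages now happens only for writable SGLs, the return value of copy_to_user() (the number of bytes that could not be copied) is logged, and get_user_pages_fast()/free_user_pages() are told whether the pages are writable. The last-page copy uses an explicit offset of user_size - lpage_size into the user buffer. Below is a small standalone sketch (plain user-space C, invented names and example values, not the driver's code) of how the first- and last-partial-page sizes and that offset relate to an unaligned user buffer in the common multi-page case; the driver computes its values in genwqe_alloc_sync_sgl().

#include <stdio.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE 4096UL

/* Example values: a user buffer that starts part-way into a page and
 * ends part-way into another, so both a first ("fpage") and a last
 * ("lpage") bounce page are needed. */
int main(void)
{
	unsigned long user_addr = 0x20000f00UL;              /* not page aligned */
	size_t user_size = 3 * DEMO_PAGE_SIZE + 128;

	size_t fpage_offs = user_addr & (DEMO_PAGE_SIZE - 1);         /* 0xf00 */
	size_t fpage_size = DEMO_PAGE_SIZE - fpage_offs;              /* bytes of the buffer in the first page */
	size_t lpage_size = (user_addr + user_size) & (DEMO_PAGE_SIZE - 1); /* one way to get the tail length */
	size_t offset = user_size - lpage_size;               /* as in genwqe_free_sync_sgl() */

	printf("fpage: copy %zu bytes from page offset %zu to the start of the user buffer\n",
	       fpage_size, fpage_offs);
	printf("lpage: copy %zu bytes to user offset %zu (end of buffer)\n",
	       lpage_size, offset);
	printf("copy-back is skipped entirely when the SGL is read-only (write == 0)\n");
	return 0;
}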