author     Linus Torvalds <torvalds@linux-foundation.org>   2015-09-02 13:22:38 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-09-02 13:22:38 -0700
commit     d975f309a8b250e67b66eabeb56be6989c783629 (patch)
tree       846cae85891024f7c45416d085df90d98ec2e51b
parent     52b084d31cbc8e90cb6fc1ac4061d9a24375c89d (diff)
parent     f8bcbe62acd0e1ce9004b83e98a4af87ae385dcf (diff)
download   linux-d975f309a8b250e67b66eabeb56be6989c783629.tar.bz2
Merge branch 'for-4.3/sg' of git://git.kernel.dk/linux-block
Pull SG updates from Jens Axboe:
"This contains a set of scatter-gather related changes/fixes for 4.3:
- Add support for limited chaining of sg tables even for
architectures that do not set ARCH_HAS_SG_CHAIN. From Christoph.
- Add sg chain support to target_rd. From Christoph.
- Fixup open coded sg->page_link in crypto/omap-sham. From
Christoph.
- Fixup open coded crypto ->page_link manipulation. From Dan.
- Also from Dan, automated fixup of manual sg_unmark_end()
manipulations.
- Also from Dan, automated fixup of open coded sg_phys()
implementations.
- From Robert Jarzmik, addition of an sg table splitting helper that
drivers can use"
* 'for-4.3/sg' of git://git.kernel.dk/linux-block:
lib: scatterlist: add sg splitting function
scatterlist: use sg_phys()
crypto/omap-sham: remove an open coded access to ->page_link
scatterlist: remove open coded sg_unmark_end instances
crypto: replace scatterwalk_sg_chain with sg_chain
target/rd: always chain S/G list
scatterlist: allow limited chaining without ARCH_HAS_SG_CHAIN
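For orientation, the open-coded scatterlist idioms removed by this series and the generic helpers that replace them look roughly like the following. This is a minimal, hypothetical sketch, not taken from any patch below; the example_* names are invented for illustration.

    #include <linux/kernel.h>
    #include <linux/scatterlist.h>

    /*
     * Hypothetical driver helpers, for illustration only.
     * sg_chain() consumes the last entry of @first as the chain link,
     * so @first must have been sized with one spare entry for it.
     */
    static void example_chain_tables(struct scatterlist *first,
                                     unsigned int first_nents,
                                     struct scatterlist *second)
    {
            /* was: open-coded sg_set_page()/page_link bit twiddling */
            sg_chain(first, first_nents, second);
    }

    static phys_addr_t example_segment_phys(struct scatterlist *sg)
    {
            /* was: page_to_phys(sg_page(sg)) + sg->offset */
            return sg_phys(sg);
    }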
 arch/arm/mm/dma-mapping.c                    |   2
 arch/microblaze/kernel/dma.c                 |   3
 block/blk-merge.c                            |   2
 crypto/algif_skcipher.c                      |   2
 crypto/gcm.c                                 |   4
 drivers/crypto/bfin_crc.c                    |   3
 drivers/crypto/omap-sham.c                   |   2
 drivers/crypto/qce/sha.c                     |   2
 drivers/crypto/sahara.c                      |   2
 drivers/crypto/talitos.c                     |   2
 drivers/iommu/intel-iommu.c                  |   4
 drivers/iommu/iommu.c                        |   2
 drivers/mmc/card/queue.c                     |   4
 drivers/staging/android/ion/ion_chunk_heap.c |   4
 drivers/target/target_core_rd.c              |  44
 include/crypto/scatterwalk.h                 |  10
 include/linux/scatterlist.h                  |   9
 lib/Kconfig                                  |   7
 lib/Makefile                                 |   1
 lib/scatterlist.c                            |   4
 lib/sg_split.c                               | 202
 21 files changed, 234 insertions(+), 81 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index cba12f34ff77..3d3d6aa60c87 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1520,7 +1520,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                 return -ENOMEM;
 
         for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-                phys_addr_t phys = page_to_phys(sg_page(s));
+                phys_addr_t phys = sg_phys(s) & PAGE_MASK;
                 unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
                 if (!is_coherent &&
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index bf4dec229437..c89da6312954 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,8 +61,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
         /* FIXME this part of code is untested */
         for_each_sg(sgl, sg, nents, i) {
                 sg->dma_address = sg_phys(sg);
-                __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
-                                sg->length, direction);
+                __dma_sync(sg_phys(sg), sg->length, direction);
         }
 
         return nents;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b2625271a572..d088cffb8105 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -393,7 +393,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                 if (rq->cmd_flags & REQ_WRITE)
                         memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
-                sg->page_link &= ~0x02;
+                sg_unmark_end(sg);
                 sg = sg_next(sg);
                 sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                             q->dma_drain_size,
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 945075292bc9..af31a0ee4057 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -145,7 +145,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
                 sgl->cur = 0;
 
                 if (sg)
-                        scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+                        sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
 
                 list_add_tail(&sgl->list, &ctx->tsgl);
         }
diff --git a/crypto/gcm.c b/crypto/gcm.c
index ddb4f29b2fe6..bec329b3de8d 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -206,14 +206,14 @@ static void crypto_gcm_init_common(struct aead_request *req)
         sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
         sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
         if (sg != pctx->src + 1)
-                scatterwalk_sg_chain(pctx->src, 2, sg);
+                sg_chain(pctx->src, 2, sg);
 
         if (req->src != req->dst) {
                 sg_init_table(pctx->dst, 3);
                 sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
                 sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
                 if (sg != pctx->dst + 1)
-                        scatterwalk_sg_chain(pctx->dst, 2, sg);
+                        sg_chain(pctx->dst, 2, sg);
         }
 }
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index d9af9403ab6c..2f0b3337505d 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -370,8 +370,7 @@ static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
                 sg_init_table(ctx->bufsl, nsg);
                 sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
                 if (nsg > 1)
-                        scatterwalk_sg_chain(ctx->bufsl, nsg,
-                                        req->src);
+                        sg_chain(ctx->bufsl, nsg, req->src);
                 ctx->sg = ctx->bufsl;
         } else
                 ctx->sg = req->src;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index b2024c95a3cf..48adb2a0903e 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -588,7 +588,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
          * the dmaengine may try to DMA the incorrect amount of data.
          */
         sg_init_table(&ctx->sgl, 1);
-        ctx->sgl.page_link = ctx->sg->page_link;
+        sg_assign_page(&ctx->sgl, sg_page(ctx->sg));
         ctx->sgl.offset = ctx->sg->offset;
         sg_dma_len(&ctx->sgl) = len32;
         sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 5c5df1d17f90..be2f5049256a 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -296,7 +296,7 @@ static int qce_ahash_update(struct ahash_request *req)
         if (rctx->buflen) {
                 sg_init_table(rctx->sg, 2);
                 sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
-                scatterwalk_sg_chain(rctx->sg, 2, req->src);
+                sg_chain(rctx->sg, 2, req->src);
                 req->src = rctx->sg;
         }
 
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 1c19e44c3146..820dc3acb28c 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -999,7 +999,7 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
                 sg_init_table(rctx->in_sg_chain, 2);
                 sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
 
-                scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);
+                sg_chain(rctx->in_sg_chain, 2, req->src);
 
                 rctx->total = req->nbytes + rctx->buf_cnt;
                 rctx->in_sg = rctx->in_sg_chain;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index cd774534d987..3b20a1bce703 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1929,7 +1929,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
                 sg_init_table(req_ctx->bufsl, nsg);
                 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
                 if (nsg > 1)
-                        scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
+                        sg_chain(req_ctx->bufsl, 2, areq->src);
                 req_ctx->psrc = req_ctx->bufsl;
         } else
                 req_ctx->psrc = areq->src;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 697291aceea7..c82ebee6c7e5 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2103,7 +2103,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                         sg_res = aligned_nrpages(sg->offset, sg->length);
                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                         sg->dma_length = sg->length;
-                        pteval = page_to_phys(sg_page(sg)) | prot;
+                        pteval = (sg_phys(sg) & PAGE_MASK) | prot;
                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
                 }
 
@@ -3631,7 +3631,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
         for_each_sg(sglist, sg, nelems, i) {
                 BUG_ON(!sg_page(sg));
-                sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+                sg->dma_address = sg_phys(sg);
                 sg->dma_length = sg->length;
         }
         return nelems;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f286090931cc..049df495c274 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1408,7 +1408,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
         min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
         for_each_sg(sg, s, nents, i) {
-                phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+                phys_addr_t phys = sg_phys(s);
 
                 /*
                  * We are mapping on IOMMU page boundaries, so offset within
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 5daf302835b1..6f4323c6d653 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -467,7 +467,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
                         sg_set_buf(__sg, buf + offset, len);
                         offset += len;
                         remain -= len;
-                        (__sg++)->page_link &= ~0x02;
+                        sg_unmark_end(__sg++);
                         sg_len++;
                 } while (remain);
         }
@@ -475,7 +475,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
         list_for_each_entry(req, &packed->list, queuelist) {
                 sg_len += blk_rq_map_sg(mq->queue, req, __sg);
                 __sg = sg + (sg_len - 1);
-                (__sg++)->page_link &= ~0x02;
+                sg_unmark_end(__sg++);
         }
         sg_mark_end(sg + (sg_len - 1));
         return sg_len;
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 0813163f962f..195c41d7bd53 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
 err:
         sg = table->sgl;
         for (i -= 1; i >= 0; i--) {
-                gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+                gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
                               sg->length);
                 sg = sg_next(sg);
         }
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
                                                         DMA_BIDIRECTIONAL);
 
         for_each_sg(table->sgl, sg, table->nents, i) {
-                gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+                gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
                               sg->length);
         }
         chunk_heap->allocated -= allocated_size;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 384cf8894411..47a833f3a145 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -138,16 +138,12 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
                 sg_per_table = (total_sg_needed > max_sg_per_table) ?
                                 max_sg_per_table : total_sg_needed;
 
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
                 /*
                  * Reserve extra element for chain entry
                  */
                 if (sg_per_table < total_sg_needed)
                         chain_entry = 1;
 
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
                 sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
                                 GFP_KERNEL);
                 if (!sg) {
@@ -158,15 +154,11 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
 
                 sg_init_table(sg, sg_per_table + chain_entry);
 
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
                 if (i > 0) {
                         sg_chain(sg_table[i - 1].sg_table,
                                  max_sg_per_table + 1, sg);
                 }
 
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
                 sg_table[i].sg_table = sg;
                 sg_table[i].rd_sg_count = sg_per_table;
                 sg_table[i].page_start_offset = page_offset;
@@ -430,42 +422,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
         prot_sg = &prot_table->sg_table[prot_page -
                                         prot_table->page_start_offset];
 
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-
-        prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
-                                   PAGE_SIZE);
-
-        /*
-         * Allocate temporaly contiguous scatterlist entries if prot pages
-         * straddles multiple scatterlist tables.
-         */
-        if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
-                int i;
-
-                prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
-                if (!prot_sg)
-                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-                need_to_release = true;
-                sg_init_table(prot_sg, prot_npages);
-
-                for (i = 0; i < prot_npages; i++) {
-                        if (prot_page + i > prot_table->page_end_offset) {
-                                prot_table = rd_get_prot_table(dev,
-                                                prot_page + i);
-                                if (!prot_table) {
-                                        kfree(prot_sg);
-                                        return rc;
-                                }
-                                sg_unmark_end(&prot_sg[i - 1]);
-                        }
-                        prot_sg[i] = prot_table->sg_table[prot_page + i -
-                                                prot_table->page_start_offset];
-                }
-        }
-
-#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
-
         if (is_read)
                 rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
                                     prot_sg, prot_offset);
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 96670e7e7c14..35f99b68d037 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -25,14 +25,6 @@
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 
-static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
-                                        struct scatterlist *sg2)
-{
-        sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
-        sg1[num - 1].page_link &= ~0x02;
-        sg1[num - 1].page_link |= 0x01;
-}
-
 static inline void scatterwalk_crypto_chain(struct scatterlist *head,
                                             struct scatterlist *sg,
                                             int chain, int num)
@@ -43,7 +35,7 @@ static inline void scatterwalk_crypto_chain(struct scatterlist *head,
         }
 
         if (sg)
-                scatterwalk_sg_chain(head, num, sg);
+                sg_chain(head, num, sg);
         else
                 sg_mark_end(head);
 }
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 9b1ef0c820a7..556ec1ea2574 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -161,10 +161,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
 static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
                             struct scatterlist *sgl)
 {
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-        BUG();
-#endif
-
         /*
          * offset and length are unused for chain entry. Clear them.
          */
@@ -251,6 +247,11 @@ struct scatterlist *sg_next(struct scatterlist *);
 struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
 void sg_init_table(struct scatterlist *, unsigned int);
 void sg_init_one(struct scatterlist *, const void *, unsigned int);
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+             const off_t skip, const int nb_splits,
+             const size_t *split_sizes,
+             struct scatterlist **out, int *out_mapped_nents,
+             gfp_t gfp_mask);
 
 typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
 typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
diff --git a/lib/Kconfig b/lib/Kconfig
index 3a2ef67db6c7..dc516164415a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -521,6 +521,13 @@ config UCS2_STRING
 
 source "lib/fonts/Kconfig"
 
+config SG_SPLIT
+        def_bool n
+        help
+         Provides a heler to split scatterlists into chunks, each chunk being a
+         scatterlist. This should be selected by a driver or an API which
+         whishes to split a scatterlist amongst multiple DMA channel.
+
 #
 # sg chaining option
 #
diff --git a/lib/Makefile b/lib/Makefile
index 6897b527581a..2ee6ea2e9b08 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -160,6 +160,7 @@ obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
 
 obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
 
+obj-$(CONFIG_SG_SPLIT) += sg_split.o
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index d105a9f56878..bafa9933fa76 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -105,16 +105,12 @@ EXPORT_SYMBOL(sg_nents_for_len);
 **/
 struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
 {
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-        struct scatterlist *ret = &sgl[nents - 1];
-#else
         struct scatterlist *sg, *ret = NULL;
         unsigned int i;
 
         for_each_sg(sgl, sg, nents, i)
                 ret = sg;
 
-#endif
 #ifdef CONFIG_DEBUG_SG
         BUG_ON(sgl[0].sg_magic != SG_MAGIC);
         BUG_ON(!sg_is_last(ret));
diff --git a/lib/sg_split.c b/lib/sg_split.c
new file mode 100644
index 000000000000..b063410c3593
--- /dev/null
+++ b/lib/sg_split.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * Scatterlist splitting helpers.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+struct sg_splitter {
+        struct scatterlist *in_sg0;
+        int nents;
+        off_t skip_sg0;
+        unsigned int length_last_sg;
+
+        struct scatterlist *out_sg;
+};
+
+static int sg_calculate_split(struct scatterlist *in, int nents, int nb_splits,
+                              off_t skip, const size_t *sizes,
+                              struct sg_splitter *splitters, bool mapped)
+{
+        int i;
+        unsigned int sglen;
+        size_t size = sizes[0], len;
+        struct sg_splitter *curr = splitters;
+        struct scatterlist *sg;
+
+        for (i = 0; i < nb_splits; i++) {
+                splitters[i].in_sg0 = NULL;
+                splitters[i].nents = 0;
+        }
+
+        for_each_sg(in, sg, nents, i) {
+                sglen = mapped ? sg_dma_len(sg) : sg->length;
+                if (skip > sglen) {
+                        skip -= sglen;
+                        continue;
+                }
+
+                len = min_t(size_t, size, sglen - skip);
+                if (!curr->in_sg0) {
+                        curr->in_sg0 = sg;
+                        curr->skip_sg0 = skip;
+                }
+                size -= len;
+                curr->nents++;
+                curr->length_last_sg = len;
+
+                while (!size && (skip + len < sglen) && (--nb_splits > 0)) {
+                        curr++;
+                        size = *(++sizes);
+                        skip += len;
+                        len = min_t(size_t, size, sglen - skip);
+
+                        curr->in_sg0 = sg;
+                        curr->skip_sg0 = skip;
+                        curr->nents = 1;
+                        curr->length_last_sg = len;
+                        size -= len;
+                }
+                skip = 0;
+
+                if (!size && --nb_splits > 0) {
+                        curr++;
+                        size = *(++sizes);
+                }
+
+                if (!nb_splits)
+                        break;
+        }
+
+        return (size || !splitters[0].in_sg0) ? -EINVAL : 0;
+}
+
+static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
+{
+        int i, j;
+        struct scatterlist *in_sg, *out_sg;
+        struct sg_splitter *split;
+
+        for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+                in_sg = split->in_sg0;
+                out_sg = split->out_sg;
+                for (j = 0; j < split->nents; j++, out_sg++) {
+                        *out_sg = *in_sg;
+                        if (!j) {
+                                out_sg->offset += split->skip_sg0;
+                                out_sg->length -= split->skip_sg0;
+                        } else {
+                                out_sg->offset = 0;
+                        }
+                        sg_dma_address(out_sg) = 0;
+                        sg_dma_len(out_sg) = 0;
+                        in_sg = sg_next(in_sg);
+                }
+                out_sg[-1].length = split->length_last_sg;
+                sg_mark_end(out_sg - 1);
+        }
+}
+
+static void sg_split_mapped(struct sg_splitter *splitters, const int nb_splits)
+{
+        int i, j;
+        struct scatterlist *in_sg, *out_sg;
+        struct sg_splitter *split;
+
+        for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+                in_sg = split->in_sg0;
+                out_sg = split->out_sg;
+                for (j = 0; j < split->nents; j++, out_sg++) {
+                        sg_dma_address(out_sg) = sg_dma_address(in_sg);
+                        sg_dma_len(out_sg) = sg_dma_len(in_sg);
+                        if (!j) {
+                                sg_dma_address(out_sg) += split->skip_sg0;
+                                sg_dma_len(out_sg) -= split->skip_sg0;
+                        }
+                        in_sg = sg_next(in_sg);
+                }
+                sg_dma_len(--out_sg) = split->length_last_sg;
+        }
+}
+
+/**
+ * sg_split - split a scatterlist into several scatterlists
+ * @in: the input sg list
+ * @in_mapped_nents: the result of a dma_map_sg(in, ...), or 0 if not mapped.
+ * @skip: the number of bytes to skip in the input sg list
+ * @nb_splits: the number of desired sg outputs
+ * @split_sizes: the respective size of each output sg list in bytes
+ * @out: an array where to store the allocated output sg lists
+ * @out_mapped_nents: the resulting sg lists mapped number of sg entries. Might
+ *                    be NULL if sglist not already mapped (in_mapped_nents = 0)
+ * @gfp_mask: the allocation flag
+ *
+ * This function splits the input sg list into nb_splits sg lists, which are
+ * allocated and stored into out.
+ * The @in is split into :
+ *  - @out[0], which covers bytes [@skip .. @skip + @split_sizes[0] - 1] of @in
+ *  - @out[1], which covers bytes [@skip + split_sizes[0] ..
+ *             @skip + @split_sizes[0] + @split_sizes[1] -1]
+ * etc ...
+ * It will be the caller's duty to kfree() out array members.
+ *
+ * Returns 0 upon success, or error code
+ */
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+             const off_t skip, const int nb_splits,
+             const size_t *split_sizes,
+             struct scatterlist **out, int *out_mapped_nents,
+             gfp_t gfp_mask)
+{
+        int i, ret;
+        struct sg_splitter *splitters;
+
+        splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
+        if (!splitters)
+                return -ENOMEM;
+
+        ret = sg_calculate_split(in, sg_nents(in), nb_splits, skip, split_sizes,
+                                 splitters, false);
+        if (ret < 0)
+                goto err;
+
+        ret = -ENOMEM;
+        for (i = 0; i < nb_splits; i++) {
+                splitters[i].out_sg = kmalloc_array(splitters[i].nents,
+                                                    sizeof(struct scatterlist),
+                                                    gfp_mask);
+                if (!splitters[i].out_sg)
+                        goto err;
+        }
+
+        /*
+         * The order of these 3 calls is important and should be kept.
+         */
+        sg_split_phys(splitters, nb_splits);
+        ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
+                                 split_sizes, splitters, true);
+        if (ret < 0)
+                goto err;
+        sg_split_mapped(splitters, nb_splits);
+
+        for (i = 0; i < nb_splits; i++) {
+                out[i] = splitters[i].out_sg;
+                if (out_mapped_nents)
+                        out_mapped_nents[i] = splitters[i].nents;
+        }
+
+        kfree(splitters);
+        return 0;
+
+err:
+        for (i = 0; i < nb_splits; i++)
+                kfree(splitters[i].out_sg);
+        kfree(splitters);
+        return ret;
+}
+EXPORT_SYMBOL(sg_split);
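Going by the sg_split() kernel-doc above, a driver that selects CONFIG_SG_SPLIT could split one mapped request across two consumers roughly as follows. This is only a sketch under assumptions (two outputs, caller-chosen byte counts, simplified error handling); the example_* function, its parameters, and the surrounding device handling are hypothetical and not part of the patch.

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    /* Hypothetical caller: split one DMA-mapped sg list across two consumers. */
    static int example_split_two_ways(struct device *dev, struct scatterlist *sgl,
                                      int nents, size_t first_bytes,
                                      size_t second_bytes)
    {
            size_t sizes[2] = { first_bytes, second_bytes };
            struct scatterlist *out[2];
            int out_nents[2];
            int mapped, ret, i;

            mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
            if (!mapped)
                    return -ENOMEM;

            /* skip = 0; assumes first_bytes + second_bytes fits in the mapping */
            ret = sg_split(sgl, mapped, 0, 2, sizes, out, out_nents, GFP_KERNEL);
            if (ret)
                    goto unmap;

            /* ... hand out[0]/out[1] to the two DMA channels here ... */

            for (i = 0; i < 2; i++)
                    kfree(out[i]);          /* caller owns the split tables */
    unmap:
            dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
            return ret;
    }

As the kernel-doc states, sg_split() allocates the output tables with the given gfp_mask, and it is the caller's responsibility to kfree() each out[] member once the split lists are no longer needed.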