author | Dave Jiang <dave.jiang@intel.com> | 2016-02-10 15:00:21 -0700 |
---|---|---|
committer | Vinod Koul <vinod.koul@intel.com> | 2016-02-15 23:06:53 +0530 |
commit | 679cfbf79b4eb7d7d81195e6b9ab98106fd78a54 (patch) | |
tree | 32955ecf1140219e6739bf9cfb5a046440dd35ac /drivers/dma/ioat/init.c | |
parent | 92e963f50fc74041b5e9e744c330dca48e04f08d (diff) | |
download | linux-679cfbf79b4eb7d7d81195e6b9ab98106fd78a54.tar.bz2 | |
dmaengine: IOATDMA: Convert pci_pool_* to dma_pool_*
Convert the old pci_pool_* calls to the "new" dma_pool_* API to make everything
uniform.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
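For context, the pci_pool_* calls removed here were thin compatibility wrappers over the dma_pool_* API, which is why the conversion only needs to swap the struct pci_dev argument for its embedded struct device. The sketch below is a rough paraphrase of those legacy macros as they appeared in include/linux/pci.h of that era (reconstructed from memory, not part of this patch):

```c
/*
 * Rough paraphrase of the legacy pci_pool_* compatibility macros
 * (assumed wording, not taken from this patch): each one forwards
 * to the corresponding dma_pool_* call, with pci_pool_create()
 * translating the pci_dev pointer into its embedded struct device.
 */
#define pci_pool dma_pool
#define pci_pool_create(name, pdev, size, align, allocation) \
	dma_pool_create(name, &(pdev)->dev, size, align, allocation)
#define pci_pool_destroy(pool)              dma_pool_destroy(pool)
#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
#define pci_pool_free(pool, vaddr, addr)    dma_pool_free(pool, vaddr, addr)
```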
Diffstat (limited to 'drivers/dma/ioat/init.c')
-rw-r--r-- | drivers/dma/ioat/init.c | 20 |
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 4ef0c5e07912..b02b63b719db 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -505,7 +505,7 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
 	struct device *dev = &pdev->dev;
 
 	/* DMA coherent memory pool for DMA descriptor allocations */
-	ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
+	ioat_dma->dma_pool = dma_pool_create("dma_desc_pool", dev,
 					     sizeof(struct ioat_dma_descriptor),
 					     64, 0);
 	if (!ioat_dma->dma_pool) {
@@ -513,7 +513,7 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
 		goto err_dma_pool;
 	}
 
-	ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
+	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
 						    sizeof(u64),
 						    SMP_CACHE_BYTES,
 						    SMP_CACHE_BYTES);
@@ -546,9 +546,9 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
 err_self_test:
 	ioat_disable_interrupts(ioat_dma);
 err_setup_interrupts:
-	pci_pool_destroy(ioat_dma->completion_pool);
+	dma_pool_destroy(ioat_dma->completion_pool);
 err_completion_pool:
-	pci_pool_destroy(ioat_dma->dma_pool);
+	dma_pool_destroy(ioat_dma->dma_pool);
 err_dma_pool:
 	return err;
 }
@@ -559,8 +559,8 @@ static int ioat_register(struct ioatdma_device *ioat_dma)
 
 	if (err) {
 		ioat_disable_interrupts(ioat_dma);
-		pci_pool_destroy(ioat_dma->completion_pool);
-		pci_pool_destroy(ioat_dma->dma_pool);
+		dma_pool_destroy(ioat_dma->completion_pool);
+		dma_pool_destroy(ioat_dma->dma_pool);
 	}
 
 	return err;
@@ -576,8 +576,8 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
 
 	dma_async_device_unregister(dma);
 
-	pci_pool_destroy(ioat_dma->dma_pool);
-	pci_pool_destroy(ioat_dma->completion_pool);
+	dma_pool_destroy(ioat_dma->dma_pool);
+	dma_pool_destroy(ioat_dma->completion_pool);
 
 	INIT_LIST_HEAD(&dma->channels);
 }
@@ -669,7 +669,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
 	kfree(ioat_chan->ring);
 	ioat_chan->ring = NULL;
 	ioat_chan->alloc_order = 0;
-	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
+	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
 		      ioat_chan->completion_dma);
 	spin_unlock_bh(&ioat_chan->prep_lock);
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
@@ -701,7 +701,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
 	/* allocate a completion writeback area */
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
 	ioat_chan->completion =
-		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
+		dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
 			       GFP_KERNEL, &ioat_chan->completion_dma);
 	if (!ioat_chan->completion)
 		return -ENOMEM;
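For readers unfamiliar with the API the driver is switching to, below is a minimal, self-contained sketch of the same create/alloc/free/destroy pattern the patch exercises in ioat_probe() and ioat_alloc_chan_resources(). It is written against a hypothetical device; example_setup() and the pool name are illustrative and not part of the ioat driver.

```c
#include <linux/cache.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical helper mirroring the ioat pattern: create a pool of
 * cache-line-aligned u64 completion areas, grab one buffer for the
 * hardware to write into, then tear everything down again. */
static int example_setup(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t handle;
	u64 *completion;

	pool = dma_pool_create("example_completion_pool", dev,
			       sizeof(u64), SMP_CACHE_BYTES, 0);
	if (!pool)
		return -ENOMEM;

	/* CPU uses "completion"; the device is handed "handle" */
	completion = dma_pool_alloc(pool, GFP_KERNEL, &handle);
	if (!completion) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... program the hardware with "handle" and poll *completion ... */

	dma_pool_free(pool, completion, handle);
	dma_pool_destroy(pool);
	return 0;
}
```

The only difference from the pre-patch code is the handle passed at pool creation: dma_pool_create() takes the struct device directly, where pci_pool_create() took the struct pci_dev and dereferenced it internally.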