author    Luis Chamberlain <mcgrof@kernel.org>    2019-01-04 09:23:09 +0100
committer Christoph Hellwig <hch@lst.de>    2019-01-08 07:58:37 -0500
commit    750afb08ca71310fcf0c4e2cb1565c63b8235b60 (patch)
tree      1dde3877eb4a1a0f0349786b66c3d9276ae94561 /drivers
parent    3bd6e94bec122a951d462c239b47954cf5f36e33 (diff)
download  linux-750afb08ca71310fcf0c4e2cb1565c63b8235b60.tar.bz2
cross-tree: phase out dma_zalloc_coherent()
We already need to zero out memory for dma_alloc_coherent(), so using
dma_zalloc_coherent() is superfluous. Phase it out.

This change was generated with the following Coccinelle SmPL patch:

@ replace_dma_zalloc_coherent @
expression dev, size, data, handle, flags;
@@

-dma_zalloc_coherent(dev, size, handle, flags)
+dma_alloc_coherent(dev, size, handle, flags)

Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
[hch: re-ran the script on the latest tree]
Signed-off-by: Christoph Hellwig <hch@lst.de>
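For reference, dma_alloc_coherent() already returns zeroed memory, so the conversion is a one-to-one rename with no behavioural change. Below is a minimal, hypothetical sketch of what a converted allocation looks like; the helper name example_alloc_ring is illustrative only and not taken from any driver in this series:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Hypothetical helper (not from this patch): it only illustrates that a
 * single dma_alloc_coherent() call is enough to obtain a zeroed,
 * DMA-coherent buffer.
 */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma_handle)
{
	/*
	 * dma_alloc_coherent() hands back zeroed memory, so no memset()
	 * and no dma_zalloc_coherent() wrapper are needed here.
	 */
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

The tree-wide rewrite can be reproduced by saving the SmPL rule above to a .cocci file and running Coccinelle over the tree, e.g. "spatch --sp-file replace.cocci --in-place --dir drivers" (a typical invocation, not copied from the commit).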
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/ata/sata_fsl.c | 4
-rw-r--r-- drivers/atm/he.c | 39
-rw-r--r-- drivers/atm/idt77252.c | 16
-rw-r--r-- drivers/block/skd_main.c | 4
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.c | 6
-rw-r--r-- drivers/crypto/cavium/cpt/cptpf_main.c | 4
-rw-r--r-- drivers/crypto/cavium/cpt/cptvf_main.c | 7
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_lib.c | 6
-rw-r--r-- drivers/crypto/ccp/ccp-dev-v5.c | 6
-rw-r--r-- drivers/crypto/hisilicon/sec/sec_algs.c | 4
-rw-r--r-- drivers/crypto/hisilicon/sec/sec_drv.c | 15
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 6
-rw-r--r-- drivers/crypto/mediatek/mtk-platform.c | 16
-rw-r--r-- drivers/crypto/qat/qat_common/adf_admin.c | 12
-rw-r--r-- drivers/crypto/qat/qat_common/qat_algs.c | 24
-rw-r--r-- drivers/crypto/qat/qat_common/qat_asym_algs.c | 68
-rw-r--r-- drivers/dma/imx-sdma.c | 8
-rw-r--r-- drivers/dma/mediatek/mtk-hsdma.c | 4
-rw-r--r-- drivers/dma/mxs-dma.c | 6
-rw-r--r-- drivers/dma/xgene-dma.c | 4
-rw-r--r-- drivers/dma/xilinx/xilinx_dma.c | 14
-rw-r--r-- drivers/dma/xilinx/zynqmp_dma.c | 6
-rw-r--r-- drivers/gpu/drm/drm_pci.c | 5
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 4
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_res.c | 8
-rw-r--r-- drivers/infiniband/hw/cxgb3/cxio_hal.c | 6
-rw-r--r-- drivers/infiniband/hw/cxgb4/qp.c | 5
-rw-r--r-- drivers/infiniband/hw/hfi1/init.c | 29
-rw-r--r-- drivers/infiniband/hw/hfi1/pio.c | 9
-rw-r--r-- drivers/infiniband/hw/hfi1/sdma.c | 27
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_alloc.c | 11
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 10
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_utils.c | 4
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_memfree.c | 5
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 14
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_stats.c | 4
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 6
-rw-r--r-- drivers/infiniband/hw/qedr/verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 4
-rw-r--r-- drivers/input/touchscreen/raspberrypi-ts.c | 4
-rw-r--r-- drivers/iommu/mtk_iommu_v1.c | 5
-rw-r--r-- drivers/media/pci/intel/ipu3/ipu3-cio2.c | 4
-rw-r--r-- drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c | 2
-rw-r--r-- drivers/misc/genwqe/card_utils.c | 4
-rw-r--r-- drivers/mmc/host/sdhci.c | 5
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 12
-rw-r--r-- drivers/net/ethernet/alacritech/slicoss.c | 12
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_com.c | 61
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.c | 8
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 7
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 16
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 22
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/sge.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 68
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 18
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 18
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 14
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 5
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | 16
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 8
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 6
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | 10
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 8
-rw-r--r-- drivers/net/ethernet/ibm/emac/mal.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 8
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 8
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 18
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/alloc.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 10
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 6
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 12
-rw-r--r-- drivers/net/ethernet/ni/nixge.c | 12
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 12
-rw-r--r-- drivers/net/ethernet/pasemi/pasemi_mac.c | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 16
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 28
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 2
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 12
-rw-r--r-- drivers/net/ethernet/sfc/falcon/nic.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/nic.c | 4
-rw-r--r-- drivers/net/ethernet/sgi/meth.c | 4
-rw-r--r-- drivers/net/ethernet/socionext/netsec.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 40
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 8
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_main.c | 12
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 12
-rw-r--r-- drivers/net/fddi/defxx.c | 6
-rw-r--r-- drivers/net/fddi/skfp/skfddi.c | 8
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 8
-rw-r--r-- drivers/net/wan/fsl_ucc_hdlc.c | 7
-rw-r--r-- drivers/net/wireless/ath/ath10k/ce.c | 7
-rw-r--r-- drivers/net/wireless/ath/ath10k/mac.c | 8
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.c | 2
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/dxe.c | 17
-rw-r--r-- drivers/net/wireless/ath/wil6210/txrx_edma.c | 8
-rw-r--r-- drivers/net/wireless/broadcom/b43/dma.c | 6
-rw-r--r-- drivers/net/wireless/broadcom/b43legacy/dma.c | 6
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 16
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 39
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c | 6
-rw-r--r-- drivers/ntb/hw/mscc/ntb_hw_switchtec.c | 8
-rw-r--r-- drivers/nvme/host/pci.c | 8
-rw-r--r-- drivers/pci/controller/pcie-iproc-msi.c | 6
-rw-r--r-- drivers/pci/switch/switchtec.c | 8
-rw-r--r-- drivers/rapidio/devices/tsi721.c | 22
-rw-r--r-- drivers/rapidio/devices/tsi721_dma.c | 8
-rw-r--r-- drivers/s390/net/ism_drv.c | 15
-rw-r--r-- drivers/scsi/3w-sas.c | 5
-rw-r--r-- drivers/scsi/a100u2w.c | 8
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 18
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 4
-rw-r--r-- drivers/scsi/be2iscsi/be_mgmt.c | 11
-rw-r--r-- drivers/scsi/bfa/bfad_bsg.c | 6
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_hwi.c | 49
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_tgt.c | 44
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_hwi.c | 8
-rw-r--r-- drivers/scsi/csiostor/csio_wr.c | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 14
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 6
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 15
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.c | 15
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 26
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 5
-rw-r--r-- drivers/scsi/mesh.c | 5
-rw-r--r-- drivers/scsi/mvumi.c | 9
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.c | 4
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 29
-rw-r--r-- drivers/scsi/qedi/qedi_main.c | 39
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_bsg.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_gs.c | 14
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 8
-rw-r--r-- drivers/scsi/qla4xxx/ql4_init.c | 4
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 18
-rw-r--r-- drivers/scsi/qla4xxx/ql4_nx.c | 4
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 10
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 32
-rw-r--r-- drivers/soc/fsl/qbman/dpaa_sys.c | 2
-rw-r--r-- drivers/spi/spi-pic32-sqi.c | 6
-rw-r--r-- drivers/staging/mt7621-eth/mtk_eth_soc.c | 3
-rw-r--r-- drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c | 6
-rw-r--r-- drivers/staging/vt6655/device_main.c | 19
-rw-r--r-- drivers/usb/gadget/udc/bdc/bdc_core.c | 13
-rw-r--r-- drivers/usb/host/uhci-hcd.c | 6
-rw-r--r-- drivers/usb/host/xhci-mem.c | 8
-rw-r--r-- drivers/video/fbdev/da8xx-fb.c | 6
167 files changed, 902 insertions(+), 937 deletions(-)
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 4dc528bf8e85..9c1247d42897 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -729,8 +729,8 @@ static int sata_fsl_port_start(struct ata_port *ap)
if (!pp)
return -ENOMEM;
- mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
- GFP_KERNEL);
+ mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
+ GFP_KERNEL);
if (!mem) {
kfree(pp);
return -ENOMEM;
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 29f102dcfec4..2e9d1cfe3aeb 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -533,9 +533,10 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)
static int he_init_tpdrq(struct he_dev *he_dev)
{
- he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
- &he_dev->tpdrq_phys, GFP_KERNEL);
+ he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
+ &he_dev->tpdrq_phys,
+ GFP_KERNEL);
if (he_dev->tpdrq_base == NULL) {
hprintk("failed to alloc tpdrq\n");
return -ENOMEM;
@@ -805,9 +806,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
goto out_free_rbpl_virt;
}
- he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
- &he_dev->rbpl_phys, GFP_KERNEL);
+ he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
+ &he_dev->rbpl_phys, GFP_KERNEL);
if (he_dev->rbpl_base == NULL) {
hprintk("failed to alloc rbpl_base\n");
goto out_destroy_rbpl_pool;
@@ -844,9 +845,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
/* rx buffer ready queue */
- he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
- &he_dev->rbrq_phys, GFP_KERNEL);
+ he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+ &he_dev->rbrq_phys, GFP_KERNEL);
if (he_dev->rbrq_base == NULL) {
hprintk("failed to allocate rbrq\n");
goto out_free_rbpl;
@@ -868,9 +869,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
/* tx buffer ready queue */
- he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
- &he_dev->tbrq_phys, GFP_KERNEL);
+ he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+ &he_dev->tbrq_phys, GFP_KERNEL);
if (he_dev->tbrq_base == NULL) {
hprintk("failed to allocate tbrq\n");
goto out_free_rbpq_base;
@@ -913,11 +914,9 @@ static int he_init_irq(struct he_dev *he_dev)
/* 2.9.3.5 tail offset for each interrupt queue is located after the
end of the interrupt queue */
- he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- (CONFIG_IRQ_SIZE + 1)
- * sizeof(struct he_irq),
- &he_dev->irq_phys,
- GFP_KERNEL);
+ he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
+ &he_dev->irq_phys, GFP_KERNEL);
if (he_dev->irq_base == NULL) {
hprintk("failed to allocate irq\n");
return -ENOMEM;
@@ -1464,9 +1463,9 @@ static int he_start(struct atm_dev *dev)
/* host status page */
- he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- sizeof(struct he_hsp),
- &he_dev->hsp_phys, GFP_KERNEL);
+ he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ sizeof(struct he_hsp),
+ &he_dev->hsp_phys, GFP_KERNEL);
if (he_dev->hsp == NULL) {
hprintk("failed to allocate host status page\n");
return -ENOMEM;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 6e737142ceaa..43a14579e80e 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -641,8 +641,8 @@ alloc_scq(struct idt77252_dev *card, int class)
scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
if (!scq)
return NULL;
- scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE,
- &scq->paddr, GFP_KERNEL);
+ scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE,
+ &scq->paddr, GFP_KERNEL);
if (scq->base == NULL) {
kfree(scq);
return NULL;
@@ -971,8 +971,8 @@ init_rsq(struct idt77252_dev *card)
{
struct rsq_entry *rsqe;
- card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE,
- &card->rsq.paddr, GFP_KERNEL);
+ card->rsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE,
+ &card->rsq.paddr, GFP_KERNEL);
if (card->rsq.base == NULL) {
printk("%s: can't allocate RSQ.\n", card->name);
return -1;
@@ -3390,10 +3390,10 @@ static int init_card(struct atm_dev *dev)
writel(0, SAR_REG_GP);
/* Initialize RAW Cell Handle Register */
- card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev,
- 2 * sizeof(u32),
- &card->raw_cell_paddr,
- GFP_KERNEL);
+ card->raw_cell_hnd = dma_alloc_coherent(&card->pcidev->dev,
+ 2 * sizeof(u32),
+ &card->raw_cell_paddr,
+ GFP_KERNEL);
if (!card->raw_cell_hnd) {
printk("%s: memory allocation failure.\n", card->name);
deinit_card(card);
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index a10d5736d8f7..ab893a7571a2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2641,8 +2641,8 @@ static int skd_cons_skcomp(struct skd_device *skdev)
"comp pci_alloc, total bytes %zd entries %d\n",
SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
- skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
- &skdev->cq_dma_address, GFP_KERNEL);
+ skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
+ &skdev->cq_dma_address, GFP_KERNEL);
if (skcomp == NULL) {
rc = -ENOMEM;
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 63cb6956c948..acf79889d903 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -283,9 +283,9 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
*/
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
- dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
- sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ dev->gdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ &dev->gdr_pa, GFP_ATOMIC);
if (!dev->gdr)
return -ENOMEM;
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 06ad85ab5e86..a876535529d1 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -278,8 +278,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
mcode->num_cores = is_ae ? 6 : 10;
/* Allocate DMAable space */
- mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size,
- &mcode->phys_base, GFP_KERNEL);
+ mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
+ &mcode->phys_base, GFP_KERNEL);
if (!mcode->code) {
dev_err(dev, "Unable to allocate space for microcode");
ret = -ENOMEM;
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 5c796ed55eba..2ca431ed1db8 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -236,9 +236,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf,
c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
rem_q_size;
- curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev,
- c_size + CPT_NEXT_CHUNK_PTR_SIZE,
- &curr->dma_addr, GFP_KERNEL);
+ curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
+ c_size + CPT_NEXT_CHUNK_PTR_SIZE,
+ &curr->dma_addr,
+ GFP_KERNEL);
if (!curr->head) {
dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
i, queue->nchunks);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 9138bae12521..4ace9bcd603a 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -25,9 +25,9 @@ static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
struct nitrox_device *ndev = cmdq->ndev;
cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
- cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
- &cmdq->unalign_dma,
- GFP_KERNEL);
+ cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
+ &cmdq->unalign_dma,
+ GFP_KERNEL);
if (!cmdq->unalign_base)
return -ENOMEM;
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 44a4d2779b15..c9bfd4f439ce 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -822,9 +822,9 @@ static int ccp5_init(struct ccp_device *ccp)
/* Page alignment satisfies our needs for N <= 128 */
BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
- cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
- &cmd_q->qbase_dma,
- GFP_KERNEL);
+ cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
+ &cmd_q->qbase_dma,
+ GFP_KERNEL);
if (!cmd_q->qbase) {
dev_err(dev, "unable to allocate command queue\n");
ret = -ENOMEM;
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index cdc4f9a171d9..adc0cd8ae97b 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -241,8 +241,8 @@ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
} else {
/* new key */
- ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
- &ctx->pkey, GFP_KERNEL);
+ ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
+ &ctx->pkey, GFP_KERNEL);
if (!ctx->key) {
mutex_unlock(&ctx->lock);
return -ENOMEM;
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index c1ee4e7bf996..91ee2bb575df 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1082,9 +1082,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
struct sec_queue_ring_db *ring_db = &queue->ring_db;
int ret;
- ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE,
- &ring_cmd->paddr,
- GFP_KERNEL);
+ ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
+ &ring_cmd->paddr, GFP_KERNEL);
if (!ring_cmd->vaddr)
return -ENOMEM;
@@ -1092,17 +1091,15 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
mutex_init(&ring_cmd->lock);
ring_cmd->callback = sec_alg_callback;
- ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE,
- &ring_cq->paddr,
- GFP_KERNEL);
+ ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
+ &ring_cq->paddr, GFP_KERNEL);
if (!ring_cq->vaddr) {
ret = -ENOMEM;
goto err_free_ring_cmd;
}
- ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE,
- &ring_db->paddr,
- GFP_KERNEL);
+ ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
+ &ring_db->paddr, GFP_KERNEL);
if (!ring_db->vaddr) {
ret = -ENOMEM;
goto err_free_ring_cq;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 19fba998b86b..1b0d156bb9be 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -260,9 +260,9 @@ static int setup_crypt_desc(void)
{
struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
- crypt_virt = dma_zalloc_coherent(dev,
- NPE_QLEN * sizeof(struct crypt_ctl),
- &crypt_phys, GFP_ATOMIC);
+ crypt_virt = dma_alloc_coherent(dev,
+ NPE_QLEN * sizeof(struct crypt_ctl),
+ &crypt_phys, GFP_ATOMIC);
if (!crypt_virt)
return -ENOMEM;
return 0;
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index ee0404e27a0f..5660e5e5e022 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -453,17 +453,17 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
if (!ring[i])
goto err_cleanup;
- ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev,
- MTK_DESC_RING_SZ,
- &ring[i]->cmd_dma,
- GFP_KERNEL);
+ ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
+ MTK_DESC_RING_SZ,
+ &ring[i]->cmd_dma,
+ GFP_KERNEL);
if (!ring[i]->cmd_base)
goto err_cleanup;
- ring[i]->res_base = dma_zalloc_coherent(cryp->dev,
- MTK_DESC_RING_SZ,
- &ring[i]->res_dma,
- GFP_KERNEL);
+ ring[i]->res_base = dma_alloc_coherent(cryp->dev,
+ MTK_DESC_RING_SZ,
+ &ring[i]->res_dma,
+ GFP_KERNEL);
if (!ring[i]->res_base)
goto err_cleanup;
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index 3744b22f0c46..d28cba34773e 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -244,18 +244,18 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
dev_to_node(&GET_DEV(accel_dev)));
if (!admin)
return -ENOMEM;
- admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
- &admin->phy_addr, GFP_KERNEL);
+ admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ &admin->phy_addr, GFP_KERNEL);
if (!admin->virt_addr) {
dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
kfree(admin);
return -ENOMEM;
}
- admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev),
- PAGE_SIZE,
- &admin->const_tbl_addr,
- GFP_KERNEL);
+ admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+ PAGE_SIZE,
+ &admin->const_tbl_addr,
+ GFP_KERNEL);
if (!admin->virt_tbl_addr) {
dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index d2698299896f..975c75198f56 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -601,15 +601,15 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
dev = &GET_DEV(inst->accel_dev);
ctx->inst = inst;
- ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
- &ctx->enc_cd_paddr,
- GFP_ATOMIC);
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
if (!ctx->enc_cd) {
return -ENOMEM;
}
- ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
- &ctx->dec_cd_paddr,
- GFP_ATOMIC);
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
if (!ctx->dec_cd) {
goto out_free_enc;
}
@@ -933,16 +933,16 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
dev = &GET_DEV(inst->accel_dev);
ctx->inst = inst;
- ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
- &ctx->enc_cd_paddr,
- GFP_ATOMIC);
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
if (!ctx->enc_cd) {
spin_unlock(&ctx->lock);
return -ENOMEM;
}
- ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
- &ctx->dec_cd_paddr,
- GFP_ATOMIC);
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
if (!ctx->dec_cd) {
spin_unlock(&ctx->lock);
goto out_free_enc;
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 320e7854b4ee..c9f324730d71 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -332,10 +332,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
} else {
int shift = ctx->p_size - req->src_len;
- qat_req->src_align = dma_zalloc_coherent(dev,
- ctx->p_size,
- &qat_req->in.dh.in.b,
- GFP_KERNEL);
+ qat_req->src_align = dma_alloc_coherent(dev,
+ ctx->p_size,
+ &qat_req->in.dh.in.b,
+ GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
@@ -360,9 +360,9 @@ static int qat_dh_compute_value(struct kpp_request *req)
goto unmap_src;
} else {
- qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
- &qat_req->out.dh.r,
- GFP_KERNEL);
+ qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
+ &qat_req->out.dh.r,
+ GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
}
@@ -447,7 +447,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
return -EINVAL;
ctx->p_size = params->p_size;
- ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+ ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
if (!ctx->p)
return -ENOMEM;
memcpy(ctx->p, params->p, ctx->p_size);
@@ -458,7 +458,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
return 0;
}
- ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+ ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
if (!ctx->g)
return -ENOMEM;
memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
@@ -503,8 +503,8 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
if (ret < 0)
goto err_clear_ctx;
- ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
- GFP_KERNEL);
+ ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+ GFP_KERNEL);
if (!ctx->xa) {
ret = -ENOMEM;
goto err_clear_ctx;
@@ -737,9 +737,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
} else {
int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->in.rsa.enc.m,
- GFP_KERNEL);
+ qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+ &qat_req->in.rsa.enc.m,
+ GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
@@ -756,9 +756,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
goto unmap_src;
} else {
- qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->out.rsa.enc.c,
- GFP_KERNEL);
+ qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+ &qat_req->out.rsa.enc.c,
+ GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
@@ -881,9 +881,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
} else {
int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->in.rsa.dec.c,
- GFP_KERNEL);
+ qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+ &qat_req->in.rsa.dec.c,
+ GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
@@ -900,9 +900,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
goto unmap_src;
} else {
- qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->out.rsa.dec.m,
- GFP_KERNEL);
+ qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+ &qat_req->out.rsa.dec.m,
+ GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
@@ -989,7 +989,7 @@ static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
goto err;
ret = -ENOMEM;
- ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+ ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
if (!ctx->n)
goto err;
@@ -1018,7 +1018,7 @@ static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
return -EINVAL;
}
- ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+ ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
if (!ctx->e)
return -ENOMEM;
@@ -1044,7 +1044,7 @@ static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
goto err;
ret = -ENOMEM;
- ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+ ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
if (!ctx->d)
goto err;
@@ -1077,7 +1077,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto err;
- ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+ ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
if (!ctx->p)
goto err;
memcpy(ctx->p + (half_key_sz - len), ptr, len);
@@ -1088,7 +1088,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_p;
- ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+ ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
if (!ctx->q)
goto free_p;
memcpy(ctx->q + (half_key_sz - len), ptr, len);
@@ -1099,8 +1099,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_q;
- ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
- GFP_KERNEL);
+ ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+ GFP_KERNEL);
if (!ctx->dp)
goto free_q;
memcpy(ctx->dp + (half_key_sz - len), ptr, len);
@@ -1111,8 +1111,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_dp;
- ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
- GFP_KERNEL);
+ ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+ GFP_KERNEL);
if (!ctx->dq)
goto free_dp;
memcpy(ctx->dq + (half_key_sz - len), ptr, len);
@@ -1123,8 +1123,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_dq;
- ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
- GFP_KERNEL);
+ ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+ GFP_KERNEL);
if (!ctx->qinv)
goto free_dq;
memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a2b0a0e71168..86708fb9bda1 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1182,8 +1182,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
{
int ret = -EBUSY;
- sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
- GFP_NOWAIT);
+ sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+ GFP_NOWAIT);
if (!sdma->bd0) {
ret = -ENOMEM;
goto out;
@@ -1205,8 +1205,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
int ret = 0;
- desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
- GFP_NOWAIT);
+ desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys,
+ GFP_NOWAIT);
if (!desc->bd) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index b7ec56ae02a6..1a2028e1c29e 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -325,8 +325,8 @@ static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
* and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
*/
pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
- ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
- &ring->tphys, GFP_NOWAIT);
+ ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
+ &ring->tphys, GFP_NOWAIT);
if (!ring->txd)
return -ENOMEM;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 35193b31a9e0..22cc7f68ef6e 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -416,9 +416,9 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int ret;
- mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
- CCW_BLOCK_SIZE,
- &mxs_chan->ccw_phys, GFP_KERNEL);
+ mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
+ CCW_BLOCK_SIZE,
+ &mxs_chan->ccw_phys, GFP_KERNEL);
if (!mxs_chan->ccw) {
ret = -ENOMEM;
goto err_alloc;
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 1d5988849aa6..eafd6c4b90fe 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1208,8 +1208,8 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
ring->size = ret;
/* Allocate memory for DMA ring descriptor */
- ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
- &ring->desc_paddr, GFP_KERNEL);
+ ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
+ &ring->desc_paddr, GFP_KERNEL);
if (!ring->desc_vaddr) {
chan_err(chan, "Failed to allocate ring desc\n");
return -ENOMEM;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 02880963092f..cb20b411493e 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -879,10 +879,9 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
*/
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
/* Allocate the buffer descriptors. */
- chan->seg_v = dma_zalloc_coherent(chan->dev,
- sizeof(*chan->seg_v) *
- XILINX_DMA_NUM_DESCS,
- &chan->seg_p, GFP_KERNEL);
+ chan->seg_v = dma_alloc_coherent(chan->dev,
+ sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
+ &chan->seg_p, GFP_KERNEL);
if (!chan->seg_v) {
dev_err(chan->dev,
"unable to allocate channel %d descriptors\n",
@@ -895,9 +894,10 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
* so allocating a desc segment during channel allocation for
* programming tail descriptor.
*/
- chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
- sizeof(*chan->cyclic_seg_v),
- &chan->cyclic_seg_p, GFP_KERNEL);
+ chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
+ sizeof(*chan->cyclic_seg_v),
+ &chan->cyclic_seg_p,
+ GFP_KERNEL);
if (!chan->cyclic_seg_v) {
dev_err(chan->dev,
"unable to allocate desc segment for cyclic DMA\n");
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 8db51750ce93..4478787a247f 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -490,9 +490,9 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
list_add_tail(&desc->node, &chan->free_list);
}
- chan->desc_pool_v = dma_zalloc_coherent(chan->dev,
- (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
- &chan->desc_pool_p, GFP_KERNEL);
+ chan->desc_pool_v = dma_alloc_coherent(chan->dev,
+ (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+ &chan->desc_pool_p, GFP_KERNEL);
if (!chan->desc_pool_v)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index a9d9df6c85ad..693748ad8b88 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -61,8 +61,9 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
return NULL;
dmah->size = size;
- dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
- GFP_KERNEL | __GFP_COMP);
+ dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
+ &dmah->busaddr,
+ GFP_KERNEL | __GFP_COMP);
if (dmah->vaddr == NULL) {
kfree(dmah);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 326805461265..19551aa43850 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -766,8 +766,8 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
return NULL;
sbuf->size = size;
- sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
- &sbuf->dma_addr, GFP_ATOMIC);
+ sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
+ &sbuf->dma_addr, GFP_ATOMIC);
if (!sbuf->sb)
goto bail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 59eeac55626f..57d4951679cb 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -105,10 +105,10 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
if (!sghead) {
for (i = 0; i < pages; i++) {
- pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
- pbl->pg_size,
- &pbl->pg_map_arr[i],
- GFP_KERNEL);
+ pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ pbl->pg_size,
+ &pbl->pg_map_arr[i],
+ GFP_KERNEL);
if (!pbl->pg_arr[i])
goto fail;
pbl->pg_count++;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index df4f7a3f043d..8ac72ac7cbac 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -291,9 +291,9 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
if (!wq->sq)
goto err3;
- wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev),
- depth * sizeof(union t3_wr),
- &(wq->dma_addr), GFP_KERNEL);
+ wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+ depth * sizeof(union t3_wr),
+ &(wq->dma_addr), GFP_KERNEL);
if (!wq->queue)
goto err4;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 981ff5cfb5d1..504cf525508f 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2564,9 +2564,8 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
T4_RQT_ENTRY_SHIFT;
- wq->queue = dma_zalloc_coherent(&rdev->lldi.pdev->dev,
- wq->memsize, &wq->dma_addr,
- GFP_KERNEL);
+ wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
+ &wq->dma_addr, GFP_KERNEL);
if (!wq->queue)
goto err_free_rqtpool;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 09044905284f..7835eb52e7c5 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -899,10 +899,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
goto done;
/* allocate dummy tail memory for all receive contexts */
- dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
- &dd->pcidev->dev, sizeof(u64),
- &dd->rcvhdrtail_dummy_dma,
- GFP_KERNEL);
+ dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+ sizeof(u64),
+ &dd->rcvhdrtail_dummy_dma,
+ GFP_KERNEL);
if (!dd->rcvhdrtail_dummy_kvaddr) {
dd_dev_err(dd, "cannot allocate dummy tail memory\n");
@@ -1863,9 +1863,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
gfp_flags = GFP_KERNEL;
else
gfp_flags = GFP_USER;
- rcd->rcvhdrq = dma_zalloc_coherent(
- &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
- gfp_flags | __GFP_COMP);
+ rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
+ &rcd->rcvhdrq_dma,
+ gfp_flags | __GFP_COMP);
if (!rcd->rcvhdrq) {
dd_dev_err(dd,
@@ -1876,9 +1876,10 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
- rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE,
- &rcd->rcvhdrqtailaddr_dma, gfp_flags);
+ rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+ PAGE_SIZE,
+ &rcd->rcvhdrqtailaddr_dma,
+ gfp_flags);
if (!rcd->rcvhdrtail_kvaddr)
goto bail_free;
}
@@ -1974,10 +1975,10 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
while (alloced_bytes < rcd->egrbufs.size &&
rcd->egrbufs.alloced < rcd->egrbufs.count) {
rcd->egrbufs.buffers[idx].addr =
- dma_zalloc_coherent(&dd->pcidev->dev,
- rcd->egrbufs.rcvtid_size,
- &rcd->egrbufs.buffers[idx].dma,
- gfp_flags);
+ dma_alloc_coherent(&dd->pcidev->dev,
+ rcd->egrbufs.rcvtid_size,
+ &rcd->egrbufs.buffers[idx].dma,
+ gfp_flags);
if (rcd->egrbufs.buffers[idx].addr) {
rcd->egrbufs.buffers[idx].len =
rcd->egrbufs.rcvtid_size;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index dd5a5c030066..04126d7e318d 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -2098,11 +2098,10 @@ int init_credit_return(struct hfi1_devdata *dd)
int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
set_dev_node(&dd->pcidev->dev, i);
- dd->cr_base[i].va = dma_zalloc_coherent(
- &dd->pcidev->dev,
- bytes,
- &dd->cr_base[i].dma,
- GFP_KERNEL);
+ dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
+ bytes,
+ &dd->cr_base[i].dma,
+ GFP_KERNEL);
if (!dd->cr_base[i].va) {
set_dev_node(&dd->pcidev->dev, dd->node);
dd_dev_err(dd,
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index b84356e1a4c1..96897a91fb0a 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1453,12 +1453,9 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
timer_setup(&sde->err_progress_check_timer,
sdma_err_progress_check, 0);
- sde->descq = dma_zalloc_coherent(
- &dd->pcidev->dev,
- descq_cnt * sizeof(u64[2]),
- &sde->descq_phys,
- GFP_KERNEL
- );
+ sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
+ descq_cnt * sizeof(u64[2]),
+ &sde->descq_phys, GFP_KERNEL);
if (!sde->descq)
goto bail;
sde->tx_ring =
@@ -1471,24 +1468,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
/* Allocate memory for DMA of head registers to memory */
- dd->sdma_heads_dma = dma_zalloc_coherent(
- &dd->pcidev->dev,
- dd->sdma_heads_size,
- &dd->sdma_heads_phys,
- GFP_KERNEL
- );
+ dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
+ dd->sdma_heads_size,
+ &dd->sdma_heads_phys,
+ GFP_KERNEL);
if (!dd->sdma_heads_dma) {
dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
goto bail;
}
/* Allocate memory for pad */
- dd->sdma_pad_dma = dma_zalloc_coherent(
- &dd->pcidev->dev,
- sizeof(u32),
- &dd->sdma_pad_phys,
- GFP_KERNEL
- );
+ dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+ &dd->sdma_pad_phys, GFP_KERNEL);
if (!dd->sdma_pad_dma) {
dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
goto bail;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 6300033a448f..dac058d3df53 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -197,8 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
buf->npages = 1 << order;
buf->page_shift = page_shift;
/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
- buf->direct.buf = dma_zalloc_coherent(dev,
- size, &t, GFP_KERNEL);
+ buf->direct.buf = dma_alloc_coherent(dev, size, &t,
+ GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
@@ -219,9 +219,10 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i) {
- buf->page_list[i].buf = dma_zalloc_coherent(dev,
- page_size, &t,
- GFP_KERNEL);
+ buf->page_list[i].buf = dma_alloc_coherent(dev,
+ page_size,
+ &t,
+ GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 3a669451cf86..543fa1504cd3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5091,7 +5091,7 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
size = (eq->entries - eqe_alloc) * eq->eqe_size;
}
- eq->buf[i] = dma_zalloc_coherent(dev, size,
+ eq->buf[i] = dma_alloc_coherent(dev, size,
&(eq->buf_dma[i]),
GFP_KERNEL);
if (!eq->buf[i])
@@ -5126,9 +5126,9 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
size = (eq->entries - eqe_alloc)
* eq->eqe_size;
}
- eq->buf[idx] = dma_zalloc_coherent(dev, size,
- &(eq->buf_dma[idx]),
- GFP_KERNEL);
+ eq->buf[idx] = dma_alloc_coherent(dev, size,
+ &(eq->buf_dma[idx]),
+ GFP_KERNEL);
if (!eq->buf[idx])
goto err_dma_alloc_buf;
@@ -5241,7 +5241,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
goto free_cmd_mbox;
}
- eq->buf_list->buf = dma_zalloc_coherent(dev, buf_chk_sz,
+ eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
&(eq->buf_list->map),
GFP_KERNEL);
if (!eq->buf_list->buf) {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index a9ea966877f2..59e978141ad4 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -745,8 +745,8 @@ enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
if (!mem)
return I40IW_ERR_PARAM;
mem->size = ALIGN(size, alignment);
- mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size,
- (dma_addr_t *)&mem->pa, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&pcidev->dev, mem->size,
+ (dma_addr_t *)&mem->pa, GFP_KERNEL);
if (!mem->va)
return I40IW_ERR_NO_MEMORY;
return 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index cc9c0c8ccba3..112d2f38e0de 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -623,8 +623,9 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
page = dev->db_tab->page + end;
alloc:
- page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
- &page->mapping, GFP_KERNEL);
+ page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
+ MTHCA_ICM_PAGE_SIZE, &page->mapping,
+ GFP_KERNEL);
if (!page->db_rec) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 241a57a07485..097e5ab2a19f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -380,8 +380,8 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
q->len = len;
q->entry_size = entry_size;
q->size = len * entry_size;
- q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
- &q->dma, GFP_KERNEL);
+ q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
+ GFP_KERNEL);
if (!q->va)
return -ENOMEM;
return 0;
@@ -1819,7 +1819,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
return -ENOMEM;
ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
- cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+ cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
if (!cq->va) {
status = -ENOMEM;
goto mem_err;
@@ -2209,7 +2209,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
qp->sq.max_cnt = max_wqe_allocated;
len = (hw_pages * hw_page_size);
- qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->sq.va)
return -EINVAL;
qp->sq.len = len;
@@ -2259,7 +2259,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
qp->rq.max_cnt = max_rqe_allocated;
len = (hw_pages * hw_page_size);
- qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->rq.va)
return -ENOMEM;
qp->rq.pa = pa;
@@ -2315,8 +2315,8 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
if (dev->attr.ird == 0)
return 0;
- qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
- GFP_KERNEL);
+ qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
+ GFP_KERNEL);
if (!qp->ird_q_va)
return -ENOMEM;
ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index dd15474b19b7..6be0ea109138 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -73,8 +73,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
sizeof(struct ocrdma_rdma_stats_resp));
- mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size,
- &mem->pa, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
if (!mem->va) {
pr_err("%s: stats mbox allocation failed\n", __func__);
return false;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index c46bed0c5513..287c332ff0e6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -504,8 +504,8 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
- ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
- &ctx->ah_tbl.pa, GFP_KERNEL);
+ ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
+ &ctx->ah_tbl.pa, GFP_KERNEL);
if (!ctx->ah_tbl.va) {
kfree(ctx);
return ERR_PTR(-ENOMEM);
@@ -838,7 +838,7 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
return -ENOMEM;
for (i = 0; i < mr->num_pbls; i++) {
- va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+ va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
if (!va) {
ocrdma_free_mr_pbl_tbl(dev, mr);
status = -ENOMEM;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index b342a70e2814..e1ccf32b1c3d 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -556,8 +556,8 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
return ERR_PTR(-ENOMEM);
for (i = 0; i < pbl_info->num_pbls; i++) {
- va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
- &pa, flags);
+ va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
+ flags);
if (!va)
goto err;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index eaa109dbc96a..39c37b6fd715 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -890,8 +890,8 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "device version %d, driver version %d\n",
dev->dsr_version, PVRDMA_VERSION);
- dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
- &dev->dsrbase, GFP_KERNEL);
+ dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+ &dev->dsrbase, GFP_KERNEL);
if (!dev->dsr) {
dev_err(&pdev->dev, "failed to allocate shared region\n");
ret = -ENOMEM;
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
index f456c1125bd6..69881265d121 100644
--- a/drivers/input/touchscreen/raspberrypi-ts.c
+++ b/drivers/input/touchscreen/raspberrypi-ts.c
@@ -147,8 +147,8 @@ static int rpi_ts_probe(struct platform_device *pdev)
return -ENOMEM;
ts->pdev = pdev;
- ts->fw_regs_va = dma_zalloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
- GFP_KERNEL);
+ ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
+ GFP_KERNEL);
if (!ts->fw_regs_va) {
dev_err(dev, "failed to dma_alloc_coherent\n");
return -ENOMEM;
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 6ede4286b835..730f7dabcf37 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -232,9 +232,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
spin_lock_init(&dom->pgtlock);
- dom->pgt_va = dma_zalloc_coherent(data->dev,
- M2701_IOMMU_PGT_SIZE,
- &dom->pgt_pa, GFP_KERNEL);
+ dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
+ &dom->pgt_pa, GFP_KERNEL);
if (!dom->pgt_va)
return -ENOMEM;
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 447baaebca44..cdb79ae2d8dc 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -218,8 +218,8 @@ static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
{
struct device *dev = &cio2->pci_dev->dev;
- q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
- GFP_KERNEL);
+ q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
+ GFP_KERNEL);
if (!q->fbpt)
return -ENOMEM;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
index e80123cba406..060c0ad6243a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -49,7 +49,7 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
struct device *dev = &ctx->dev->plat_dev->dev;
- mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
if (!mem->va) {
mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev),
size);
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index efe2fb72d54b..25265fd0fd6e 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -218,8 +218,8 @@ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
if (get_order(size) >= MAX_ORDER)
return NULL;
- return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
- GFP_KERNEL);
+ return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
+ GFP_KERNEL);
}
void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a22e11a65658..eba9bcc92ad3 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3763,8 +3763,9 @@ int sdhci_setup_host(struct sdhci_host *host)
* Use zalloc to zero the reserved high 32-bits of 128-bit
* descriptors so that they never need to be written.
*/
- buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
- host->adma_table_sz, &dma, GFP_KERNEL);
+ buf = dma_alloc_coherent(mmc_dev(mmc),
+ host->align_buffer_sz + host->adma_table_sz,
+ &dma, GFP_KERNEL);
if (!buf) {
pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
mmc_hostname(mmc));
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 91fc64c1145e..47e5984f16fb 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1433,18 +1433,18 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* Allocate TX descriptor ring in coherent memory */
- greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
- &greth->tx_bd_base_phys,
- GFP_KERNEL);
+ greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL);
if (!greth->tx_bd_base) {
err = -ENOMEM;
goto error3;
}
/* Allocate RX descriptor ring in coherent memory */
- greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
- &greth->rx_bd_base_phys,
- GFP_KERNEL);
+ greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL);
if (!greth->rx_bd_base) {
err = -ENOMEM;
goto error4;
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 0b60921c392f..16477aa6d61f 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -795,8 +795,8 @@ static int slic_init_stat_queue(struct slic_device *sdev)
size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;
for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
- descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr,
- GFP_KERNEL);
+ descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr,
+ GFP_KERNEL);
if (!descs) {
netdev_err(sdev->netdev,
"failed to allocate status descriptors\n");
@@ -1240,8 +1240,8 @@ static int slic_init_shmem(struct slic_device *sdev)
struct slic_shmem_data *sm_data;
dma_addr_t paddr;
- sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
- &paddr, GFP_KERNEL);
+ sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
+ &paddr, GFP_KERNEL);
if (!sm_data) {
dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");
return -ENOMEM;
@@ -1621,8 +1621,8 @@ static int slic_read_eeprom(struct slic_device *sdev)
int err = 0;
u8 *mac[2];
- eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
- &paddr, GFP_KERNEL);
+ eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
+ &paddr, GFP_KERNEL);
if (!eeprom)
return -ENOMEM;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 420cede41ca4..b17d435de09f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -111,8 +111,8 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
struct ena_com_admin_sq *sq = &queue->sq;
u16 size = ADMIN_SQ_SIZE(queue->q_depth);
- sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
- GFP_KERNEL);
+ sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
+ GFP_KERNEL);
if (!sq->entries) {
pr_err("memory allocation failed");
@@ -133,8 +133,8 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
struct ena_com_admin_cq *cq = &queue->cq;
u16 size = ADMIN_CQ_SIZE(queue->q_depth);
- cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
- GFP_KERNEL);
+ cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
+ GFP_KERNEL);
if (!cq->entries) {
pr_err("memory allocation failed");
@@ -156,8 +156,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
- GFP_KERNEL);
+ aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
+ GFP_KERNEL);
if (!aenq->entries) {
pr_err("memory allocation failed");
@@ -344,15 +344,15 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
- GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size,
+ &io_sq->desc_addr.phys_addr,
+ GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
- GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size,
+ &io_sq->desc_addr.phys_addr,
+ GFP_KERNEL);
}
if (!io_sq->desc_addr.virt_addr) {
@@ -425,14 +425,14 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
prev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size,
+ &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, prev_node);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr,
- GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size,
+ &io_cq->cdesc_addr.phys_addr,
+ GFP_KERNEL);
}
if (!io_cq->cdesc_addr.virt_addr) {
@@ -1026,8 +1026,8 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
rss->hash_key =
- dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- &rss->hash_key_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+ &rss->hash_key_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_key))
return -ENOMEM;
@@ -1050,8 +1050,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
rss->hash_ctrl =
- dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+ &rss->hash_ctrl_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_ctrl))
return -ENOMEM;
@@ -1094,8 +1094,8 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
sizeof(struct ena_admin_rss_ind_table_entry);
rss->rss_ind_tbl =
- dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
- &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, tbl_size,
+ &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
if (unlikely(!rss->rss_ind_tbl))
goto mem_err1;
@@ -1649,9 +1649,9 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
spin_lock_init(&mmio_read->lock);
mmio_read->read_resp =
- dma_zalloc_coherent(ena_dev->dmadev,
- sizeof(*mmio_read->read_resp),
- &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev,
+ sizeof(*mmio_read->read_resp),
+ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
if (unlikely(!mmio_read->read_resp))
goto err;
@@ -2623,8 +2623,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
host_attr->host_info =
- dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
- &host_attr->host_info_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+ &host_attr->host_info_dma_addr, GFP_KERNEL);
if (unlikely(!host_attr->host_info))
return -ENOMEM;
@@ -2641,8 +2641,9 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
host_attr->debug_area_virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
- &host_attr->debug_area_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
+ &host_attr->debug_area_dma_addr,
+ GFP_KERNEL);
if (unlikely(!host_attr->debug_area_virt_addr)) {
host_attr->debug_area_size = 0;
return -ENOMEM;
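The io_sq/io_cq hunks above keep ena_com's two-step allocation: bias the allocator toward the queue's NUMA node first, then retry without the hint before giving up. A condensed sketch of that flow; alloc_on_node is an illustrative helper name, not an ENA function:

#include <linux/dma-mapping.h>
#include <linux/device.h>

/* Hedged sketch of NUMA-preferred allocation with fallback, as in the
 * ena_com io_sq/io_cq hunks.  "alloc_on_node" is an illustrative name.
 */
static void *alloc_on_node(struct device *dev, size_t size,
                           dma_addr_t *dma, int preferred_node)
{
        int orig_node = dev_to_node(dev);
        void *vaddr;

        /* First attempt: bias the allocation toward the queue's node. */
        set_dev_node(dev, preferred_node);
        vaddr = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
        set_dev_node(dev, orig_node);

        /* Fallback: accept memory from any node rather than failing. */
        if (!vaddr)
                vaddr = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

        return vaddr;
}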
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 0f2ad50f3bd7..87b142a312e0 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -206,8 +206,8 @@ static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
/* Packet buffers should be 64B aligned */
- pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
- GFP_ATOMIC);
+ pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
+ GFP_ATOMIC);
if (unlikely(!pkt_buf)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -428,8 +428,8 @@ static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
ring->ndev = ndev;
size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
- ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
- GFP_KERNEL);
+ ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
+ GFP_KERNEL);
if (!ring->desc_addr)
goto err;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c131cfc1b79d..e3538ba7d0e7 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -660,10 +660,9 @@ static int alx_alloc_rings(struct alx_priv *alx)
alx->num_txq +
sizeof(struct alx_rrd) * alx->rx_ringsz +
sizeof(struct alx_rfd) * alx->rx_ringsz;
- alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
- alx->descmem.size,
- &alx->descmem.dma,
- GFP_KERNEL);
+ alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev,
+ alx->descmem.size,
+ &alx->descmem.dma, GFP_KERNEL);
if (!alx->descmem.virt)
return -ENOMEM;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 7087b88550db..3a3b35b5df67 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1019,8 +1019,8 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
8 * 4;
- ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
- &ring_header->dma, GFP_KERNEL);
+ ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
+ &ring_header->dma, GFP_KERNEL);
if (unlikely(!ring_header->desc)) {
dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
goto err_nomem;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 6bae973d4dce..09cd188826b1 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -936,7 +936,7 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto out_freeirq_tx;
@@ -947,7 +947,7 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto out_free_rx_ring;
@@ -2120,7 +2120,7 @@ static int bcm_enetsw_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM;
@@ -2132,7 +2132,7 @@ static int bcm_enetsw_open(struct net_device *dev)
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate tx ring\n");
ret = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4574275ef445..f9521d0274b7 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1506,8 +1506,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
/* We just need one DMA descriptor which is DMA-able, since writing to
* the port will allocate a new descriptor in its internal linked-list
*/
- p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
- GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+ GFP_KERNEL);
if (!p) {
netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index cabc8e49ad24..2d3a44c40221 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -634,9 +634,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
/* Alloc ring of descriptors */
size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
- ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
- &ring->dma_base,
- GFP_KERNEL);
+ ring->cpu_base = dma_alloc_coherent(dma_dev, size,
+ &ring->dma_base,
+ GFP_KERNEL);
if (!ring->cpu_base) {
dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
ring->mmio_base);
@@ -659,9 +659,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
/* Alloc ring of descriptors */
size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
- ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
- &ring->dma_base,
- GFP_KERNEL);
+ ring->cpu_base = dma_alloc_coherent(dma_dev, size,
+ &ring->dma_base,
+ GFP_KERNEL);
if (!ring->cpu_base) {
dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
ring->mmio_base);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index bbb247116045..d63371d70bce 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -844,8 +844,8 @@ bnx2_alloc_stats_blk(struct net_device *dev)
BNX2_SBLK_MSIX_ALIGN_SIZE);
bp->status_stats_size = status_blk_size +
sizeof(struct statistics_block);
- status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
- &bp->status_blk_mapping, GFP_KERNEL);
+ status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+ &bp->status_blk_mapping, GFP_KERNEL);
if (!status_blk)
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 3aa80da973d7..4ab6eb3baefc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3449,10 +3449,10 @@ alloc_ext_stats:
goto alloc_tx_ext_stats;
bp->hw_rx_port_stats_ext =
- dma_zalloc_coherent(&pdev->dev,
- sizeof(struct rx_port_stats_ext),
- &bp->hw_rx_port_stats_ext_map,
- GFP_KERNEL);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct rx_port_stats_ext),
+ &bp->hw_rx_port_stats_ext_map,
+ GFP_KERNEL);
if (!bp->hw_rx_port_stats_ext)
return 0;
@@ -3462,10 +3462,10 @@ alloc_tx_ext_stats:
if (bp->hwrm_spec_code >= 0x10902) {
bp->hw_tx_port_stats_ext =
- dma_zalloc_coherent(&pdev->dev,
- sizeof(struct tx_port_stats_ext),
- &bp->hw_tx_port_stats_ext_map,
- GFP_KERNEL);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tx_port_stats_ext),
+ &bp->hw_tx_port_stats_ext_map,
+ GFP_KERNEL);
}
bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 15c7041e937b..70775158c8c4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -316,8 +316,8 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
n = IEEE_8021QAZ_MAX_TCS;
data_len = sizeof(*data) + sizeof(*fw_app) * n;
- data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping,
- GFP_KERNEL);
+ data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 140dbd62106d..7f56032e44ac 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -85,8 +85,8 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
return -EFAULT;
}
- data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
- &data_dma_addr, GFP_KERNEL);
+ data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
+ &data_dma_addr, GFP_KERNEL);
if (!data_addr)
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3b1397af81f7..b1627dd5f2fd 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8712,10 +8712,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
if (!i && tg3_flag(tp, ENABLE_RSS))
continue;
- tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
- TG3_RX_RCB_RING_BYTES(tp),
- &tnapi->rx_rcb_mapping,
- GFP_KERNEL);
+ tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping,
+ GFP_KERNEL);
if (!tnapi->rx_rcb)
goto err_out;
}
@@ -8768,9 +8768,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
{
int i;
- tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
- sizeof(struct tg3_hw_stats),
- &tp->stats_mapping, GFP_KERNEL);
+ tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
+ sizeof(struct tg3_hw_stats),
+ &tp->stats_mapping, GFP_KERNEL);
if (!tp->hw_stats)
goto err_out;
@@ -8778,10 +8778,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk;
- tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
- TG3_HW_STATUS_SIZE,
- &tnapi->status_mapping,
- GFP_KERNEL);
+ tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_HW_STATUS_SIZE,
+ &tnapi->status_mapping,
+ GFP_KERNEL);
if (!tnapi->hw_status)
goto err_out;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index fcaf18fa3904..5b4d3badcb73 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -59,7 +59,7 @@ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
dmem->q_len = q_len;
dmem->size = (desc_size * q_len) + align_bytes;
/* Save address, need it while freeing */
- dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
+ dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size,
&dmem->dma, GFP_KERNEL);
if (!dmem->unalign_base)
return -ENOMEM;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 20b6e1b3f5e3..85f22c286680 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
{
size_t len = nelem * elem_size;
void *s = NULL;
- void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
+ void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
if (!p)
return NULL;
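alloc_ring() here, like its cxgb4 and cxgb4vf counterparts below, pairs a coherent descriptor array with separately allocated per-entry software state. A rough sketch under those assumptions; the cleanup policy and parameter names are mine, not Chelsio's:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Hedged sketch of an alloc_ring()-style helper: hardware descriptors in
 * coherent memory, per-entry software state in normal kernel memory.
 * Parameter names and the cleanup policy are illustrative.
 */
static void *alloc_ring_sketch(struct device *dev, size_t nelem,
                               size_t elem_size, size_t sw_size,
                               dma_addr_t *phys, void **sw_ring)
{
        size_t len = nelem * elem_size;
        void *hw = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

        if (!hw)
                return NULL;

        if (sw_size) {
                void *sw = kcalloc(nelem, sw_size, GFP_KERNEL);

                if (!sw) {
                        dma_free_coherent(dev, len, hw, *phys);
                        return NULL;
                }
                *sw_ring = sw;
        }
        return hw;
}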
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b90188401d4a..fc0bc6458e84 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -694,7 +694,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
{
size_t len = nelem * elem_size + stat_size;
void *s = NULL;
- void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL);
+ void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
if (!p)
return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 3007e1ac1e61..1d534f0baa69 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
* Allocate the hardware ring and PCI DMA bus address space for said.
*/
size_t hwlen = nelem * hwsize + stat_size;
- void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
+ void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
if (!hwring)
return NULL;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1e9d882c04ef..59a7f0b99069 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1808,9 +1808,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
total_size = buf_len;
get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
- get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- get_fat_cmd.size,
- &get_fat_cmd.dma, GFP_ATOMIC);
+ get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ get_fat_cmd.size,
+ &get_fat_cmd.dma, GFP_ATOMIC);
if (!get_fat_cmd.va)
return -ENOMEM;
@@ -2302,8 +2302,8 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -EINVAL;
cmd.size = sizeof(struct be_cmd_resp_port_type);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
return -ENOMEM;
@@ -3066,8 +3066,8 @@ int lancer_fw_download(struct be_adapter *adapter,
flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ LANCER_FW_DOWNLOAD_CHUNK;
- flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
- &flash_cmd.dma, GFP_KERNEL);
+ flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+ GFP_KERNEL);
if (!flash_cmd.va)
return -ENOMEM;
@@ -3184,8 +3184,8 @@ int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
}
flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
- flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
- GFP_KERNEL);
+ flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+ GFP_KERNEL);
if (!flash_cmd.va)
return -ENOMEM;
@@ -3435,8 +3435,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
goto err;
}
cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -3522,9 +3522,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
- attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- attribs_cmd.size,
- &attribs_cmd.dma, GFP_ATOMIC);
+ attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ attribs_cmd.size,
+ &attribs_cmd.dma, GFP_ATOMIC);
if (!attribs_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
@@ -3699,10 +3699,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
- get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- get_mac_list_cmd.size,
- &get_mac_list_cmd.dma,
- GFP_ATOMIC);
+ get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ get_mac_list_cmd.size,
+ &get_mac_list_cmd.dma,
+ GFP_ATOMIC);
if (!get_mac_list_cmd.va) {
dev_err(&adapter->pdev->dev,
@@ -3829,8 +3829,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_mac_list);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_KERNEL);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -4035,8 +4035,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
@@ -4089,9 +4089,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- extfat_cmd.size, &extfat_cmd.dma,
- GFP_ATOMIC);
+ extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ extfat_cmd.size, &extfat_cmd.dma,
+ GFP_ATOMIC);
if (!extfat_cmd.va)
return -ENOMEM;
@@ -4127,9 +4127,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- extfat_cmd.size, &extfat_cmd.dma,
- GFP_ATOMIC);
+ extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ extfat_cmd.size, &extfat_cmd.dma,
+ GFP_ATOMIC);
if (!extfat_cmd.va) {
dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -4354,8 +4354,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_func_config);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -4452,8 +4452,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va)
return -ENOMEM;
@@ -4539,8 +4539,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_profile_config);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va)
return -ENOMEM;
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 3f6749fc889f..4c218341c51b 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -274,8 +274,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
int status = 0;
read_cmd.size = LANCER_READ_FILE_CHUNK;
- read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
- &read_cmd.dma, GFP_ATOMIC);
+ read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size,
+ &read_cmd.dma, GFP_ATOMIC);
if (!read_cmd.va) {
dev_err(&adapter->pdev->dev,
@@ -815,7 +815,7 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
}
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
+ cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -851,9 +851,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
};
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
- ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- ddrdma_cmd.size, &ddrdma_cmd.dma,
- GFP_KERNEL);
+ ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ ddrdma_cmd.size, &ddrdma_cmd.dma,
+ GFP_KERNEL);
if (!ddrdma_cmd.va)
return -ENOMEM;
@@ -1014,9 +1014,9 @@ static int be_read_eeprom(struct net_device *netdev,
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
- eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- eeprom_cmd.size, &eeprom_cmd.dma,
- GFP_KERNEL);
+ eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ eeprom_cmd.size, &eeprom_cmd.dma,
+ GFP_KERNEL);
if (!eeprom_cmd.va)
return -ENOMEM;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 852f5bfe5f6d..d5026909dec5 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -167,8 +167,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
- GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+ &mem->dma, GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
return 0;
@@ -5766,9 +5766,9 @@ static int be_drv_init(struct be_adapter *adapter)
int status = 0;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
- mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
- &mbox_mem_alloc->dma,
- GFP_KERNEL);
+ mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
+ &mbox_mem_alloc->dma,
+ GFP_KERNEL);
if (!mbox_mem_alloc->va)
return -ENOMEM;
@@ -5777,8 +5777,8 @@ static int be_drv_init(struct be_adapter *adapter)
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
- rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
- &rx_filter->dma, GFP_KERNEL);
+ rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
+ &rx_filter->dma, GFP_KERNEL);
if (!rx_filter->va) {
status = -ENOMEM;
goto free_mbox;
@@ -5792,8 +5792,8 @@ static int be_drv_init(struct be_adapter *adapter)
stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
else
stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
- stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
- &stats_cmd->dma, GFP_KERNEL);
+ stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
+ &stats_cmd->dma, GFP_KERNEL);
if (!stats_cmd->va) {
status = -ENOMEM;
goto free_rx_filter;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 4d673225ed3e..3e5e97186fc4 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -935,16 +935,14 @@ static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
return -ENOMEM;
/* Allocate descriptors */
- priv->rxdes = dma_zalloc_coherent(priv->dev,
- MAX_RX_QUEUE_ENTRIES *
- sizeof(struct ftgmac100_rxdes),
- &priv->rxdes_dma, GFP_KERNEL);
+ priv->rxdes = dma_alloc_coherent(priv->dev,
+ MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
+ &priv->rxdes_dma, GFP_KERNEL);
if (!priv->rxdes)
return -ENOMEM;
- priv->txdes = dma_zalloc_coherent(priv->dev,
- MAX_TX_QUEUE_ENTRIES *
- sizeof(struct ftgmac100_txdes),
- &priv->txdes_dma, GFP_KERNEL);
+ priv->txdes = dma_alloc_coherent(priv->dev,
+ MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
+ &priv->txdes_dma, GFP_KERNEL);
if (!priv->txdes)
return -ENOMEM;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 084f24daf2b5..2a0e820526dc 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -734,10 +734,9 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
{
int i;
- priv->descs = dma_zalloc_coherent(priv->dev,
- sizeof(struct ftmac100_descs),
- &priv->descs_dma_addr,
- GFP_KERNEL);
+ priv->descs = dma_alloc_coherent(priv->dev,
+ sizeof(struct ftmac100_descs),
+ &priv->descs_dma_addr, GFP_KERNEL);
if (!priv->descs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 471805ea363b..e5d853b7b454 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1006,8 +1006,8 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
for (i = 0; i < QUEUE_NUMS; i++) {
size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
- virt_addr = dma_zalloc_coherent(dev, size, &phys_addr,
- GFP_KERNEL);
+ virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
+ GFP_KERNEL);
if (virt_addr == NULL)
goto error_free_pool;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 07cd58798083..1bf7a5f116a0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2041,9 +2041,8 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
int size = ring->desc_num * sizeof(ring->desc[0]);
- ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
- &ring->desc_dma_addr,
- GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
+ &ring->desc_dma_addr, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 8af0cef5609b..e483a6e730e6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -39,9 +39,8 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclge_desc);
- ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
- size, &ring->desc_dma_addr,
- GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
+ &ring->desc_dma_addr, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index d5765c8cf3a3..4e78e8812a04 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -115,9 +115,8 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclgevf_desc);
- ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
- size, &ring->desc_dma_addr,
- GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
+ &ring->desc_dma_addr, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index c40603a183df..b4fefb4c3064 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -613,8 +613,8 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
u8 *cmd_vaddr;
int err = 0;
- cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
- &cmd_paddr, GFP_KERNEL);
+ cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
+ &cmd_paddr, GFP_KERNEL);
if (!cmd_vaddr) {
dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n");
return -ENOMEM;
@@ -663,8 +663,8 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
dma_addr_t node_paddr;
int err;
- node = dma_zalloc_coherent(&pdev->dev, chain->cell_size,
- &node_paddr, GFP_KERNEL);
+ node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr,
+ GFP_KERNEL);
if (!node) {
dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n");
return -ENOMEM;
@@ -821,10 +821,10 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
if (!chain->cell_ctxt)
return -ENOMEM;
- chain->wb_status = dma_zalloc_coherent(&pdev->dev,
- sizeof(*chain->wb_status),
- &chain->wb_status_paddr,
- GFP_KERNEL);
+ chain->wb_status = dma_alloc_coherent(&pdev->dev,
+ sizeof(*chain->wb_status),
+ &chain->wb_status_paddr,
+ GFP_KERNEL);
if (!chain->wb_status) {
dev_err(&pdev->dev, "Failed to allocate DMA wb status\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 7cb8b9b94726..683e67515016 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -593,10 +593,10 @@ static int alloc_eq_pages(struct hinic_eq *eq)
}
for (pg = 0; pg < eq->num_pages; pg++) {
- eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev,
- eq->page_size,
- &eq->dma_addr[pg],
- GFP_KERNEL);
+ eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
+ eq->page_size,
+ &eq->dma_addr[pg],
+ GFP_KERNEL);
if (!eq->virt_addr[pg]) {
err = -ENOMEM;
goto err_dma_alloc;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index 8e5897669a3a..a322a22d9357 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -355,9 +355,9 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
goto err_sq_db;
}
- ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
- &func_to_io->ci_dma_base,
- GFP_KERNEL);
+ ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
+ &func_to_io->ci_dma_base,
+ GFP_KERNEL);
if (!ci_addr_base) {
dev_err(&pdev->dev, "Failed to allocate CI area\n");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index bbf9bdd0ee3e..d62cf509646a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -336,9 +336,9 @@ static int alloc_rq_cqe(struct hinic_rq *rq)
goto err_cqe_dma_arr_alloc;
for (i = 0; i < wq->q_depth; i++) {
- rq->cqe[i] = dma_zalloc_coherent(&pdev->dev,
- sizeof(*rq->cqe[i]),
- &rq->cqe_dma[i], GFP_KERNEL);
+ rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
+ sizeof(*rq->cqe[i]),
+ &rq->cqe_dma[i], GFP_KERNEL);
if (!rq->cqe[i])
goto err_cqe_alloc;
}
@@ -415,8 +415,8 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
/* HW requirements: Must be at least 32 bit */
pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
- rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size,
- &rq->pi_dma_addr, GFP_KERNEL);
+ rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
+ &rq->pi_dma_addr, GFP_KERNEL);
if (!rq->pi_virt_addr) {
dev_err(&pdev->dev, "Failed to allocate PI address\n");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 1dfa7eb05c10..cb66e7024659 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -114,8 +114,8 @@ static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
struct pci_dev *pdev = hwif->pdev;
dma_addr_t dma_addr;
- *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr,
- GFP_KERNEL);
+ *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
+ GFP_KERNEL);
if (!*vaddr) {
dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
return -ENOMEM;
@@ -482,8 +482,8 @@ static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
u64 *paddr = &wq->block_vaddr[i];
dma_addr_t dma_addr;
- *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size,
- &dma_addr, GFP_KERNEL);
+ *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
+ &dma_addr, GFP_KERNEL);
if (!*vaddr) {
dev_err(&pdev->dev, "Failed to allocate wq page\n");
goto err_alloc_wq_pages;
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index fff09dcf9e34..787d5aca5278 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -636,8 +636,8 @@ static int mal_probe(struct platform_device *ofdev)
bd_size = sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans);
- mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
- GFP_KERNEL);
+ mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+ GFP_KERNEL);
if (mal->bd_virt == NULL) {
err = -ENOMEM;
goto fail_unmap;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 2569a168334c..a41008523c98 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -993,8 +993,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
@@ -1051,8 +1051,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
}
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
- rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
ret_val = 6;
goto err_nomem;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 308c006cb41d..189f231075c2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2305,8 +2305,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
{
struct pci_dev *pdev = adapter->pdev;
- ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
- GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4d40878e395a..f52e2c46e6a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -109,8 +109,8 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
struct i40e_pf *pf = (struct i40e_pf *)hw->back;
mem->size = ALIGN(size, alignment);
- mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
- &mem->pa, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
+ GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 1d4d1686909a..e5ac2d3fd816 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -680,8 +680,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
vfree(txdr->buffer_info);
return -ENOMEM;
@@ -763,8 +763,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index e0875476a780..16066c2d5b3a 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2044,9 +2044,9 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
u32 txq_dma;
/* Allocate memory for TX descriptors */
- aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
- MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
- &aggr_txq->descs_dma, GFP_KERNEL);
+ aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+ MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+ &aggr_txq->descs_dma, GFP_KERNEL);
if (!aggr_txq->descs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 0bd4351b2a49..f8a6d6e3cb7a 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -557,9 +557,9 @@ static int init_hash_table(struct pxa168_eth_private *pep)
* table is full.
*/
if (!pep->htpr) {
- pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
- HASH_ADDR_TABLE_SIZE,
- &pep->htpr_dma, GFP_KERNEL);
+ pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma, GFP_KERNEL);
if (!pep->htpr)
return -ENOMEM;
} else {
@@ -1044,9 +1044,9 @@ static int rxq_init(struct net_device *dev)
pep->rx_desc_count = 0;
size = pep->rx_ring_size * sizeof(struct rx_desc);
pep->rx_desc_area_size = size;
- pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
- &pep->rx_desc_dma,
- GFP_KERNEL);
+ pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->rx_desc_dma,
+ GFP_KERNEL);
if (!pep->p_rx_desc_area)
goto out;
@@ -1103,9 +1103,9 @@ static int txq_init(struct net_device *dev)
pep->tx_desc_count = 0;
size = pep->tx_ring_size * sizeof(struct tx_desc);
pep->tx_desc_area_size = size;
- pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
- &pep->tx_desc_dma,
- GFP_KERNEL);
+ pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->tx_desc_dma,
+ GFP_KERNEL);
if (!pep->p_tx_desc_area)
goto out;
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 399f565dd85a..fe9653fa8aea 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -598,10 +598,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
dma_addr_t dma_addr;
int i;
- eth->scratch_ring = dma_zalloc_coherent(eth->dev,
- cnt * sizeof(struct mtk_tx_dma),
- &eth->phy_scratch_ring,
- GFP_ATOMIC);
+ eth->scratch_ring = dma_alloc_coherent(eth->dev,
+ cnt * sizeof(struct mtk_tx_dma),
+ &eth->phy_scratch_ring,
+ GFP_ATOMIC);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
@@ -1213,8 +1213,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (!ring->buf)
goto no_tx_mem;
- ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
- &ring->phys, GFP_ATOMIC);
+ ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ &ring->phys, GFP_ATOMIC);
if (!ring->dma)
goto no_tx_mem;
@@ -1310,9 +1310,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;
}
- ring->dma = dma_zalloc_coherent(eth->dev,
- rx_dma_size * sizeof(*ring->dma),
- &ring->phys, GFP_ATOMIC);
+ ring->dma = dma_alloc_coherent(eth->dev,
+ rx_dma_size * sizeof(*ring->dma),
+ &ring->phys, GFP_ATOMIC);
if (!ring->dma)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 9af34e03892c..dbc483e4a2ef 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -584,8 +584,8 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
buf->npages = 1;
buf->page_shift = get_order(size) + PAGE_SHIFT;
buf->direct.buf =
- dma_zalloc_coherent(&dev->persist->pdev->dev,
- size, &t, GFP_KERNEL);
+ dma_alloc_coherent(&dev->persist->pdev->dev, size, &t,
+ GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
@@ -624,8 +624,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
- dma_zalloc_coherent(&dev->persist->pdev->dev,
- PAGE_SIZE, &t, GFP_KERNEL);
+ dma_alloc_coherent(&dev->persist->pdev->dev,
+ PAGE_SIZE, &t, GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 456f30007ad6..421b9c3c8bf7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -63,8 +63,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
mutex_lock(&priv->alloc_mutex);
original_node = dev_to_node(&dev->pdev->dev);
set_dev_node(&dev->pdev->dev, node);
- cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
- dma_handle, GFP_KERNEL);
+ cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
+ GFP_KERNEL);
set_dev_node(&dev->pdev->dev, original_node);
mutex_unlock(&priv->alloc_mutex);
return cpu_handle;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index d3125cdf69db..3e0fa8a8077b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1789,8 +1789,8 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
struct device *ddev = &dev->pdev->dev;
- cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
- &cmd->alloc_dma, GFP_KERNEL);
+ cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
+ &cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
return -ENOMEM;
@@ -1804,9 +1804,9 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
cmd->alloc_dma);
- cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
- 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
- &cmd->alloc_dma, GFP_KERNEL);
+ cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
+ 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
+ &cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
return -ENOMEM;
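The mlx5 command-page hunks above show the second half of an over-allocate-and-align fallback: the first buffer is freed and a 2 * MLX5_ADAPTER_PAGE_SIZE - 1 buffer is allocated so an aligned block can be carved out of it. A compact sketch of that idea, with the alignment check, the constant, and the names all being illustrative stand-ins rather than the driver's actual logic:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>       /* ALIGN(), IS_ALIGNED() */

#define CMD_ALIGN_SZ 4096       /* illustrative stand-in for the real constant */

/* Hedged sketch of over-allocate-and-align: try a naturally aligned
 * allocation first, otherwise grab 2 * CMD_ALIGN_SZ - 1 bytes and carve an
 * aligned block out of it.  Names and the check are illustrative.
 */
static void *alloc_aligned_coherent(struct device *dev, dma_addr_t *dma,
                                    void **raw, dma_addr_t *raw_dma,
                                    size_t *raw_size)
{
        *raw_size = CMD_ALIGN_SZ;
        *raw = dma_alloc_coherent(dev, *raw_size, raw_dma, GFP_KERNEL);
        if (!*raw)
                return NULL;
        if (IS_ALIGNED(*raw_dma, CMD_ALIGN_SZ)) {
                *dma = *raw_dma;
                return *raw;
        }

        /* Not aligned: free it and over-allocate so an aligned block fits. */
        dma_free_coherent(dev, *raw_size, *raw, *raw_dma);
        *raw_size = 2 * CMD_ALIGN_SZ - 1;
        *raw = dma_alloc_coherent(dev, *raw_size, raw_dma, GFP_KERNEL);
        if (!*raw)
                return NULL;

        *dma = ALIGN(*raw_dma, CMD_ALIGN_SZ);
        return *raw + (*dma - *raw_dma);
}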
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 5f384f73007d..19ce0e605096 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3604,9 +3604,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
- ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
- &ss->rx_done.bus,
- GFP_KERNEL);
+ ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
+ &ss->rx_done.bus,
+ GFP_KERNEL);
if (ss->rx_done.entry == NULL)
goto abort;
bytes = sizeof(*ss->fw_stats);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index e97636d2e6ee..7d2d4241498f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2170,9 +2170,9 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
tx_ring->cnt = dp->txd_cnt;
tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
- tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL | __GFP_NOWARN);
+ tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
if (!tx_ring->txds) {
netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
tx_ring->cnt);
@@ -2328,9 +2328,9 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
rx_ring->cnt = dp->rxd_cnt;
rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
- rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL | __GFP_NOWARN);
+ rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
if (!rx_ring->rxds) {
netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
rx_ring->cnt);
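Both nfp ring hunks pass __GFP_NOWARN and print a driver-specific hint on failure instead of the generic allocation-failure backtrace. A small sketch of that convention; the message wording and names are placeholders:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

/* Hedged sketch: suppress the generic OOM splat with __GFP_NOWARN and emit
 * a driver-specific hint instead, as in the nfp ring hunks above.  The
 * message wording and names are placeholders.
 */
static void *alloc_ring_quiet(struct net_device *netdev, struct device *dev,
                              size_t size, unsigned int cnt, dma_addr_t *dma)
{
        void *descs = dma_alloc_coherent(dev, size, dma,
                                         GFP_KERNEL | __GFP_NOWARN);

        if (!descs)
                netdev_warn(netdev,
                            "ring allocation of %u descriptors failed, consider a smaller ring\n",
                            cnt);
        return descs;
}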
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 0611f2335b4a..1e408d1a9b5f 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -287,9 +287,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
priv->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*priv->tx_bd_v) * TX_BD_NUM,
- &priv->tx_bd_p, GFP_KERNEL);
+ priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*priv->tx_bd_v) * TX_BD_NUM,
+ &priv->tx_bd_p, GFP_KERNEL);
if (!priv->tx_bd_v)
goto out;
@@ -299,9 +299,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
if (!priv->tx_skb)
goto out;
- priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*priv->rx_bd_v) * RX_BD_NUM,
- &priv->rx_bd_p, GFP_KERNEL);
+ priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*priv->rx_bd_v) * RX_BD_NUM,
+ &priv->rx_bd_p, GFP_KERNEL);
if (!priv->rx_bd_v)
goto out;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 43c0c10dfeb7..552d930e3940 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1440,8 +1440,8 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
rx_ring->rx_buff_pool =
- dma_zalloc_coherent(&pdev->dev, size,
- &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
+ dma_alloc_coherent(&pdev->dev, size,
+ &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
if (!rx_ring->rx_buff_pool)
return -ENOMEM;
@@ -1755,8 +1755,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
- tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc) {
vfree(tx_ring->buffer_info);
return -ENOMEM;
@@ -1798,8 +1798,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
return -ENOMEM;
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
- rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
vfree(rx_ring->buffer_info);
return -ENOMEM;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 8a31a02c9f47..d21041554507 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -401,9 +401,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
goto out_ring_desc;
- ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev,
- RX_RING_SIZE * sizeof(u64),
- &ring->buf_dma, GFP_KERNEL);
+ ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
+ RX_RING_SIZE * sizeof(u64),
+ &ring->buf_dma, GFP_KERNEL);
if (!ring->buffers)
goto out_ring_desc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index dc1c1b616084..c2ad405b2f50 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -936,9 +936,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
u32 size = min_t(u32, total_size, psz);
void **p_virt = &p_mngr->t2[i].p_virt;
- *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
- size, &p_mngr->t2[i].p_phys,
- GFP_KERNEL);
+ *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+ &p_mngr->t2[i].p_phys,
+ GFP_KERNEL);
if (!p_mngr->t2[i].p_virt) {
rc = -ENOMEM;
goto t2_fail;
@@ -1054,8 +1054,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
u32 size;
size = min_t(u32, sz_left, p_blk->real_size_in_page);
- p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
- &p_phys, GFP_KERNEL);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+ &p_phys, GFP_KERNEL);
if (!p_virt)
return -ENOMEM;
@@ -2306,9 +2306,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
goto out0;
}
- p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_blk->real_size_in_page, &p_phys,
- GFP_KERNEL);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_blk->real_size_in_page, &p_phys,
+ GFP_KERNEL);
if (!p_virt) {
rc = -ENOMEM;
goto out1;
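The qed context hunks above allocate a large table as a sequence of chunks, each bounded by min_t(u32, remaining, chunk_size). A trimmed sketch of that loop; struct dma_chunk and the unwind policy are illustrative:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>       /* min_t() */

struct dma_chunk {
        void *p_virt;
        dma_addr_t p_phys;
        u32 size;
};

/* Hedged sketch of chunked coherent allocation, as in the qed_cxt hunks:
 * a large table split into fixed-size pieces, unwound on failure.
 * "struct dma_chunk" and the unwind policy are illustrative.
 */
static int alloc_chunked(struct device *dev, struct dma_chunk *chunks,
                         int nchunks, u32 total_size, u32 chunk_size)
{
        int i;

        for (i = 0; i < nchunks && total_size; i++) {
                u32 size = min_t(u32, total_size, chunk_size);

                chunks[i].p_virt = dma_alloc_coherent(dev, size,
                                                      &chunks[i].p_phys,
                                                      GFP_KERNEL);
                if (!chunks[i].p_virt)
                        goto unwind;
                chunks[i].size = size;
                total_size -= size;
        }
        return 0;

unwind:
        while (--i >= 0)
                dma_free_coherent(dev, chunks[i].size, chunks[i].p_virt,
                                  chunks[i].p_phys);
        return -ENOMEM;
}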
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index d344e9d43832..af38d3d73291 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -434,14 +434,14 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
*(tx_ring->hw_consumer) = 0;
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
- rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
- &rq_phys_addr, GFP_KERNEL);
+ rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
- rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
- &rsp_phys_addr, GFP_KERNEL);
+ rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
@@ -855,8 +855,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args cmd;
size_t nic_size = sizeof(struct qlcnic_info_le);
- nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
@@ -909,8 +909,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return err;
- nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
@@ -964,8 +964,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
void *pci_info_addr;
int err = 0, i;
- pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
- &pci_info_dma_t, GFP_KERNEL);
+ pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
+ &pci_info_dma_t, GFP_KERNEL);
if (!pci_info_addr)
return -ENOMEM;
@@ -1078,8 +1078,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
return -EIO;
}
- stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
+ stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
if (!stats_addr)
return -ENOMEM;
@@ -1134,8 +1134,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
if (mac_stats == NULL)
return -ENOMEM;
- stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
+ stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
if (!stats_addr)
return -ENOMEM;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 031f6e6ee9c1..8d790313ee3d 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -776,7 +776,7 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */
ring_header->used = 0;
- ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size,
+ ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size,
&ring_header->dma_addr,
GFP_KERNEL);
if (!ring_header->v_addr)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 690aee88f0eb..6d22dd500790 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -400,9 +400,9 @@ static int init_tx_ring(struct device *dev, u8 queue_no,
}
/* allocate memory for TX descriptors */
- tx_ring->dma_tx = dma_zalloc_coherent(dev,
- tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
- &tx_ring->dma_tx_phy, GFP_KERNEL);
+ tx_ring->dma_tx = dma_alloc_coherent(dev,
+ tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+ &tx_ring->dma_tx_phy, GFP_KERNEL);
if (!tx_ring->dma_tx)
return -ENOMEM;
@@ -479,9 +479,9 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
rx_ring->queue_no = queue_no;
/* allocate memory for RX descriptors */
- rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
- rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
- &rx_ring->dma_rx_phy, GFP_KERNEL);
+ rx_ring->dma_rx = dma_alloc_coherent(priv->device,
+ rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+ &rx_ring->dma_rx_phy, GFP_KERNEL);
if (rx_ring->dma_rx == NULL)
return -ENOMEM;
diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c
index a8ecb33390da..9c07b5175581 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.c
+++ b/drivers/net/ethernet/sfc/falcon/nic.c
@@ -33,8 +33,8 @@
int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
unsigned int len, gfp_t gfp_flags)
{
- buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr, gfp_flags);
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, gfp_flags);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index aa1945a858d5..c2d45a40eb48 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -34,8 +34,8 @@
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len, gfp_t gfp_flags)
{
- buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr, gfp_flags);
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, gfp_flags);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 703fbbefea44..0e1b7e960b98 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -211,8 +211,8 @@ static void meth_check_link(struct net_device *dev)
static int meth_init_tx_ring(struct meth_private *priv)
{
/* Init TX ring */
- priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE,
- &priv->tx_ring_dma, GFP_ATOMIC);
+ priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
+ &priv->tx_ring_dma, GFP_ATOMIC);
if (!priv->tx_ring)
return -ENOMEM;
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 05a0948ad929..a18149720aa2 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1029,8 +1029,8 @@ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
struct netsec_desc_ring *dring = &priv->desc_ring[id];
int i;
- dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
- &dring->desc_dma, GFP_KERNEL);
+ dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ &dring->desc_dma, GFP_KERNEL);
if (!dring->vaddr)
goto err;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0e0a0789c2ed..0c4ab3444cc3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1549,22 +1549,18 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
goto err_dma;
if (priv->extend_desc) {
- rx_q->dma_erx = dma_zalloc_coherent(priv->device,
- DMA_RX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &rx_q->dma_rx_phy,
- GFP_KERNEL);
+ rx_q->dma_erx = dma_alloc_coherent(priv->device,
+ DMA_RX_SIZE * sizeof(struct dma_extended_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
if (!rx_q->dma_erx)
goto err_dma;
} else {
- rx_q->dma_rx = dma_zalloc_coherent(priv->device,
- DMA_RX_SIZE *
- sizeof(struct
- dma_desc),
- &rx_q->dma_rx_phy,
- GFP_KERNEL);
+ rx_q->dma_rx = dma_alloc_coherent(priv->device,
+ DMA_RX_SIZE * sizeof(struct dma_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
if (!rx_q->dma_rx)
goto err_dma;
}
@@ -1612,21 +1608,17 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
goto err_dma;
if (priv->extend_desc) {
- tx_q->dma_etx = dma_zalloc_coherent(priv->device,
- DMA_TX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &tx_q->dma_tx_phy,
- GFP_KERNEL);
+ tx_q->dma_etx = dma_alloc_coherent(priv->device,
+ DMA_TX_SIZE * sizeof(struct dma_extended_desc),
+ &tx_q->dma_tx_phy,
+ GFP_KERNEL);
if (!tx_q->dma_etx)
goto err_dma;
} else {
- tx_q->dma_tx = dma_zalloc_coherent(priv->device,
- DMA_TX_SIZE *
- sizeof(struct
- dma_desc),
- &tx_q->dma_tx_phy,
- GFP_KERNEL);
+ tx_q->dma_tx = dma_alloc_coherent(priv->device,
+ DMA_TX_SIZE * sizeof(struct dma_desc),
+ &tx_q->dma_tx_phy,
+ GFP_KERNEL);
if (!tx_q->dma_tx)
goto err_dma;
}
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index edcd1e60b30d..37925a1d58de 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1311,13 +1311,13 @@ static int tsi108_open(struct net_device *dev)
data->id, dev->irq, dev->name);
}
- data->rxring = dma_zalloc_coherent(&data->pdev->dev, rxring_size,
- &data->rxdma, GFP_KERNEL);
+ data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
+ &data->rxdma, GFP_KERNEL);
if (!data->rxring)
return -ENOMEM;
- data->txring = dma_zalloc_coherent(&data->pdev->dev, txring_size,
- &data->txdma, GFP_KERNEL);
+ data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
+ &data->txdma, GFP_KERNEL);
if (!data->txring) {
dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
data->rxdma);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 2241f9897092..15bb058db392 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -243,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
- lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
+ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
+ lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 12a14609ec47..0789d8af7d72 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -199,15 +199,15 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
+ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
+ lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 61fceee73c1b..38ac8ef41f5f 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1139,9 +1139,9 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
#endif
sizeof(PI_CONSUMER_BLOCK) +
(PI_ALIGN_K_DESC_BLK - 1);
- bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size,
- &bp->kmalloced_dma,
- GFP_ATOMIC);
+ bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
+ &bp->kmalloced_dma,
+ GFP_ATOMIC);
if (top_v == NULL)
return DFX_K_FAILURE;
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 72433f3efc74..5d661f60b101 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -409,10 +409,10 @@ static int skfp_driver_init(struct net_device *dev)
if (bp->SharedMemSize > 0) {
bp->SharedMemSize += 16; // for descriptor alignment
- bp->SharedMemAddr = dma_zalloc_coherent(&bp->pdev.dev,
- bp->SharedMemSize,
- &bp->SharedMemDMA,
- GFP_ATOMIC);
+ bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
+ bp->SharedMemSize,
+ &bp->SharedMemDMA,
+ GFP_ATOMIC);
if (!bp->SharedMemAddr) {
printk("could not allocate mem for ");
printk("hardware module: %ld byte\n",
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index e454dfc9ad8f..89984fcab01e 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -535,8 +535,8 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
}
sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
- tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
- &tq->buf_info_pa, GFP_KERNEL);
+ tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
+ &tq->buf_info_pa, GFP_KERNEL);
if (!tq->buf_info)
goto err;
@@ -1815,8 +1815,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
rq->rx_ring[1].size);
- bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
- GFP_KERNEL);
+ bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
+ GFP_KERNEL);
if (!bi)
goto err;
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 839fa7715709..be6485428198 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -279,10 +279,9 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
/* Get BD buffer */
- bd_buffer = dma_zalloc_coherent(priv->dev,
- (RX_BD_RING_LEN + TX_BD_RING_LEN) *
- MAX_RX_BUF_LENGTH,
- &bd_dma_addr, GFP_KERNEL);
+ bd_buffer = dma_alloc_coherent(priv->dev,
+ (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
+ &bd_dma_addr, GFP_KERNEL);
if (!bd_buffer) {
dev_err(priv->dev, "Could not allocate buffer descriptors\n");
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f6d3ecbdd3a3..2a5668b4f6bc 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1553,10 +1553,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
* coherent DMA are unsupported
*/
dest_ring->base_addr_owner_space_unaligned =
- dma_zalloc_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- &base_addr, GFP_KERNEL);
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) {
kfree(dest_ring);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e49b36752ba2..49758490eaba 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5169,10 +5169,10 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_AP) {
- arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
- IEEE80211_MAX_FRAME_LEN,
- &arvif->beacon_paddr,
- GFP_ATOMIC);
+ arvif->beacon_buf = dma_alloc_coherent(ar->dev,
+ IEEE80211_MAX_FRAME_LEN,
+ &arvif->beacon_paddr,
+ GFP_ATOMIC);
if (!arvif->beacon_buf) {
ret = -ENOMEM;
ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 01b4edb00e9e..39e0b1cc2a12 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -936,8 +936,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
*/
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
- data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev,
- alloc_nbytes,
+ data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index ba837403e266..8e236d158ca6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -5193,7 +5193,7 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
void *vaddr;
pool_size = num_units * round_up(unit_len, 4);
- vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
+ vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
if (!vaddr)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 5ab3e31c9ffa..bab30f7a443c 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -174,9 +174,8 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn
int i;
size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
- wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size,
- &wcn_ch->dma_addr,
- GFP_KERNEL);
+ wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
+ GFP_KERNEL);
if (!wcn_ch->cpu_addr)
return -ENOMEM;
@@ -627,9 +626,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
- cpu_addr = dma_zalloc_coherent(wcn->dev, s,
- &wcn->mgmt_mem_pool.phy_addr,
- GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(wcn->dev, s,
+ &wcn->mgmt_mem_pool.phy_addr,
+ GFP_KERNEL);
if (!cpu_addr)
goto out_err;
@@ -642,9 +641,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
- cpu_addr = dma_zalloc_coherent(wcn->dev, s,
- &wcn->data_mem_pool.phy_addr,
- GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(wcn->dev, s,
+ &wcn->data_mem_pool.phy_addr,
+ GFP_KERNEL);
if (!cpu_addr)
goto out_err;
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 05a8348bd7b9..3380aaef456c 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -99,7 +99,7 @@ static int wil_sring_alloc(struct wil6210_priv *wil,
/* Status messages are allocated and initialized to 0. This is necessary
* since DR bit should be initialized to 0.
*/
- sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
+ sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
if (!sring->va)
return -ENOMEM;
@@ -381,15 +381,15 @@ static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
if (!ring->ctx)
goto err;
- ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
+ ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
if (!ring->va)
goto err_free_ctx;
if (ring->is_rx) {
sz = sizeof(*ring->edma_rx_swtail.va);
ring->edma_rx_swtail.va =
- dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
- GFP_KERNEL);
+ dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
+ GFP_KERNEL);
if (!ring->edma_rx_swtail.va)
goto err_free_va;
}
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index dfc4c34298d4..b34e51933257 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -431,9 +431,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
- ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
- ring_mem_size, &(ring->dmabase),
- GFP_KERNEL);
+ ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+ ring_mem_size, &(ring->dmabase),
+ GFP_KERNEL);
if (!ring->descbase)
return -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c
index 1b1da7d83652..2ce1537d983c 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.c
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.c
@@ -331,9 +331,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
/* GFP flags must match the flags in free_ringmemory()! */
- ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
- B43legacy_DMA_RINGMEMSIZE,
- &(ring->dmabase), GFP_KERNEL);
+ ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+ B43legacy_DMA_RINGMEMSIZE,
+ &(ring->dmabase), GFP_KERNEL);
if (!ring->descbase)
return -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 16d7dda965d8..0f69b3fa296e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1281,10 +1281,10 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
u32 addr;
devinfo->shared.scratch =
- dma_zalloc_coherent(&devinfo->pdev->dev,
- BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
- &devinfo->shared.scratch_dmahandle,
- GFP_KERNEL);
+ dma_alloc_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
+ &devinfo->shared.scratch_dmahandle,
+ GFP_KERNEL);
if (!devinfo->shared.scratch)
goto fail;
@@ -1298,10 +1298,10 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
devinfo->shared.ringupd =
- dma_zalloc_coherent(&devinfo->pdev->dev,
- BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
- &devinfo->shared.ringupd_dmahandle,
- GFP_KERNEL);
+ dma_alloc_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
+ &devinfo->shared.ringupd_dmahandle,
+ GFP_KERNEL);
if (!devinfo->shared.ringupd)
goto fail;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index e965cc588850..9e850c25877b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -711,30 +711,24 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
* Allocate the circular buffer of Read Buffer Descriptors
* (RBDs)
*/
- rxq->bd = dma_zalloc_coherent(dev,
- free_size * rxq->queue_size,
- &rxq->bd_dma, GFP_KERNEL);
+ rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
+ &rxq->bd_dma, GFP_KERNEL);
if (!rxq->bd)
goto err;
if (trans->cfg->mq_rx_supported) {
- rxq->used_bd = dma_zalloc_coherent(dev,
- (use_rx_td ?
- sizeof(*rxq->cd) :
- sizeof(__le32)) *
- rxq->queue_size,
- &rxq->used_bd_dma,
- GFP_KERNEL);
+ rxq->used_bd = dma_alloc_coherent(dev,
+ (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+ &rxq->used_bd_dma,
+ GFP_KERNEL);
if (!rxq->used_bd)
goto err;
}
/* Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
- sizeof(__le16) :
- sizeof(struct iwl_rb_status),
- &rxq->rb_stts_dma,
- GFP_KERNEL);
+ rxq->rb_stts = dma_alloc_coherent(dev,
+ use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status),
+ &rxq->rb_stts_dma, GFP_KERNEL);
if (!rxq->rb_stts)
goto err;
@@ -742,16 +736,14 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
return 0;
/* Allocate the driver's pointer to TR tail */
- rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
- &rxq->tr_tail_dma,
- GFP_KERNEL);
+ rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
+ &rxq->tr_tail_dma, GFP_KERNEL);
if (!rxq->tr_tail)
goto err;
/* Allocate the driver's pointer to CR tail */
- rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
- &rxq->cr_tail_dma,
- GFP_KERNEL);
+ rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
+ &rxq->cr_tail_dma, GFP_KERNEL);
if (!rxq->cr_tail)
goto err;
/*
@@ -1947,9 +1939,8 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->ict_tbl =
- dma_zalloc_coherent(trans->dev, ICT_SIZE,
- &trans_pcie->ict_tbl_dma,
- GFP_KERNEL);
+ dma_alloc_coherent(trans->dev, ICT_SIZE,
+ &trans_pcie->ict_tbl_dma, GFP_KERNEL);
if (!trans_pcie->ict_tbl)
return -ENOMEM;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
index 528cb0401df1..4956a54151cb 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
@@ -119,9 +119,9 @@ static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
/*
* Allocate DMA memory for descriptor and buffer.
*/
- addr = dma_zalloc_coherent(rt2x00dev->dev,
- queue->limit * queue->desc_size, &dma,
- GFP_KERNEL);
+ addr = dma_alloc_coherent(rt2x00dev->dev,
+ queue->limit * queue->desc_size, &dma,
+ GFP_KERNEL);
if (!addr)
return -ENOMEM;
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index 5ee5f40b4dfc..f1eaa3c4d46a 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -1339,10 +1339,10 @@ static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
int rc;
sndev->nr_rsvd_luts++;
- sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
- LUT_SIZE,
- &sndev->self_shared_dma,
- GFP_KERNEL);
+ sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev,
+ LUT_SIZE,
+ &sndev->self_shared_dma,
+ GFP_KERNEL);
if (!sndev->self_shared) {
dev_err(&sndev->stdev->dev,
"unable to allocate memory for shared mw\n");
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5a0bf6a24d50..e8d0942c9c92 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1485,8 +1485,8 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
if (dev->ctrl.queue_count > qid)
return 0;
- nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
- &nvmeq->cq_dma_addr, GFP_KERNEL);
+ nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
+ &nvmeq->cq_dma_addr, GFP_KERNEL);
if (!nvmeq->cqes)
goto free_nvmeq;
@@ -1915,8 +1915,8 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
max_entries = dev->ctrl.hmmaxd;
- descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
- &descs_dma, GFP_KERNEL);
+ descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
+ &descs_dma, GFP_KERNEL);
if (!descs)
goto out;
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 9deb56989d72..cb3401a931f8 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -602,9 +602,9 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
}
/* Reserve memory for event queue and make sure memories are zeroed */
- msi->eq_cpu = dma_zalloc_coherent(pcie->dev,
- msi->nr_eq_region * EQ_MEM_REGION_SIZE,
- &msi->eq_dma, GFP_KERNEL);
+ msi->eq_cpu = dma_alloc_coherent(pcie->dev,
+ msi->nr_eq_region * EQ_MEM_REGION_SIZE,
+ &msi->eq_dma, GFP_KERNEL);
if (!msi->eq_cpu) {
ret = -ENOMEM;
goto free_irqs;
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 6c5536d3d42a..e22766c79fe9 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1373,10 +1373,10 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
return 0;
- stdev->dma_mrpc = dma_zalloc_coherent(&stdev->pdev->dev,
- sizeof(*stdev->dma_mrpc),
- &stdev->dma_mrpc_dma_addr,
- GFP_KERNEL);
+ stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
+ sizeof(*stdev->dma_mrpc),
+ &stdev->dma_mrpc_dma_addr,
+ GFP_KERNEL);
if (stdev->dma_mrpc == NULL)
return -ENOMEM;
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index bb655854713d..b64c56c33c3b 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -1382,9 +1382,9 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
INIT_WORK(&priv->idb_work, tsi721_db_dpc);
/* Allocate buffer for inbound doorbells queue */
- priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
- IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
- &priv->idb_dma, GFP_KERNEL);
+ priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+ IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
+ &priv->idb_dma, GFP_KERNEL);
if (!priv->idb_base)
return -ENOMEM;
@@ -1447,9 +1447,9 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv)
regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
/* Allocate space for DMA descriptors */
- bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
- bd_num * sizeof(struct tsi721_dma_desc),
- &bd_phys, GFP_KERNEL);
+ bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ bd_num * sizeof(struct tsi721_dma_desc),
+ &bd_phys, GFP_KERNEL);
if (!bd_ptr)
return -ENOMEM;
@@ -1464,7 +1464,7 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv)
sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
bd_num : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
- sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
+ sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_KERNEL);
if (!sts_ptr) {
@@ -1939,10 +1939,10 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
/* Outbound message descriptor status FIFO allocation */
priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
- priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
- priv->omsg_ring[mbox].sts_size *
- sizeof(struct tsi721_dma_sts),
- &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
+ priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
+ &priv->omsg_ring[mbox].sts_phys,
+ GFP_KERNEL);
if (priv->omsg_ring[mbox].sts_base == NULL) {
tsi_debug(OMSG, &priv->pdev->dev,
"ENOMEM for OB_MSG_%d status FIFO", mbox);
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 006ea5a45020..7f5d4436f594 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -90,9 +90,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
* Allocate space for DMA descriptors
* (add an extra element for link descriptor)
*/
- bd_ptr = dma_zalloc_coherent(dev,
- (bd_num + 1) * sizeof(struct tsi721_dma_desc),
- &bd_phys, GFP_ATOMIC);
+ bd_ptr = dma_alloc_coherent(dev,
+ (bd_num + 1) * sizeof(struct tsi721_dma_desc),
+ &bd_phys, GFP_ATOMIC);
if (!bd_ptr)
return -ENOMEM;
@@ -108,7 +108,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
(bd_num + 1) : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
- sts_ptr = dma_zalloc_coherent(dev,
+ sts_ptr = dma_alloc_coherent(dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_ATOMIC);
if (!sts_ptr) {
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index dcbf5c857743..ed8e58f09054 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -89,8 +89,8 @@ static int register_sba(struct ism_dev *ism)
dma_addr_t dma_handle;
struct ism_sba *sba;
- sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
- &dma_handle, GFP_KERNEL);
+ sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
+ GFP_KERNEL);
if (!sba)
return -ENOMEM;
@@ -116,8 +116,8 @@ static int register_ieq(struct ism_dev *ism)
dma_addr_t dma_handle;
struct ism_eq *ieq;
- ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
- &dma_handle, GFP_KERNEL);
+ ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
+ GFP_KERNEL);
if (!ieq)
return -ENOMEM;
@@ -234,10 +234,9 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
return -EINVAL;
- dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len,
- &dmb->dma_addr, GFP_KERNEL |
- __GFP_NOWARN | __GFP_NOMEMALLOC |
- __GFP_COMP | __GFP_NORETRY);
+ dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
+ &dmb->dma_addr,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY);
if (!dmb->cpu_addr)
clear_bit(dmb->sba_idx, ism->sba_bitmap);
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index e8f5f7c63190..cd096104bcec 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -646,8 +646,9 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
unsigned long *cpu_addr;
int retval = 1;
- cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev,
- size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+ size * TW_Q_LENGTH, &dma_handle,
+ GFP_KERNEL);
if (!cpu_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
goto out;
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index ff53fd0d12f2..66c514310f3c 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1123,8 +1123,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
/* Get total memory needed for SCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
- host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys,
- GFP_KERNEL);
+ host->scb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->scb_phys,
+ GFP_KERNEL);
if (!host->scb_virt) {
printk("inia100: SCB memory allocation error\n");
goto out_host_put;
@@ -1132,8 +1132,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
/* Get total memory needed for ESCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
- host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys,
- GFP_KERNEL);
+ host->escb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->escb_phys,
+ GFP_KERNEL);
if (!host->escb_virt) {
printk("inia100: ESCB memory allocation error\n");
goto out_free_scb_array;
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 0f6751b0a633..57c6fa388bf6 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -587,8 +587,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg;
acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
- dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
- &dma_coherent_handle, GFP_KERNEL);
+ dma_coherent = dma_alloc_coherent(&pdev->dev,
+ acb->roundup_ccbsize,
+ &dma_coherent_handle,
+ GFP_KERNEL);
if (!dma_coherent) {
pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
return false;
@@ -617,8 +619,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
struct MessageUnit_D *reg;
acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
- dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
- &dma_coherent_handle, GFP_KERNEL);
+ dma_coherent = dma_alloc_coherent(&pdev->dev,
+ acb->roundup_ccbsize,
+ &dma_coherent_handle,
+ GFP_KERNEL);
if (!dma_coherent) {
pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
return false;
@@ -659,8 +663,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
uint32_t completeQ_size;
completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
acb->roundup_ccbsize = roundup(completeQ_size, 32);
- dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
- &dma_coherent_handle, GFP_KERNEL);
+ dma_coherent = dma_alloc_coherent(&pdev->dev,
+ acb->roundup_ccbsize,
+ &dma_coherent_handle,
+ GFP_KERNEL);
if (!dma_coherent){
pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
return false;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 39f3820572b4..74e260027c7d 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3321,8 +3321,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
- GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
return 0;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index ca7b7bbc8371..d4febaadfaa3 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -293,8 +293,8 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
struct be_dma_mem *cmd,
u8 subsystem, u8 opcode, u32 size)
{
- cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
- GFP_KERNEL);
+ cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
+ GFP_KERNEL);
if (!cmd->va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to allocate memory for if info\n");
@@ -1510,10 +1510,9 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
return -EINVAL;
nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
- nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev,
- nonemb_cmd.size,
- &nonemb_cmd.dma,
- GFP_KERNEL);
+ nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
+ nonemb_cmd.size, &nonemb_cmd.dma,
+ GFP_KERNEL);
if (!nonemb_cmd.va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
"BM_%d : invldt_cmds_params alloc failed\n");
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 5d163ca1b366..d8e6d7480f35 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3264,9 +3264,9 @@ bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
/* Allocate dma coherent memory */
buf_info = buf_base;
buf_info->size = payload_len;
- buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev,
- buf_info->size, &buf_info->phys,
- GFP_KERNEL);
+ buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev,
+ buf_info->size, &buf_info->phys,
+ GFP_KERNEL);
if (!buf_info->virt)
goto out_free_mem;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index e8ae4d671d23..039328d9ef13 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1857,10 +1857,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
* entries. Hence the limit with one page is 8192 task context
* entries.
*/
- hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_bd_dma,
- GFP_KERNEL);
+ hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_bd_dma,
+ GFP_KERNEL);
if (!hba->task_ctx_bd_tbl) {
printk(KERN_ERR PFX "unable to allocate task context BDT\n");
rc = -1;
@@ -1894,10 +1894,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
for (i = 0; i < task_ctx_arr_sz; i++) {
- hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_dma[i],
- GFP_KERNEL);
+ hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_dma[i],
+ GFP_KERNEL);
if (!hba->task_ctx[i]) {
printk(KERN_ERR PFX "unable to alloc task context\n");
rc = -1;
@@ -2031,19 +2031,19 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
}
for (i = 0; i < segment_count; ++i) {
- hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev,
- BNX2FC_HASH_TBL_CHUNK_SIZE,
- &dma_segment_array[i],
- GFP_KERNEL);
+ hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
if (!hba->hash_tbl_segments[i]) {
printk(KERN_ERR PFX "hash segment alloc failed\n");
goto cleanup_dma;
}
}
- hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
- &hba->hash_tbl_pbl_dma,
- GFP_KERNEL);
+ hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->hash_tbl_pbl_dma,
+ GFP_KERNEL);
if (!hba->hash_tbl_pbl) {
printk(KERN_ERR PFX "hash table pbl alloc failed\n");
goto cleanup_dma;
@@ -2104,10 +2104,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
- hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev,
- mem_size,
- &hba->t2_hash_tbl_ptr_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_ptr_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl_ptr) {
printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
bnx2fc_free_fw_resc(hba);
@@ -2116,9 +2115,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
mem_size = BNX2FC_NUM_MAX_SESS *
sizeof(struct fcoe_t2_hash_table_entry);
- hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size,
- &hba->t2_hash_tbl_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl) {
printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
bnx2fc_free_fw_resc(hba);
@@ -2140,9 +2139,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
}
- hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
- &hba->stats_buf_dma,
- GFP_KERNEL);
+ hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->stats_buf_dma,
+ GFP_KERNEL);
if (!hba->stats_buffer) {
printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
bnx2fc_free_fw_resc(hba);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index e3d1c7c440c8..d735e87e416a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -672,8 +672,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
- &tgt->sq_dma, GFP_KERNEL);
+ tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ &tgt->sq_dma, GFP_KERNEL);
if (!tgt->sq) {
printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
tgt->sq_mem_size);
@@ -685,8 +685,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
- &tgt->cq_dma, GFP_KERNEL);
+ tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ &tgt->cq_dma, GFP_KERNEL);
if (!tgt->cq) {
printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
tgt->cq_mem_size);
@@ -698,8 +698,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
- &tgt->rq_dma, GFP_KERNEL);
+ tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ &tgt->rq_dma, GFP_KERNEL);
if (!tgt->rq) {
printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
tgt->rq_mem_size);
@@ -710,8 +710,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
- &tgt->rq_pbl_dma, GFP_KERNEL);
+ tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ &tgt->rq_pbl_dma, GFP_KERNEL);
if (!tgt->rq_pbl) {
printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
tgt->rq_pbl_size);
@@ -735,9 +735,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev,
- tgt->xferq_mem_size, &tgt->xferq_dma,
- GFP_KERNEL);
+ tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->xferq_mem_size, &tgt->xferq_dma,
+ GFP_KERNEL);
if (!tgt->xferq) {
printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
tgt->xferq_mem_size);
@@ -749,9 +749,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev,
- tgt->confq_mem_size, &tgt->confq_dma,
- GFP_KERNEL);
+ tgt->confq = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->confq_mem_size, &tgt->confq_dma,
+ GFP_KERNEL);
if (!tgt->confq) {
printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
tgt->confq_mem_size);
@@ -763,9 +763,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->confq_pbl_size =
(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
- tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev,
- tgt->confq_pbl_size,
- &tgt->confq_pbl_dma, GFP_KERNEL);
+ tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->confq_pbl_size,
+ &tgt->confq_pbl_dma, GFP_KERNEL);
if (!tgt->confq_pbl) {
printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
tgt->confq_pbl_size);
@@ -787,9 +787,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map ConnDB */
tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
- tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev,
- tgt->conn_db_mem_size,
- &tgt->conn_db_dma, GFP_KERNEL);
+ tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->conn_db_mem_size,
+ &tgt->conn_db_dma, GFP_KERNEL);
if (!tgt->conn_db) {
printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
tgt->conn_db_mem_size);
@@ -802,8 +802,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
- &tgt->lcq_dma, GFP_KERNEL);
+ tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ &tgt->lcq_dma, GFP_KERNEL);
if (!tgt->lcq) {
printk(KERN_ERR PFX "unable to allocate lcq %d\n",
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 91f5316aa3ab..fae6f71e677d 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1070,8 +1070,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual SQ element */
ep->qp.sq_virt =
- dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
- &ep->qp.sq_phys, GFP_KERNEL);
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+ &ep->qp.sq_phys, GFP_KERNEL);
if (!ep->qp.sq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
ep->qp.sq_mem_size);
@@ -1106,8 +1106,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual CQ element */
ep->qp.cq_virt =
- dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
- &ep->qp.cq_phys, GFP_KERNEL);
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+ &ep->qp.cq_phys, GFP_KERNEL);
if (!ep->qp.cq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
ep->qp.cq_mem_size);
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index dc12933533d5..66bbd21819ae 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -233,8 +233,8 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
q = wrm->q_arr[free_idx];
- q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
- GFP_KERNEL);
+ q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
+ GFP_KERNEL);
if (!q->vstart) {
csio_err(hw,
"Failed to allocate DMA memory for "
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 8698af86485d..2dc564e59430 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2730,8 +2730,8 @@ lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
INIT_LIST_HEAD(&dmabuf->list);
/* now, allocate dma buffer */
- dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
- &(dmabuf->phys), GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ &(dmabuf->phys), GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c1c36812c3d2..bede11e16349 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6973,9 +6973,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
if (!dmabuf)
return NULL;
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
- LPFC_HDR_TEMPLATE_SIZE,
- &dmabuf->phys, GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ LPFC_HDR_TEMPLATE_SIZE,
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
rpi_hdr = NULL;
goto err_free_dmabuf;
@@ -7397,8 +7397,8 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
}
/* Allocate memory for SLI-2 structures */
- phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
- &phba->slim2p.phys, GFP_KERNEL);
+ phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ &phba->slim2p.phys, GFP_KERNEL);
if (!phba->slim2p.virt)
goto out_iounmap;
@@ -7816,8 +7816,8 @@ lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
* plus an alignment restriction of 16 bytes.
*/
bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
- &dmabuf->phys, GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
return -ENOMEM;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index f6a5083a621e..4d3b94317515 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1827,9 +1827,9 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
* page, this is used as a priori size of SLI4_PAGE_SIZE for
* the later DMA memory free.
*/
- viraddr = dma_zalloc_coherent(&phba->pcidev->dev,
- SLI4_PAGE_SIZE, &phyaddr,
- GFP_KERNEL);
+ viraddr = dma_alloc_coherent(&phba->pcidev->dev,
+ SLI4_PAGE_SIZE, &phyaddr,
+ GFP_KERNEL);
/* In case of malloc fails, proceed with whatever we have */
if (!viraddr)
break;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 30734caf77e1..12fd74761ae0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5362,8 +5362,8 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
* mailbox command.
*/
dma_size = *vpd_size;
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
- &dmabuf->phys, GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
return -ENOMEM;
@@ -6300,10 +6300,9 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
goto free_mem;
}
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
LPFC_RAS_MAX_ENTRY_SIZE,
- &dmabuf->phys,
- GFP_KERNEL);
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
rc = -ENOMEM;
@@ -14613,9 +14612,9 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf)
goto out_fail;
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
- hw_page_size, &dmabuf->phys,
- GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ hw_page_size, &dmabuf->phys,
+ GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
goto out_fail;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index e836392b75e8..f112458023ff 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -967,9 +967,10 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
* Allocate the common 16-byte aligned memory for the handshake
* mailbox.
*/
- raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev,
- sizeof(mbox64_t), &raid_dev->una_mbox64_dma,
- GFP_KERNEL);
+ raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(mbox64_t),
+ &raid_dev->una_mbox64_dma,
+ GFP_KERNEL);
if (!raid_dev->una_mbox64) {
con_log(CL_ANN, (KERN_WARNING
@@ -995,8 +996,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
align;
// Allocate memory for commands issued internally
- adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
- &adapter->ibuf_dma_h, GFP_KERNEL);
+ adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
+ &adapter->ibuf_dma_h, GFP_KERNEL);
if (!adapter->ibuf) {
con_log(CL_ANN, (KERN_WARNING
@@ -2897,8 +2898,8 @@ megaraid_mbox_product_info(adapter_t *adapter)
* Issue an ENQUIRY3 command to find out certain adapter parameters,
* e.g., max channels, max commands etc.
*/
- pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
- &pinfo_dma_h, GFP_KERNEL);
+ pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
+ &pinfo_dma_h, GFP_KERNEL);
if (pinfo == NULL) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __func__,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f7bdd783360a..7eaa400f6328 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2273,9 +2273,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION_111));
else {
new_affiliation_111 =
- dma_zalloc_coherent(&instance->pdev->dev,
- sizeof(struct MR_LD_VF_AFFILIATION_111),
- &new_affiliation_111_h, GFP_KERNEL);
+ dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &new_affiliation_111_h, GFP_KERNEL);
if (!new_affiliation_111) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2380,10 +2380,9 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION));
else {
new_affiliation =
- dma_zalloc_coherent(&instance->pdev->dev,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION),
- &new_affiliation_h, GFP_KERNEL);
+ dma_alloc_coherent(&instance->pdev->dev,
+ (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
+ &new_affiliation_h, GFP_KERNEL);
if (!new_affiliation) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2546,9 +2545,10 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
if (initial) {
instance->hb_host_mem =
- dma_zalloc_coherent(&instance->pdev->dev,
- sizeof(struct MR_CTRL_HB_HOST_MEM),
- &instance->hb_host_mem_h, GFP_KERNEL);
+ dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct MR_CTRL_HB_HOST_MEM),
+ &instance->hb_host_mem_h,
+ GFP_KERNEL);
if (!instance->hb_host_mem) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
" memory for heartbeat host memory for scsi%d\n",
@@ -5816,9 +5816,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
}
dcmd = &cmd->frame->dcmd;
- el_info = dma_zalloc_coherent(&instance->pdev->dev,
- sizeof(struct megasas_evt_log_info), &el_info_h,
- GFP_KERNEL);
+ el_info = dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct megasas_evt_log_info),
+ &el_info_h, GFP_KERNEL);
if (!el_info) {
megasas_return_cmd(instance, cmd);
return -ENOMEM;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 211c17c33aa0..a9a25f0eaf6f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -689,8 +689,9 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
MAX_MSIX_QUEUES_FUSION;
- fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev,
- array_size, &fusion->rdpq_phys, GFP_KERNEL);
+ fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
+ array_size, &fusion->rdpq_phys,
+ GFP_KERNEL);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index f3e182eb0970..c9dc7740e9e7 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1915,8 +1915,9 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
/* We use the PCI APIs for now until the generic one gets fixed
* enough or until we get some macio-specific versions
*/
- dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev,
- ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL);
+ dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
+ ms->dma_cmd_size, &dma_cmd_bus,
+ GFP_KERNEL);
if (dma_cmd_space == NULL) {
printk(KERN_ERR "mesh: can't allocate DMA table\n");
goto out_unmap;
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index dbe753fba486..36f64205ecfa 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -143,8 +143,9 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
case RESOURCE_UNCACHED_MEMORY:
size = round_up(size, 8);
- res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size,
- &res->bus_addr, GFP_KERNEL);
+ res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
+ &res->bus_addr,
+ GFP_KERNEL);
if (!res->virt_addr) {
dev_err(&mhba->pdev->dev,
"unable to allocate consistent mem,"
@@ -246,8 +247,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
if (size == 0)
return 0;
- virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr,
- GFP_KERNEL);
+ virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
+ GFP_KERNEL);
if (!virt_addr)
return -1;
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index b3be49d41375..4c5a3d23e010 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -116,8 +116,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
u64 align_offset = 0;
if (align)
align_offset = (dma_addr_t)align - 1;
- mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align,
- &mem_dma_handle, GFP_KERNEL);
+ mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
+ &mem_dma_handle, GFP_KERNEL);
if (!mem_virt_alloc) {
pm8001_printk("memory allocation error\n");
return -1;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index edcaf4b0cb0b..9bbc19fc190b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1050,16 +1050,17 @@ static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
sizeof(void *);
fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
- fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev,
- fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL);
+ fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
+ &fcport->sq_dma, GFP_KERNEL);
if (!fcport->sq) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
rval = 1;
goto out;
}
- fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev,
- fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL);
+ fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
+ fcport->sq_pbl_size,
+ &fcport->sq_pbl_dma, GFP_KERNEL);
if (!fcport->sq_pbl) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
rval = 1;
@@ -2680,8 +2681,10 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
}
/* Allocate list of PBL pages */
- qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev,
- QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL);
+ qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
+ QEDF_PAGE_SIZE,
+ &qedf->bdq_pbl_list_dma,
+ GFP_KERNEL);
if (!qedf->bdq_pbl_list) {
QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
return -ENOMEM;
@@ -2770,9 +2773,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
qedf->global_queues[i]->cq =
- dma_zalloc_coherent(&qedf->pdev->dev,
- qedf->global_queues[i]->cq_mem_size,
- &qedf->global_queues[i]->cq_dma, GFP_KERNEL);
+ dma_alloc_coherent(&qedf->pdev->dev,
+ qedf->global_queues[i]->cq_mem_size,
+ &qedf->global_queues[i]->cq_dma,
+ GFP_KERNEL);
if (!qedf->global_queues[i]->cq) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
@@ -2781,9 +2785,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
}
qedf->global_queues[i]->cq_pbl =
- dma_zalloc_coherent(&qedf->pdev->dev,
- qedf->global_queues[i]->cq_pbl_size,
- &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL);
+ dma_alloc_coherent(&qedf->pdev->dev,
+ qedf->global_queues[i]->cq_pbl_size,
+ &qedf->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
if (!qedf->global_queues[i]->cq_pbl) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5c53409a8cea..e74a62448ba4 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1394,10 +1394,9 @@ static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
{
struct qedi_nvm_iscsi_image nvm_image;
- qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
- sizeof(nvm_image),
- &qedi->nvm_buf_dma,
- GFP_KERNEL);
+ qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
+ sizeof(nvm_image),
+ &qedi->nvm_buf_dma, GFP_KERNEL);
if (!qedi->iscsi_image) {
QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
return -ENOMEM;
@@ -1510,10 +1509,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
}
/* Allocate list of PBL pages */
- qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev,
- QEDI_PAGE_SIZE,
- &qedi->bdq_pbl_list_dma,
- GFP_KERNEL);
+ qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
+ QEDI_PAGE_SIZE,
+ &qedi->bdq_pbl_list_dma,
+ GFP_KERNEL);
if (!qedi->bdq_pbl_list) {
QEDI_ERR(&qedi->dbg_ctx,
"Could not allocate list of PBL pages.\n");
@@ -1609,10 +1608,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
(qedi->global_queues[i]->cq_pbl_size +
(QEDI_PAGE_SIZE - 1));
- qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_mem_size,
- &qedi->global_queues[i]->cq_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_mem_size,
+ &qedi->global_queues[i]->cq_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1620,10 +1619,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
status = -ENOMEM;
goto mem_alloc_failure;
}
- qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_pbl_size,
- &qedi->global_queues[i]->cq_pbl_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_pbl_size,
+ &qedi->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1691,16 +1690,16 @@ int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
- ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
- &ep->sq_dma, GFP_KERNEL);
+ ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+ &ep->sq_dma, GFP_KERNEL);
if (!ep->sq) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue.\n");
rval = -ENOMEM;
goto out;
}
- ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
- &ep->sq_pbl_dma, GFP_KERNEL);
+ ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+ &ep->sq_pbl_dma, GFP_KERNEL);
if (!ep->sq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue PBL.\n");
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 00444dc79756..ac504a1ff0ff 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2415,8 +2415,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (qla2x00_chip_is_down(vha))
goto done;
- stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
- &stats_dma, GFP_KERNEL);
+ stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
+ GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x707d,
"Failed to allocate memory for stats.\n");
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 4a9fd8d944d6..17d42658ad9a 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2312,8 +2312,8 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job)
if (!IS_FWI2_CAPABLE(ha))
return -EPERM;
- stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
- &stats_dma, GFP_KERNEL);
+ stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
+ GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x70e2,
"Failed to allocate memory for stats.\n");
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 90cfa394f942..cbc3bc49d4d1 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -4147,9 +4147,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
return rval;
}
- sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
- &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
- &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xffff,
@@ -4165,9 +4166,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
((vha->hw->max_fibre_devices - 1) *
sizeof(struct ct_sns_gpn_ft_data));
- sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
- &vha->hw->pdev->dev, rspsz,
- &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ rspsz,
+ &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xffff,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 364bb52ed2a6..aeeb0144bd55 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3099,8 +3099,8 @@ qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
FCE_SIZE, ha->fce, ha->fce_dma);
/* Allocate memory for Fibre Channel Event Buffer. */
- tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
- GFP_KERNEL);
+ tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
+ GFP_KERNEL);
if (!tc) {
ql_log(ql_log_warn, vha, 0x00be,
"Unable to allocate (%d KB) for FCE.\n",
@@ -3131,8 +3131,8 @@ try_eft:
EFT_SIZE, ha->eft, ha->eft_dma);
/* Allocate memory for Extended Trace Buffer. */
- tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
- GFP_KERNEL);
+ tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+ GFP_KERNEL);
if (!tc) {
ql_log(ql_log_warn, vha, 0x00c1,
"Unable to allocate (%d KB) for EFT.\n",
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1ef74aa2d00a..2bf5e3e639e1 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -153,8 +153,8 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 5d56904687b9..dac9a7013208 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -625,9 +625,9 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
ha->host_no, __func__));
@@ -709,9 +709,9 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
- init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
__func__);
@@ -1340,9 +1340,9 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- about_fw = dma_zalloc_coherent(&ha->pdev->dev,
- sizeof(struct about_fw_info),
- &about_fw_dma, GFP_KERNEL);
+ about_fw = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct about_fw_info),
+ &about_fw_dma, GFP_KERNEL);
if (!about_fw) {
DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
"for about_fw\n", __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index d2b333d629be..5a31877c9d04 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -4052,8 +4052,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 949e186cc5d7..cfdfcda28072 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2704,9 +2704,9 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
uint32_t rem = len;
struct nlattr *attr;
- init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (!init_fw_cb) {
ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
__func__);
@@ -4206,8 +4206,8 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
sizeof(struct shadow_regs) +
MEM_ALIGN_VALUE +
(PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
- ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len,
- &ha->queues_dma, GFP_KERNEL);
+ ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
+ &ha->queues_dma, GFP_KERNEL);
if (ha->queues == NULL) {
ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed - queues.\n");
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index e2fa3f476227..7bde6c809442 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -3576,9 +3576,9 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
alloc_length += PQI_EXTRA_SGL_MEMORY;
ctrl_info->queue_memory_base =
- dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
- alloc_length,
- &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
+ dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
+ &ctrl_info->queue_memory_base_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->queue_memory_base)
return -ENOMEM;
@@ -3715,10 +3715,9 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
ctrl_info->admin_queue_memory_base =
- dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
- alloc_length,
- &ctrl_info->admin_queue_memory_base_dma_handle,
- GFP_KERNEL);
+ dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
+ &ctrl_info->admin_queue_memory_base_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->admin_queue_memory_base)
return -ENOMEM;
@@ -4602,9 +4601,10 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
- ctrl_info->error_buffer_length,
- &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
+ ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
+ ctrl_info->error_buffer_length,
+ &ctrl_info->error_buffer_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->error_buffer)
return -ENOMEM;
@@ -7487,8 +7487,8 @@ static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
dma_addr_t dma_handle;
ctrl_info->pqi_ofa_chunk_virt_addr[i] =
- dma_zalloc_coherent(dev, chunk_size, &dma_handle,
- GFP_KERNEL);
+ dma_alloc_coherent(dev, chunk_size, &dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
break;
@@ -7545,10 +7545,10 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
struct device *dev;
dev = &ctrl_info->pci_dev->dev;
- pqi_ofa_memory = dma_zalloc_coherent(dev,
- PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
- &ctrl_info->pqi_ofa_mem_dma_handle,
- GFP_KERNEL);
+ pqi_ofa_memory = dma_alloc_coherent(dev,
+ PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
+ &ctrl_info->pqi_ofa_mem_dma_handle,
+ GFP_KERNEL);
if (!pqi_ofa_memory)
return;
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
index 9436aa83ff1b..e6d48dccb8d5 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.c
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -62,7 +62,7 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
return -ENODEV;
}
- if (!dma_zalloc_coherent(dev, *size, addr, 0)) {
+ if (!dma_alloc_coherent(dev, *size, addr, 0)) {
dev_err(dev, "DMA Alloc memory failed\n");
return -ENODEV;
}
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
index d7e4e18ec3df..1ae9af5f17ec 100644
--- a/drivers/spi/spi-pic32-sqi.c
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -466,9 +466,9 @@ static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
int i;
/* allocate coherent DMAable memory for hardware buffer descriptors. */
- sqi->bd = dma_zalloc_coherent(&sqi->master->dev,
- sizeof(*bd) * PESQI_BD_COUNT,
- &sqi->bd_dma, GFP_KERNEL);
+ sqi->bd = dma_alloc_coherent(&sqi->master->dev,
+ sizeof(*bd) * PESQI_BD_COUNT,
+ &sqi->bd_dma, GFP_KERNEL);
if (!sqi->bd) {
dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
return -ENOMEM;
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
index 21a76a8ccc26..6027b19f7bc2 100644
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ b/drivers/staging/mt7621-eth/mtk_eth_soc.c
@@ -1396,8 +1396,7 @@ static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
if (!ring->tx_buf)
goto no_tx_mem;
- ring->tx_dma = dma_zalloc_coherent(eth->dev,
- ring->tx_ring_size * sz,
+ ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
&ring->tx_phys,
GFP_ATOMIC | __GFP_ZERO);
if (!ring->tx_dma)
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 338b6e952515..dd4898861b83 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -407,10 +407,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
/* Allocate enough storage to hold the page pointers and the page
* list
*/
- pagelist = dma_zalloc_coherent(g_dev,
- pagelist_size,
- &dma_addr,
- GFP_KERNEL);
+ pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
+ GFP_KERNEL);
vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 1ab0e8562d40..c9097e7367d8 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -440,12 +440,9 @@ static bool device_init_rings(struct vnt_private *priv)
void *vir_pool;
/*allocate all RD/TD rings a single pool*/
- vir_pool = dma_zalloc_coherent(&priv->pcid->dev,
- priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
- priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
- priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
- priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
- &priv->pool_dma, GFP_ATOMIC);
+ vir_pool = dma_alloc_coherent(&priv->pcid->dev,
+ priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
+ &priv->pool_dma, GFP_ATOMIC);
if (!vir_pool) {
dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
return false;
@@ -459,13 +456,9 @@ static bool device_init_rings(struct vnt_private *priv)
priv->rd1_pool_dma = priv->rd0_pool_dma +
priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);
- priv->tx0_bufs = dma_zalloc_coherent(&priv->pcid->dev,
- priv->opts.tx_descs[0] * PKT_BUF_SZ +
- priv->opts.tx_descs[1] * PKT_BUF_SZ +
- CB_BEACON_BUF_SIZE +
- CB_MAX_BUF_SIZE,
- &priv->tx_bufs_dma0,
- GFP_ATOMIC);
+ priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev,
+ priv->opts.tx_descs[0] * PKT_BUF_SZ + priv->opts.tx_descs[1] * PKT_BUF_SZ + CB_BEACON_BUF_SIZE + CB_MAX_BUF_SIZE,
+ &priv->tx_bufs_dma0, GFP_ATOMIC);
if (!priv->tx0_bufs) {
dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n");
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 01b44e159623..ccbd1d34eb2a 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -172,8 +172,9 @@ static int scratchpad_setup(struct bdc *bdc)
/* Refer to BDC spec, Table 4 for description of SPB */
sp_buff_size = 1 << (sp_buff_size + 5);
dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
- bdc->scratchpad.buff = dma_zalloc_coherent(bdc->dev, sp_buff_size,
- &bdc->scratchpad.sp_dma, GFP_KERNEL);
+ bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size,
+ &bdc->scratchpad.sp_dma,
+ GFP_KERNEL);
if (!bdc->scratchpad.buff)
goto fail;
@@ -202,11 +203,9 @@ static int setup_srr(struct bdc *bdc, int interrupter)
bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
bdc->srr.dqp_index = 0;
/* allocate the status report descriptors */
- bdc->srr.sr_bds = dma_zalloc_coherent(
- bdc->dev,
- NUM_SR_ENTRIES * sizeof(struct bdc_bd),
- &bdc->srr.dma_addr,
- GFP_KERNEL);
+ bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev,
+ NUM_SR_ENTRIES * sizeof(struct bdc_bd),
+ &bdc->srr.dma_addr, GFP_KERNEL);
if (!bdc->srr.sr_bds)
return -ENOMEM;
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 6218bfe54f52..98deb5f64268 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -596,9 +596,9 @@ static int uhci_start(struct usb_hcd *hcd)
&uhci_debug_operations);
#endif
- uhci->frame = dma_zalloc_coherent(uhci_dev(uhci),
- UHCI_NUMFRAMES * sizeof(*uhci->frame),
- &uhci->frame_dma_handle, GFP_KERNEL);
+ uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
+ UHCI_NUMFRAMES * sizeof(*uhci->frame),
+ &uhci->frame_dma_handle, GFP_KERNEL);
if (!uhci->frame) {
dev_err(uhci_dev(uhci),
"unable to allocate consistent memory for frame list\n");
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 36a3eb8849f1..8067f178fa84 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1672,8 +1672,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
for (i = 0; i < num_sp; i++) {
dma_addr_t dma;
- void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
- flags);
+ void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
+ flags);
if (!buf)
goto fail_sp4;
@@ -1799,8 +1799,8 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
struct xhci_erst_entry *entry;
size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
- erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
- size, &erst->erst_dma_addr, flags);
+ erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
+ size, &erst->erst_dma_addr, flags);
if (!erst->entries)
return -ENOMEM;
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index a74096c53cb5..43f2a4816860 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1446,9 +1446,9 @@ static int fb_probe(struct platform_device *device)
da8xx_fb_fix.line_length - 1;
/* allocate palette buffer */
- par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE,
- &par->p_palette_base,
- GFP_KERNEL | GFP_DMA);
+ par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE,
+ &par->p_palette_base,
+ GFP_KERNEL | GFP_DMA);
if (!par->v_palette_base) {
dev_err(&device->dev,
"GLCD: kmalloc for palette buffer failed\n");