From 4520a91a976e3f5e74d3d27ccc66a7a669826373 Mon Sep 17 00:00:00 2001
From: Julian Wiedmann
Date: Wed, 9 Dec 2020 11:24:13 +0100
Subject: s390/cio: use dma helpers for setting masks

Bypassing the DMA API is bad style, even when we don't expect any
actual problems. Let's utilize the right API helpers for setting the
DMA masks and check for returned errors, so that we benefit from
common sanity checks.

io_subchannel_allocate_dev() required some extra massaging, so that
we can return an errno other than -ENOMEM.

Signed-off-by: Julian Wiedmann
Reviewed-by: Halil Pasic
Signed-off-by: Vasily Gorbik
---
 drivers/s390/cio/css.c    | 20 ++++++++++++++------
 drivers/s390/cio/device.c | 26 ++++++++++++++++++++------
 2 files changed, 34 insertions(+), 12 deletions(-)

diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 94c6470de635..253ab4e7a415 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -225,18 +225,23 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
 
 	INIT_WORK(&sch->todo_work, css_sch_todo);
 	sch->dev.release = &css_subchannel_release;
+	sch->dev.dma_mask = &sch->dma_mask;
 	device_initialize(&sch->dev);
 	/*
-	 * The physical addresses of some the dma structures that can
+	 * The physical addresses for some of the dma structures that can
 	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
 	 */
-	sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
+	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
+	if (ret)
+		goto err;
 	/*
 	 * But we don't have such restrictions imposed on the stuff that
 	 * is handled by the streaming API.
 	 */
-	sch->dma_mask = DMA_BIT_MASK(64);
-	sch->dev.dma_mask = &sch->dma_mask;
+	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
+	if (ret)
+		goto err;
+
 	return sch;
 
 err:
@@ -970,8 +975,11 @@ static int __init setup_css(int nr)
 	 * css->device as the device argument with the DMA API)
 	 * and are fine with 64 bit addresses.
 	 */
-	css->device.coherent_dma_mask = DMA_BIT_MASK(64);
-	css->device.dma_mask = &css->device.coherent_dma_mask;
+	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
+	if (ret) {
+		kfree(css);
+		goto out_err;
+	}
 	mutex_init(&css->mutex);
 	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index df46373d67ec..3f026021e95e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -679,33 +679,47 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
 {
 	struct ccw_device *cdev;
 	struct gen_pool *dma_pool;
+	int ret;
 
 	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
-	if (!cdev)
+	if (!cdev) {
+		ret = -ENOMEM;
 		goto err_cdev;
+	}
 	cdev->private = kzalloc(sizeof(struct ccw_device_private),
				GFP_KERNEL | GFP_DMA);
-	if (!cdev->private)
+	if (!cdev->private) {
+		ret = -ENOMEM;
 		goto err_priv;
-	cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
+	}
+
 	cdev->dev.dma_mask = sch->dev.dma_mask;
+	ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
+	if (ret)
+		goto err_coherent_mask;
+
 	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
-	if (!dma_pool)
+	if (!dma_pool) {
+		ret = -ENOMEM;
 		goto err_dma_pool;
+	}
 	cdev->private->dma_pool = dma_pool;
 	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
					sizeof(*cdev->private->dma_area));
-	if (!cdev->private->dma_area)
+	if (!cdev->private->dma_area) {
+		ret = -ENOMEM;
 		goto err_dma_area;
+	}
 	return cdev;
 err_dma_area:
 	cio_gp_dma_destroy(dma_pool, &cdev->dev);
 err_dma_pool:
+err_coherent_mask:
 	kfree(cdev->private);
 err_priv:
 	kfree(cdev);
 err_cdev:
-	return ERR_PTR(-ENOMEM);
+	return ERR_PTR(ret);
 }
 
 static void ccw_device_todo(struct work_struct *work);
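[Editor's note, not part of the patch] For readers less familiar with these helpers: dma_set_mask(), dma_set_coherent_mask() and dma_coerce_mask_and_coherent() return 0 on success or a negative errno when the platform cannot satisfy the requested mask, which is the error the patch now propagates instead of ignoring. dma_set_mask() also requires dev->dma_mask to point at storage, which is why css_alloc_subchannel() now wires up sch->dev.dma_mask before calling it, and why setup_css() uses dma_coerce_mask_and_coherent(), which makes dev->dma_mask point at the coherent mask before setting both. A minimal sketch of the pattern, in a hypothetical probe function (example_probe is made up for illustration and assumes the bus has already set dev->dma_mask):

/*
 * Minimal sketch, not taken from this driver: set a 31-bit coherent mask
 * and a 64-bit streaming mask, propagating any error from the DMA API.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_probe(struct device *dev)
{
	int ret;

	/* Coherent (DMA-API allocated) buffers must fit below 31 bits here. */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(31));
	if (ret)
		return ret;

	/* Streaming mappings may use the full 64-bit address range. */
	ret = dma_set_mask(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	return 0;
}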