author     Borislav Petkov <bp@suse.de>    2020-12-01 18:01:27 +0100
committer  Borislav Petkov <bp@suse.de>    2020-12-01 18:01:27 +0100
commit     87314fb181f9042a226d721ab4a5579ddfca139c (patch)
tree       b663e58cfe19ecf5003d4ea6e8048f6ae1925e9f /drivers/block
parent     2002d2951398317d0f46e64ae6d8dd58ed541c6d (diff)
parent     b65054597872ce3aefbc6a666385eabdf9e288da (diff)
download   linux-87314fb181f9042a226d721ab4a5579ddfca139c.tar.bz2
Merge tag 'v5.10-rc6' into x86/cache
Merge -rc6 tag to pick up dependent changes.
Signed-off-by: Borislav Petkov <bp@suse.de>
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/loop.c           |   3
-rw-r--r--  drivers/block/nbd.c            |  10
-rw-r--r--  drivers/block/null_blk.h       |   2
-rw-r--r--  drivers/block/null_blk_zoned.c | 138
-rw-r--r--  drivers/block/xsysace.c        |  49

5 files changed, 150 insertions(+), 52 deletions(-)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cb1191d6e945..a58084c2ed7c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -255,7 +255,8 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
 
 	bd_set_nr_sectors(bdev, size);
 
-	set_capacity_revalidate_and_notify(lo->lo_disk, size, false);
+	if (!set_capacity_revalidate_and_notify(lo->lo_disk, size, false))
+		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 }
 
 static inline int
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 0bed21c0c81b..aaae9220f3a0 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -296,7 +296,7 @@ static void nbd_size_clear(struct nbd_device *nbd)
 	}
 }
 
-static void nbd_size_update(struct nbd_device *nbd)
+static void nbd_size_update(struct nbd_device *nbd, bool start)
 {
 	struct nbd_config *config = nbd->config;
 	struct block_device *bdev = bdget_disk(nbd->disk, 0);
@@ -313,7 +313,8 @@ static void nbd_size_update(struct nbd_device *nbd)
 	if (bdev) {
 		if (bdev->bd_disk) {
 			bd_set_nr_sectors(bdev, nr_sectors);
-			set_blocksize(bdev, config->blksize);
+			if (start)
+				set_blocksize(bdev, config->blksize);
 		} else
 			set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
 		bdput(bdev);
@@ -328,7 +329,7 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
 	config->blksize = blocksize;
 	config->bytesize = blocksize * nr_blocks;
 	if (nbd->task_recv != NULL)
-		nbd_size_update(nbd);
+		nbd_size_update(nbd, false);
 }
 
 static void nbd_complete_rq(struct request *req)
@@ -1308,7 +1309,7 @@ static int nbd_start_device(struct nbd_device *nbd)
 		args->index = i;
 		queue_work(nbd->recv_workq, &args->work);
 	}
-	nbd_size_update(nbd);
+	nbd_size_update(nbd, true);
 	return error;
 }
 
@@ -1517,6 +1518,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
 	if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
 			bdev->bd_openers == 0)
 		nbd_disconnect_and_put(nbd);
+	bdput(bdev);
 
 	nbd_config_put(nbd);
 	nbd_put(nbd);
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index d2e7db43a52a..c24d9b5ad81a 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -47,6 +47,8 @@ struct nullb_device {
 	unsigned int nr_zones_closed;
 	struct blk_zone *zones;
 	sector_t zone_size_sects;
+	spinlock_t zone_lock;
+	unsigned long *zone_locks;
 
 	unsigned long size; /* device size in MB */
 	unsigned long completion_nsec; /* time in ns to complete a request */
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 7d94f2d47a6a..beb34b4f76b0 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/vmalloc.h>
+#include <linux/bitmap.h>
 #include "null_blk.h"
 
 #define CREATE_TRACE_POINTS
@@ -45,6 +46,22 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
 	if (!dev->zones)
 		return -ENOMEM;
 
+	/*
+	 * With memory backing, the zone_lock spinlock needs to be temporarily
+	 * released to avoid scheduling in atomic context. To guarantee zone
+	 * information protection, use a bitmap to lock zones with
+	 * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
+	 * implies that the queue is marked with BLK_MQ_F_BLOCKING.
+	 */
+	spin_lock_init(&dev->zone_lock);
+	if (dev->memory_backed) {
+		dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
+		if (!dev->zone_locks) {
+			kvfree(dev->zones);
+			return -ENOMEM;
+		}
+	}
+
 	if (dev->zone_nr_conv >= dev->nr_zones) {
 		dev->zone_nr_conv = dev->nr_zones - 1;
 		pr_info("changed the number of conventional zones to %u",
@@ -123,15 +140,31 @@ int null_register_zoned_dev(struct nullb *nullb)
 
 void null_free_zoned_dev(struct nullb_device *dev)
 {
+	bitmap_free(dev->zone_locks);
 	kvfree(dev->zones);
 }
 
+static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
+{
+	if (dev->memory_backed)
+		wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+	spin_lock_irq(&dev->zone_lock);
+}
+
+static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
+{
+	spin_unlock_irq(&dev->zone_lock);
+
+	if (dev->memory_backed)
+		clear_and_wake_up_bit(zno, dev->zone_locks);
+}
+
 int null_report_zones(struct gendisk *disk, sector_t sector,
 		unsigned int nr_zones, report_zones_cb cb, void *data)
 {
 	struct nullb *nullb = disk->private_data;
 	struct nullb_device *dev = nullb->dev;
-	unsigned int first_zone, i;
+	unsigned int first_zone, i, zno;
 	struct blk_zone zone;
 	int error;
 
@@ -142,15 +175,18 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
 	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
 	trace_nullb_report_zones(nullb, nr_zones);
 
-	for (i = 0; i < nr_zones; i++) {
+	zno = first_zone;
+	for (i = 0; i < nr_zones; i++, zno++) {
 		/*
 		 * Stacked DM target drivers will remap the zone information by
 		 * modifying the zone information passed to the report callback.
 		 * So use a local copy to avoid corruption of the device zone
 		 * array.
 		 */
-		memcpy(&zone, &dev->zones[first_zone + i],
-		       sizeof(struct blk_zone));
+		null_lock_zone(dev, zno);
+		memcpy(&zone, &dev->zones[zno], sizeof(struct blk_zone));
+		null_unlock_zone(dev, zno);
+
 		error = cb(&zone, i, data);
 		if (error)
 			return error;
@@ -159,6 +195,10 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
 	return nr_zones;
 }
 
+/*
+ * This is called in the case of memory backing from null_process_cmd()
+ * with the target zone already locked.
+ */
 size_t null_zone_valid_read_len(struct nullb *nullb,
 				sector_t sector, unsigned int len)
 {
@@ -295,22 +335,26 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
 		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
 
+	null_lock_zone(dev, zno);
+
 	switch (zone->cond) {
 	case BLK_ZONE_COND_FULL:
 		/* Cannot write to a full zone */
-		return BLK_STS_IOERR;
+		ret = BLK_STS_IOERR;
+		goto unlock;
 	case BLK_ZONE_COND_EMPTY:
 	case BLK_ZONE_COND_CLOSED:
 		ret = null_check_zone_resources(dev, zone);
 		if (ret != BLK_STS_OK)
-			return ret;
+			goto unlock;
 		break;
 	case BLK_ZONE_COND_IMP_OPEN:
 	case BLK_ZONE_COND_EXP_OPEN:
 		break;
 	default:
 		/* Invalid zone condition */
-		return BLK_STS_IOERR;
+		ret = BLK_STS_IOERR;
+		goto unlock;
 	}
 
 	/*
@@ -326,11 +370,14 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 		else
 			cmd->rq->__sector = sector;
 	} else if (sector != zone->wp) {
-		return BLK_STS_IOERR;
+		ret = BLK_STS_IOERR;
+		goto unlock;
 	}
 
-	if (zone->wp + nr_sectors > zone->start + zone->capacity)
-		return BLK_STS_IOERR;
+	if (zone->wp + nr_sectors > zone->start + zone->capacity) {
+		ret = BLK_STS_IOERR;
+		goto unlock;
+	}
 
 	if (zone->cond == BLK_ZONE_COND_CLOSED) {
 		dev->nr_zones_closed--;
@@ -341,9 +388,19 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 	if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
 		zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
+	/*
+	 * Memory backing allocation may sleep: release the zone_lock spinlock
+	 * to avoid scheduling in atomic context. Zone operation atomicity is
+	 * still guaranteed through the zone_locks bitmap.
+	 */
+	if (dev->memory_backed)
+		spin_unlock_irq(&dev->zone_lock);
 	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+	if (dev->memory_backed)
+		spin_lock_irq(&dev->zone_lock);
+
 	if (ret != BLK_STS_OK)
-		return ret;
+		goto unlock;
 
 	zone->wp += nr_sectors;
 	if (zone->wp == zone->start + zone->capacity) {
@@ -353,7 +410,12 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 			dev->nr_zones_imp_open--;
 		zone->cond = BLK_ZONE_COND_FULL;
 	}
-	return BLK_STS_OK;
+	ret = BLK_STS_OK;
+
+unlock:
+	null_unlock_zone(dev, zno);
+
+	return ret;
 }
 
 static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zone)
@@ -464,16 +526,30 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
 				   sector_t sector)
 {
 	struct nullb_device *dev = cmd->nq->dev;
-	unsigned int zone_no = null_zone_no(dev, sector);
-	struct blk_zone *zone = &dev->zones[zone_no];
-	blk_status_t ret = BLK_STS_OK;
+	unsigned int zone_no;
+	struct blk_zone *zone;
+	blk_status_t ret;
 	size_t i;
 
+	if (op == REQ_OP_ZONE_RESET_ALL) {
+		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
+			null_lock_zone(dev, i);
+			zone = &dev->zones[i];
+			if (zone->cond != BLK_ZONE_COND_EMPTY) {
+				null_reset_zone(dev, zone);
+				trace_nullb_zone_op(cmd, i, zone->cond);
+			}
+			null_unlock_zone(dev, i);
+		}
+		return BLK_STS_OK;
+	}
+
+	zone_no = null_zone_no(dev, sector);
+	zone = &dev->zones[zone_no];
+
+	null_lock_zone(dev, zone_no);
+
 	switch (op) {
-	case REQ_OP_ZONE_RESET_ALL:
-		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++)
-			null_reset_zone(dev, &dev->zones[i]);
-		break;
 	case REQ_OP_ZONE_RESET:
 		ret = null_reset_zone(dev, zone);
 		break;
@@ -487,30 +563,44 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
 		ret = null_finish_zone(dev, zone);
 		break;
 	default:
-		return BLK_STS_NOTSUPP;
+		ret = BLK_STS_NOTSUPP;
+		break;
 	}
 
 	if (ret == BLK_STS_OK)
 		trace_nullb_zone_op(cmd, zone_no, zone->cond);
 
+	null_unlock_zone(dev, zone_no);
+
 	return ret;
 }
 
 blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
 				    sector_t sector, sector_t nr_sectors)
 {
+	struct nullb_device *dev = cmd->nq->dev;
+	unsigned int zno = null_zone_no(dev, sector);
+	blk_status_t sts;
+
 	switch (op) {
 	case REQ_OP_WRITE:
-		return null_zone_write(cmd, sector, nr_sectors, false);
+		sts = null_zone_write(cmd, sector, nr_sectors, false);
+		break;
 	case REQ_OP_ZONE_APPEND:
-		return null_zone_write(cmd, sector, nr_sectors, true);
+		sts = null_zone_write(cmd, sector, nr_sectors, true);
+		break;
 	case REQ_OP_ZONE_RESET:
 	case REQ_OP_ZONE_RESET_ALL:
 	case REQ_OP_ZONE_OPEN:
 	case REQ_OP_ZONE_CLOSE:
 	case REQ_OP_ZONE_FINISH:
-		return null_zone_mgmt(cmd, op, sector);
+		sts = null_zone_mgmt(cmd, op, sector);
+		break;
	default:
-		return null_process_cmd(cmd, op, sector, nr_sectors);
+		null_lock_zone(dev, zno);
+		sts = null_process_cmd(cmd, op, sector, nr_sectors);
+		null_unlock_zone(dev, zno);
 	}
+
+	return sts;
 }
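The null_blk hunks above implement a two-level locking scheme: a driver-wide spinlock protects zone metadata, while a per-zone bit lock, used only when memory backing is enabled, keeps a whole zone operation atomic even though the spinlock is dropped around a page allocation that may sleep. The sketch below distills that pattern; it is a minimal illustration, not the driver's code. The my_* names are invented, and it assumes the request queue was created with BLK_MQ_F_BLOCKING so that sleeping in wait_on_bit_lock_io() is permitted (the kernel APIs used are real):

/* Hypothetical condensed form of null_blk's zone locking (see diff above). */
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait_bit.h>

struct my_zoned_dev {
        bool memory_backed;             /* writes may allocate backing pages */
        unsigned int nr_zones;
        spinlock_t zone_lock;           /* guards short zone metadata updates */
        unsigned long *zone_locks;      /* one bit per zone, memory backing only */
};

static int my_zone_locks_init(struct my_zoned_dev *dev)
{
        spin_lock_init(&dev->zone_lock);
        if (!dev->memory_backed)
                return 0;
        dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
        return dev->zone_locks ? 0 : -ENOMEM;
}

static void my_lock_zone(struct my_zoned_dev *dev, unsigned int zno)
{
        /* May sleep: only legal on a BLK_MQ_F_BLOCKING queue. */
        if (dev->memory_backed)
                wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
        spin_lock_irq(&dev->zone_lock);
}

static void my_unlock_zone(struct my_zoned_dev *dev, unsigned int zno)
{
        spin_unlock_irq(&dev->zone_lock);
        if (dev->memory_backed)
                clear_and_wake_up_bit(zno, dev->zone_locks);
}

static void my_zone_locks_free(struct my_zoned_dev *dev)
{
        bitmap_free(dev->zone_locks);   /* NULL-safe, like kfree() */
}

The point of the split is that the bit lock serializes an entire zone operation, so null_zone_write() can drop the spinlock across the sleeping null_process_cmd() call and retake it afterwards without another request observing a half-updated write pointer.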
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 8d581c7536fb..eb8ef65778c3 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -443,22 +443,27 @@ static void ace_fix_driveid(u16 *id)
 #define ACE_FSM_NUM_STATES	11
 
 /* Set flag to exit FSM loop and reschedule tasklet */
-static inline void ace_fsm_yield(struct ace_device *ace)
+static inline void ace_fsm_yieldpoll(struct ace_device *ace)
 {
-	dev_dbg(ace->dev, "ace_fsm_yield()\n");
 	tasklet_schedule(&ace->fsm_tasklet);
 	ace->fsm_continue_flag = 0;
 }
 
+static inline void ace_fsm_yield(struct ace_device *ace)
+{
+	dev_dbg(ace->dev, "%s()\n", __func__);
+	ace_fsm_yieldpoll(ace);
+}
+
 /* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
 static inline void ace_fsm_yieldirq(struct ace_device *ace)
 {
 	dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
 
-	if (!ace->irq)
-		/* No IRQ assigned, so need to poll */
-		tasklet_schedule(&ace->fsm_tasklet);
-	ace->fsm_continue_flag = 0;
+	if (ace->irq > 0)
+		ace->fsm_continue_flag = 0;
+	else
+		ace_fsm_yieldpoll(ace);
 }
 
 static bool ace_has_next_request(struct request_queue *q)
@@ -1053,12 +1058,12 @@ static int ace_setup(struct ace_device *ace)
 		  ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
 
 	/* Now we can hook up the irq handler */
-	if (ace->irq) {
+	if (ace->irq > 0) {
 		rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
 		if (rc) {
 			/* Failure - fall back to polled mode */
 			dev_err(ace->dev, "request_irq failed\n");
-			ace->irq = 0;
+			ace->irq = rc;
 		}
 	}
 
@@ -1110,7 +1115,7 @@ static void ace_teardown(struct ace_device *ace)
 
 	tasklet_kill(&ace->fsm_tasklet);
 
-	if (ace->irq)
+	if (ace->irq > 0)
 		free_irq(ace->irq, ace);
 
 	iounmap(ace->baseaddr);
@@ -1123,11 +1128,6 @@ static int ace_alloc(struct device *dev, int id, resource_size_t physaddr,
 	int rc;
 	dev_dbg(dev, "ace_alloc(%p)\n", dev);
 
-	if (!physaddr) {
-		rc = -ENODEV;
-		goto err_noreg;
-	}
-
 	/* Allocate and initialize the ace device structure */
 	ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
 	if (!ace) {
@@ -1153,7 +1153,6 @@ err_setup:
 	dev_set_drvdata(dev, NULL);
 	kfree(ace);
 err_alloc:
-err_noreg:
 	dev_err(dev, "could not initialize device, err=%i\n", rc);
 	return rc;
 }
@@ -1176,10 +1175,11 @@ static void ace_free(struct device *dev)
 
 static int ace_probe(struct platform_device *dev)
 {
-	resource_size_t physaddr = 0;
 	int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
+	resource_size_t physaddr;
+	struct resource *res;
 	u32 id = dev->id;
-	int irq = 0;
+	int irq;
 	int i;
 
 	dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
@@ -1190,12 +1190,15 @@ static int ace_probe(struct platform_device *dev)
 	if (of_find_property(dev->dev.of_node, "8-bit", NULL))
 		bus_width = ACE_BUS_WIDTH_8;
 
-	for (i = 0; i < dev->num_resources; i++) {
-		if (dev->resource[i].flags & IORESOURCE_MEM)
-			physaddr = dev->resource[i].start;
-		if (dev->resource[i].flags & IORESOURCE_IRQ)
-			irq = dev->resource[i].start;
-	}
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	physaddr = res->start;
+	if (!physaddr)
+		return -ENODEV;
+
+	irq = platform_get_irq_optional(dev, 0);
 
 	/* Call the bus-independent setup code */
 	return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
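The xsysace conversion above replaces the open-coded scan of dev->resource[] with platform_get_resource() and platform_get_irq_optional(), and stores a negative errno in ace->irq so that a single "irq > 0" test selects interrupt-driven versus polled operation. Below is a condensed sketch of that probe() pattern, with invented my_* names standing in for the driver's own; it is an illustration of the idiom, not xsysace itself:

/* Hypothetical probe() showing the resource/IRQ lookup pattern adopted above. */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int my_probe(struct platform_device *pdev)
{
        resource_size_t physaddr;
        struct resource *res;
        int irq, rc;

        /* Ask the platform core for the MMIO window instead of walking
         * pdev->resource[] by hand. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -EINVAL;

        physaddr = res->start;
        if (!physaddr)
                return -ENODEV;

        /* A negative return means "no IRQ wired up": keep the errno and
         * fall back to polling rather than failing the probe. */
        irq = platform_get_irq_optional(pdev, 0);
        if (irq > 0) {
                rc = request_irq(irq, my_interrupt, 0, "my_dev", pdev);
                if (rc)
                        irq = rc;       /* polled mode from here on */
        }

        /* ... map physaddr, register the device, etc. ... */
        return 0;
}

Keeping the negative value from platform_get_irq_optional() (or from a failed request_irq()) means every later check reduces to "irq > 0", which is exactly the test the patch adds in ace_fsm_yieldirq(), ace_setup() and ace_teardown().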