author      David S. Miller <davem@davemloft.net>    2019-03-27 17:37:58 -0700
committer   David S. Miller <davem@davemloft.net>    2019-03-27 17:37:58 -0700
commit      356d71e00d278d865f8c7f68adebd6ce4698a7e2 (patch)
tree        8cb2dabf7440b56c6dee76b4253210c6b603b98e /drivers
parent      df453700e8d81b1bdafdf684365ee2b9431fb702 (diff)
parent      1a9df9e29c2afecf6e3089442d429b377279ca3c (diff)
download    linux-356d71e00d278d865f8c7f68adebd6ce4698a7e2.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'drivers')
268 files changed, 3452 insertions, 2156 deletions
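The same drivers-limited view can be reproduced locally with plain git. This is a minimal sketch, assuming a Linux tree that already contains this merge (for example one cloned from the davem/net repository linked above) and assuming the page's diff is taken against the first listed parent, which is the usual convention for merge commits; the abbreviated hashes are prefixes of the commit and parent IDs shown in the header.

    $ git diff --stat 356d71e00d27^1 356d71e00d27 -- drivers/        # the 268-file summary above
    $ git diff 356d71e00d27^1 356d71e00d27 -- drivers/acpi/nfit/core.c   # hunks for a single file

`356d71e00d27^1` names the first parent (df453700e8d8...); substituting `^2` would instead show what the merged net branch (1a9df9e29c2a...) brought in.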
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index df8979008dd4..5a389a4f4f65 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -2956,11 +2956,15 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, ndr_desc->res = &res; ndr_desc->provider_data = nfit_spa; ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; - if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) + if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) { ndr_desc->numa_node = acpi_map_pxm_to_online_node( spa->proximity_domain); - else + ndr_desc->target_node = acpi_map_pxm_to_node( + spa->proximity_domain); + } else { ndr_desc->numa_node = NUMA_NO_NODE; + ndr_desc->target_node = NUMA_NO_NODE; + } /* * Persistence domain bits are hierarchical, if diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 7bbbf8256a41..867f6e3f2b4f 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -84,6 +84,7 @@ int acpi_map_pxm_to_node(int pxm) return node; } +EXPORT_SYMBOL(acpi_map_pxm_to_node); /** * acpi_map_pxm_to_online_node - Map proximity ID to online node diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 78db97687f26..c4b06cc075f9 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -800,6 +800,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) match.hrv = hrv; dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb); + put_device(dev); return !!dev; } EXPORT_SYMBOL(acpi_dev_present); diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 41b706403ef7..b4dae624b9af 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -26,19 +26,36 @@ #define to_amba_driver(d) container_of(d, struct amba_driver, drv) -static const struct amba_id * -amba_lookup(const struct amba_id *table, struct amba_device *dev) +/* called on periphid match and class 0x9 coresight device. */ +static int +amba_cs_uci_id_match(const struct amba_id *table, struct amba_device *dev) { int ret = 0; + struct amba_cs_uci_id *uci; + + uci = table->data; + /* no table data or zero mask - return match on periphid */ + if (!uci || (uci->devarch_mask == 0)) + return 1; + + /* test against read devtype and masked devarch value */ + ret = (dev->uci.devtype == uci->devtype) && + ((dev->uci.devarch & uci->devarch_mask) == uci->devarch); + return ret; +} + +static const struct amba_id * +amba_lookup(const struct amba_id *table, struct amba_device *dev) +{ while (table->mask) { - ret = (dev->periphid & table->mask) == table->id; - if (ret) - break; + if (((dev->periphid & table->mask) == table->id) && + ((dev->cid != CORESIGHT_CID) || + (amba_cs_uci_id_match(table, dev)))) + return table; table++; } - - return ret ? 
table : NULL; + return NULL; } static int amba_match(struct device *dev, struct device_driver *drv) @@ -399,10 +416,22 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent) cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8); + if (cid == CORESIGHT_CID) { + /* set the base to the start of the last 4k block */ + void __iomem *csbase = tmp + size - 4096; + + dev->uci.devarch = + readl(csbase + UCI_REG_DEVARCH_OFFSET); + dev->uci.devtype = + readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff; + } + amba_put_disable_pclk(dev); - if (cid == AMBA_CID || cid == CORESIGHT_CID) + if (cid == AMBA_CID || cid == CORESIGHT_CID) { dev->periphid = pid; + dev->cid = cid; + } if (!dev->periphid) ret = -ENODEV; diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index 57410f9c5d44..c52c738e554a 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -164,9 +164,7 @@ config ARM_CHARLCD line and the Linux version on the second line, but that's still useful. -endif # AUXDISPLAY - -menuconfig PANEL +menuconfig PARPORT_PANEL tristate "Parallel port LCD/Keypad Panel support" depends on PARPORT select CHARLCD @@ -178,7 +176,7 @@ menuconfig PANEL compiled as a module, or linked into the kernel and started at boot. If you don't understand what all this is about, say N. -if PANEL +if PARPORT_PANEL config PANEL_PARPORT int "Default parallel port number (0=LPT1)" @@ -419,8 +417,11 @@ config PANEL_LCD_PIN_BL Default for the 'BL' pin in custom profile is '0' (uncontrolled). +endif # PARPORT_PANEL + config PANEL_CHANGE_MESSAGE bool "Change LCD initialization message ?" + depends on CHARLCD default "n" ---help--- This allows you to replace the boot message indicating the kernel version @@ -444,7 +445,34 @@ config PANEL_BOOT_MESSAGE An empty message will only clear the display at driver init time. Any other printf()-formatted message is valid with newline and escape codes. 
-endif # PANEL +choice + prompt "Backlight initial state" + default CHARLCD_BL_FLASH + + config CHARLCD_BL_OFF + bool "Off" + help + Backlight is initially turned off + + config CHARLCD_BL_ON + bool "On" + help + Backlight is initially turned on + + config CHARLCD_BL_FLASH + bool "Flash" + help + Backlight is flashed briefly on init + +endchoice + +endif # AUXDISPLAY + +config PANEL + tristate "Parallel port LCD/Keypad Panel support (OLD OPTION)" + depends on PARPORT + select AUXDISPLAY + select PARPORT_PANEL config CHARLCD tristate "Character LCD core support" if COMPILE_TEST diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile index 7ac6776ca3f6..cf54b5efb07e 100644 --- a/drivers/auxdisplay/Makefile +++ b/drivers/auxdisplay/Makefile @@ -10,4 +10,4 @@ obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o obj-$(CONFIG_HD44780) += hd44780.o obj-$(CONFIG_HT16K33) += ht16k33.o -obj-$(CONFIG_PANEL) += panel.o +obj-$(CONFIG_PARPORT_PANEL) += panel.o diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index 60e0b772673f..92745efefb54 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -91,7 +91,7 @@ struct charlcd_priv { unsigned long long drvdata[0]; }; -#define to_priv(p) container_of(p, struct charlcd_priv, lcd) +#define charlcd_to_priv(p) container_of(p, struct charlcd_priv, lcd) /* Device single-open policy control */ static atomic_t charlcd_available = ATOMIC_INIT(1); @@ -105,7 +105,7 @@ static void long_sleep(int ms) /* turn the backlight on or off */ static void charlcd_backlight(struct charlcd *lcd, int on) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); if (!lcd->ops->backlight) return; @@ -134,7 +134,7 @@ static void charlcd_bl_off(struct work_struct *work) /* turn the backlight on for a little while */ void charlcd_poke(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); if (!lcd->ops->backlight) return; @@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(charlcd_poke); static void charlcd_gotoxy(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); unsigned int addr; /* @@ -170,7 +170,7 @@ static void charlcd_gotoxy(struct charlcd *lcd) static void charlcd_home(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); priv->addr.x = 0; priv->addr.y = 0; @@ -179,7 +179,7 @@ static void charlcd_home(struct charlcd *lcd) static void charlcd_print(struct charlcd *lcd, char c) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); if (priv->addr.x < lcd->bwidth) { if (lcd->char_conv) @@ -211,7 +211,7 @@ static void charlcd_clear_fast(struct charlcd *lcd) /* clears the display and resets X/Y */ static void charlcd_clear_display(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CLEAR); priv->addr.x = 0; @@ -223,7 +223,7 @@ static void charlcd_clear_display(struct charlcd *lcd) static int charlcd_init_display(struct charlcd *lcd) { void (*write_cmd_raw)(struct charlcd *lcd, int cmd); - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); u8 init; if (lcd->ifwidth != 4 && lcd->ifwidth != 8) @@ -369,7 +369,7 @@ static bool parse_xy(const char *s, unsigned long *x, 
unsigned long *y) static inline int handle_lcd_special_code(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); /* LCD special codes */ @@ -580,7 +580,7 @@ static inline int handle_lcd_special_code(struct charlcd *lcd) static void charlcd_write_char(struct charlcd *lcd, char c) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); /* first, we'll test if we're in escape mode */ if ((c != '\n') && priv->esc_seq.len >= 0) { @@ -705,7 +705,7 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf, static int charlcd_open(struct inode *inode, struct file *file) { - struct charlcd_priv *priv = to_priv(the_charlcd); + struct charlcd_priv *priv = charlcd_to_priv(the_charlcd); int ret; ret = -EBUSY; @@ -763,10 +763,24 @@ static void charlcd_puts(struct charlcd *lcd, const char *s) } } +#ifdef CONFIG_PANEL_BOOT_MESSAGE +#define LCD_INIT_TEXT CONFIG_PANEL_BOOT_MESSAGE +#else +#define LCD_INIT_TEXT "Linux-" UTS_RELEASE "\n" +#endif + +#ifdef CONFIG_CHARLCD_BL_ON +#define LCD_INIT_BL "\x1b[L+" +#elif defined(CONFIG_CHARLCD_BL_FLASH) +#define LCD_INIT_BL "\x1b[L*" +#else +#define LCD_INIT_BL "\x1b[L-" +#endif + /* initialize the LCD driver */ static int charlcd_init(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); int ret; if (lcd->ops->backlight) { @@ -784,13 +798,8 @@ static int charlcd_init(struct charlcd *lcd) return ret; /* display a short message */ -#ifdef CONFIG_PANEL_CHANGE_MESSAGE -#ifdef CONFIG_PANEL_BOOT_MESSAGE - charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE); -#endif -#else - charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\n"); -#endif + charlcd_puts(lcd, "\x1b[Lc\x1b[Lb" LCD_INIT_BL LCD_INIT_TEXT); + /* clear the display on the next device opening */ priv->must_clear = true; charlcd_home(lcd); @@ -818,6 +827,12 @@ struct charlcd *charlcd_alloc(unsigned int drvdata_size) } EXPORT_SYMBOL_GPL(charlcd_alloc); +void charlcd_free(struct charlcd *lcd) +{ + kfree(charlcd_to_priv(lcd)); +} +EXPORT_SYMBOL_GPL(charlcd_free); + static int panel_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { @@ -866,7 +881,7 @@ EXPORT_SYMBOL_GPL(charlcd_register); int charlcd_unregister(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); unregister_reboot_notifier(&panel_notifier); charlcd_puts(lcd, "\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-"); diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c index 9ad93ea42fdc..ab15b64707ad 100644 --- a/drivers/auxdisplay/hd44780.c +++ b/drivers/auxdisplay/hd44780.c @@ -271,7 +271,7 @@ static int hd44780_probe(struct platform_device *pdev) return 0; fail: - kfree(lcd); + charlcd_free(lcd); return ret; } @@ -280,6 +280,8 @@ static int hd44780_remove(struct platform_device *pdev) struct charlcd *lcd = platform_get_drvdata(pdev); charlcd_unregister(lcd); + + charlcd_free(lcd); return 0; } diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index 21b9b2f2470a..e06de63497cf 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -1620,7 +1620,7 @@ err_lcd_unreg: if (lcd.enabled) charlcd_unregister(lcd.charlcd); err_unreg_device: - kfree(lcd.charlcd); + charlcd_free(lcd.charlcd); lcd.charlcd = NULL; parport_unregister_device(pprt); pprt = NULL; @@ -1647,7 +1647,7 @@ static void panel_detach(struct 
parport *port) if (lcd.enabled) { charlcd_unregister(lcd.charlcd); lcd.initialized = false; - kfree(lcd.charlcd); + charlcd_free(lcd.charlcd); lcd.charlcd = NULL; } diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 048cbf7d5233..cb8347500ce2 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -88,6 +88,7 @@ unsigned long __weak memory_block_size_bytes(void) { return MIN_MEMORY_BLOCK_SIZE; } +EXPORT_SYMBOL_GPL(memory_block_size_bytes); static unsigned long get_memory_block_size(void) { diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 76c9969b7124..96a6dc9d305c 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1469,12 +1469,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (IS_ERR(gpd_data)) return PTR_ERR(gpd_data); - genpd_lock(genpd); - ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; if (ret) goto out; + genpd_lock(genpd); + dev_pm_domain_set(dev, &genpd->domain); genpd->device_count++; @@ -1482,9 +1482,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); - out: genpd_unlock(genpd); - + out: if (ret) genpd_free_dev_data(dev, gpd_data); else @@ -1533,15 +1532,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, genpd->device_count--; genpd->max_off_time_changed = true; - if (genpd->detach_dev) - genpd->detach_dev(genpd, dev); - dev_pm_domain_set(dev, NULL); list_del_init(&pdd->list_node); genpd_unlock(genpd); + if (genpd->detach_dev) + genpd->detach_dev(genpd, dev); + genpd_free_dev_data(dev, gpd_data); return 0; diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 1fad9291f6aa..7fc5a18e02ad 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -472,7 +472,7 @@ static int software_node_read_string_array(const struct fwnode_handle *fwnode, val, nval); } -struct fwnode_handle * +static struct fwnode_handle * software_node_get_parent(const struct fwnode_handle *fwnode) { struct software_node *swnode = to_software_node(fwnode); @@ -481,7 +481,7 @@ software_node_get_parent(const struct fwnode_handle *fwnode) NULL; } -struct fwnode_handle * +static struct fwnode_handle * software_node_get_next_child(const struct fwnode_handle *fwnode, struct fwnode_handle *child) { diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 1e6edd568214..bf1c61cab8eb 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -656,7 +656,7 @@ static int loop_validate_file(struct file *file, struct block_device *bdev) return -EBADF; l = f->f_mapping->host->i_bdev->bd_disk->private_data; - if (l->lo_state == Lo_unbound) { + if (l->lo_state != Lo_bound) { return -EINVAL; } f = l->lo_backing_file; diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 96670eefaeb2..377a694dc228 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -749,8 +749,12 @@ static int pcd_detect(void) return 0; printk("%s: No CD-ROM drive found\n", name); - for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { + blk_cleanup_queue(cd->disk->queue); + cd->disk->queue = NULL; + blk_mq_free_tag_set(&cd->tag_set); put_disk(cd->disk); + } pi_unregister_driver(par_drv); return -1; } diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index e92e7a8eeeb2..103b617cdc31 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -761,8 +761,12 
@@ static int pf_detect(void) return 0; printk("%s: No ATAPI disk detected\n", name); - for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) + for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { + blk_cleanup_queue(pf->disk->queue); + pf->disk->queue = NULL; + blk_mq_free_tag_set(&pf->tag_set); put_disk(pf->disk); + } pi_unregister_driver(par_drv); return -1; } @@ -1047,13 +1051,15 @@ static void __exit pf_exit(void) int unit; unregister_blkdev(major, name); for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { - if (!pf->present) - continue; - del_gendisk(pf->disk); + if (pf->present) + del_gendisk(pf->disk); + blk_cleanup_queue(pf->disk->queue); blk_mq_free_tag_set(&pf->tag_set); put_disk(pf->disk); - pi_release(pf->pi); + + if (pf->present) + pi_release(pf->pi); } } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4ba967d65cf9..2210c1b9491b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -833,7 +833,7 @@ static int parse_rbd_opts_token(char *c, void *private) pctx->opts->queue_depth = intval; break; case Opt_alloc_size: - if (intval < 1) { + if (intval < SECTOR_SIZE) { pr_err("alloc_size out of range\n"); return -EINVAL; } @@ -924,23 +924,6 @@ static void rbd_put_client(struct rbd_client *rbdc) kref_put(&rbdc->kref, rbd_client_release); } -static int wait_for_latest_osdmap(struct ceph_client *client) -{ - u64 newest_epoch; - int ret; - - ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch); - if (ret) - return ret; - - if (client->osdc.osdmap->epoch >= newest_epoch) - return 0; - - ceph_osdc_maybe_request_map(&client->osdc); - return ceph_monc_wait_osdmap(&client->monc, newest_epoch, - client->options->mount_timeout); -} - /* * Get a ceph client with specific addr and configuration, if one does * not exist create it. Either way, ceph_opts is consumed by this @@ -960,7 +943,8 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts) * Using an existing client. Make sure ->pg_pools is up to * date before we look up the pool id in do_rbd_add(). 
*/ - ret = wait_for_latest_osdmap(rbdc->client); + ret = ceph_wait_for_latest_osdmap(rbdc->client, + rbdc->client->options->mount_timeout); if (ret) { rbd_warn(NULL, "failed to get latest osdmap: %d", ret); rbd_put_client(rbdc); @@ -4203,12 +4187,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) q->limits.max_sectors = queue_max_hw_sectors(q); blk_queue_max_segments(q, USHRT_MAX); blk_queue_max_segment_size(q, UINT_MAX); - blk_queue_io_min(q, objset_bytes); - blk_queue_io_opt(q, objset_bytes); + blk_queue_io_min(q, rbd_dev->opts->alloc_size); + blk_queue_io_opt(q, rbd_dev->opts->alloc_size); if (rbd_dev->opts->trim) { blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); - q->limits.discard_granularity = objset_bytes; + q->limits.discard_granularity = rbd_dev->opts->alloc_size; blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); } diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index a4bc74e72c39..24896ffb04ed 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -926,7 +926,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir) int err, i, j; struct xen_blkif *blkif = ring->blkif; struct xenbus_device *dev = blkif->be->dev; - unsigned int ring_page_order, nr_grefs, evtchn; + unsigned int nr_grefs, evtchn; err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u", &evtchn); @@ -936,43 +936,42 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir) return err; } - err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u", - &ring_page_order); - if (err != 1) { - err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]); + nr_grefs = blkif->nr_ring_pages; + + if (unlikely(!nr_grefs)) { + WARN_ON(true); + return -EINVAL; + } + + for (i = 0; i < nr_grefs; i++) { + char ring_ref_name[RINGREF_NAME_LEN]; + + snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i); + err = xenbus_scanf(XBT_NIL, dir, ring_ref_name, + "%u", &ring_ref[i]); + if (err != 1) { + if (nr_grefs == 1) + break; + err = -EINVAL; - xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir); + xenbus_dev_fatal(dev, err, "reading %s/%s", + dir, ring_ref_name); return err; } - nr_grefs = 1; - } else { - unsigned int i; + } - if (ring_page_order > xen_blkif_max_ring_order) { + if (err != 1) { + WARN_ON(nr_grefs != 1); + + err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", + &ring_ref[0]); + if (err != 1) { err = -EINVAL; - xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d", - dir, ring_page_order, - xen_blkif_max_ring_order); + xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir); return err; } - - nr_grefs = 1 << ring_page_order; - for (i = 0; i < nr_grefs; i++) { - char ring_ref_name[RINGREF_NAME_LEN]; - - snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i); - err = xenbus_scanf(XBT_NIL, dir, ring_ref_name, - "%u", &ring_ref[i]); - if (err != 1) { - err = -EINVAL; - xenbus_dev_fatal(dev, err, "reading %s/%s", - dir, ring_ref_name); - return err; - } - } } - blkif->nr_ring_pages = nr_grefs; for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) { req = kzalloc(sizeof(*req), GFP_KERNEL); @@ -1023,6 +1022,7 @@ fail: static int connect_ring(struct backend_info *be) { struct xenbus_device *dev = be->dev; + struct xen_blkif *blkif = be->blkif; unsigned int pers_grants; char protocol[64] = ""; int err, i; @@ -1030,28 +1030,29 @@ static int connect_ring(struct backend_info *be) size_t 
xspathsize; const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */ unsigned int requested_num_queues = 0; + unsigned int ring_page_order; pr_debug("%s %s\n", __func__, dev->otherend); - be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT; + blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT; err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol", "%63s", protocol); if (err <= 0) strcpy(protocol, "unspecified, assuming default"); else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE)) - be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; + blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32)) - be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32; + blkif->blk_protocol = BLKIF_PROTOCOL_X86_32; else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64)) - be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64; + blkif->blk_protocol = BLKIF_PROTOCOL_X86_64; else { xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); return -ENOSYS; } pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent", 0); - be->blkif->vbd.feature_gnt_persistent = pers_grants; - be->blkif->vbd.overflow_max_grants = 0; + blkif->vbd.feature_gnt_persistent = pers_grants; + blkif->vbd.overflow_max_grants = 0; /* * Read the number of hardware queues from frontend. @@ -1067,16 +1068,30 @@ static int connect_ring(struct backend_info *be) requested_num_queues, xenblk_max_queues); return -ENOSYS; } - be->blkif->nr_rings = requested_num_queues; - if (xen_blkif_alloc_rings(be->blkif)) + blkif->nr_rings = requested_num_queues; + if (xen_blkif_alloc_rings(blkif)) return -ENOMEM; pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename, - be->blkif->nr_rings, be->blkif->blk_protocol, protocol, + blkif->nr_rings, blkif->blk_protocol, protocol, pers_grants ? 
"persistent grants" : ""); - if (be->blkif->nr_rings == 1) - return read_per_ring_refs(&be->blkif->rings[0], dev->otherend); + ring_page_order = xenbus_read_unsigned(dev->otherend, + "ring-page-order", 0); + + if (ring_page_order > xen_blkif_max_ring_order) { + err = -EINVAL; + xenbus_dev_fatal(dev, err, + "requested ring page order %d exceed max:%d", + ring_page_order, + xen_blkif_max_ring_order); + return err; + } + + blkif->nr_ring_pages = 1 << ring_page_order; + + if (blkif->nr_rings == 1) + return read_per_ring_refs(&blkif->rings[0], dev->otherend); else { xspathsize = strlen(dev->otherend) + xenstore_path_ext_size; xspath = kmalloc(xspathsize, GFP_KERNEL); @@ -1085,10 +1100,10 @@ static int connect_ring(struct backend_info *be) return -ENOMEM; } - for (i = 0; i < be->blkif->nr_rings; i++) { + for (i = 0; i < blkif->nr_rings; i++) { memset(xspath, 0, xspathsize); snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i); - err = read_per_ring_refs(&be->blkif->rings[i], xspath); + err = read_per_ring_refs(&blkif->rings[i], xspath); if (err) { kfree(xspath); return err; diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index a8b20b65bd4b..aa4ec53281ce 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -1261,6 +1261,13 @@ static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void) return ARCH_TIMER_PHYS_SECURE_PPI; } +static void __init arch_timer_populate_kvm_info(void) +{ + arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI]; + if (is_kernel_in_hyp_mode()) + arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]; +} + static int __init arch_timer_of_init(struct device_node *np) { int i, ret; @@ -1275,7 +1282,7 @@ static int __init arch_timer_of_init(struct device_node *np) for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) arch_timer_ppi[i] = irq_of_parse_and_map(np, i); - arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI]; + arch_timer_populate_kvm_info(); rate = arch_timer_get_cntfrq(); arch_timer_of_configure_rate(rate, np); @@ -1605,7 +1612,7 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table) arch_timer_ppi[ARCH_TIMER_HYP_PPI] = acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI); - arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI]; + arch_timer_populate_kvm_info(); /* * When probing via ACPI, we have no mechanism to override the sysreg diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c index a8dd80576c95..857f8c086274 100644 --- a/drivers/clocksource/clps711x-timer.c +++ b/drivers/clocksource/clps711x-timer.c @@ -31,16 +31,9 @@ static u64 notrace clps711x_sched_clock_read(void) return ~readw(tcd); } -static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base) +static void __init clps711x_clksrc_init(struct clk *clock, void __iomem *base) { - unsigned long rate; - - if (!base) - return -ENOMEM; - if (IS_ERR(clock)) - return PTR_ERR(clock); - - rate = clk_get_rate(clock); + unsigned long rate = clk_get_rate(clock); tcd = base; @@ -48,8 +41,6 @@ static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base) clocksource_mmio_readw_down); sched_clock_register(clps711x_sched_clock_read, 16, rate); - - return 0; } static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id) @@ -67,13 +58,6 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base, struct clock_event_device *clkevt; 
unsigned long rate; - if (!irq) - return -EINVAL; - if (!base) - return -ENOMEM; - if (IS_ERR(clock)) - return PTR_ERR(clock); - clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL); if (!clkevt) return -ENOMEM; @@ -93,31 +77,29 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base, "clps711x-timer", clkevt); } -void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base, - unsigned int irq) -{ - struct clk *tc1 = clk_get_sys("clps711x-timer.0", NULL); - struct clk *tc2 = clk_get_sys("clps711x-timer.1", NULL); - - BUG_ON(_clps711x_clksrc_init(tc1, tc1_base)); - BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq)); -} - -#ifdef CONFIG_TIMER_OF static int __init clps711x_timer_init(struct device_node *np) { unsigned int irq = irq_of_parse_and_map(np, 0); struct clk *clock = of_clk_get(np, 0); void __iomem *base = of_iomap(np, 0); + if (!base) + return -ENOMEM; + if (!irq) + return -EINVAL; + if (IS_ERR(clock)) + return PTR_ERR(clock); + switch (of_alias_get_id(np, "timer")) { case CLPS711X_CLKSRC_CLOCKSOURCE: - return _clps711x_clksrc_init(clock, base); + clps711x_clksrc_init(clock, base); + break; case CLPS711X_CLKSRC_CLOCKEVENT: return _clps711x_clkevt_init(clock, base, irq); default: return -EINVAL; } + + return 0; } TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init); -#endif diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index 54f8a331b53a..37671a5d4ed9 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -67,7 +67,7 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -struct irqaction gic_compare_irqaction = { +static struct irqaction gic_compare_irqaction = { .handler = gic_compare_interrupt, .percpu_dev_id = &gic_clockevent_device, .flags = IRQF_PERCPU | IRQF_TIMER, diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 43f4d5c4d6fa..f987027ca566 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -71,7 +71,7 @@ static u64 tc_get_cycles32(struct clocksource *cs) return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV)); } -void tc_clksrc_suspend(struct clocksource *cs) +static void tc_clksrc_suspend(struct clocksource *cs) { int i; @@ -86,7 +86,7 @@ void tc_clksrc_suspend(struct clocksource *cs) bmr_cache = readl(tcaddr + ATMEL_TC_BMR); } -void tc_clksrc_resume(struct clocksource *cs) +static void tc_clksrc_resume(struct clocksource *cs) { int i; diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index e8163693e936..5e6038fbf115 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -58,7 +58,7 @@ static u64 riscv_sched_clock(void) static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = { .name = "riscv_clocksource", .rating = 300, - .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), + .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, .read = riscv_clocksource_rdtime, }; @@ -120,8 +120,7 @@ static int __init riscv_timer_init_dt(struct device_node *n) return error; } - sched_clock_register(riscv_sched_clock, - BITS_PER_LONG, riscv_timebase); + sched_clock_register(riscv_sched_clock, 64, riscv_timebase); error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING, "clockevents/riscv/timer:starting", diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index c364027638e1..3352da6ed61f 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ 
b/drivers/clocksource/timer-ti-dm.c @@ -586,8 +586,8 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, } /* Optimized set_load which removes costly spin wait in timer_start */ -int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, - unsigned int load) +static int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, + int autoreload, unsigned int load) { u32 l; diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig index e0700bf4893a..5ef624fe3934 100644 --- a/drivers/dax/Kconfig +++ b/drivers/dax/Kconfig @@ -23,12 +23,38 @@ config DEV_DAX config DEV_DAX_PMEM tristate "PMEM DAX: direct access to persistent memory" depends on LIBNVDIMM && NVDIMM_DAX && DEV_DAX + depends on m # until we can kill DEV_DAX_PMEM_COMPAT default DEV_DAX help Support raw access to persistent memory. Note that this driver consumes memory ranges allocated and exported by the libnvdimm sub-system. - Say Y if unsure + Say M if unsure + +config DEV_DAX_KMEM + tristate "KMEM DAX: volatile-use of persistent memory" + default DEV_DAX + depends on DEV_DAX + depends on MEMORY_HOTPLUG # for add_memory() and friends + help + Support access to persistent memory as if it were RAM. This + allows easier use of persistent memory by unmodified + applications. + + To use this feature, a DAX device must be unbound from the + device_dax driver (PMEM DAX) and bound to this kmem driver + on each boot. + + Say N if unsure. + +config DEV_DAX_PMEM_COMPAT + tristate "PMEM DAX: support the deprecated /sys/class/dax interface" + depends on DEV_DAX_PMEM + default DEV_DAX_PMEM + help + Older versions of the libdaxctl library expect to find all + device-dax instances under /sys/class/dax. If libdaxctl in + your distribution is older than v58 say M, otherwise say N. endif diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile index 574286fac87c..81f7d54dadfb 100644 --- a/drivers/dax/Makefile +++ b/drivers/dax/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_DAX) += dax.o obj-$(CONFIG_DEV_DAX) += device_dax.o -obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o +obj-$(CONFIG_DEV_DAX_KMEM) += kmem.o dax-y := super.o -dax_pmem-y := pmem.o +dax-y += bus.o device_dax-y := device.o + +obj-y += pmem/ diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c new file mode 100644 index 000000000000..2109cfe80219 --- /dev/null +++ b/drivers/dax/bus.c @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */ +#include <linux/memremap.h> +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/dax.h> +#include "dax-private.h" +#include "bus.h" + +static struct class *dax_class; + +static DEFINE_MUTEX(dax_bus_lock); + +#define DAX_NAME_LEN 30 +struct dax_id { + struct list_head list; + char dev_name[DAX_NAME_LEN]; +}; + +static int dax_bus_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + /* + * We only ever expect to handle device-dax instances, i.e. 
the + * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero + */ + return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0); +} + +static struct dax_device_driver *to_dax_drv(struct device_driver *drv) +{ + return container_of(drv, struct dax_device_driver, drv); +} + +static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv, + const char *dev_name) +{ + struct dax_id *dax_id; + + lockdep_assert_held(&dax_bus_lock); + + list_for_each_entry(dax_id, &dax_drv->ids, list) + if (sysfs_streq(dax_id->dev_name, dev_name)) + return dax_id; + return NULL; +} + +static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev) +{ + int match; + + mutex_lock(&dax_bus_lock); + match = !!__dax_match_id(dax_drv, dev_name(dev)); + mutex_unlock(&dax_bus_lock); + + return match; +} + +enum id_action { + ID_REMOVE, + ID_ADD, +}; + +static ssize_t do_id_store(struct device_driver *drv, const char *buf, + size_t count, enum id_action action) +{ + struct dax_device_driver *dax_drv = to_dax_drv(drv); + unsigned int region_id, id; + char devname[DAX_NAME_LEN]; + struct dax_id *dax_id; + ssize_t rc = count; + int fields; + + fields = sscanf(buf, "dax%d.%d", ®ion_id, &id); + if (fields != 2) + return -EINVAL; + sprintf(devname, "dax%d.%d", region_id, id); + if (!sysfs_streq(buf, devname)) + return -EINVAL; + + mutex_lock(&dax_bus_lock); + dax_id = __dax_match_id(dax_drv, buf); + if (!dax_id) { + if (action == ID_ADD) { + dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL); + if (dax_id) { + strncpy(dax_id->dev_name, buf, DAX_NAME_LEN); + list_add(&dax_id->list, &dax_drv->ids); + } else + rc = -ENOMEM; + } else + /* nothing to remove */; + } else if (action == ID_REMOVE) { + list_del(&dax_id->list); + kfree(dax_id); + } else + /* dax_id already added */; + mutex_unlock(&dax_bus_lock); + + if (rc < 0) + return rc; + if (action == ID_ADD) + rc = driver_attach(drv); + if (rc) + return rc; + return count; +} + +static ssize_t new_id_store(struct device_driver *drv, const char *buf, + size_t count) +{ + return do_id_store(drv, buf, count, ID_ADD); +} +static DRIVER_ATTR_WO(new_id); + +static ssize_t remove_id_store(struct device_driver *drv, const char *buf, + size_t count) +{ + return do_id_store(drv, buf, count, ID_REMOVE); +} +static DRIVER_ATTR_WO(remove_id); + +static struct attribute *dax_drv_attrs[] = { + &driver_attr_new_id.attr, + &driver_attr_remove_id.attr, + NULL, +}; +ATTRIBUTE_GROUPS(dax_drv); + +static int dax_bus_match(struct device *dev, struct device_driver *drv); + +static struct bus_type dax_bus_type = { + .name = "dax", + .uevent = dax_bus_uevent, + .match = dax_bus_match, + .drv_groups = dax_drv_groups, +}; + +static int dax_bus_match(struct device *dev, struct device_driver *drv) +{ + struct dax_device_driver *dax_drv = to_dax_drv(drv); + + /* + * All but the 'device-dax' driver, which has 'match_always' + * set, requires an exact id match. + */ + if (dax_drv->match_always) + return 1; + + return dax_match_id(dax_drv, dev); +} + +/* + * Rely on the fact that drvdata is set before the attributes are + * registered, and that the attributes are unregistered before drvdata + * is cleared to assume that drvdata is always valid. 
+ */ +static ssize_t id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dax_region *dax_region = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", dax_region->id); +} +static DEVICE_ATTR_RO(id); + +static ssize_t region_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dax_region *dax_region = dev_get_drvdata(dev); + + return sprintf(buf, "%llu\n", (unsigned long long) + resource_size(&dax_region->res)); +} +static struct device_attribute dev_attr_region_size = __ATTR(size, 0444, + region_size_show, NULL); + +static ssize_t align_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dax_region *dax_region = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", dax_region->align); +} +static DEVICE_ATTR_RO(align); + +static struct attribute *dax_region_attributes[] = { + &dev_attr_region_size.attr, + &dev_attr_align.attr, + &dev_attr_id.attr, + NULL, +}; + +static const struct attribute_group dax_region_attribute_group = { + .name = "dax_region", + .attrs = dax_region_attributes, +}; + +static const struct attribute_group *dax_region_attribute_groups[] = { + &dax_region_attribute_group, + NULL, +}; + +static void dax_region_free(struct kref *kref) +{ + struct dax_region *dax_region; + + dax_region = container_of(kref, struct dax_region, kref); + kfree(dax_region); +} + +void dax_region_put(struct dax_region *dax_region) +{ + kref_put(&dax_region->kref, dax_region_free); +} +EXPORT_SYMBOL_GPL(dax_region_put); + +static void dax_region_unregister(void *region) +{ + struct dax_region *dax_region = region; + + sysfs_remove_groups(&dax_region->dev->kobj, + dax_region_attribute_groups); + dax_region_put(dax_region); +} + +struct dax_region *alloc_dax_region(struct device *parent, int region_id, + struct resource *res, int target_node, unsigned int align, + unsigned long pfn_flags) +{ + struct dax_region *dax_region; + + /* + * The DAX core assumes that it can store its private data in + * parent->driver_data. This WARN is a reminder / safeguard for + * developers of device-dax drivers. 
+ */ + if (dev_get_drvdata(parent)) { + dev_WARN(parent, "dax core failed to setup private data\n"); + return NULL; + } + + if (!IS_ALIGNED(res->start, align) + || !IS_ALIGNED(resource_size(res), align)) + return NULL; + + dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL); + if (!dax_region) + return NULL; + + dev_set_drvdata(parent, dax_region); + memcpy(&dax_region->res, res, sizeof(*res)); + dax_region->pfn_flags = pfn_flags; + kref_init(&dax_region->kref); + dax_region->id = region_id; + dax_region->align = align; + dax_region->dev = parent; + dax_region->target_node = target_node; + if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) { + kfree(dax_region); + return NULL; + } + + kref_get(&dax_region->kref); + if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region)) + return NULL; + return dax_region; +} +EXPORT_SYMBOL_GPL(alloc_dax_region); + +static ssize_t size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_dax *dev_dax = to_dev_dax(dev); + unsigned long long size = resource_size(&dev_dax->region->res); + + return sprintf(buf, "%llu\n", size); +} +static DEVICE_ATTR_RO(size); + +static int dev_dax_target_node(struct dev_dax *dev_dax) +{ + struct dax_region *dax_region = dev_dax->region; + + return dax_region->target_node; +} + +static ssize_t target_node_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_dax *dev_dax = to_dev_dax(dev); + + return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax)); +} +static DEVICE_ATTR_RO(target_node); + +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + /* + * We only ever expect to handle device-dax instances, i.e. the + * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero + */ + return sprintf(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0); +} +static DEVICE_ATTR_RO(modalias); + +static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct dev_dax *dev_dax = to_dev_dax(dev); + + if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0) + return 0; + return a->mode; +} + +static struct attribute *dev_dax_attributes[] = { + &dev_attr_modalias.attr, + &dev_attr_size.attr, + &dev_attr_target_node.attr, + NULL, +}; + +static const struct attribute_group dev_dax_attribute_group = { + .attrs = dev_dax_attributes, + .is_visible = dev_dax_visible, +}; + +static const struct attribute_group *dax_attribute_groups[] = { + &dev_dax_attribute_group, + NULL, +}; + +void kill_dev_dax(struct dev_dax *dev_dax) +{ + struct dax_device *dax_dev = dev_dax->dax_dev; + struct inode *inode = dax_inode(dax_dev); + + kill_dax(dax_dev); + unmap_mapping_range(inode->i_mapping, 0, 0, 1); +} +EXPORT_SYMBOL_GPL(kill_dev_dax); + +static void dev_dax_release(struct device *dev) +{ + struct dev_dax *dev_dax = to_dev_dax(dev); + struct dax_region *dax_region = dev_dax->region; + struct dax_device *dax_dev = dev_dax->dax_dev; + + dax_region_put(dax_region); + put_dax(dax_dev); + kfree(dev_dax); +} + +static void unregister_dev_dax(void *dev) +{ + struct dev_dax *dev_dax = to_dev_dax(dev); + + dev_dbg(dev, "%s\n", __func__); + + kill_dev_dax(dev_dax); + device_del(dev); + put_device(dev); +} + +struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id, + struct dev_pagemap *pgmap, enum dev_dax_subsys subsys) +{ + struct device *parent = dax_region->dev; + struct dax_device *dax_dev; + struct dev_dax 
*dev_dax; + struct inode *inode; + struct device *dev; + int rc = -ENOMEM; + + if (id < 0) + return ERR_PTR(-EINVAL); + + dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL); + if (!dev_dax) + return ERR_PTR(-ENOMEM); + + memcpy(&dev_dax->pgmap, pgmap, sizeof(*pgmap)); + + /* + * No 'host' or dax_operations since there is no access to this + * device outside of mmap of the resulting character device. + */ + dax_dev = alloc_dax(dev_dax, NULL, NULL); + if (!dax_dev) + goto err; + + /* a device_dax instance is dead while the driver is not attached */ + kill_dax(dax_dev); + + /* from here on we're committed to teardown via dax_dev_release() */ + dev = &dev_dax->dev; + device_initialize(dev); + + dev_dax->dax_dev = dax_dev; + dev_dax->region = dax_region; + dev_dax->target_node = dax_region->target_node; + kref_get(&dax_region->kref); + + inode = dax_inode(dax_dev); + dev->devt = inode->i_rdev; + if (subsys == DEV_DAX_BUS) + dev->bus = &dax_bus_type; + else + dev->class = dax_class; + dev->parent = parent; + dev->groups = dax_attribute_groups; + dev->release = dev_dax_release; + dev_set_name(dev, "dax%d.%d", dax_region->id, id); + + rc = device_add(dev); + if (rc) { + kill_dev_dax(dev_dax); + put_device(dev); + return ERR_PTR(rc); + } + + rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev); + if (rc) + return ERR_PTR(rc); + + return dev_dax; + + err: + kfree(dev_dax); + + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(__devm_create_dev_dax); + +static int match_always_count; + +int __dax_driver_register(struct dax_device_driver *dax_drv, + struct module *module, const char *mod_name) +{ + struct device_driver *drv = &dax_drv->drv; + int rc = 0; + + INIT_LIST_HEAD(&dax_drv->ids); + drv->owner = module; + drv->name = mod_name; + drv->mod_name = mod_name; + drv->bus = &dax_bus_type; + + /* there can only be one default driver */ + mutex_lock(&dax_bus_lock); + match_always_count += dax_drv->match_always; + if (match_always_count > 1) { + match_always_count--; + WARN_ON(1); + rc = -EINVAL; + } + mutex_unlock(&dax_bus_lock); + if (rc) + return rc; + return driver_register(drv); +} +EXPORT_SYMBOL_GPL(__dax_driver_register); + +void dax_driver_unregister(struct dax_device_driver *dax_drv) +{ + struct device_driver *drv = &dax_drv->drv; + struct dax_id *dax_id, *_id; + + mutex_lock(&dax_bus_lock); + match_always_count -= dax_drv->match_always; + list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) { + list_del(&dax_id->list); + kfree(dax_id); + } + mutex_unlock(&dax_bus_lock); + driver_unregister(drv); +} +EXPORT_SYMBOL_GPL(dax_driver_unregister); + +int __init dax_bus_init(void) +{ + int rc; + + if (IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)) { + dax_class = class_create(THIS_MODULE, "dax"); + if (IS_ERR(dax_class)) + return PTR_ERR(dax_class); + } + + rc = bus_register(&dax_bus_type); + if (rc) + class_destroy(dax_class); + return rc; +} + +void __exit dax_bus_exit(void) +{ + bus_unregister(&dax_bus_type); + class_destroy(dax_class); +} diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h new file mode 100644 index 000000000000..8619e3299943 --- /dev/null +++ b/drivers/dax/bus.h @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 
*/ +#ifndef __DAX_BUS_H__ +#define __DAX_BUS_H__ +#include <linux/device.h> + +struct dev_dax; +struct resource; +struct dax_device; +struct dax_region; +void dax_region_put(struct dax_region *dax_region); +struct dax_region *alloc_dax_region(struct device *parent, int region_id, + struct resource *res, int target_node, unsigned int align, + unsigned long flags); + +enum dev_dax_subsys { + DEV_DAX_BUS, + DEV_DAX_CLASS, +}; + +struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id, + struct dev_pagemap *pgmap, enum dev_dax_subsys subsys); + +static inline struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, + int id, struct dev_pagemap *pgmap) +{ + return __devm_create_dev_dax(dax_region, id, pgmap, DEV_DAX_BUS); +} + +/* to be deleted when DEV_DAX_CLASS is removed */ +struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys); + +struct dax_device_driver { + struct device_driver drv; + struct list_head ids; + int match_always; +}; + +int __dax_driver_register(struct dax_device_driver *dax_drv, + struct module *module, const char *mod_name); +#define dax_driver_register(driver) \ + __dax_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +void dax_driver_unregister(struct dax_device_driver *dax_drv); +void kill_dev_dax(struct dev_dax *dev_dax); + +#if IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT) +int dev_dax_probe(struct device *dev); +#endif + +/* + * While run_dax() is potentially a generic operation that could be + * defined in include/linux/dax.h we don't want to grow any users + * outside of drivers/dax/ + */ +void run_dax(struct dax_device *dax_dev); + +#define MODULE_ALIAS_DAX_DEVICE(type) \ + MODULE_ALIAS("dax:t" __stringify(type) "*") +#define DAX_DEVICE_MODALIAS_FMT "dax:t%d" + +#endif /* __DAX_BUS_H__ */ diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h index b6fc4f04636d..a45612148ca0 100644 --- a/drivers/dax/dax-private.h +++ b/drivers/dax/dax-private.h @@ -16,10 +16,17 @@ #include <linux/device.h> #include <linux/cdev.h> +/* private routines between core files */ +struct dax_device; +struct dax_device *inode_dax(struct inode *inode); +struct inode *dax_inode(struct dax_device *dax_dev); +int dax_bus_init(void); +void dax_bus_exit(void); + /** * struct dax_region - mapping infrastructure for dax devices * @id: kernel-wide unique region for a memory range - * @base: linear address corresponding to @res + * @target_node: effective numa node if this memory range is onlined * @kref: to pin while other agents have a need to do lookups * @dev: parent device backing this region * @align: allocation and mapping alignment for child dax devices @@ -28,8 +35,7 @@ */ struct dax_region { int id; - struct ida ida; - void *base; + int target_node; struct kref kref; struct device *dev; unsigned int align; @@ -38,20 +44,28 @@ struct dax_region { }; /** - * struct dev_dax - instance data for a subdivision of a dax region + * struct dev_dax - instance data for a subdivision of a dax region, and + * data while the device is activated in the driver. 
* @region - parent region * @dax_dev - core dax functionality + * @target_node: effective numa node if dev_dax memory range is onlined * @dev - device core - * @id - child id in the region - * @num_resources - number of physical address extents in this device - * @res - array of physical address ranges + * @pgmap - pgmap for memmap setup / lifetime (driver owned) + * @ref: pgmap reference count (driver owned) + * @cmp: @ref final put completion (driver owned) */ struct dev_dax { struct dax_region *region; struct dax_device *dax_dev; + int target_node; struct device dev; - int id; - int num_resources; - struct resource res[0]; + struct dev_pagemap pgmap; + struct percpu_ref ref; + struct completion cmp; }; + +static inline struct dev_dax *to_dev_dax(struct device *dev) +{ + return container_of(dev, struct dev_dax, dev); +} #endif diff --git a/drivers/dax/dax.h b/drivers/dax/dax.h deleted file mode 100644 index f9e5feea742c..000000000000 --- a/drivers/dax/dax.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __DAX_H__ -#define __DAX_H__ -struct dax_device; -struct dax_device *inode_dax(struct inode *inode); -struct inode *dax_inode(struct dax_device *dax_dev); -#endif /* __DAX_H__ */ diff --git a/drivers/dax/device-dax.h b/drivers/dax/device-dax.h deleted file mode 100644 index 688b051750bd..000000000000 --- a/drivers/dax/device-dax.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __DEVICE_DAX_H__ -#define __DEVICE_DAX_H__ -struct device; -struct dev_dax; -struct resource; -struct dax_region; -void dax_region_put(struct dax_region *dax_region); -struct dax_region *alloc_dax_region(struct device *parent, - int region_id, struct resource *res, unsigned int align, - void *addr, unsigned long flags); -struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, - int id, struct resource *res, int count); -#endif /* __DEVICE_DAX_H__ */ diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 948806e57cee..e428468ab661 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -1,15 +1,6 @@ -/* - * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License for more details. - */ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */ +#include <linux/memremap.h> #include <linux/pagemap.h> #include <linux/module.h> #include <linux/device.h> @@ -21,161 +12,39 @@ #include <linux/mm.h> #include <linux/mman.h> #include "dax-private.h" -#include "dax.h" +#include "bus.h" -static struct class *dax_class; - -/* - * Rely on the fact that drvdata is set before the attributes are - * registered, and that the attributes are unregistered before drvdata - * is cleared to assume that drvdata is always valid. - */ -static ssize_t id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dax_region *dax_region = dev_get_drvdata(dev); - - return sprintf(buf, "%d\n", dax_region->id); -} -static DEVICE_ATTR_RO(id); - -static ssize_t region_size_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dax_region *dax_region = dev_get_drvdata(dev); - - return sprintf(buf, "%llu\n", (unsigned long long) - resource_size(&dax_region->res)); -} -static struct device_attribute dev_attr_region_size = __ATTR(size, 0444, - region_size_show, NULL); - -static ssize_t align_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dax_region *dax_region = dev_get_drvdata(dev); - - return sprintf(buf, "%u\n", dax_region->align); -} -static DEVICE_ATTR_RO(align); - -static struct attribute *dax_region_attributes[] = { - &dev_attr_region_size.attr, - &dev_attr_align.attr, - &dev_attr_id.attr, - NULL, -}; - -static const struct attribute_group dax_region_attribute_group = { - .name = "dax_region", - .attrs = dax_region_attributes, -}; - -static const struct attribute_group *dax_region_attribute_groups[] = { - &dax_region_attribute_group, - NULL, -}; - -static void dax_region_free(struct kref *kref) -{ - struct dax_region *dax_region; - - dax_region = container_of(kref, struct dax_region, kref); - kfree(dax_region); -} - -void dax_region_put(struct dax_region *dax_region) +static struct dev_dax *ref_to_dev_dax(struct percpu_ref *ref) { - kref_put(&dax_region->kref, dax_region_free); + return container_of(ref, struct dev_dax, ref); } -EXPORT_SYMBOL_GPL(dax_region_put); -static void dax_region_unregister(void *region) +static void dev_dax_percpu_release(struct percpu_ref *ref) { - struct dax_region *dax_region = region; + struct dev_dax *dev_dax = ref_to_dev_dax(ref); - sysfs_remove_groups(&dax_region->dev->kobj, - dax_region_attribute_groups); - dax_region_put(dax_region); + dev_dbg(&dev_dax->dev, "%s\n", __func__); + complete(&dev_dax->cmp); } -struct dax_region *alloc_dax_region(struct device *parent, int region_id, - struct resource *res, unsigned int align, void *addr, - unsigned long pfn_flags) +static void dev_dax_percpu_exit(void *data) { - struct dax_region *dax_region; - - /* - * The DAX core assumes that it can store its private data in - * parent->driver_data. This WARN is a reminder / safeguard for - * developers of device-dax drivers. 
- */ - if (dev_get_drvdata(parent)) { - dev_WARN(parent, "dax core failed to setup private data\n"); - return NULL; - } - - if (!IS_ALIGNED(res->start, align) - || !IS_ALIGNED(resource_size(res), align)) - return NULL; - - dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL); - if (!dax_region) - return NULL; - - dev_set_drvdata(parent, dax_region); - memcpy(&dax_region->res, res, sizeof(*res)); - dax_region->pfn_flags = pfn_flags; - kref_init(&dax_region->kref); - dax_region->id = region_id; - ida_init(&dax_region->ida); - dax_region->align = align; - dax_region->dev = parent; - dax_region->base = addr; - if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) { - kfree(dax_region); - return NULL; - } + struct percpu_ref *ref = data; + struct dev_dax *dev_dax = ref_to_dev_dax(ref); - kref_get(&dax_region->kref); - if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region)) - return NULL; - return dax_region; + dev_dbg(&dev_dax->dev, "%s\n", __func__); + wait_for_completion(&dev_dax->cmp); + percpu_ref_exit(ref); } -EXPORT_SYMBOL_GPL(alloc_dax_region); -static struct dev_dax *to_dev_dax(struct device *dev) +static void dev_dax_percpu_kill(struct percpu_ref *data) { - return container_of(dev, struct dev_dax, dev); -} - -static ssize_t size_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dev_dax *dev_dax = to_dev_dax(dev); - unsigned long long size = 0; - int i; + struct percpu_ref *ref = data; + struct dev_dax *dev_dax = ref_to_dev_dax(ref); - for (i = 0; i < dev_dax->num_resources; i++) - size += resource_size(&dev_dax->res[i]); - - return sprintf(buf, "%llu\n", size); + dev_dbg(&dev_dax->dev, "%s\n", __func__); + percpu_ref_kill(ref); } -static DEVICE_ATTR_RO(size); - -static struct attribute *dev_dax_attributes[] = { - &dev_attr_size.attr, - NULL, -}; - -static const struct attribute_group dev_dax_attribute_group = { - .attrs = dev_dax_attributes, -}; - -static const struct attribute_group *dax_attribute_groups[] = { - &dev_dax_attribute_group, - NULL, -}; static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, const char *func) @@ -226,21 +95,11 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, unsigned long size) { - struct resource *res; - /* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */ - phys_addr_t uninitialized_var(phys); - int i; - - for (i = 0; i < dev_dax->num_resources; i++) { - res = &dev_dax->res[i]; - phys = pgoff * PAGE_SIZE + res->start; - if (phys >= res->start && phys <= res->end) - break; - pgoff -= PHYS_PFN(resource_size(res)); - } + struct resource *res = &dev_dax->region->res; + phys_addr_t phys; - if (i < dev_dax->num_resources) { - res = &dev_dax->res[i]; + phys = pgoff * PAGE_SIZE + res->start; + if (phys >= res->start && phys <= res->end) { if (phys + size - 1 <= res->end) return phys; } @@ -576,152 +435,100 @@ static const struct file_operations dax_fops = { .mmap_supported_flags = MAP_SYNC, }; -static void dev_dax_release(struct device *dev) +static void dev_dax_cdev_del(void *cdev) { - struct dev_dax *dev_dax = to_dev_dax(dev); - struct dax_region *dax_region = dev_dax->region; - struct dax_device *dax_dev = dev_dax->dax_dev; - - if (dev_dax->id >= 0) - ida_simple_remove(&dax_region->ida, dev_dax->id); - dax_region_put(dax_region); - put_dax(dax_dev); - kfree(dev_dax); + cdev_del(cdev); } -static void kill_dev_dax(struct dev_dax *dev_dax) +static void 
dev_dax_kill(void *dev_dax) { - struct dax_device *dax_dev = dev_dax->dax_dev; - struct inode *inode = dax_inode(dax_dev); - - kill_dax(dax_dev); - unmap_mapping_range(inode->i_mapping, 0, 0, 1); + kill_dev_dax(dev_dax); } -static void unregister_dev_dax(void *dev) +int dev_dax_probe(struct device *dev) { struct dev_dax *dev_dax = to_dev_dax(dev); struct dax_device *dax_dev = dev_dax->dax_dev; - struct inode *inode = dax_inode(dax_dev); - struct cdev *cdev = inode->i_cdev; - - dev_dbg(dev, "trace\n"); - - kill_dev_dax(dev_dax); - cdev_device_del(cdev, dev); - put_device(dev); -} - -struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, - int id, struct resource *res, int count) -{ - struct device *parent = dax_region->dev; - struct dax_device *dax_dev; - struct dev_dax *dev_dax; + struct resource *res = &dev_dax->region->res; struct inode *inode; - struct device *dev; struct cdev *cdev; - int rc, i; - - if (!count) - return ERR_PTR(-EINVAL); - - dev_dax = kzalloc(struct_size(dev_dax, res, count), GFP_KERNEL); - if (!dev_dax) - return ERR_PTR(-ENOMEM); - - for (i = 0; i < count; i++) { - if (!IS_ALIGNED(res[i].start, dax_region->align) - || !IS_ALIGNED(resource_size(&res[i]), - dax_region->align)) { - rc = -EINVAL; - break; - } - dev_dax->res[i].start = res[i].start; - dev_dax->res[i].end = res[i].end; + void *addr; + int rc; + + /* 1:1 map region resource range to device-dax instance range */ + if (!devm_request_mem_region(dev, res->start, resource_size(res), + dev_name(dev))) { + dev_warn(dev, "could not reserve region %pR\n", res); + return -EBUSY; } - if (i < count) - goto err_id; + init_completion(&dev_dax->cmp); + rc = percpu_ref_init(&dev_dax->ref, dev_dax_percpu_release, 0, + GFP_KERNEL); + if (rc) + return rc; - if (id < 0) { - id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL); - dev_dax->id = id; - if (id < 0) { - rc = id; - goto err_id; - } - } else { - /* region provider owns @id lifetime */ - dev_dax->id = -1; - } + rc = devm_add_action_or_reset(dev, dev_dax_percpu_exit, &dev_dax->ref); + if (rc) + return rc; - /* - * No 'host' or dax_operations since there is no access to this - * device outside of mmap of the resulting character device. 
- */ - dax_dev = alloc_dax(dev_dax, NULL, NULL); - if (!dax_dev) { - rc = -ENOMEM; - goto err_dax; + dev_dax->pgmap.ref = &dev_dax->ref; + dev_dax->pgmap.kill = dev_dax_percpu_kill; + addr = devm_memremap_pages(dev, &dev_dax->pgmap); + if (IS_ERR(addr)) { + devm_remove_action(dev, dev_dax_percpu_exit, &dev_dax->ref); + percpu_ref_exit(&dev_dax->ref); + return PTR_ERR(addr); } - /* from here on we're committed to teardown via dax_dev_release() */ - dev = &dev_dax->dev; - device_initialize(dev); - inode = dax_inode(dax_dev); cdev = inode->i_cdev; cdev_init(cdev, &dax_fops); - cdev->owner = parent->driver->owner; - - dev_dax->num_resources = count; - dev_dax->dax_dev = dax_dev; - dev_dax->region = dax_region; - kref_get(&dax_region->kref); - - dev->devt = inode->i_rdev; - dev->class = dax_class; - dev->parent = parent; - dev->groups = dax_attribute_groups; - dev->release = dev_dax_release; - dev_set_name(dev, "dax%d.%d", dax_region->id, id); - - rc = cdev_device_add(cdev, dev); - if (rc) { - kill_dev_dax(dev_dax); - put_device(dev); - return ERR_PTR(rc); - } - - rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev); + if (dev->class) { + /* for the CONFIG_DEV_DAX_PMEM_COMPAT case */ + cdev->owner = dev->parent->driver->owner; + } else + cdev->owner = dev->driver->owner; + cdev_set_parent(cdev, &dev->kobj); + rc = cdev_add(cdev, dev->devt, 1); if (rc) - return ERR_PTR(rc); + return rc; - return dev_dax; + rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev); + if (rc) + return rc; - err_dax: - if (dev_dax->id >= 0) - ida_simple_remove(&dax_region->ida, dev_dax->id); - err_id: - kfree(dev_dax); + run_dax(dax_dev); + return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax); +} +EXPORT_SYMBOL_GPL(dev_dax_probe); - return ERR_PTR(rc); +static int dev_dax_remove(struct device *dev) +{ + /* all probe actions are unwound by devm */ + return 0; } -EXPORT_SYMBOL_GPL(devm_create_dev_dax); + +static struct dax_device_driver device_dax_driver = { + .drv = { + .probe = dev_dax_probe, + .remove = dev_dax_remove, + }, + .match_always = 1, +}; static int __init dax_init(void) { - dax_class = class_create(THIS_MODULE, "dax"); - return PTR_ERR_OR_ZERO(dax_class); + return dax_driver_register(&device_dax_driver); } static void __exit dax_exit(void) { - class_destroy(dax_class); + dax_driver_unregister(&device_dax_driver); } MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL v2"); -subsys_initcall(dax_init); +module_init(dax_init); module_exit(dax_exit); +MODULE_ALIAS_DAX_DEVICE(0); diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c new file mode 100644 index 000000000000..a02318c6d28a --- /dev/null +++ b/drivers/dax/kmem.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-2019 Intel Corporation. All rights reserved. */ +#include <linux/memremap.h> +#include <linux/pagemap.h> +#include <linux/memory.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/pfn_t.h> +#include <linux/slab.h> +#include <linux/dax.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include "dax-private.h" +#include "bus.h" + +int dev_dax_kmem_probe(struct device *dev) +{ + struct dev_dax *dev_dax = to_dev_dax(dev); + struct resource *res = &dev_dax->region->res; + resource_size_t kmem_start; + resource_size_t kmem_size; + resource_size_t kmem_end; + struct resource *new_res; + int numa_node; + int rc; + + /* + * Ensure good NUMA information for the persistent memory. 
+ * Without this check, there is a risk that slow memory + * could be mixed in a node with faster memory, causing + * unavoidable performance issues. + */ + numa_node = dev_dax->target_node; + if (numa_node < 0) { + dev_warn(dev, "rejecting DAX region %pR with invalid node: %d\n", + res, numa_node); + return -EINVAL; + } + + /* Hotplug starting at the beginning of the next block: */ + kmem_start = ALIGN(res->start, memory_block_size_bytes()); + + kmem_size = resource_size(res); + /* Adjust the size down to compensate for moving up kmem_start: */ + kmem_size -= kmem_start - res->start; + /* Align the size down to cover only complete blocks: */ + kmem_size &= ~(memory_block_size_bytes() - 1); + kmem_end = kmem_start + kmem_size; + + /* Region is permanently reserved. Hot-remove not yet implemented. */ + new_res = request_mem_region(kmem_start, kmem_size, dev_name(dev)); + if (!new_res) { + dev_warn(dev, "could not reserve region [%pa-%pa]\n", + &kmem_start, &kmem_end); + return -EBUSY; + } + + /* + * Set flags appropriate for System RAM. Leave ..._BUSY clear + * so that add_memory() can add a child resource. Do not + * inherit flags from the parent since it may set new flags + * unknown to us that will break add_memory() below. + */ + new_res->flags = IORESOURCE_SYSTEM_RAM; + new_res->name = dev_name(dev); + + rc = add_memory(numa_node, new_res->start, resource_size(new_res)); + if (rc) + return rc; + + return 0; +} + +static int dev_dax_kmem_remove(struct device *dev) +{ + /* + * Purposely leak the request_mem_region() for the device-dax + * range and return '0' to ->remove() attempts. The removal of + * the device from the driver always succeeds, but the region + * is permanently pinned as reserved by the unreleased + * request_mem_region(). + */ + return 0; +} + +static struct dax_device_driver device_dax_kmem_driver = { + .drv = { + .probe = dev_dax_kmem_probe, + .remove = dev_dax_kmem_remove, + }, +}; + +static int __init dax_kmem_init(void) +{ + return dax_driver_register(&device_dax_kmem_driver); +} + +static void __exit dax_kmem_exit(void) +{ + dax_driver_unregister(&device_dax_kmem_driver); +} + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); +module_init(dax_kmem_init); +module_exit(dax_kmem_exit); +MODULE_ALIAS_DAX_DEVICE(0); diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c deleted file mode 100644 index 2c1f459c0c63..000000000000 --- a/drivers/dax/pmem.c +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ -#include <linux/percpu-refcount.h> -#include <linux/memremap.h> -#include <linux/module.h> -#include <linux/pfn_t.h> -#include "../nvdimm/pfn.h" -#include "../nvdimm/nd.h" -#include "device-dax.h" - -struct dax_pmem { - struct device *dev; - struct percpu_ref ref; - struct dev_pagemap pgmap; - struct completion cmp; -}; - -static struct dax_pmem *to_dax_pmem(struct percpu_ref *ref) -{ - return container_of(ref, struct dax_pmem, ref); -} - -static void dax_pmem_percpu_release(struct percpu_ref *ref) -{ - struct dax_pmem *dax_pmem = to_dax_pmem(ref); - - dev_dbg(dax_pmem->dev, "trace\n"); - complete(&dax_pmem->cmp); -} - -static void dax_pmem_percpu_exit(void *data) -{ - struct percpu_ref *ref = data; - struct dax_pmem *dax_pmem = to_dax_pmem(ref); - - dev_dbg(dax_pmem->dev, "trace\n"); - wait_for_completion(&dax_pmem->cmp); - percpu_ref_exit(ref); -} - -static void dax_pmem_percpu_kill(struct percpu_ref *ref) -{ - struct dax_pmem *dax_pmem = to_dax_pmem(ref); - - dev_dbg(dax_pmem->dev, "trace\n"); - percpu_ref_kill(ref); -} - -static int dax_pmem_probe(struct device *dev) -{ - void *addr; - struct resource res; - int rc, id, region_id; - struct nd_pfn_sb *pfn_sb; - struct dev_dax *dev_dax; - struct dax_pmem *dax_pmem; - struct nd_namespace_io *nsio; - struct dax_region *dax_region; - struct nd_namespace_common *ndns; - struct nd_dax *nd_dax = to_nd_dax(dev); - struct nd_pfn *nd_pfn = &nd_dax->nd_pfn; - - ndns = nvdimm_namespace_common_probe(dev); - if (IS_ERR(ndns)) - return PTR_ERR(ndns); - nsio = to_nd_namespace_io(&ndns->dev); - - dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL); - if (!dax_pmem) - return -ENOMEM; - - /* parse the 'pfn' info block via ->rw_bytes */ - rc = devm_nsio_enable(dev, nsio); - if (rc) - return rc; - rc = nvdimm_setup_pfn(nd_pfn, &dax_pmem->pgmap); - if (rc) - return rc; - devm_nsio_disable(dev, nsio); - - pfn_sb = nd_pfn->pfn_sb; - - if (!devm_request_mem_region(dev, nsio->res.start, - resource_size(&nsio->res), - dev_name(&ndns->dev))) { - dev_warn(dev, "could not reserve region %pR\n", &nsio->res); - return -EBUSY; - } - - dax_pmem->dev = dev; - init_completion(&dax_pmem->cmp); - rc = percpu_ref_init(&dax_pmem->ref, dax_pmem_percpu_release, 0, - GFP_KERNEL); - if (rc) - return rc; - - rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref); - if (rc) { - percpu_ref_exit(&dax_pmem->ref); - return rc; - } - - dax_pmem->pgmap.ref = &dax_pmem->ref; - dax_pmem->pgmap.kill = dax_pmem_percpu_kill; - addr = devm_memremap_pages(dev, &dax_pmem->pgmap); - if (IS_ERR(addr)) - return PTR_ERR(addr); - - /* adjust the dax_region resource to the start of data */ - memcpy(&res, &dax_pmem->pgmap.res, sizeof(res)); - res.start += le64_to_cpu(pfn_sb->dataoff); - - rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", ®ion_id, &id); - if (rc != 2) - return -EINVAL; - - dax_region = alloc_dax_region(dev, region_id, &res, - le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP); - if (!dax_region) - return -ENOMEM; - - /* TODO: support for subdividing a dax region... 
*/ - dev_dax = devm_create_dev_dax(dax_region, id, &res, 1); - - /* child dev_dax instances now own the lifetime of the dax_region */ - dax_region_put(dax_region); - - return PTR_ERR_OR_ZERO(dev_dax); -} - -static struct nd_device_driver dax_pmem_driver = { - .probe = dax_pmem_probe, - .drv = { - .name = "dax_pmem", - }, - .type = ND_DRIVER_DAX_PMEM, -}; - -module_nd_driver(dax_pmem_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Intel Corporation"); -MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM); diff --git a/drivers/dax/pmem/Makefile b/drivers/dax/pmem/Makefile new file mode 100644 index 000000000000..e2e79bd3fdcf --- /dev/null +++ b/drivers/dax/pmem/Makefile @@ -0,0 +1,7 @@ +obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o +obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o +obj-$(CONFIG_DEV_DAX_PMEM_COMPAT) += dax_pmem_compat.o + +dax_pmem-y := pmem.o +dax_pmem_core-y := core.o +dax_pmem_compat-y := compat.o diff --git a/drivers/dax/pmem/compat.c b/drivers/dax/pmem/compat.c new file mode 100644 index 000000000000..d7b15e6f30c5 --- /dev/null +++ b/drivers/dax/pmem/compat.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */ +#include <linux/percpu-refcount.h> +#include <linux/memremap.h> +#include <linux/module.h> +#include <linux/pfn_t.h> +#include <linux/nd.h> +#include "../bus.h" + +/* we need the private definitions to implement compat suport */ +#include "../dax-private.h" + +static int dax_pmem_compat_probe(struct device *dev) +{ + struct dev_dax *dev_dax = __dax_pmem_probe(dev, DEV_DAX_CLASS); + int rc; + + if (IS_ERR(dev_dax)) + return PTR_ERR(dev_dax); + + if (!devres_open_group(&dev_dax->dev, dev_dax, GFP_KERNEL)) + return -ENOMEM; + + device_lock(&dev_dax->dev); + rc = dev_dax_probe(&dev_dax->dev); + device_unlock(&dev_dax->dev); + + devres_close_group(&dev_dax->dev, dev_dax); + if (rc) + devres_release_group(&dev_dax->dev, dev_dax); + + return rc; +} + +static int dax_pmem_compat_release(struct device *dev, void *data) +{ + device_lock(dev); + devres_release_group(dev, to_dev_dax(dev)); + device_unlock(dev); + + return 0; +} + +static int dax_pmem_compat_remove(struct device *dev) +{ + device_for_each_child(dev, NULL, dax_pmem_compat_release); + return 0; +} + +static struct nd_device_driver dax_pmem_compat_driver = { + .probe = dax_pmem_compat_probe, + .remove = dax_pmem_compat_remove, + .drv = { + .name = "dax_pmem_compat", + }, + .type = ND_DRIVER_DAX_PMEM, +}; + +static int __init dax_pmem_compat_init(void) +{ + return nd_driver_register(&dax_pmem_compat_driver); +} +module_init(dax_pmem_compat_init); + +static void __exit dax_pmem_compat_exit(void) +{ + driver_unregister(&dax_pmem_compat_driver.drv); +} +module_exit(dax_pmem_compat_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM); diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c new file mode 100644 index 000000000000..f71019ce0647 --- /dev/null +++ b/drivers/dax/pmem/core.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 
*/ +#include <linux/memremap.h> +#include <linux/module.h> +#include <linux/pfn_t.h> +#include "../../nvdimm/pfn.h" +#include "../../nvdimm/nd.h" +#include "../bus.h" + +struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys) +{ + struct resource res; + int rc, id, region_id; + resource_size_t offset; + struct nd_pfn_sb *pfn_sb; + struct dev_dax *dev_dax; + struct nd_namespace_io *nsio; + struct dax_region *dax_region; + struct dev_pagemap pgmap = { 0 }; + struct nd_namespace_common *ndns; + struct nd_dax *nd_dax = to_nd_dax(dev); + struct nd_pfn *nd_pfn = &nd_dax->nd_pfn; + struct nd_region *nd_region = to_nd_region(dev->parent); + + ndns = nvdimm_namespace_common_probe(dev); + if (IS_ERR(ndns)) + return ERR_CAST(ndns); + nsio = to_nd_namespace_io(&ndns->dev); + + /* parse the 'pfn' info block via ->rw_bytes */ + rc = devm_nsio_enable(dev, nsio); + if (rc) + return ERR_PTR(rc); + rc = nvdimm_setup_pfn(nd_pfn, &pgmap); + if (rc) + return ERR_PTR(rc); + devm_nsio_disable(dev, nsio); + + /* reserve the metadata area, device-dax will reserve the data */ + pfn_sb = nd_pfn->pfn_sb; + offset = le64_to_cpu(pfn_sb->dataoff); + if (!devm_request_mem_region(dev, nsio->res.start, offset, + dev_name(&ndns->dev))) { + dev_warn(dev, "could not reserve metadata\n"); + return ERR_PTR(-EBUSY); + } + + rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", ®ion_id, &id); + if (rc != 2) + return ERR_PTR(-EINVAL); + + /* adjust the dax_region resource to the start of data */ + memcpy(&res, &pgmap.res, sizeof(res)); + res.start += offset; + dax_region = alloc_dax_region(dev, region_id, &res, + nd_region->target_node, le32_to_cpu(pfn_sb->align), + PFN_DEV|PFN_MAP); + if (!dax_region) + return ERR_PTR(-ENOMEM); + + dev_dax = __devm_create_dev_dax(dax_region, id, &pgmap, subsys); + + /* child dev_dax instances now own the lifetime of the dax_region */ + dax_region_put(dax_region); + + return dev_dax; +} +EXPORT_SYMBOL_GPL(__dax_pmem_probe); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Intel Corporation"); diff --git a/drivers/dax/pmem/pmem.c b/drivers/dax/pmem/pmem.c new file mode 100644 index 000000000000..0ae4238a0ef8 --- /dev/null +++ b/drivers/dax/pmem/pmem.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 
*/ +#include <linux/percpu-refcount.h> +#include <linux/memremap.h> +#include <linux/module.h> +#include <linux/pfn_t.h> +#include <linux/nd.h> +#include "../bus.h" + +static int dax_pmem_probe(struct device *dev) +{ + return PTR_ERR_OR_ZERO(__dax_pmem_probe(dev, DEV_DAX_BUS)); +} + +static struct nd_device_driver dax_pmem_driver = { + .probe = dax_pmem_probe, + .drv = { + .name = "dax_pmem", + }, + .type = ND_DRIVER_DAX_PMEM, +}; + +static int __init dax_pmem_init(void) +{ + return nd_driver_register(&dax_pmem_driver); +} +module_init(dax_pmem_init); + +static void __exit dax_pmem_exit(void) +{ + driver_unregister(&dax_pmem_driver.drv); +} +module_exit(dax_pmem_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Intel Corporation"); +#if !IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT) +/* For compat builds, don't load this module by default */ +MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM); +#endif diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 0cb8c30ea278..0a339b85133e 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -22,6 +22,7 @@ #include <linux/uio.h> #include <linux/dax.h> #include <linux/fs.h> +#include "dax-private.h" static dev_t dax_devt; DEFINE_STATIC_SRCU(dax_srcu); @@ -383,11 +384,15 @@ void kill_dax(struct dax_device *dax_dev) spin_lock(&dax_host_lock); hlist_del_init(&dax_dev->list); spin_unlock(&dax_host_lock); - - dax_dev->private = NULL; } EXPORT_SYMBOL_GPL(kill_dax); +void run_dax(struct dax_device *dax_dev) +{ + set_bit(DAXDEV_ALIVE, &dax_dev->flags); +} +EXPORT_SYMBOL_GPL(run_dax); + static struct inode *dax_alloc_inode(struct super_block *sb) { struct dax_device *dax_dev; @@ -602,6 +607,8 @@ EXPORT_SYMBOL_GPL(dax_inode); void *dax_get_private(struct dax_device *dax_dev) { + if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags)) + return NULL; return dax_dev->private; } EXPORT_SYMBOL_GPL(dax_get_private); @@ -615,7 +622,7 @@ static void init_once(void *_dax_dev) inode_init_once(inode); } -static int __dax_fs_init(void) +static int dax_fs_init(void) { int rc; @@ -647,35 +654,45 @@ static int __dax_fs_init(void) return rc; } -static void __dax_fs_exit(void) +static void dax_fs_exit(void) { kern_unmount(dax_mnt); unregister_filesystem(&dax_fs_type); kmem_cache_destroy(dax_cache); } -static int __init dax_fs_init(void) +static int __init dax_core_init(void) { int rc; - rc = __dax_fs_init(); + rc = dax_fs_init(); if (rc) return rc; rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax"); if (rc) - __dax_fs_exit(); - return rc; + goto err_chrdev; + + rc = dax_bus_init(); + if (rc) + goto err_bus; + return 0; + +err_bus: + unregister_chrdev_region(dax_devt, MINORMASK+1); +err_chrdev: + dax_fs_exit(); + return 0; } -static void __exit dax_fs_exit(void) +static void __exit dax_core_exit(void) { unregister_chrdev_region(dax_devt, MINORMASK+1); ida_destroy(&dax_minor_ida); - __dax_fs_exit(); + dax_fs_exit(); } MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL v2"); -subsys_initcall(dax_fs_init); -module_exit(dax_fs_exit); +subsys_initcall(dax_core_init); +module_exit(dax_core_exit); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ead851413c0a..16fcb56c232b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -700,6 +700,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_vm_bo_base *bo_base, *tmp; int r = 0; + vm->bulk_moveable &= list_empty(&vm->evicted); + list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) 
{ struct amdgpu_bo *bo = bo_base->bo; @@ -947,10 +949,6 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, if (r) return r; - r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats); - if (r) - goto error_free_pt; - if (vm->use_cpu_for_update) { r = amdgpu_bo_kmap(pt, NULL); if (r) @@ -963,6 +961,10 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, pt->parent = amdgpu_bo_ref(cursor.parent->base.bo); amdgpu_vm_bo_base_init(&entry->base, vm, pt); + + r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats); + if (r) + goto error_free_pt; } return 0; @@ -3033,13 +3035,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) goto error_unreserve; + amdgpu_vm_bo_base_init(&vm->root.base, vm, root); + r = amdgpu_vm_clear_bo(adev, vm, root, adev->vm_manager.root_level, vm->pte_support_ats); if (r) goto error_unreserve; - amdgpu_vm_bo_base_init(&vm->root.base, vm, root); amdgpu_bo_unreserve(vm->root.base.bo); if (pasid) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 5533f6e4f4a4..d0309e8c9d12 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -220,6 +220,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] = static const struct soc15_reg_golden golden_settings_gc_9_x_common[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382) }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 600259b4e291..2fe8397241ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -742,7 +742,7 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) } ring->vm_inv_eng = inv_eng - 1; - change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub])); + vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng); dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", ring->name, ring->vm_inv_eng, ring->funcs->vmhub); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index c63de945c021..0487e3a4e9e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -500,9 +500,7 @@ static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp) struct amdgpu_device *adev = psp->adev; uint32_t reg; - reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000; - WREG32_SOC15(NBIO, 0, mmPCIE_INDEX2, reg); - reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2); + reg = RREG32_PCIE(smnMP1_FIRMWARE_FLAGS | 0x03b00000); return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? 
true : false; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 99ebcf29dcb0..ed89a101f73f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -461,7 +461,6 @@ static int soc15_asic_reset(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: - case CHIP_VEGA20: soc15_asic_get_baco_capability(adev, &baco_reset); break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 47243165a082..ae90a99909ef 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -323,57 +323,7 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) { - uint64_t addr; - struct cik_mqd *m; - int retval; - - retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), - mqd_mem_obj); - - if (retval != 0) - return -ENOMEM; - - m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr; - addr = (*mqd_mem_obj)->gpu_addr; - - memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256)); - - m->header = 0xC0310800; - m->compute_pipelinestat_enable = 1; - m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; - - m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE | - PRELOAD_REQ; - m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS | - QUANTUM_DURATION(10); - - m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN; - m->cp_mqd_base_addr_lo = lower_32_bits(addr); - m->cp_mqd_base_addr_hi = upper_32_bits(addr); - - m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE; - - /* - * Pipe Priority - * Identifies the pipe relative priority when this queue is connected - * to the pipeline. The pipe priority is against the GFX pipe and HP3D. - * In KFD we are using a fixed pipe priority set to CS_MEDIUM. 
- * 0 = CS_LOW (typically below GFX) - * 1 = CS_MEDIUM (typically between HP3D and GFX - * 2 = CS_HIGH (typically above HP3D) - */ - m->cp_hqd_pipe_priority = 1; - m->cp_hqd_queue_priority = 15; - - *mqd = m; - if (gart_addr) - *gart_addr = addr; - retval = mm->update_mqd(mm, m, q); - - return retval; + return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q); } static int update_mqd_hiq(struct mqd_manager *mm, void *mqd, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2f26581b93ff..fb27783d7a54 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -886,6 +886,7 @@ static void emulated_link_detect(struct dc_link *link) return; } + /* dc_sink_create returns a new reference */ link->local_sink = sink; edid_status = dm_helpers_read_local_edid( @@ -952,6 +953,8 @@ static int dm_resume(void *handle) if (aconnector->fake_enable && aconnector->dc_link->local_sink) aconnector->fake_enable = false; + if (aconnector->dc_sink) + dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; amdgpu_dm_update_connector_after_detect(aconnector); mutex_unlock(&aconnector->hpd_lock); @@ -1061,6 +1064,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) sink = aconnector->dc_link->local_sink; + if (sink) + dc_sink_retain(sink); /* * Edid mgmt connector gets first update only in mode_valid hook and then @@ -1085,21 +1090,24 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) * to it anymore after disconnect, so on next crtc to connector * reshuffle by UMD we will get into unwanted dc_sink release */ - if (aconnector->dc_sink != aconnector->dc_em_sink) - dc_sink_release(aconnector->dc_sink); + dc_sink_release(aconnector->dc_sink); } aconnector->dc_sink = sink; + dc_sink_retain(aconnector->dc_sink); amdgpu_dm_update_freesync_caps(connector, aconnector->edid); } else { amdgpu_dm_update_freesync_caps(connector, NULL); - if (!aconnector->dc_sink) + if (!aconnector->dc_sink) { aconnector->dc_sink = aconnector->dc_em_sink; - else if (aconnector->dc_sink != aconnector->dc_em_sink) dc_sink_retain(aconnector->dc_sink); + } } mutex_unlock(&dev->mode_config.mutex); + + if (sink) + dc_sink_release(sink); return; } @@ -1107,8 +1115,10 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) * TODO: temporary guard to look for proper fix * if this sink is MST sink, we should not do anything */ - if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + dc_sink_release(sink); return; + } if (aconnector->dc_sink == sink) { /* @@ -1117,6 +1127,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) */ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", aconnector->connector_id); + if (sink) + dc_sink_release(sink); return; } @@ -1138,6 +1150,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) amdgpu_dm_update_freesync_caps(connector, NULL); aconnector->dc_sink = sink; + dc_sink_retain(aconnector->dc_sink); if (sink->dc_edid.length == 0) { aconnector->edid = NULL; drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); @@ -1158,11 +1171,15 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) amdgpu_dm_update_freesync_caps(connector, NULL); drm_connector_update_edid_property(connector, NULL); aconnector->num_modes = 0; + 
dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; } mutex_unlock(&dev->mode_config.mutex); + + if (sink) + dc_sink_release(sink); } static void handle_hpd_irq(void *param) @@ -2977,6 +2994,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, return stream; } else { sink = aconnector->dc_sink; + dc_sink_retain(sink); } stream = dc_create_stream_for_sink(sink); @@ -3042,8 +3060,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, update_stream_signal(stream, sink); finish: - if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON) - dc_sink_release(sink); + dc_sink_release(sink); return stream; } @@ -3301,6 +3318,14 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) dm->backlight_dev = NULL; } #endif + + if (aconnector->dc_em_sink) + dc_sink_release(aconnector->dc_em_sink); + aconnector->dc_em_sink = NULL; + if (aconnector->dc_sink) + dc_sink_release(aconnector->dc_sink); + aconnector->dc_sink = NULL; + drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); drm_connector_unregister(connector); drm_connector_cleanup(connector); @@ -3398,10 +3423,12 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) (edid->extensions + 1) * EDID_LENGTH, &init_params); - if (aconnector->base.force == DRM_FORCE_ON) + if (aconnector->base.force == DRM_FORCE_ON) { aconnector->dc_sink = aconnector->dc_link->local_sink ? aconnector->dc_link->local_sink : aconnector->dc_em_sink; + dc_sink_retain(aconnector->dc_sink); + } } static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index f51d52eb52e6..c4ea3a91f17a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -191,6 +191,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) &init_params); dc_sink->priv = aconnector; + /* dc_link_add_remote_sink returns a new reference */ aconnector->dc_sink = dc_sink; if (aconnector->dc_sink) diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 12d1842079ae..eb62d10bb65c 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -1348,12 +1348,12 @@ void dcn_bw_update_from_pplib(struct dc *dc) struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; bool res; - kernel_fpu_begin(); - /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ res = dm_pp_get_clock_levels_by_type_with_voltage( ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); + kernel_fpu_begin(); + if (res) res = verify_clock_values(&fclks); @@ -1372,9 +1372,13 @@ void dcn_bw_update_from_pplib(struct dc *dc) } else BREAK_TO_DEBUGGER(); + kernel_fpu_end(); + res = dm_pp_get_clock_levels_by_type_with_voltage( ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); + kernel_fpu_begin(); + if (res) res = verify_clock_values(&dcfclks); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 7f5a947ad31d..4eba3c4800b6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -794,6 +794,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) sink->link->dongle_max_pix_clk = 
sink_caps.max_hdmi_pixel_clock; sink->converter_disable_audio = converter_disable_audio; + /* dc_sink_create returns a new reference */ link->local_sink = sink; edid_status = dm_helpers_read_local_edid( @@ -2037,6 +2038,9 @@ static enum dc_status enable_link( break; } + if (status == DC_OK) + pipe_ctx->stream->link->link_status.link_active = true; + return status; } @@ -2060,6 +2064,14 @@ static void disable_link(struct dc_link *link, enum signal_type signal) dp_disable_link_phy_mst(link, signal); } else link->link_enc->funcs->disable_output(link->link_enc, signal); + + if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + /* MST disable link only when no stream use the link */ + if (link->mst_stream_alloc_table.stream_count <= 0) + link->link_status.link_active = false; + } else { + link->link_status.link_active = false; + } } static bool dp_active_dongle_validate_timing( @@ -2623,8 +2635,6 @@ void core_link_enable_stream( } } - stream->link->link_status.link_active = true; - core_dc->hwss.enable_audio_stream(pipe_ctx); /* turn off otg test pattern if enable */ @@ -2659,8 +2669,6 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) core_dc->hwss.disable_stream(pipe_ctx, option); disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); - - pipe_ctx->stream->link->link_status.link_active = false; } void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 94a84bc57c7a..bfd27f10879e 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -724,7 +724,7 @@ static void build_vrr_infopacket_v1(enum signal_type signal, static void build_vrr_infopacket_v2(enum signal_type signal, const struct mod_vrr_params *vrr, - const enum color_transfer_func *app_tf, + enum color_transfer_func app_tf, struct dc_info_packet *infopacket) { unsigned int payload_size = 0; @@ -732,8 +732,7 @@ static void build_vrr_infopacket_v2(enum signal_type signal, build_vrr_infopacket_header_v2(signal, infopacket, &payload_size); build_vrr_infopacket_data(vrr, infopacket); - if (app_tf != NULL) - build_vrr_infopacket_fs2_data(*app_tf, infopacket); + build_vrr_infopacket_fs2_data(app_tf, infopacket); build_vrr_infopacket_checksum(&payload_size, infopacket); @@ -757,7 +756,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, const struct dc_stream_state *stream, const struct mod_vrr_params *vrr, enum vrr_packet_type packet_type, - const enum color_transfer_func *app_tf, + enum color_transfer_func app_tf, struct dc_info_packet *infopacket) { /* SPD info packet for FreeSync diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index 4222e403b151..dcef85994c45 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -145,7 +145,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, const struct dc_stream_state *stream, const struct mod_vrr_params *vrr, enum vrr_packet_type packet_type, - const enum color_transfer_func *app_tf, + enum color_transfer_func app_tf, struct dc_info_packet *infopacket); void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index ce177d7f04cb..6bf48934fdc4 
100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -277,8 +277,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set if (!skip_display_settings) phm_notify_smc_display_config_after_ps_adjustment(hwmgr); - if ((hwmgr->request_dpm_level != hwmgr->dpm_level) && - !phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) + if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) hwmgr->dpm_level = hwmgr->request_dpm_level; if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index 4588bddf8b33..615cf2c09e54 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -489,15 +489,16 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr, } int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, - uint8_t id, uint32_t *frequency) + uint8_t clk_id, uint8_t syspll_id, + uint32_t *frequency) { struct amdgpu_device *adev = hwmgr->adev; struct atom_get_smu_clock_info_parameters_v3_1 parameters; struct atom_get_smu_clock_info_output_parameters_v3_1 *output; uint32_t ix; - parameters.clk_id = id; - parameters.syspll_id = 0; + parameters.clk_id = clk_id; + parameters.syspll_id = syspll_id; parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; parameters.dfsdid = 0; @@ -530,20 +531,23 @@ static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr, boot_values->ulSocClk = 0; boot_values->ulDCEFClk = 0; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, SMU11_SYSPLL0_ID, &frequency)) boot_values->ulSocClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, SMU11_SYSPLL0_ID, &frequency)) boot_values->ulDCEFClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, SMU11_SYSPLL0_ID, &frequency)) boot_values->ulEClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, SMU11_SYSPLL0_ID, &frequency)) boot_values->ulVClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, SMU11_SYSPLL0_ID, &frequency)) boot_values->ulDClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL1_0_FCLK_ID, SMU11_SYSPLL1_2_ID, &frequency)) + boot_values->ulFClk = frequency; } static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr, @@ -563,19 +567,19 @@ static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr, boot_values->ulSocClk = 0; boot_values->ulDCEFClk = 0; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, 0, &frequency)) boot_values->ulSocClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency)) + if 
(!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, 0, &frequency)) boot_values->ulDCEFClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, 0, &frequency)) boot_values->ulEClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, 0, &frequency)) boot_values->ulVClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, 0, &frequency)) boot_values->ulDClk = frequency; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h index fe9e8ceef50e..b7e2651b570b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h @@ -139,6 +139,7 @@ struct pp_atomfwctrl_bios_boot_up_values { uint32_t ulEClk; uint32_t ulVClk; uint32_t ulDClk; + uint32_t ulFClk; uint16_t usVddc; uint16_t usVddci; uint16_t usMvddc; @@ -236,7 +237,8 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, struct pp_atomfwctrl_smc_dpm_parameters *param); int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, - uint8_t id, uint32_t *frequency); + uint8_t clk_id, uint8_t syspll_id, + uint32_t *frequency); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 48187acac59e..83d3d935f3ac 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3491,14 +3491,14 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_PM_STATUS_94, 0); + ixSMU_PM_STATUS_95, 0); for (i = 0; i < 10; i++) { - mdelay(1); + mdelay(500); smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample); tmp = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_PM_STATUS_94); + ixSMU_PM_STATUS_95); if (tmp != 0) break; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 5479125ff4f6..5c4f701939ea 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -2575,10 +2575,10 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk; data->vbios_boot_state.mem_clock = boot_up_values.ulUClk; pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, - SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk); + SMU9_SYSPLL0_SOCCLK_ID, 0, &boot_up_values.ulSocClk); pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, - SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk); + SMU9_SYSPLL0_DCEFCLK_ID, 0, &boot_up_values.ulDCEFClk); data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; @@ -4407,9 +4407,9 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe return ret; features_to_disable = - (features_enabled ^ new_ppfeature_masks) & features_enabled; + features_enabled & ~new_ppfeature_masks; 
features_to_enable = - (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; + ~features_enabled & new_ppfeature_masks; pr_debug("features_to_disable 0x%llx\n", features_to_disable); pr_debug("features_to_enable 0x%llx\n", features_to_enable); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 6c8e78611c03..bdb48e94eff6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -2009,9 +2009,9 @@ static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe return ret; features_to_disable = - (features_enabled ^ new_ppfeature_masks) & features_enabled; + features_enabled & ~new_ppfeature_masks; features_to_enable = - (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; + ~features_enabled & new_ppfeature_masks; pr_debug("features_to_disable 0x%llx\n", features_to_disable); pr_debug("features_to_enable 0x%llx\n", features_to_enable); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index aad79affb081..9aa7bec1b5fe 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -463,9 +463,9 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr) static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state) { dpm_state->soft_min_level = 0x0; - dpm_state->soft_max_level = 0xffff; + dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_state->hard_min_level = 0x0; - dpm_state->hard_max_level = 0xffff; + dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT; } static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, @@ -711,8 +711,10 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!ret, "[SetupDefaultDpmTable] failed to get fclk dpm levels!", return ret); - } else - dpm_table->count = 0; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100; + } vega20_init_dpm_state(&(dpm_table->dpm_state)); /* save a copy of the default DPM table */ @@ -754,6 +756,7 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.eclock = boot_up_values.ulEClk; data->vbios_boot_state.vclock = boot_up_values.ulVClk; data->vbios_boot_state.dclock = boot_up_values.ulDClk; + data->vbios_boot_state.fclock = boot_up_values.ulFClk; data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; smum_send_msg_to_smc_with_parameter(hwmgr, @@ -780,6 +783,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr) static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg; int ret; @@ -816,6 +821,10 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) "[OverridePcieParameters] Attempt to override pcie params failed!", return ret); + data->pcie_parameters_override = 1; + data->pcie_gen_level1 = pcie_gen; + data->pcie_width_level1 = pcie_width; + return 0; } @@ -979,6 +988,8 @@ static int vega20_od8_set_feature_capabilities( } if (data->smu_features[GNLD_DPM_UCLK].enabled) { + pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] = + data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value; if 
(pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] && pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 && pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 && @@ -2314,32 +2325,8 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) { - struct vega20_hwmgr *data = - (struct vega20_hwmgr *)(hwmgr->backend); - uint32_t soft_min_level, soft_max_level; int ret = 0; - soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); - soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); - data->dpm_table.gfx_table.dpm_state.soft_min_level = - data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; - data->dpm_table.gfx_table.dpm_state.soft_max_level = - data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; - - soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); - soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); - data->dpm_table.mem_table.dpm_state.soft_min_level = - data->dpm_table.mem_table.dpm_levels[soft_min_level].value; - data->dpm_table.mem_table.dpm_state.soft_max_level = - data->dpm_table.mem_table.dpm_levels[soft_max_level].value; - - soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); - soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); - data->dpm_table.soc_table.dpm_state.soft_min_level = - data->dpm_table.soc_table.dpm_levels[soft_min_level].value; - data->dpm_table.soc_table.dpm_state.soft_max_level = - data->dpm_table.soc_table.dpm_levels[soft_max_level].value; - ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); PP_ASSERT_WITH_CODE(!ret, "Failed to upload DPM Bootup Levels!", @@ -2641,9 +2628,8 @@ static int vega20_get_sclks(struct pp_hwmgr *hwmgr, struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); int i, count; - PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled, - "[GetSclks]: gfxclk dpm not enabled!\n", - return -EPERM); + if (!data->smu_features[GNLD_DPM_GFXCLK].enabled) + return -1; count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; clocks->num_levels = count; @@ -2670,9 +2656,8 @@ static int vega20_get_memclocks(struct pp_hwmgr *hwmgr, struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table); int i, count; - PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled, - "[GetMclks]: uclk dpm not enabled!\n", - return -EPERM); + if (!data->smu_features[GNLD_DPM_UCLK].enabled) + return -1; count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; clocks->num_levels = data->mclk_latency_table.count = count; @@ -2696,9 +2681,8 @@ static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr, struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table); int i, count; - PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_DCEFCLK].enabled, - "[GetDcfclocks]: dcefclk dpm not enabled!\n", - return -EPERM); + if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled) + return -1; count = (dpm_table->count > MAX_NUM_CLOCKS) ? 
MAX_NUM_CLOCKS : dpm_table->count; clocks->num_levels = count; @@ -2719,9 +2703,8 @@ static int vega20_get_socclocks(struct pp_hwmgr *hwmgr, struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table); int i, count; - PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_SOCCLK].enabled, - "[GetSocclks]: socclk dpm not enabled!\n", - return -EPERM); + if (!data->smu_features[GNLD_DPM_SOCCLK].enabled) + return -1; count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; clocks->num_levels = count; @@ -2799,7 +2782,6 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, data->od8_settings.od8_settings_array; OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table); - struct pp_clock_levels_with_latency clocks; int32_t input_index, input_clk, input_vol, i; int od8_id; int ret; @@ -2858,11 +2840,6 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, return -EOPNOTSUPP; } - ret = vega20_get_memclocks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Attempt to get memory clk levels failed!", - return ret); - for (i = 0; i < size; i += 2) { if (i + 2 > size) { pr_info("invalid number of input parameters %d\n", @@ -2879,11 +2856,11 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, return -EINVAL; } - if (input_clk < clocks.data[0].clocks_in_khz / 1000 || + if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value || input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) { pr_info("clock freq %d is not within allowed range [%d - %d]\n", input_clk, - clocks.data[0].clocks_in_khz / 1000, + od8_settings[OD8_SETTING_UCLK_FMAX].min_value, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); return -EINVAL; } @@ -3088,9 +3065,9 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe return ret; features_to_disable = - (features_enabled ^ new_ppfeature_masks) & features_enabled; + features_enabled & ~new_ppfeature_masks; features_to_enable = - (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; + ~features_enabled & new_ppfeature_masks; pr_debug("features_to_disable 0x%llx\n", features_to_disable); pr_debug("features_to_enable 0x%llx\n", features_to_enable); @@ -3128,7 +3105,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, &(data->dpm_table.fclk_table); int i, now, size = 0; int ret = 0; - uint32_t gen_speed, lane_width; + uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; switch (type) { case PP_SCLK: @@ -3137,10 +3114,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get current gfx clk Failed!", return ret); - ret = vega20_get_sclks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Attempt to get gfx clk levels Failed!", - return ret); + if (vega20_get_sclks(hwmgr, &clocks)) { + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", + now / 100); + break; + } for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -3154,10 +3132,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get current mclk freq Failed!", return ret); - ret = vega20_get_memclocks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Attempt to get memory clk levels Failed!", - return ret); + if (vega20_get_memclocks(hwmgr, &clocks)) { + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", + now / 100); + break; + } for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -3171,10 +3150,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, 
"Attempt to get current socclk freq Failed!", return ret); - ret = vega20_get_socclocks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Attempt to get soc clk levels Failed!", - return ret); + if (vega20_get_socclocks(hwmgr, &clocks)) { + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", + now / 100); + break; + } for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -3200,10 +3180,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get current dcefclk freq Failed!", return ret); - ret = vega20_get_dcefclocks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Attempt to get dcefclk levels Failed!", - return ret); + if (vega20_get_dcefclocks(hwmgr, &clocks)) { + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", + now / 100); + break; + } for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -3212,28 +3193,36 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, break; case PP_PCIE: - gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & + current_gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; - lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & + current_lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; - for (i = 0; i < NUM_LINK_LEVELS; i++) + for (i = 0; i < NUM_LINK_LEVELS; i++) { + if (i == 1 && data->pcie_parameters_override) { + gen_speed = data->pcie_gen_level1; + lane_width = data->pcie_width_level1; + } else { + gen_speed = pptable->PcieGenSpeed[i]; + lane_width = pptable->PcieLaneCount[i]; + } size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, - (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," : - (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," : - (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," : - (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "", - (pptable->PcieLaneCount[i] == 1) ? "x1" : - (pptable->PcieLaneCount[i] == 2) ? "x2" : - (pptable->PcieLaneCount[i] == 3) ? "x4" : - (pptable->PcieLaneCount[i] == 4) ? "x8" : - (pptable->PcieLaneCount[i] == 5) ? "x12" : - (pptable->PcieLaneCount[i] == 6) ? "x16" : "", + (gen_speed == 0) ? "2.5GT/s," : + (gen_speed == 1) ? "5.0GT/s," : + (gen_speed == 2) ? "8.0GT/s," : + (gen_speed == 3) ? "16.0GT/s," : "", + (lane_width == 1) ? "x1" : + (lane_width == 2) ? "x2" : + (lane_width == 3) ? "x4" : + (lane_width == 4) ? "x8" : + (lane_width == 5) ? "x12" : + (lane_width == 6) ? "x16" : "", pptable->LclkFreq[i], - (gen_speed == pptable->PcieGenSpeed[i]) && - (lane_width == pptable->PcieLaneCount[i]) ? + (current_gen_speed == gen_speed) && + (current_lane_width == lane_width) ? 
"*" : ""); + } break; case OD_SCLK: @@ -3288,13 +3277,8 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, } if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - ret = vega20_get_memclocks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Fail to get memory clk levels!", - return ret); - size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", - clocks.data[0].clocks_in_khz / 1000, + od8_settings[OD8_SETTING_UCLK_FMAX].min_value, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); } @@ -3356,6 +3340,31 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr, return ret; } +static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table); + int ret = 0; + + if (data->smu_features[GNLD_DPM_FCLK].enabled) { + PP_ASSERT_WITH_CODE(dpm_table->count > 0, + "[SetFclkToHightestDpmLevel] Dpm table has no entry!", + return -EINVAL); + PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS, + "[SetFclkToHightestDpmLevel] Dpm table has too many entries!", + return -EINVAL); + + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)), + "[SetFclkToHightestDpmLevel] Set soft min fclk failed!", + return ret); + } + + return ret; +} + static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); @@ -3366,8 +3375,10 @@ static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) ret = vega20_set_uclk_to_highest_dpm_level(hwmgr, &data->dpm_table.mem_table); + if (ret) + return ret; - return ret; + return vega20_set_fclk_to_highest_dpm_level(hwmgr); } static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr) @@ -3461,9 +3472,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* gfxclk */ dpm_table = &(data->dpm_table.gfx_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) { @@ -3485,9 +3496,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* memclk */ dpm_table = &(data->dpm_table.mem_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) { @@ -3526,12 +3537,21 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) if 
(hwmgr->display_config->nb_pstate_switch_disable) dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + /* fclk */ + dpm_table = &(data->dpm_table.fclk_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; + if (hwmgr->display_config->nb_pstate_switch_disable) + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + /* vclk */ dpm_table = &(data->dpm_table.vclk_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { @@ -3548,9 +3568,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* dclk */ dpm_table = &(data->dpm_table.dclk_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { @@ -3567,9 +3587,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* socclk */ dpm_table = &(data->dpm_table.soc_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) { @@ -3586,9 +3606,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* eclk */ dpm_table = &(data->dpm_table.eclk_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index 37f5f5e657da..a5bc758ae097 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h @@ -42,6 +42,8 @@ #define AVFS_CURVE 0 #define OD8_HOTCURVE_TEMPERATURE 85 +#define VG20_CLOCK_MAX_DEFAULT 0xFFFF + typedef uint32_t PP_Clock; enum { @@ -219,6 +221,7 @@ struct vega20_vbios_boot_state { uint32_t eclock; uint32_t dclock; uint32_t vclock; + uint32_t fclock; }; #define DPMTABLE_OD_UPDATE_SCLK 0x00000001 @@ -523,6 +526,10 @@ struct vega20_hwmgr { unsigned long metrics_time; SmuMetrics_t metrics_table; + + bool pcie_parameters_override; + uint32_t pcie_gen_level1; + uint32_t pcie_width_level1; }; #define VEGA20_DPM2_NEAR_TDP_DEC 10 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c index 97f8a1a970c3..7a7f15d0c53a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c @@ -32,6 +32,8 @@ #include "cgs_common.h" #include "vega20_pptable.h" +#define VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE 105 + static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, enum phm_platform_caps cap) { @@ -798,6 +800,17 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable return 0; } +static int override_powerplay_table_fantargettemperature(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; + PPTable_t *ppsmc_pptable = (PPTable_t *)(pptable_information->smc_pptable); + + ppsmc_pptable->FanTargetTemperature = VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE; + + return 0; +} + #define VEGA20_ENGINECLOCK_HARDMAX 198000 static int init_powerplay_table_information( struct pp_hwmgr *hwmgr, @@ -887,6 +900,10 @@ static int init_powerplay_table_information( result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable)); + if (result) + return result; + + result = override_powerplay_table_fantargettemperature(hwmgr); return result; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 52abca065764..2d4cfe14f72e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -2330,6 +2330,7 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case UvdBootLevel: @@ -2339,6 +2340,7 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) case LowSclkInterruptThreshold: return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c index 079fc8e8f709..742b3dc1f6cb 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c @@ -40,10 +40,8 @@ bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; uint32_t mp1_fw_flags; - WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, - (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); - - mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) return true; diff --git 
a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index b7ff7d4d6f44..ba00744c3413 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -49,10 +49,8 @@ static bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; uint32_t mp1_fw_flags; - WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, - (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); - - mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 540a77a2ade9..40ac19848034 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -3039,9 +3039,31 @@ commit: return 0; } -static int __drm_atomic_helper_disable_all(struct drm_device *dev, - struct drm_modeset_acquire_ctx *ctx, - bool clean_old_fbs) +/** + * drm_atomic_helper_disable_all - disable all currently active outputs + * @dev: DRM device + * @ctx: lock acquisition context + * + * Loops through all connectors, finding those that aren't turned off and then + * turns them off by setting their DPMS mode to OFF and deactivating the CRTC + * that they are connected to. + * + * This is used for example in suspend/resume to disable all currently active + * functions when suspending. If you just want to shut down everything at e.g. + * driver unload, look at drm_atomic_helper_shutdown(). + * + * Note that if callers haven't already acquired all modeset locks this might + * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). + * + * Returns: + * 0 on success or a negative error code on failure. + * + * See also: + * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and + * drm_atomic_helper_shutdown(). + */ +int drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) { struct drm_atomic_state *state; struct drm_connector_state *conn_state; @@ -3099,35 +3121,6 @@ free: drm_atomic_state_put(state); return ret; } - -/** - * drm_atomic_helper_disable_all - disable all currently active outputs - * @dev: DRM device - * @ctx: lock acquisition context - * - * Loops through all connectors, finding those that aren't turned off and then - * turns them off by setting their DPMS mode to OFF and deactivating the CRTC - * that they are connected to. - * - * This is used for example in suspend/resume to disable all currently active - * functions when suspending. If you just want to shut down everything at e.g. - * driver unload, look at drm_atomic_helper_shutdown(). - * - * Note that if callers haven't already acquired all modeset locks this might - * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). - * - * Returns: - * 0 on success or a negative error code on failure. - * - * See also: - * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and - * drm_atomic_helper_shutdown(). 
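The kerneldoc moved above documents that drm_atomic_helper_disable_all() may return -EDEADLK, which callers must resolve with drm_modeset_backoff(). A minimal usage sketch of a suspend-style caller driving the helper through an acquire context (an illustration under those documented rules, not code from this patch; example_disable_outputs is a made-up name):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_modeset_lock.h>

static int example_disable_outputs(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_atomic_helper_disable_all(dev, &ctx);
	if (ret == -EDEADLK) {
		/* drop the contended locks and retry, as the kerneldoc requires */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}

drm_atomic_helper_shutdown() in the hunk that follows wraps the same call in the DRM_MODESET_LOCK_ALL_BEGIN()/DRM_MODESET_LOCK_ALL_END() macro pair, which expands to this kind of backoff-and-retry loop.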
- */ -int drm_atomic_helper_disable_all(struct drm_device *dev, - struct drm_modeset_acquire_ctx *ctx) -{ - return __drm_atomic_helper_disable_all(dev, ctx, false); -} EXPORT_SYMBOL(drm_atomic_helper_disable_all); /** @@ -3148,7 +3141,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev) DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); - ret = __drm_atomic_helper_disable_all(dev, &ctx, true); + ret = drm_atomic_helper_disable_all(dev, &ctx); if (ret) DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 67b1fca39aa6..0e3043e08c69 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -185,7 +185,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd, m32.size = map.size; m32.type = map.type; m32.flags = map.flags; - m32.handle = ptr_to_compat(map.handle); + m32.handle = ptr_to_compat((void __user *)map.handle); m32.mtrr = map.mtrr; if (copy_to_user(argp, &m32, sizeof(m32))) return -EFAULT; @@ -216,7 +216,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd, m32.offset = map.offset; m32.mtrr = map.mtrr; - m32.handle = ptr_to_compat(map.handle); + m32.handle = ptr_to_compat((void __user *)map.handle); if (map.handle != compat_ptr(m32.handle)) pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n", map.handle, m32.type, m32.offset); @@ -526,7 +526,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd, if (err) return err; - req32.handle = ptr_to_compat(req.handle); + req32.handle = ptr_to_compat((void __user *)req.handle); if (copy_to_user(argp, &req32, sizeof(req32))) return -EFAULT; diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig index 041a77e400d4..21df44b78df3 100644 --- a/drivers/gpu/drm/etnaviv/Kconfig +++ b/drivers/gpu/drm/etnaviv/Kconfig @@ -2,7 +2,6 @@ config DRM_ETNAVIV tristate "ETNAVIV (DRM support for Vivante GPU IP cores)" depends on DRM - depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST) depends on MMU select SHMEM select SYNC_FILE diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h index acb68c698363..4d5d1a77eb2a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h @@ -15,8 +15,6 @@ struct etnaviv_perfmon_request; struct etnaviv_cmdbuf { /* suballocator this cmdbuf is allocated from */ struct etnaviv_cmdbuf_suballoc *suballoc; - /* user context key, must be unique between all active users */ - struct etnaviv_file_private *ctx; /* cmdbuf properties */ int suballoc_offset; void *vaddr; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index 3fbb4855396c..33854c94cb85 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -215,7 +215,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) mutex_lock(&obj->lock); pages = etnaviv_gem_get_pages(obj); mutex_unlock(&obj->lock); - if (pages) { + if (!IS_ERR(pages)) { int j; iter.hdr->data[0] = bomap - bomap_start; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index 76079c2291f8..f0abb744ef95 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -95,6 +95,7 @@ struct etnaviv_gem_submit_bo { struct etnaviv_gem_submit { struct drm_sched_job sched_job; struct kref refcount; + struct etnaviv_file_private *ctx; struct etnaviv_gpu *gpu; struct 
dma_fence *out_fence, *in_fence; int out_fence_id; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 0566171f8df2..f21529e635e3 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -15,7 +15,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) int npages = obj->size >> PAGE_SHIFT; if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */ - return NULL; + return ERR_PTR(-EINVAL); return drm_prime_pages_to_sg(etnaviv_obj->pages, npages); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 30875f8f2933..b2fe3446bfbc 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -506,7 +506,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret) goto err_submit_objects; - submit->cmdbuf.ctx = file->driver_priv; + submit->ctx = file->driver_priv; submit->exec_state = args->exec_state; submit->flags = args->flags; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c index f1c88d8ad5ba..f794e04be9e6 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c @@ -320,8 +320,8 @@ etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu) domain = &etnaviv_domain->base; domain->dev = gpu->dev; - domain->base = 0; - domain->size = (u64)SZ_1G * 4; + domain->base = SZ_4K; + domain->size = (u64)SZ_1G * 4 - SZ_4K; domain->ops = &etnaviv_iommuv2_ops; ret = etnaviv_iommuv2_init(etnaviv_domain); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c index 9980d81a26e3..4227a4006c34 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c @@ -113,7 +113,7 @@ static const struct etnaviv_pm_domain doms_3d[] = { .name = "PE", .profile_read = VIVS_MC_PROFILE_PE_READ, .profile_config = VIVS_MC_PROFILE_CONFIG0, - .nr_signals = 5, + .nr_signals = 4, .signal = (const struct etnaviv_pm_signal[]) { { "PIXEL_COUNT_KILLED_BY_COLOR_PIPE", @@ -435,7 +435,7 @@ int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu, dom = meta->domains + signal->domain; - if (signal->iter > dom->nr_signals) + if (signal->iter >= dom->nr_signals) return -EINVAL; sig = &dom->signal[signal->iter]; @@ -461,7 +461,7 @@ int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r, dom = meta->domains + r->domain; - if (r->signal > dom->nr_signals) + if (r->signal >= dom->nr_signals) return -EINVAL; return 0; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 67ae26602024..6d24fea1766b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -153,7 +153,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity, mutex_lock(&submit->gpu->fence_lock); ret = drm_sched_job_init(&submit->sched_job, sched_entity, - submit->cmdbuf.ctx); + submit->ctx); if (ret) goto out_unlock; diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 0573eab0e190..f35e4ab55b27 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -20,6 +20,7 @@ #include "regs-vp.h" #include <linux/kernel.h> +#include <linux/ktime.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/i2c.h> @@ -352,15 +353,62 @@ static void 
mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha) mixer_reg_write(ctx, MXR_VIDEO_CFG, val); } -static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) +static bool mixer_is_synced(struct mixer_context *ctx) { - /* block update on vsync */ - mixer_reg_writemask(ctx, MXR_STATUS, enable ? - MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); + u32 base, shadow; + if (ctx->mxr_ver == MXR_VER_16_0_33_0 || + ctx->mxr_ver == MXR_VER_128_0_0_184) + return !(mixer_reg_read(ctx, MXR_CFG) & + MXR_CFG_LAYER_UPDATE_COUNT_MASK); + + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && + vp_reg_read(ctx, VP_SHADOW_UPDATE)) + return false; + + base = mixer_reg_read(ctx, MXR_CFG); + shadow = mixer_reg_read(ctx, MXR_CFG_S); + if (base != shadow) + return false; + + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); + if (base != shadow) + return false; + + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1)); + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1)); + if (base != shadow) + return false; + + return true; +} + +static int mixer_wait_for_sync(struct mixer_context *ctx) +{ + ktime_t timeout = ktime_add_us(ktime_get(), 100000); + + while (!mixer_is_synced(ctx)) { + usleep_range(1000, 2000); + if (ktime_compare(ktime_get(), timeout) > 0) + return -ETIMEDOUT; + } + return 0; +} + +static void mixer_disable_sync(struct mixer_context *ctx) +{ + mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE); +} + +static void mixer_enable_sync(struct mixer_context *ctx) +{ + if (ctx->mxr_ver == MXR_VER_16_0_33_0 || + ctx->mxr_ver == MXR_VER_128_0_0_184) + mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); + mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE); if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) - vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ? - VP_SHADOW_UPDATE_ENABLE : 0); + vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE); } static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height) @@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx, spin_lock_irqsave(&ctx->reg_slock, flags); - vp_reg_write(ctx, VP_SHADOW_UPDATE, 1); /* interlace or progressive scan mode */ val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? 
~0 : 0); vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); @@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx, vp_regs_dump(ctx); } -static void mixer_layer_update(struct mixer_context *ctx) -{ - mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); -} - static void mixer_graph_buffer(struct mixer_context *ctx, struct exynos_drm_plane *plane) { @@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx, mixer_cfg_layer(ctx, win, priority, true); mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha); - /* layer update mandatory for mixer 16.0.33.0 */ - if (ctx->mxr_ver == MXR_VER_16_0_33_0 || - ctx->mxr_ver == MXR_VER_128_0_0_184) - mixer_layer_update(ctx); - spin_unlock_irqrestore(&ctx->reg_slock, flags); mixer_regs_dump(ctx); @@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx) static irqreturn_t mixer_irq_handler(int irq, void *arg) { struct mixer_context *ctx = arg; - u32 val, base, shadow; + u32 val; spin_lock(&ctx->reg_slock); @@ -723,26 +760,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) val &= ~MXR_INT_STATUS_VSYNC; /* interlace scan need to check shadow register */ - if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { - if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && - vp_reg_read(ctx, VP_SHADOW_UPDATE)) - goto out; - - base = mixer_reg_read(ctx, MXR_CFG); - shadow = mixer_reg_read(ctx, MXR_CFG_S); - if (base != shadow) - goto out; - - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); - if (base != shadow) - goto out; - - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1)); - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1)); - if (base != shadow) - goto out; - } + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags) + && !mixer_is_synced(ctx)) + goto out; drm_crtc_handle_vblank(&ctx->crtc->base); } @@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc) static void mixer_atomic_begin(struct exynos_drm_crtc *crtc) { - struct mixer_context *mixer_ctx = crtc->ctx; + struct mixer_context *ctx = crtc->ctx; - if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) + if (!test_bit(MXR_BIT_POWERED, &ctx->flags)) return; - mixer_vsync_set_update(mixer_ctx, false); + if (mixer_wait_for_sync(ctx)) + dev_err(ctx->dev, "timeout waiting for VSYNC\n"); + mixer_disable_sync(ctx); } static void mixer_update_plane(struct exynos_drm_crtc *crtc, @@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc) if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) return; - mixer_vsync_set_update(mixer_ctx, true); + mixer_enable_sync(mixer_ctx); exynos_crtc_handle_event(crtc); } @@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) exynos_drm_pipe_clk_enable(crtc, true); - mixer_vsync_set_update(ctx, false); + mixer_disable_sync(ctx); mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); @@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) mixer_commit(ctx); - mixer_vsync_set_update(ctx, true); + mixer_enable_sync(ctx); set_bit(MXR_BIT_POWERED, &ctx->flags); } diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 215b6ff8aa73..db7bb5bd5add 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -163,17 +163,25 @@ int i915_active_ref(struct i915_active *ref, struct i915_request *rq) { struct i915_active_request *active; + int err = 0; + + /* Prevent reaping in case we malloc/wait while building the 
tree */ + i915_active_acquire(ref); active = active_instance(ref, timeline); - if (IS_ERR(active)) - return PTR_ERR(active); + if (IS_ERR(active)) { + err = PTR_ERR(active); + goto out; + } if (!i915_active_request_isset(active)) ref->count++; __i915_active_request_set(active, rq); GEM_BUG_ON(!ref->count); - return 0; +out: + i915_active_release(ref); + return err; } bool i915_active_acquire(struct i915_active *ref) @@ -223,19 +231,25 @@ int i915_request_await_active_request(struct i915_request *rq, int i915_request_await_active(struct i915_request *rq, struct i915_active *ref) { struct active_node *it, *n; - int ret; + int err = 0; - ret = i915_request_await_active_request(rq, &ref->last); - if (ret) - return ret; + /* await allocates and so we need to avoid hitting the shrinker */ + if (i915_active_acquire(ref)) + goto out; /* was idle */ + + err = i915_request_await_active_request(rq, &ref->last); + if (err) + goto out; rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { - ret = i915_request_await_active_request(rq, &it->base); - if (ret) - return ret; + err = i915_request_await_active_request(rq, &it->base); + if (err) + goto out; } - return 0; +out: + i915_active_release(ref); + return err; } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6630212f2faf..9df65d386d11 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -757,39 +757,6 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) return ret; } -#if !defined(CONFIG_VGA_CONSOLE) -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - return 0; -} -#elif !defined(CONFIG_DUMMY_CONSOLE) -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - return -ENODEV; -} -#else -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - int ret = 0; - - DRM_INFO("Replacing VGA console driver\n"); - - console_lock(); - if (con_is_bound(&vga_con)) - ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); - if (ret == 0) { - ret = do_unregister_con_driver(&vga_con); - - /* Ignore "already unregistered". */ - if (ret == -ENODEV) - ret = 0; - } - console_unlock(); - - return ret; -} -#endif - static void intel_init_dpio(struct drm_i915_private *dev_priv) { /* @@ -1420,7 +1387,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) goto err_ggtt; } - ret = i915_kick_out_vgacon(dev_priv); + ret = vga_remove_vgacon(pdev); if (ret) { DRM_ERROR("failed to remove conflicting VGA console\n"); goto err_ggtt; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6728ea5c71d4..8558e81fdc2a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1688,7 +1688,8 @@ __vma_matches(struct vm_area_struct *vma, struct file *filp, if (vma->vm_file != filp) return false; - return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size; + return vma->vm_start == addr && + (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size); } /** @@ -1733,8 +1734,13 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, * pages from. 
*/ if (!obj->base.filp) { - i915_gem_object_put(obj); - return -ENXIO; + addr = -ENXIO; + goto err; + } + + if (range_overflows(args->offset, args->size, (u64)obj->base.size)) { + addr = -EINVAL; + goto err; } addr = vm_mmap(obj->base.filp, 0, args->size, @@ -1748,8 +1754,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct vm_area_struct *vma; if (down_write_killable(&mm->mmap_sem)) { - i915_gem_object_put(obj); - return -EINTR; + addr = -EINTR; + goto err; } vma = find_vma(mm, addr); if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) @@ -1767,12 +1773,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, i915_gem_object_put(obj); args->addr_ptr = (u64)addr; - return 0; err: i915_gem_object_put(obj); - return addr; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 9a65341fec09..aa6791255252 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1721,7 +1721,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg) i915_error_generate_code(error, engines)); if (engines) { /* Just show the first executing process, more is confusing */ - i = ffs(engines); + i = __ffs(engines); len += scnprintf(error->error_msg + len, sizeof(error->error_msg) - len, ", in %s [%d]", diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index d01683167c77..8bc042551692 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -223,8 +223,14 @@ out: return &p->requests[idx]; } +struct sched_cache { + struct list_head *priolist; +}; + static struct intel_engine_cs * -sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked) +sched_lock_engine(const struct i915_sched_node *node, + struct intel_engine_cs *locked, + struct sched_cache *cache) { struct intel_engine_cs *engine = node_to_request(node)->engine; @@ -232,6 +238,7 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked) if (engine != locked) { spin_unlock(&locked->timeline.lock); + memset(cache, 0, sizeof(*cache)); spin_lock(&engine->timeline.lock); } @@ -253,11 +260,11 @@ static bool inflight(const struct i915_request *rq, static void __i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr) { - struct list_head *uninitialized_var(pl); - struct intel_engine_cs *engine, *last; + struct intel_engine_cs *engine; struct i915_dependency *dep, *p; struct i915_dependency stack; const int prio = attr->priority; + struct sched_cache cache; LIST_HEAD(dfs); /* Needed in order to use the temporary link inside i915_dependency */ @@ -328,7 +335,7 @@ static void __i915_schedule(struct i915_request *rq, __list_del_entry(&stack.dfs_link); } - last = NULL; + memset(&cache, 0, sizeof(cache)); engine = rq->engine; spin_lock_irq(&engine->timeline.lock); @@ -338,7 +345,7 @@ static void __i915_schedule(struct i915_request *rq, INIT_LIST_HEAD(&dep->dfs_link); - engine = sched_lock_engine(node, engine); + engine = sched_lock_engine(node, engine, &cache); lockdep_assert_held(&engine->timeline.lock); /* Recheck after acquiring the engine->timeline.lock */ @@ -347,11 +354,11 @@ static void __i915_schedule(struct i915_request *rq, node->attr.priority = prio; if (!list_empty(&node->link)) { - if (last != engine) { - pl = i915_sched_lookup_priolist(engine, prio); - last = engine; - } - list_move_tail(&node->link, pl); + if (!cache.priolist) + cache.priolist = + 
i915_sched_lookup_priolist(engine, + prio); + list_move_tail(&node->link, cache.priolist); } else { /* * If the request is not in the priolist queue because diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b508d8a735e0..4364f42cac6b 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1673,6 +1673,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv) info->supports_dvi = (port != PORT_A && port != PORT_E); info->supports_hdmi = info->supports_dvi; info->supports_dp = (port != PORT_E); + info->supports_edp = (port == PORT_A); } } diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index cacaa1d04d17..09ed90c0ba00 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -106,16 +106,6 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); - clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); - - /* - * We may race with direct invocation of - * dma_fence_signal(), e.g. i915_request_retire(), - * in which case we can skip processing it ourselves. - */ - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &rq->fence.flags)) - continue; /* * Queue for execution after dropping the signaling @@ -123,6 +113,14 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) * more signalers to the same context or engine. */ i915_request_get(rq); + + /* + * We may race with direct invocation of + * dma_fence_signal(), e.g. i915_request_retire(), + * so we need to acquire our reference to the request + * before we cancel the breadcrumb. + */ + clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); list_add_tail(&rq->signal_link, &signal); } diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index ca705546a0ab..14d580cdefd3 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -3568,6 +3568,13 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder, { if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state); + + if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_DESIRED) + intel_hdcp_enable(to_intel_connector(conn_state->connector)); + else if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + intel_hdcp_disable(to_intel_connector(conn_state->connector)); } static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder, @@ -3962,12 +3969,7 @@ static int modeset_pipe(struct drm_crtc *crtc, goto out; ret = drm_atomic_commit(state); - if (ret) - goto out; - - return 0; - - out: +out: drm_atomic_state_put(state); return ret; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index d00d0bb07784..7eb58a9d1319 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -710,47 +710,45 @@ __sseu_prepare(struct drm_i915_private *i915, unsigned int flags, struct i915_gem_context *ctx, struct intel_engine_cs *engine, - struct igt_spinner **spin_out) + struct igt_spinner **spin) { - int ret = 0; - - if (flags & (TEST_BUSY | TEST_RESET)) { - struct igt_spinner *spin; - struct i915_request *rq; + struct i915_request *rq; + int ret; - spin = kzalloc(sizeof(*spin), GFP_KERNEL); - if (!spin) { - ret = -ENOMEM; - goto out; - } + *spin = NULL; + if (!(flags & 
(TEST_BUSY | TEST_RESET))) + return 0; - ret = igt_spinner_init(spin, i915); - if (ret) - return ret; + *spin = kzalloc(sizeof(**spin), GFP_KERNEL); + if (!*spin) + return -ENOMEM; - rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); - if (IS_ERR(rq)) { - ret = PTR_ERR(rq); - igt_spinner_fini(spin); - kfree(spin); - goto out; - } + ret = igt_spinner_init(*spin, i915); + if (ret) + goto err_free; - i915_request_add(rq); + rq = igt_spinner_create_request(*spin, ctx, engine, MI_NOOP); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + goto err_fini; + } - if (!igt_wait_for_spinner(spin, rq)) { - pr_err("%s: Spinner failed to start!\n", name); - igt_spinner_end(spin); - igt_spinner_fini(spin); - kfree(spin); - ret = -ETIMEDOUT; - goto out; - } + i915_request_add(rq); - *spin_out = spin; + if (!igt_wait_for_spinner(*spin, rq)) { + pr_err("%s: Spinner failed to start!\n", name); + ret = -ETIMEDOUT; + goto err_end; } -out: + return 0; + +err_end: + igt_spinner_end(*spin); +err_fini: + igt_spinner_fini(*spin); +err_free: + kfree(fetch_and_zero(spin)); return ret; } @@ -897,22 +895,23 @@ __sseu_test(struct drm_i915_private *i915, ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin); if (ret) - goto out; + goto out_context; ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu); if (ret) - goto out; + goto out_spin; ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj, hweight32(sseu.slice_mask), spin); -out: +out_spin: if (spin) { igt_spinner_end(spin); igt_spinner_fini(spin); kfree(spin); } +out_context: kernel_context_close(kctx); return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 88a52f6b39fe..7dfbbbc1beea 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf, } ret = pm_runtime_get_sync(drm->dev); - if (IS_ERR_VALUE(ret) && ret != -EACCES) + if (ret < 0 && ret != -EACCES) return ret; ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); pm_runtime_put_autosuspend(drm->dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index aa9fec80492d..40c47d6a7d78 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -100,12 +100,10 @@ static void nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page) { struct nouveau_dmem_chunk *chunk; - struct nouveau_drm *drm; unsigned long idx; chunk = (void *)hmm_devmem_page_get_drvdata(page); idx = page_to_pfn(page) - chunk->pfn_first; - drm = chunk->drm; /* * FIXME: @@ -456,11 +454,6 @@ nouveau_dmem_resume(struct nouveau_drm *drm) /* FIXME handle pin failure */ WARN_ON(ret); } - list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) { - ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false); - /* FIXME handle pin failure */ - WARN_ON(ret); - } mutex_unlock(&drm->dmem->mutex); } @@ -479,9 +472,6 @@ nouveau_dmem_suspend(struct nouveau_drm *drm) list_for_each_entry (chunk, &drm->dmem->chunk_full, list) { nouveau_bo_unpin(chunk->bo); } - list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) { - nouveau_bo_unpin(chunk->bo); - } mutex_unlock(&drm->dmem->mutex); } @@ -623,7 +613,7 @@ nouveau_dmem_init(struct nouveau_drm *drm) */ drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops, device, size); - if (drm->dmem->devmem == NULL) { + if (IS_ERR(drm->dmem->devmem)) { kfree(drm->dmem); drm->dmem = NULL; return; diff 
--git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index bb81e310eb6d..578d867a81d5 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -79,6 +79,10 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto free_dev; + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl"); + if (ret) + goto disable_pci; + ret = qxl_device_init(qdev, &qxl_driver, pdev); if (ret) goto disable_pci; @@ -94,7 +98,6 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto modeset_cleanup; - drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl"); drm_fbdev_generic_setup(&qdev->ddev, 32); return 0; diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index d5a23295dd80..bb7b58407039 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, *offset = drm_vma_node_offset_addr(&gobj->base.vma_node); out: - drm_gem_object_put(&gobj->base); + drm_gem_object_put_unlocked(&gobj->base); unlock: mutex_unlock(&udl->gem_lock); return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index b913a56f3426..2a9112515f46 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info) 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }; - struct drm_display_mode *old_mode; struct drm_display_mode *mode; int ret; - old_mode = par->set_mode; mode = drm_mode_duplicate(vmw_priv->dev, &new_mode); if (!mode) { DRM_ERROR("Could not create new fb mode.\n"); @@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info) mode->vdisplay = var->yres; vmw_guess_mode_timing(mode); - if (old_mode && drm_mode_equal(old_mode, mode)) { - drm_mode_destroy(vmw_priv->dev, mode); - mode = old_mode; - old_mode = NULL; - } else if (!vmw_kms_validate_mode_vram(vmw_priv, + if (!vmw_kms_validate_mode_vram(vmw_priv, mode->hdisplay * DIV_ROUND_UP(var->bits_per_pixel, 8), mode->vdisplay)) { @@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info) schedule_delayed_work(&par->local_work, 0); out_unlock: - if (old_mode) - drm_mode_destroy(vmw_priv->dev, old_mode); + if (par->set_mode) + drm_mode_destroy(vmw_priv->dev, par->set_mode); par->set_mode = mode; mutex_unlock(&par->bo_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index b93c558dd86e..7da752ca1c34 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); if (id < 0) - return id; + return (id != -ENOMEM ? 
0 : id); spin_lock(&gman->lock); diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index dc8e039bfab5..f2f3ef8af271 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -48,6 +48,8 @@ #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/screen_info.h> +#include <linux/vt.h> +#include <linux/console.h> #include <linux/uaccess.h> @@ -168,6 +170,53 @@ void vga_set_default_device(struct pci_dev *pdev) vga_default = pci_dev_get(pdev); } +/** + * vga_remove_vgacon - deactivete vga console + * + * Unbind and unregister vgacon in case pdev is the default vga + * device. Can be called by gpu drivers on initialization to make + * sure vga register access done by vgacon will not disturb the + * device. + * + * @pdev: pci device. + */ +#if !defined(CONFIG_VGA_CONSOLE) +int vga_remove_vgacon(struct pci_dev *pdev) +{ + return 0; +} +#elif !defined(CONFIG_DUMMY_CONSOLE) +int vga_remove_vgacon(struct pci_dev *pdev) +{ + return -ENODEV; +} +#else +int vga_remove_vgacon(struct pci_dev *pdev) +{ + int ret = 0; + + if (pdev != vga_default) + return 0; + vgaarb_info(&pdev->dev, "deactivate vga console\n"); + + console_lock(); + if (con_is_bound(&vga_con)) + ret = do_take_over_console(&dummy_con, 0, + MAX_NR_CONSOLES - 1, 1); + if (ret == 0) { + ret = do_unregister_con_driver(&vga_con); + + /* Ignore "already unregistered". */ + if (ret == -ENODEV) + ret = 0; + } + console_unlock(); + + return ret; +} +#endif +EXPORT_SYMBOL(vga_remove_vgacon); + static inline void vga_irq_set_state(struct vga_device *vgadev, bool state) { if (vgadev->irq_set_state) diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c index 9a63e87ea5f3..be302ec5f66b 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x.c +++ b/drivers/hwtracing/coresight/coresight-etm3x.c @@ -871,7 +871,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) } pm_runtime_put(&adev->dev); - dev_info(dev, "%s initialized\n", (char *)id->data); + dev_info(dev, "%s initialized\n", (char *)coresight_get_uci_data(id)); if (boot_enable) { coresight_enable(drvdata->csdev); drvdata->boot_enable = true; @@ -915,36 +915,18 @@ static const struct dev_pm_ops etm_dev_pm_ops = { }; static const struct amba_id etm_ids[] = { - { /* ETM 3.3 */ - .id = 0x000bb921, - .mask = 0x000fffff, - .data = "ETM 3.3", - }, - { /* ETM 3.5 - Cortex-A5 */ - .id = 0x000bb955, - .mask = 0x000fffff, - .data = "ETM 3.5", - }, - { /* ETM 3.5 */ - .id = 0x000bb956, - .mask = 0x000fffff, - .data = "ETM 3.5", - }, - { /* PTM 1.0 */ - .id = 0x000bb950, - .mask = 0x000fffff, - .data = "PTM 1.0", - }, - { /* PTM 1.1 */ - .id = 0x000bb95f, - .mask = 0x000fffff, - .data = "PTM 1.1", - }, - { /* PTM 1.1 Qualcomm */ - .id = 0x000b006f, - .mask = 0x000fffff, - .data = "PTM 1.1", - }, + /* ETM 3.3 */ + CS_AMBA_ID_DATA(0x000bb921, "ETM 3.3"), + /* ETM 3.5 - Cortex-A5 */ + CS_AMBA_ID_DATA(0x000bb955, "ETM 3.5"), + /* ETM 3.5 */ + CS_AMBA_ID_DATA(0x000bb956, "ETM 3.5"), + /* PTM 1.0 */ + CS_AMBA_ID_DATA(0x000bb950, "PTM 1.0"), + /* PTM 1.1 */ + CS_AMBA_ID_DATA(0x000bb95f, "PTM 1.1"), + /* PTM 1.1 Qualcomm */ + CS_AMBA_ID_DATA(0x000b006f, "PTM 1.1"), { 0, 0}, }; diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index fe76b176974a..08ce37c9475d 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -1068,18 +1068,21 @@ err_arch_supported: return ret; } -#define 
ETM4x_AMBA_ID(pid) \ - { \ - .id = pid, \ - .mask = 0x000fffff, \ +static struct amba_cs_uci_id uci_id_etm4[] = { + { + /* ETMv4 UCI data */ + .devarch = 0x47704a13, + .devarch_mask = 0xfff0ffff, + .devtype = 0x00000013, } +}; static const struct amba_id etm4_ids[] = { - ETM4x_AMBA_ID(0x000bb95d), /* Cortex-A53 */ - ETM4x_AMBA_ID(0x000bb95e), /* Cortex-A57 */ - ETM4x_AMBA_ID(0x000bb95a), /* Cortex-A72 */ - ETM4x_AMBA_ID(0x000bb959), /* Cortex-A73 */ - ETM4x_AMBA_ID(0x000bb9da), /* Cortex-A35 */ + CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */ + CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */ + CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */ + CS_AMBA_ID(0x000bb959), /* Cortex-A73 */ + CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4), /* Cortex-A35 */ {}, }; diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index b936c6d7e13f..e0684d06e9ee 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -6,6 +6,7 @@ #ifndef _CORESIGHT_PRIV_H #define _CORESIGHT_PRIV_H +#include <linux/amba/bus.h> #include <linux/bitops.h> #include <linux/io.h> #include <linux/coresight.h> @@ -160,4 +161,43 @@ static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; } static inline int etm_writel_cp14(u32 off, u32 val) { return 0; } #endif +/* + * Macros and inline functions to handle CoreSight UCI data and driver + * private data in AMBA ID table entries, and extract data values. + */ + +/* coresight AMBA ID, no UCI, no driver data: id table entry */ +#define CS_AMBA_ID(pid) \ + { \ + .id = pid, \ + .mask = 0x000fffff, \ + } + +/* coresight AMBA ID, UCI with driver data only: id table entry. */ +#define CS_AMBA_ID_DATA(pid, dval) \ + { \ + .id = pid, \ + .mask = 0x000fffff, \ + .data = (void *)&(struct amba_cs_uci_id) \ + { \ + .data = (void *)dval, \ + } \ + } + +/* coresight AMBA ID, full UCI structure: id table entry. */ +#define CS_AMBA_UCI_ID(pid, uci_ptr) \ + { \ + .id = pid, \ + .mask = 0x000fffff, \ + .data = uci_ptr \ + } + +/* extract the data value from a UCI structure given amba_id pointer. 
*/ +static inline void *coresight_get_uci_data(const struct amba_id *id) +{ + if (id->data) + return ((struct amba_cs_uci_id *)(id->data))->data; + return 0; +} + #endif diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index f07825df5c7a..9f8a844ed7aa 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -870,7 +870,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id) pm_runtime_put(&adev->dev); - dev_info(dev, "%s initialized\n", (char *)id->data); + dev_info(dev, "%s initialized\n", (char *)coresight_get_uci_data(id)); return 0; stm_unregister: @@ -905,16 +905,8 @@ static const struct dev_pm_ops stm_dev_pm_ops = { }; static const struct amba_id stm_ids[] = { - { - .id = 0x000bb962, - .mask = 0x000fffff, - .data = "STM32", - }, - { - .id = 0x000bb963, - .mask = 0x000fffff, - .data = "STM500", - }, + CS_AMBA_ID_DATA(0x000bb962, "STM32"), + CS_AMBA_ID_DATA(0x000bb963, "STM500"), { 0, 0}, }; diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index ea249f0bcd73..2a02da3d630f 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -443,7 +443,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) desc.type = CORESIGHT_DEV_TYPE_SINK; desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER; desc.ops = &tmc_etr_cs_ops; - ret = tmc_etr_setup_caps(drvdata, devid, id->data); + ret = tmc_etr_setup_caps(drvdata, devid, + coresight_get_uci_data(id)); if (ret) goto out; break; @@ -475,26 +476,13 @@ out: } static const struct amba_id tmc_ids[] = { - { - .id = 0x000bb961, - .mask = 0x000fffff, - }, - { - /* Coresight SoC 600 TMC-ETR/ETS */ - .id = 0x000bb9e8, - .mask = 0x000fffff, - .data = (void *)(unsigned long)CORESIGHT_SOC_600_ETR_CAPS, - }, - { - /* Coresight SoC 600 TMC-ETB */ - .id = 0x000bb9e9, - .mask = 0x000fffff, - }, - { - /* Coresight SoC 600 TMC-ETF */ - .id = 0x000bb9ea, - .mask = 0x000fffff, - }, + CS_AMBA_ID(0x000bb961), + /* Coresight SoC 600 TMC-ETR/ETS */ + CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS), + /* Coresight SoC 600 TMC-ETB */ + CS_AMBA_ID(0x000bb9e9), + /* Coresight SoC 600 TMC-ETF */ + CS_AMBA_ID(0x000bb9ea), { 0, 0}, }; diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index ead5e7de3e4d..416f89b8f881 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -86,7 +86,6 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev) struct i2c_timings *t = &dev->timings; u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0; - dev->adapter.nr = -1; dev->tx_fifo_depth = 32; dev->rx_fifo_depth = 32; @@ -219,7 +218,7 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev) dev->mode = DW_IC_SLAVE; } -static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id) +static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev) { u32 param, tx_fifo_depth, rx_fifo_depth; @@ -233,7 +232,6 @@ static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id) if (!dev->tx_fifo_depth) { dev->tx_fifo_depth = tx_fifo_depth; dev->rx_fifo_depth = rx_fifo_depth; - dev->adapter.nr = id; } else if (tx_fifo_depth >= 2) { dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth, tx_fifo_depth); @@ -358,13 +356,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) div_u64(clk_khz * t->sda_hold_ns + 
500000, 1000000); } - dw_i2c_set_fifo_size(dev, pdev->id); + dw_i2c_set_fifo_size(dev); adap = &dev->adapter; adap->owner = THIS_MODULE; adap->class = I2C_CLASS_DEPRECATED; ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); adap->dev.of_node = pdev->dev.of_node; + adap->nr = -1; dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE | diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 660de1ee68ed..684d651612b3 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -503,7 +503,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG); writel(I2C_DMA_CON_RX, i2c->pdmabase + OFFSET_CON); - dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 0); + dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 1); if (!dma_rd_buf) return -ENOMEM; @@ -526,7 +526,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG); writel(I2C_DMA_CON_TX, i2c->pdmabase + OFFSET_CON); - dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 0); + dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1); if (!dma_wr_buf) return -ENOMEM; @@ -549,7 +549,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_INT_FLAG); writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_CON); - dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 0); + dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1); if (!dma_wr_buf) return -ENOMEM; @@ -561,7 +561,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, return -ENOMEM; } - dma_rd_buf = i2c_get_dma_safe_msg_buf((msgs + 1), 0); + dma_rd_buf = i2c_get_dma_safe_msg_buf((msgs + 1), 1); if (!dma_rd_buf) { dma_unmap_single(i2c->dev, wpaddr, msgs->len, DMA_TO_DEVICE); diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index dd52a068b140..a7578f6da979 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -363,9 +363,6 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv) struct dma_chan *chan = priv->dma_direction == DMA_FROM_DEVICE ? priv->dma_rx : priv->dma_tx; - /* Disable DMA Master Received/Transmitted */ - rcar_i2c_write(priv, ICDMAER, 0); - dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), sg_dma_len(&priv->sg), priv->dma_direction); @@ -375,6 +372,9 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv) priv->flags |= ID_P_NO_RXDMA; priv->dma_direction = DMA_NONE; + + /* Disable DMA Master Received/Transmitted, must be last! */ + rcar_i2c_write(priv, ICDMAER, 0); } static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv) @@ -611,6 +611,15 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) return true; } +/* + * This driver has a lock-free design because there are IP cores (at least + * R-Car Gen2) which have an inherent race condition in their hardware design. + * There, we need to clear RCAR_BUS_MASK_DATA bits as soon as possible after + * the interrupt was generated, otherwise an unwanted repeated message gets + * generated. It turned out that taking a spinlock at the beginning of the ISR + * was already causing repeated messages. Thus, this driver was converted to + * the now lockless behaviour. Please keep this in mind when hacking the driver. 
+ */ static irqreturn_t rcar_i2c_irq(int irq, void *ptr) { struct rcar_i2c_priv *priv = ptr; diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c index 1e6805b5cef2..a57aa4fe51a4 100644 --- a/drivers/i2c/busses/i2c-sis630.c +++ b/drivers/i2c/busses/i2c-sis630.c @@ -478,7 +478,7 @@ static int sis630_setup(struct pci_dev *sis630_dev) if (!request_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION, sis630_driver.name)) { dev_err(&sis630_dev->dev, - "I/O Region 0x%04hx-0x%04hx for SMBus already in use.\n", + "I/O Region 0x%04x-0x%04x for SMBus already in use.\n", smbus_base + SMB_STS, smbus_base + SMB_STS + SIS630_SMB_IOREGION - 1); retval = -EBUSY; @@ -528,7 +528,7 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id) sis630_adapter.dev.parent = &dev->dev; snprintf(sis630_adapter.name, sizeof(sis630_adapter.name), - "SMBus SIS630 adapter at %04hx", smbus_base + SMB_STS); + "SMBus SIS630 adapter at %04x", smbus_base + SMB_STS); return i2c_add_adapter(&sis630_adapter); } diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 13e1213561d4..4284fc991cfd 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -432,7 +432,7 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev, STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0); dnf_delay = setup->dnf * i2cclk; - sdadel_min = setup->fall_time - i2c_specs[setup->speed].hddat_min - + sdadel_min = i2c_specs[setup->speed].hddat_min + setup->fall_time - af_delay_min - (setup->dnf + 3) * i2cclk; sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time - diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index cb6c5cb0df0b..38af18645133 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -2258,7 +2258,8 @@ EXPORT_SYMBOL(i2c_put_adapter); /** * i2c_get_dma_safe_msg_buf() - get a DMA safe buffer for the given i2c_msg * @msg: the message to be checked - * @threshold: the minimum number of bytes for which using DMA makes sense + * @threshold: the minimum number of bytes for which using DMA makes sense. + * Should at least be 1. * * Return: NULL if a DMA safe buffer was not obtained. Use msg->buf with PIO. * Or a valid pointer to be used with DMA. 
After use, release it by @@ -2268,7 +2269,11 @@ EXPORT_SYMBOL(i2c_put_adapter); */ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold) { - if (msg->len < threshold) + /* also skip 0-length msgs for bogus thresholds of 0 */ + if (!threshold) + pr_debug("DMA buffer for addr=0x%02x with length 0 is bogus\n", + msg->addr); + if (msg->len < threshold || msg->len == 0) return NULL; if (msg->flags & I2C_M_DMA_SAFE) diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index c5a881172524..337410f40860 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, rcu_read_lock(); in = __in_dev_get_rcu(upper_dev); - local_ipaddr = ntohl(in->ifa_list->ifa_address); + + if (!in->ifa_list) + local_ipaddr = 0; + else + local_ipaddr = ntohl(in->ifa_list->ifa_address); + rcu_read_unlock(); } else { local_ipaddr = ntohl(ifa->ifa_address); @@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, case NETDEV_UP: /* Fall through */ case NETDEV_CHANGEADDR: + + /* Just skip if no need to handle ARP cache */ + if (!local_ipaddr) + break; + i40iw_manage_arp_cache(iwdev, netdev->dev_addr, &local_ipaddr, diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index 782499abcd98..2a0b59a4b6eb 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev) unsigned long flags; for (i = 0 ; i < dev->num_ports; i++) { - cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work); det = &sriov->alias_guid.ports_guid[i]; + cancel_delayed_work_sync(&det->alias_guid_work); spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags); while (!list_empty(&det->cb_list)) { cb_ctx = list_entry(det->cb_list.next, diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index eaa055007f28..9e08df7914aa 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -20,6 +20,7 @@ enum devx_obj_flags { DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0, + DEVX_OBJ_FLAGS_DCT = 1 << 1, }; struct devx_async_data { @@ -39,7 +40,10 @@ struct devx_obj { u32 dinlen; /* destroy inbox length */ u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW]; u32 flags; - struct mlx5_ib_devx_mr devx_mr; + union { + struct mlx5_ib_devx_mr devx_mr; + struct mlx5_core_dct core_dct; + }; }; struct devx_umem { @@ -347,7 +351,6 @@ static u64 devx_get_obj_id(const void *in) obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ, MLX5_GET(arm_rq_in, in, srq_number)); break; - case MLX5_CMD_OP_DRAIN_DCT: case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT, MLX5_GET(drain_dct_in, in, dctn)); @@ -618,7 +621,6 @@ static bool devx_is_obj_modify_cmd(const void *in) case MLX5_CMD_OP_2RST_QP: case MLX5_CMD_OP_ARM_XRC_SRQ: case MLX5_CMD_OP_ARM_RQ: - case MLX5_CMD_OP_DRAIN_DCT: case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: case MLX5_CMD_OP_ARM_XRQ: case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: @@ -1124,7 +1126,11 @@ static int devx_obj_cleanup(struct ib_uobject *uobject, if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) devx_cleanup_mkey(obj); - ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); + if (obj->flags & DEVX_OBJ_FLAGS_DCT) + ret = mlx5_core_destroy_dct(obj->mdev, &obj->core_dct); + else + ret = 
mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, + sizeof(out)); if (ib_is_destroy_retryable(ret, why, uobject)) return ret; @@ -1185,9 +1191,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( devx_set_umem_valid(cmd_in); } - err = mlx5_cmd_exec(dev->mdev, cmd_in, - cmd_in_len, - cmd_out, cmd_out_len); + if (opcode == MLX5_CMD_OP_CREATE_DCT) { + obj->flags |= DEVX_OBJ_FLAGS_DCT; + err = mlx5_core_create_dct(dev->mdev, &obj->core_dct, + cmd_in, cmd_in_len, + cmd_out, cmd_out_len); + } else { + err = mlx5_cmd_exec(dev->mdev, cmd_in, + cmd_in_len, + cmd_out, cmd_out_len); + } + if (err) goto obj_free; @@ -1214,7 +1228,11 @@ err_copy: if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) devx_cleanup_mkey(obj); obj_destroy: - mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); + if (obj->flags & DEVX_OBJ_FLAGS_DCT) + mlx5_core_destroy_dct(obj->mdev, &obj->core_dct); + else + mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, + sizeof(out)); obj_free: kfree(obj); return err; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 994c19d01211..531ff20b32ad 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -415,10 +415,17 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed, *active_speed = IB_SPEED_EDR; break; case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2): + *active_width = IB_WIDTH_2X; + *active_speed = IB_SPEED_EDR; + break; case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR): *active_width = IB_WIDTH_1X; *active_speed = IB_SPEED_HDR; break; + case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4): + *active_width = IB_WIDTH_4X; + *active_speed = IB_SPEED_EDR; + break; case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2): *active_width = IB_WIDTH_2X; *active_speed = IB_SPEED_HDR; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 6b1f0e76900b..7cd006da1dae 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3729,6 +3729,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { struct mlx5_ib_modify_qp_resp resp = {}; + u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0}; u32 min_resp_len = offsetof(typeof(resp), dctn) + sizeof(resp.dctn); @@ -3747,7 +3748,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit); err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in, - MLX5_ST_SZ_BYTES(create_dct_in)); + MLX5_ST_SZ_BYTES(create_dct_in), out, + sizeof(out)); if (err) return err; resp.dctn = qp->dct.mdct.mqp.qpn; diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 6b0760dafb3e..21cb088d6687 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -140,10 +140,14 @@ static struct lock_class_key reserved_rbtree_key; static inline int match_hid_uid(struct device *dev, struct acpihid_map_entry *entry) { + struct acpi_device *adev = ACPI_COMPANION(dev); const char *hid, *uid; - hid = acpi_device_hid(ACPI_COMPANION(dev)); - uid = acpi_device_uid(ACPI_COMPANION(dev)); + if (!adev) + return -ENODEV; + + hid = acpi_device_hid(adev); + uid = acpi_device_uid(adev); if (!hid || !(*hid)) return -ENODEV; @@ -2604,7 +2608,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, /* Everything is mapped - write the right values into s->dma_address */ 
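The amd_iommu map_sg() fixup in the hunk that follows is easier to check with concrete numbers. The sketch below is illustrative only: the DEMO_* names and the example values are invented, and 4 KiB pages are assumed.

#define DEMO_PAGE_SIZE 0x1000UL
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

static unsigned long demo_sg_dma_address(unsigned long iova_base,
					 unsigned long sg_offset)
{
	/*
	 * The mapping loop earlier in map_sg() (not visible in this hunk)
	 * starts mapping at the page that actually contains the data,
	 * i.e. sg_phys(s) & PAGE_MASK, so whole pages of sg_offset are
	 * already covered and only the in-page remainder may be added
	 * back.  With iova_base = 0x100000 and sg_offset = 0x1804:
	 *
	 *   pre-fix:  iova_base + sg_offset             == 0x101804 (one page too far)
	 *   post-fix: iova_base + (sg_offset & 0xfff)   == 0x100804 (correct)
	 */
	return iova_base + (sg_offset & ~DEMO_PAGE_MASK);
}
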
for_each_sg(sglist, s, nelems, i) { - s->dma_address += address + s->offset; + /* + * Add in the remaining piece of the scatter-gather offset that + * was masked out when we were determining the physical address + * via (sg_phys(s) & PAGE_MASK) earlier. + */ + s->dma_address += address + (s->offset & ~PAGE_MASK); s->dma_length = s->length; } diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 87274b54febd..28cb713d728c 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1538,6 +1538,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) u32 pmen; unsigned long flags; + if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) + return; + raw_spin_lock_irqsave(&iommu->register_lock, flags); pmen = readl(iommu->reg + DMAR_PMEN_REG); pmen &= ~DMA_PMEN_EPM; @@ -5332,7 +5335,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd ctx_lo = context[0].lo; - sdev->did = domain->iommu_did[iommu->seq_id]; + sdev->did = FLPT_DEFAULT_DID; sdev->sid = PCI_DEVID(info->bus, info->devfn); if (!(ctx_lo & CONTEXT_PASIDE)) { diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index f8d3ba247523..2de8122e218f 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, curr_iova = rb_entry(curr, struct iova, node); } while (curr && new_pfn <= curr_iova->pfn_hi); - if (limit_pfn < size || new_pfn < iovad->start_pfn) + if (limit_pfn < size || new_pfn < iovad->start_pfn) { + iovad->max32_alloc_size = size; goto iova32_full; + } /* pfn_lo will point to size aligned address if size_aligned is set */ new->pfn_lo = new_pfn; @@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, return 0; iova32_full: - iovad->max32_alloc_size = size; spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return -ENOMEM; } diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 83364fedbf0a..5e4ca139e4ea 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -275,14 +275,14 @@ out_free: return ret; } -int __init brcmstb_l2_edge_intc_of_init(struct device_node *np, +static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np, struct device_node *parent) { return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init); } IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init); -int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np, +static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np, struct device_node *parent) { return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 2dd1ff0cf558..7577755bdcf4 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1482,7 +1482,7 @@ static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b) ra = container_of(a, struct lpi_range, entry); rb = container_of(b, struct lpi_range, entry); - return rb->base_id - ra->base_id; + return ra->base_id - rb->base_id; } static void merge_lpi_ranges(void) diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index ba2a37a27a54..fd3110c171ba 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1089,11 +1089,10 @@ static void gic_init_chip(struct gic_chip_data *gic, struct device *dev, #endif } -static int gic_init_bases(struct gic_chip_data 
*gic, int irq_start, +static int gic_init_bases(struct gic_chip_data *gic, struct fwnode_handle *handle) { - irq_hw_number_t hwirq_base; - int gic_irqs, irq_base, ret; + int gic_irqs, ret; if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) { /* Frankein-GIC without banked registers... */ @@ -1145,28 +1144,21 @@ static int gic_init_bases(struct gic_chip_data *gic, int irq_start, } else { /* Legacy support */ /* * For primary GICs, skip over SGIs. - * For secondary GICs, skip over PPIs, too. + * No secondary GIC support whatsoever. */ - if (gic == &gic_data[0] && (irq_start & 31) > 0) { - hwirq_base = 16; - if (irq_start != -1) - irq_start = (irq_start & ~31) + 16; - } else { - hwirq_base = 32; - } + int irq_base; - gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ + gic_irqs -= 16; /* calculate # of irqs to allocate */ - irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, + irq_base = irq_alloc_descs(16, 16, gic_irqs, numa_node_id()); if (irq_base < 0) { - WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", - irq_start); - irq_base = irq_start; + WARN(1, "Cannot allocate irq_descs @ IRQ16, assuming pre-allocated\n"); + irq_base = 16; } gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base, - hwirq_base, &gic_irq_domain_ops, gic); + 16, &gic_irq_domain_ops, gic); } if (WARN_ON(!gic->domain)) { @@ -1195,7 +1187,6 @@ error: } static int __init __gic_init_bases(struct gic_chip_data *gic, - int irq_start, struct fwnode_handle *handle) { char *name; @@ -1231,32 +1222,28 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, gic_init_chip(gic, NULL, name, false); } - ret = gic_init_bases(gic, irq_start, handle); + ret = gic_init_bases(gic, handle); if (ret) kfree(name); return ret; } -void __init gic_init(unsigned int gic_nr, int irq_start, - void __iomem *dist_base, void __iomem *cpu_base) +void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base) { struct gic_chip_data *gic; - if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR)) - return; - /* * Non-DT/ACPI systems won't run a hypervisor, so let's not * bother with these... 
*/ static_branch_disable(&supports_deactivate_key); - gic = &gic_data[gic_nr]; + gic = &gic_data[0]; gic->raw_dist_base = dist_base; gic->raw_cpu_base = cpu_base; - __gic_init_bases(gic, irq_start, NULL); + __gic_init_bases(gic, NULL); } static void gic_teardown(struct gic_chip_data *gic) @@ -1399,7 +1386,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq) if (ret) return ret; - ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode); + ret = gic_init_bases(*gic, &dev->of_node->fwnode); if (ret) { gic_teardown(*gic); return ret; @@ -1459,7 +1446,7 @@ gic_of_init(struct device_node *node, struct device_node *parent) if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base)) static_branch_disable(&supports_deactivate_key); - ret = __gic_init_bases(gic, -1, &node->fwnode); + ret = __gic_init_bases(gic, &node->fwnode); if (ret) { gic_teardown(gic); return ret; @@ -1650,7 +1637,7 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, return -ENOMEM; } - ret = __gic_init_bases(gic, -1, domain_handle); + ret = __gic_init_bases(gic, domain_handle); if (ret) { pr_err("Failed to initialise GIC\n"); irq_domain_free_fwnode(domain_handle); diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c index d1098f4da6a4..88df3d00052c 100644 --- a/drivers/irqchip/irq-imx-irqsteer.c +++ b/drivers/irqchip/irq-imx-irqsteer.c @@ -169,8 +169,12 @@ static int imx_irqsteer_probe(struct platform_device *pdev) raw_spin_lock_init(&data->lock); - of_property_read_u32(np, "fsl,num-irqs", &irqs_num); - of_property_read_u32(np, "fsl,channel", &data->channel); + ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num); + if (ret) + return ret; + ret = of_property_read_u32(np, "fsl,channel", &data->channel); + if (ret) + return ret; /* * There is one output irq for each group of 64 inputs. 
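The irqsteer hunk above makes failed devicetree reads fatal instead of leaving the output variables unset. A generic sketch of that probe pattern follows; struct demo_data and the "demo,num-irqs" property are invented stand-ins for the real driver specifics.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct demo_data {
	u32 num_irqs;
};

static int demo_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct demo_data *data;
	int ret;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Propagate the error rather than continuing with garbage. */
	ret = of_property_read_u32(np, "demo,num-irqs", &data->num_irqs);
	if (ret) {
		dev_err(&pdev->dev, "failed to read demo,num-irqs: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, data);
	return 0;
}
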
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 567b29c47608..98b6e1d4b1a6 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg) void __iomem *base = d->chip_data; u32 val; + if (!msg->address_lo && !msg->address_hi) + return; + base += get_mbigen_vec_reg(d->hwirq); val = readl_relaxed(base); diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 3496b61a312a..8eed478f3b7e 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c @@ -179,7 +179,7 @@ static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node, return 0; } -const struct irq_domain_ops mmp_irq_domain_ops = { +static const struct irq_domain_ops mmp_irq_domain_ops = { .map = mmp_irq_domain_map, .xlate = mmp_irq_domain_xlate, }; diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c index add4c9c934c8..18832ccc8ff8 100644 --- a/drivers/irqchip/irq-mvebu-sei.c +++ b/drivers/irqchip/irq-mvebu-sei.c @@ -478,7 +478,7 @@ dispose_irq: return ret; } -struct mvebu_sei_caps mvebu_sei_ap806_caps = { +static struct mvebu_sei_caps mvebu_sei_ap806_caps = { .ap_range = { .first = 0, .size = 21, diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index a93296b9b45d..7bd1d4cb2e19 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -716,7 +716,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, const struct stm32_exti_bank *stm32_bank; struct stm32_exti_chip_data *chip_data; void __iomem *base = h_data->base; - u32 irqs_mask; stm32_bank = h_data->drv_data->exti_banks[bank_idx]; chip_data = &h_data->chips_data[bank_idx]; @@ -725,21 +724,12 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, raw_spin_lock_init(&chip_data->rlock); - /* Determine number of irqs supported */ - writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst); - irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst); - /* * This IP has no reset, so after hot reboot we should * clear registers to avoid residue */ writel_relaxed(0, base + stm32_bank->imr_ofst); writel_relaxed(0, base + stm32_bank->emr_ofst); - writel_relaxed(0, base + stm32_bank->rtsr_ofst); - writel_relaxed(0, base + stm32_bank->ftsr_ofst); - writel_relaxed(~0UL, base + stm32_bank->rpr_ofst); - if (stm32_bank->fpr_ofst != UNDEF_REG) - writel_relaxed(~0UL, base + stm32_bank->fpr_ofst); pr_info("%pOF: bank%d\n", h_data->node, bank_idx); diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c index b014957dde0b..a5f8bc2defbc 100644 --- a/drivers/lightnvm/pblk-rl.c +++ b/drivers/lightnvm/pblk-rl.c @@ -233,10 +233,15 @@ void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold) /* To start with, all buffer is available to user I/O writers */ rl->rb_budget = budget; rl->rb_user_max = budget; - rl->rb_max_io = threshold ? 
(budget - threshold) : (budget - 1); rl->rb_gc_max = 0; rl->rb_state = PBLK_RL_HIGH; + /* Maximize I/O size and ansure that back threshold is respected */ + if (threshold) + rl->rb_max_io = budget - pblk->min_write_pgs_data - threshold; + else + rl->rb_max_io = budget - pblk->min_write_pgs_data - 1; + atomic_set(&rl->rb_user_cnt, 0); atomic_set(&rl->rb_gc_cnt, 0); atomic_set(&rl->rb_space, -1); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index abb5d382f64d..3b6880dd648d 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3939,6 +3939,8 @@ static int raid10_run(struct mddev *mddev) set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); mddev->sync_thread = md_register_thread(md_do_sync, mddev, "reshape"); + if (!mddev->sync_thread) + goto out_free_conf; } return 0; @@ -4670,7 +4672,6 @@ read_more: atomic_inc(&r10_bio->remaining); read_bio->bi_next = NULL; generic_make_request(read_bio); - sector_nr += nr_sectors; sectors_done += nr_sectors; if (sector_nr <= last) goto read_more; diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h index bfb811407061..43c714a8798c 100644 --- a/drivers/md/raid5-log.h +++ b/drivers/md/raid5-log.h @@ -45,6 +45,7 @@ extern void ppl_stripe_write_finished(struct stripe_head *sh); extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add); extern void ppl_quiesce(struct r5conf *conf, int quiesce); extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); +extern struct md_sysfs_entry ppl_write_hint; static inline bool raid5_has_log(struct r5conf *conf) { diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 0b096ddc9c1e..17e9e7d51097 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -20,6 +20,7 @@ #include <linux/raid/md_p.h> #include "md.h" #include "raid5.h" +#include "raid5-log.h" /* * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for @@ -115,6 +116,8 @@ struct ppl_conf { /* stripes to retry if failed to allocate io_unit */ struct list_head no_mem_stripes; spinlock_t no_mem_stripes_lock; + + unsigned short write_hint; }; struct ppl_log { @@ -474,6 +477,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) bio_set_dev(bio, log->rdev->bdev); bio->bi_iter.bi_sector = log->next_io_sector; bio_add_page(bio, io->header_page, PAGE_SIZE, 0); + bio->bi_write_hint = ppl_conf->write_hint; pr_debug("%s: log->current_io_sector: %llu\n", __func__, (unsigned long long)log->next_io_sector); @@ -503,6 +507,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, &ppl_conf->bs); bio->bi_opf = prev->bi_opf; + bio->bi_write_hint = prev->bi_write_hint; bio_copy_dev(bio, prev); bio->bi_iter.bi_sector = bio_end_sector(prev); bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); @@ -1407,6 +1412,7 @@ int ppl_init_log(struct r5conf *conf) atomic64_set(&ppl_conf->seq, 0); INIT_LIST_HEAD(&ppl_conf->no_mem_stripes); spin_lock_init(&ppl_conf->no_mem_stripes_lock); + ppl_conf->write_hint = RWF_WRITE_LIFE_NOT_SET; if (!mddev->external) { ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid)); @@ -1501,3 +1507,60 @@ int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add) return ret; } + +static ssize_t +ppl_write_hint_show(struct mddev *mddev, char *buf) +{ + size_t ret = 0; + struct r5conf *conf; + struct ppl_conf *ppl_conf = NULL; + + spin_lock(&mddev->lock); + conf = mddev->private; + if (conf && raid5_has_ppl(conf)) + ppl_conf = conf->log_private; + ret = sprintf(buf, "%d\n", ppl_conf ? 
ppl_conf->write_hint : 0); + spin_unlock(&mddev->lock); + + return ret; +} + +static ssize_t +ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len) +{ + struct r5conf *conf; + struct ppl_conf *ppl_conf; + int err = 0; + unsigned short new; + + if (len >= PAGE_SIZE) + return -EINVAL; + if (kstrtou16(page, 10, &new)) + return -EINVAL; + + err = mddev_lock(mddev); + if (err) + return err; + + conf = mddev->private; + if (!conf) { + err = -ENODEV; + } else if (raid5_has_ppl(conf)) { + ppl_conf = conf->log_private; + if (!ppl_conf) + err = -EINVAL; + else + ppl_conf->write_hint = new; + } else { + err = -EINVAL; + } + + mddev_unlock(mddev); + + return err ?: len; +} + +struct md_sysfs_entry +ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR, + ppl_write_hint_show, + ppl_write_hint_store); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 77ffd09be486..c033bfcb209e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6650,6 +6650,7 @@ static struct attribute *raid5_attrs[] = { &raid5_skip_copy.attr, &raid5_rmw_level.attr, &r5c_journal_mode.attr, + &ppl_write_hint.attr, NULL, }; static struct attribute_group raid5_attrs_group = { @@ -7393,6 +7394,8 @@ static int raid5_run(struct mddev *mddev) set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); mddev->sync_thread = md_register_thread(md_do_sync, mddev, "reshape"); + if (!mddev->sync_thread) + goto abort; } /* Ok, everything is just fine now */ diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c index c712b7deb3a9..82a97866e0cf 100644 --- a/drivers/mmc/host/alcor.c +++ b/drivers/mmc/host/alcor.c @@ -1044,14 +1044,27 @@ static void alcor_init_mmc(struct alcor_sdmmc_host *host) mmc->caps2 = MMC_CAP2_NO_SDIO; mmc->ops = &alcor_sdc_ops; - /* Hardware cannot do scatter lists */ + /* The hardware does DMA data transfer of 4096 bytes to/from a single + * buffer address. Scatterlists are not supported, but upon DMA + * completion (signalled via IRQ), the original vendor driver does + * then immediately set up another DMA transfer of the next 4096 + * bytes. + * + * This means that we need to handle the I/O in 4096 byte chunks. + * Lacking a way to limit the sglist entries to 4096 bytes, we instead + * impose that only one segment is provided, with maximum size 4096, + * which also happens to be the minimum size. This means that the + * single-entry sglist handled by this driver can be handed directly + * to the hardware, nice and simple. + * + * Unfortunately though, that means we only do 4096 bytes I/O per + * MMC command. A future improvement would be to make the driver + * accept sg lists and entries of any size, and simply iterate + * through them 4096 bytes at a time. 
+ */ mmc->max_segs = AU6601_MAX_DMA_SEGMENTS; mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE; - - mmc->max_blk_size = mmc->max_seg_size; - mmc->max_blk_count = mmc->max_segs; - - mmc->max_req_size = mmc->max_seg_size * mmc->max_segs; + mmc->max_req_size = mmc->max_seg_size; } static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 49e0daf2ef5e..f37003df1e01 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) { } #endif -static void __init init_mmcsd_host(struct mmc_davinci_host *host) +static void init_mmcsd_host(struct mmc_davinci_host *host) { mmc_davinci_reset_ctrl(host, 1); diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index d54612257b06..45f7b9b53d48 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -290,11 +290,8 @@ static void mxcmci_swap_buffers(struct mmc_data *data) struct scatterlist *sg; int i; - for_each_sg(data->sg, sg, data->sg_len, i) { - void *buf = kmap_atomic(sg_page(sg) + sg->offset); - buffer_swap32(buf, sg->length); - kunmap_atomic(buf); - } + for_each_sg(data->sg, sg, data->sg_len, i) + buffer_swap32(sg_virt(sg), sg->length); } #else static inline void mxcmci_swap_buffers(struct mmc_data *data) {} @@ -611,7 +608,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host) { struct mmc_data *data = host->req->data; struct scatterlist *sg; - void *buf; int stat, i; host->data = data; @@ -619,18 +615,14 @@ static int mxcmci_transfer_data(struct mxcmci_host *host) if (data->flags & MMC_DATA_READ) { for_each_sg(data->sg, sg, data->sg_len, i) { - buf = kmap_atomic(sg_page(sg) + sg->offset); - stat = mxcmci_pull(host, buf, sg->length); - kunmap(buf); + stat = mxcmci_pull(host, sg_virt(sg), sg->length); if (stat) return stat; host->datasize += sg->length; } } else { for_each_sg(data->sg, sg, data->sg_len, i) { - buf = kmap_atomic(sg_page(sg) + sg->offset); - stat = mxcmci_push(host, buf, sg->length); - kunmap(buf); + stat = mxcmci_push(host, sg_virt(sg), sg->length); if (stat) return stat; host->datasize += sg->length; diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index c907bf502a12..c1d3f0e38921 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -162,7 +162,7 @@ static void pxamci_dma_irq(void *param); static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) { struct dma_async_tx_descriptor *tx; - enum dma_data_direction direction; + enum dma_transfer_direction direction; struct dma_slave_config config; struct dma_chan *chan; unsigned int nob = data->blocks; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 71e13844df6c..8742e27e4e8b 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -641,6 +641,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, struct renesas_sdhi *priv; struct resource *res; int irq, ret, i; + u16 ver; of_data = of_device_get_match_data(&pdev->dev); @@ -773,12 +774,17 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (ret) goto efree; + ver = sd_ctrl_read16(host, CTL_VERSION); + /* GEN2_SDR104 is first known SDHI to use 32bit block count */ + if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX) + mmc_data->max_blk_count = U16_MAX; + ret = tmio_mmc_host_probe(host); if (ret < 0) goto edisclk; /* One Gen2 
SDHI incarnation does NOT have a CBSY bit */ - if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50) + if (ver == SDHI_VER_GEN2_SDR50) mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY; /* Enable tuning iff we have an SCC and a supported mode */ diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index b1a66ca3821a..5bbed477c9b1 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1056,6 +1056,9 @@ static int sdhci_omap_probe(struct platform_device *pdev) mmc->f_max = 48000000; } + if (!mmc_can_gpio_ro(mmc)) + mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; + pltfm_host->clk = devm_clk_get(dev, "fck"); if (IS_ERR(pltfm_host->clk)) { ret = PTR_ERR(pltfm_host->clk); diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 576b37d12a63..c4fa400efdcc 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -481,6 +481,155 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask); } +static u32 +qca8k_port_to_phy(int port) +{ + /* From Andrew Lunn: + * Port 0 has no internal phy. + * Port 1 has an internal PHY at MDIO address 0. + * Port 2 has an internal PHY at MDIO address 1. + * ... + * Port 5 has an internal PHY at MDIO address 4. + * Port 6 has no internal PHY. + */ + + return port - 1; +} + +static int +qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data) +{ + u32 phy, val; + + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) + return -EINVAL; + + /* callee is responsible for not passing bad ports, + * but we still would like to make spills impossible. + */ + phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR; + val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | + QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | + QCA8K_MDIO_MASTER_REG_ADDR(regnum) | + QCA8K_MDIO_MASTER_DATA(data); + + qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); + + return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_BUSY); +} + +static int +qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum) +{ + u32 phy, val; + + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) + return -EINVAL; + + /* callee is responsible for not passing bad ports, + * but we still would like to make spills impossible. 
+ */ + phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR; + val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | + QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | + QCA8K_MDIO_MASTER_REG_ADDR(regnum); + + qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); + + if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_BUSY)) + return -ETIMEDOUT; + + val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) & + QCA8K_MDIO_MASTER_DATA_MASK); + + return val; +} + +static int +qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data) +{ + struct qca8k_priv *priv = ds->priv; + + return qca8k_mdio_write(priv, port, regnum, data); +} + +static int +qca8k_phy_read(struct dsa_switch *ds, int port, int regnum) +{ + struct qca8k_priv *priv = ds->priv; + int ret; + + ret = qca8k_mdio_read(priv, port, regnum); + + if (ret < 0) + return 0xffff; + + return ret; +} + +static int +qca8k_setup_mdio_bus(struct qca8k_priv *priv) +{ + u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg; + struct device_node *ports, *port; + int err; + + ports = of_get_child_by_name(priv->dev->of_node, "ports"); + if (!ports) + return -EINVAL; + + for_each_available_child_of_node(ports, port) { + err = of_property_read_u32(port, "reg", ®); + if (err) + return err; + + if (!dsa_is_user_port(priv->ds, reg)) + continue; + + if (of_property_read_bool(port, "phy-handle")) + external_mdio_mask |= BIT(reg); + else + internal_mdio_mask |= BIT(reg); + } + + if (!external_mdio_mask && !internal_mdio_mask) { + dev_err(priv->dev, "no PHYs are defined.\n"); + return -EINVAL; + } + + /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through + * the MDIO_MASTER register also _disconnects_ the external MDC + * passthrough to the internal PHYs. It's not possible to use both + * configurations at the same time! + * + * Because this came up during the review process: + * If the external mdio-bus driver is capable magically disabling + * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's + * accessors for the time being, it would be possible to pull this + * off. + */ + if (!!external_mdio_mask && !!internal_mdio_mask) { + dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n"); + return -EINVAL; + } + + if (external_mdio_mask) { + /* Make sure to disable the internal mdio bus in cases + * a dt-overlay and driver reload changed the configuration + */ + + qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_EN); + return 0; + } + + priv->ops.phy_read = qca8k_phy_read; + priv->ops.phy_write = qca8k_phy_write; + return 0; +} + static int qca8k_setup(struct dsa_switch *ds) { @@ -502,6 +651,10 @@ qca8k_setup(struct dsa_switch *ds) if (IS_ERR(priv->regmap)) pr_warn("regmap initialization failed"); + ret = qca8k_setup_mdio_bus(priv); + if (ret) + return ret; + /* Initialize CPU port pad mode (xMII type, delays...) 
*/ phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn); if (phy_mode < 0) { @@ -624,22 +777,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy) qca8k_port_set_status(priv, port, 1); } -static int -qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - - return mdiobus_read(priv->bus, phy, regnum); -} - -static int -qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - - return mdiobus_write(priv->bus, phy, regnum, val); -} - static void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) { @@ -879,8 +1016,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .setup = qca8k_setup, .adjust_link = qca8k_adjust_link, .get_strings = qca8k_get_strings, - .phy_read = qca8k_phy_read, - .phy_write = qca8k_phy_write, .get_ethtool_stats = qca8k_get_ethtool_stats, .get_sset_count = qca8k_get_sset_count, .get_mac_eee = qca8k_get_mac_eee, @@ -923,7 +1058,8 @@ qca8k_sw_probe(struct mdio_device *mdiodev) return -ENOMEM; priv->ds->priv = priv; - priv->ds->ops = &qca8k_switch_ops; + priv->ops = qca8k_switch_ops; + priv->ds->ops = &priv->ops; mutex_init(&priv->reg_mutex); dev_set_drvdata(&mdiodev->dev, priv); diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index d146e54c8a6c..249fd62268e5 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -49,6 +49,18 @@ #define QCA8K_MIB_FLUSH BIT(24) #define QCA8K_MIB_CPU_KEEP BIT(20) #define QCA8K_MIB_BUSY BIT(17) +#define QCA8K_MDIO_MASTER_CTRL 0x3c +#define QCA8K_MDIO_MASTER_BUSY BIT(31) +#define QCA8K_MDIO_MASTER_EN BIT(30) +#define QCA8K_MDIO_MASTER_READ BIT(27) +#define QCA8K_MDIO_MASTER_WRITE 0 +#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26) +#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21) +#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16) +#define QCA8K_MDIO_MASTER_DATA(x) (x) +#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0) +#define QCA8K_MDIO_MASTER_MAX_PORTS 5 +#define QCA8K_MDIO_MASTER_MAX_REG 32 #define QCA8K_GOL_MAC_ADDR0 0x60 #define QCA8K_GOL_MAC_ADDR1 0x64 #define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) @@ -169,6 +181,7 @@ struct qca8k_priv { struct dsa_switch *ds; struct mutex reg_mutex; struct device *dev; + struct dsa_switch_ops ops; }; struct qca8k_mib_desc { diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index 808abb6b3671..b15752267c8d 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -1521,7 +1521,7 @@ static void update_stats(int ioaddr, struct net_device *dev) static void set_rx_mode(struct net_device *dev) { int ioaddr = dev->base_addr; - short new_mode; + unsigned short new_mode; if (dev->flags & IFF_PROMISC) { if (corkscrew_debug > 3) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 98c2352b1b83..a44171fddf47 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3370,14 +3370,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, *hclk = devm_clk_get(&pdev->dev, "hclk"); } - if (IS_ERR(*pclk)) { + if (IS_ERR_OR_NULL(*pclk)) { err = PTR_ERR(*pclk); + if (!err) + err = -ENODEV; + dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); return err; } - if (IS_ERR(*hclk)) { + if (IS_ERR_OR_NULL(*hclk)) { err = PTR_ERR(*hclk); + if (!err) + err = -ENODEV; + dev_err(&pdev->dev, "failed to get 
hclk (%u)\n", err); return err; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 3130b43bba52..02959035ed3f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2620,7 +2620,7 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset) } /* should never happen! */ - BUG_ON(1); + BUG(); return NULL; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 88773ca58e6b..b3da81e90132 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -476,7 +476,7 @@ static inline int get_buf_size(struct adapter *adapter, break; default: - BUG_ON(1); + BUG(); } return buf_size; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index e923e5cecc4e..2055c97dc22b 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -815,6 +815,14 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) */ queue_mapping = skb_get_queue_mapping(skb); fq = &priv->fq[queue_mapping]; + + fd_len = dpaa2_fd_get_len(&fd); + nq = netdev_get_tx_queue(net_dev, queue_mapping); + netdev_tx_sent_queue(nq, fd_len); + + /* Everything that happens after this enqueues might race with + * the Tx confirmation callback for this frame + */ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { err = priv->enqueue(priv, fq, &fd, 0); if (err != -EBUSY) @@ -825,13 +833,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) percpu_stats->tx_errors++; /* Clean up everything, including freeing the skb */ free_tx_fd(priv, fq, &fd, false); + netdev_tx_completed_queue(nq, 1, fd_len); } else { - fd_len = dpaa2_fd_get_len(&fd); percpu_stats->tx_packets++; percpu_stats->tx_bytes += fd_len; - - nq = netdev_get_tx_queue(net_dev, queue_mapping); - netdev_tx_sent_queue(nq, fd_len); } return NETDEV_TX_OK; @@ -1823,7 +1828,7 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev, dpaa2_fd_set_format(&fd, dpaa2_fd_single); dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA); - fq = &priv->fq[smp_processor_id()]; + fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)]; for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { err = priv->enqueue(priv, fq, &fd, 0); if (err != -EBUSY) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 1f4d181f169f..21085c4bf66b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -22,6 +22,7 @@ #include "hns3_enet.h" #define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) +#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) static void hns3_clear_all_ring(struct hnae3_handle *h); static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); @@ -1079,7 +1080,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, desc_cb->length = size; - frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; + frag_buf_num = hns3_tx_bd_count(size); sizeoflast = size & HNS3_TX_LAST_SIZE_M; sizeoflast = sizeoflast ? 
sizeoflast : HNS3_MAX_BD_SIZE; @@ -1124,14 +1125,13 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, int i; size = skb_headlen(skb); - buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; + buf_num = hns3_tx_bd_count(size); frag_num = skb_shinfo(skb)->nr_frags; for (i = 0; i < frag_num; i++) { frag = &skb_shinfo(skb)->frags[i]; size = skb_frag_size(frag); - bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >> - HNS3_MAX_BD_SIZE_OFFSET; + bdnum_for_frag = hns3_tx_bd_count(size); if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)) return -ENOMEM; @@ -1139,8 +1139,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, } if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { - buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >> - HNS3_MAX_BD_SIZE_OFFSET; + buf_num = hns3_tx_bd_count(skb->len); if (ring_space(ring) < buf_num) return -EBUSY; /* manual split the send packet */ @@ -1169,7 +1168,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, buf_num = skb_shinfo(skb)->nr_frags + 1; if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { - buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + buf_num = hns3_tx_bd_count(skb->len); if (ring_space(ring) < buf_num) return -EBUSY; /* manual split the send packet */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 1db0bd41d209..75669cd0c311 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -193,7 +193,6 @@ enum hns3_nic_state { #define HNS3_VECTOR_INITED 1 #define HNS3_MAX_BD_SIZE 65535 -#define HNS3_MAX_BD_SIZE_OFFSET 16 #define HNS3_MAX_BD_PER_FRAG 8 #define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 3baabdc89726..90b62c1412c8 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3160,6 +3160,7 @@ static ssize_t ehea_probe_port(struct device *dev, if (ehea_add_adapter_mr(adapter)) { pr_err("creating MR failed\n"); + of_node_put(eth_dn); return -EIO; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 370ca94b6775..b8ba74de9555 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -40,6 +40,9 @@ #include "mlx5_core.h" #include "lib/eq.h" +static int mlx5_core_drain_dct(struct mlx5_core_dev *dev, + struct mlx5_core_dct *dct); + static struct mlx5_core_rsc_common * mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) { @@ -227,20 +230,49 @@ static void destroy_resource_common(struct mlx5_core_dev *dev, wait_for_completion(&qp->common.free); } +static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev, + struct mlx5_core_dct *dct, bool need_cleanup) +{ + u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0}; + struct mlx5_core_qp *qp = &dct->mqp; + int err; + + err = mlx5_core_drain_dct(dev, dct); + if (err) { + if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + goto destroy; + } else { + mlx5_core_warn( + dev, "failed drain DCT 0x%x with error 0x%x\n", + qp->qpn, err); + return err; + } + } + wait_for_completion(&dct->drained); +destroy: + if (need_cleanup) + destroy_resource_common(dev, &dct->mqp); + MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT); + MLX5_SET(destroy_dct_in, in, dctn, qp->qpn); + MLX5_SET(destroy_dct_in, 
in, uid, qp->uid); + err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in), + (void *)&out, sizeof(out)); + return err; +} + int mlx5_core_create_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct, - u32 *in, int inlen) + u32 *in, int inlen, + u32 *out, int outlen) { - u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0}; - u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0}; - u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0}; struct mlx5_core_qp *qp = &dct->mqp; int err; init_completion(&dct->drained); MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, outlen); if (err) { mlx5_core_warn(dev, "create DCT failed, ret %d\n", err); return err; @@ -254,11 +286,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev, return 0; err_cmd: - MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT); - MLX5_SET(destroy_dct_in, din, dctn, qp->qpn); - MLX5_SET(destroy_dct_in, din, uid, qp->uid); - mlx5_cmd_exec(dev, (void *)&in, sizeof(din), - (void *)&out, sizeof(dout)); + _mlx5_core_destroy_dct(dev, dct, false); return err; } EXPORT_SYMBOL_GPL(mlx5_core_create_dct); @@ -323,29 +351,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev, int mlx5_core_destroy_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct) { - u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0}; - struct mlx5_core_qp *qp = &dct->mqp; - int err; - - err = mlx5_core_drain_dct(dev, dct); - if (err) { - if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - goto destroy; - } else { - mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err); - return err; - } - } - wait_for_completion(&dct->drained); -destroy: - destroy_resource_common(dev, &dct->mqp); - MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT); - MLX5_SET(destroy_dct_in, in, dctn, qp->qpn); - MLX5_SET(destroy_dct_in, in, uid, qp->uid); - err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in), - (void *)&out, sizeof(out)); - return err; + return _mlx5_core_destroy_dct(dev, dct, true); } EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 7a15e932ed2f..c1c1965d7acc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -113,7 +113,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, return 0; default: /* Do not consider thresholds for zero temperature. 
*/ - if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) { + if (MLXSW_REG_MTMP_TEMP_TO_MC(module_temp) == 0) { *temp = 0; return 0; } diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index bd6e9014bc74..7849119d407a 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -142,6 +142,12 @@ struct ks8851_net { static int msg_enable; +/* SPI frame opcodes */ +#define KS_SPIOP_RD (0x00) +#define KS_SPIOP_WR (0x40) +#define KS_SPIOP_RXFIFO (0x80) +#define KS_SPIOP_TXFIFO (0xC0) + /* shift for byte-enable data */ #define BYTE_EN(_x) ((_x) << 2) @@ -535,9 +541,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) /* set dma read address */ ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00); - /* start the packet dma process, and set auto-dequeue rx */ - ks8851_wrreg16(ks, KS_RXQCR, - ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); + /* start DMA access */ + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); if (rxlen > 4) { unsigned int rxalign; @@ -568,7 +573,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) } } - ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); + /* end DMA access and dequeue packet */ + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF); } } @@ -785,6 +791,15 @@ static void ks8851_tx_work(struct work_struct *work) static int ks8851_net_open(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); + int ret; + + ret = request_threaded_irq(dev->irq, NULL, ks8851_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + dev->name, ks); + if (ret < 0) { + netdev_err(dev, "failed to get irq\n"); + return ret; + } /* lock the card, even if we may not actually be doing anything * else at the moment */ @@ -849,6 +864,7 @@ static int ks8851_net_open(struct net_device *dev) netif_dbg(ks, ifup, ks->netdev, "network device up\n"); mutex_unlock(&ks->lock); + mii_check_link(&ks->mii); return 0; } @@ -899,6 +915,8 @@ static int ks8851_net_stop(struct net_device *dev) dev_kfree_skb(txb); } + free_irq(dev->irq, ks); + return 0; } @@ -1508,6 +1526,7 @@ static int ks8851_probe(struct spi_device *spi) spi_set_drvdata(spi, ks); + netif_carrier_off(ks->netdev); ndev->if_port = IF_PORT_100BASET; ndev->netdev_ops = &ks8851_netdev_ops; ndev->irq = spi->irq; @@ -1529,14 +1548,6 @@ static int ks8851_probe(struct spi_device *spi) ks8851_read_selftest(ks); ks8851_init_mac(ks); - ret = request_threaded_irq(spi->irq, NULL, ks8851_irq, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, - ndev->name, ks); - if (ret < 0) { - dev_err(&spi->dev, "failed to get irq\n"); - goto err_irq; - } - ret = register_netdev(ndev); if (ret) { dev_err(&spi->dev, "failed to register network device\n"); @@ -1549,14 +1560,10 @@ static int ks8851_probe(struct spi_device *spi) return 0; - err_netdev: - free_irq(ndev->irq, ks); - -err_irq: +err_id: if (gpio_is_valid(gpio)) gpio_set_value(gpio, 0); -err_id: regulator_disable(ks->vdd_reg); err_reg: regulator_disable(ks->vdd_io); @@ -1574,7 +1581,6 @@ static int ks8851_remove(struct spi_device *spi) dev_info(&spi->dev, "remove\n"); unregister_netdev(priv->netdev); - free_irq(spi->irq, priv); if (gpio_is_valid(priv->gpio)) gpio_set_value(priv->gpio, 0); regulator_disable(priv->vdd_reg); diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h index 852256ef1f22..23da1e3ee429 100644 --- a/drivers/net/ethernet/micrel/ks8851.h +++ b/drivers/net/ethernet/micrel/ks8851.h @@ -11,9 +11,15 @@ */ #define KS_CCR 0x08 +#define CCR_LE (1 << 10) /* KSZ8851-16MLL */ #define CCR_EEPROM (1 << 9) -#define CCR_SPI 
(1 << 8) -#define CCR_32PIN (1 << 0) +#define CCR_SPI (1 << 8) /* KSZ8851SNL */ +#define CCR_8BIT (1 << 7) /* KSZ8851-16MLL */ +#define CCR_16BIT (1 << 6) /* KSZ8851-16MLL */ +#define CCR_32BIT (1 << 5) /* KSZ8851-16MLL */ +#define CCR_SHARED (1 << 4) /* KSZ8851-16MLL */ +#define CCR_48PIN (1 << 1) /* KSZ8851-16MLL */ +#define CCR_32PIN (1 << 0) /* KSZ8851SNL */ /* MAC address registers */ #define KS_MAR(_m) (0x15 - (_m)) @@ -112,13 +118,13 @@ #define RXCR1_RXE (1 << 0) #define KS_RXCR2 0x76 -#define RXCR2_SRDBL_MASK (0x7 << 5) -#define RXCR2_SRDBL_SHIFT (5) -#define RXCR2_SRDBL_4B (0x0 << 5) -#define RXCR2_SRDBL_8B (0x1 << 5) -#define RXCR2_SRDBL_16B (0x2 << 5) -#define RXCR2_SRDBL_32B (0x3 << 5) -#define RXCR2_SRDBL_FRAME (0x4 << 5) +#define RXCR2_SRDBL_MASK (0x7 << 5) /* KSZ8851SNL */ +#define RXCR2_SRDBL_SHIFT (5) /* KSZ8851SNL */ +#define RXCR2_SRDBL_4B (0x0 << 5) /* KSZ8851SNL */ +#define RXCR2_SRDBL_8B (0x1 << 5) /* KSZ8851SNL */ +#define RXCR2_SRDBL_16B (0x2 << 5) /* KSZ8851SNL */ +#define RXCR2_SRDBL_32B (0x3 << 5) /* KSZ8851SNL */ +#define RXCR2_SRDBL_FRAME (0x4 << 5) /* KSZ8851SNL */ #define RXCR2_IUFFP (1 << 4) #define RXCR2_RXIUFCEZ (1 << 3) #define RXCR2_UDPLFE (1 << 2) @@ -143,8 +149,10 @@ #define RXFSHR_RXCE (1 << 0) #define KS_RXFHBCR 0x7E +#define RXFHBCR_CNT_MASK (0xfff << 0) + #define KS_TXQCR 0x80 -#define TXQCR_AETFE (1 << 2) +#define TXQCR_AETFE (1 << 2) /* KSZ8851SNL */ #define TXQCR_TXQMAM (1 << 1) #define TXQCR_METFE (1 << 0) @@ -167,6 +175,10 @@ #define KS_RXFDPR 0x86 #define RXFDPR_RXFPAI (1 << 14) +#define RXFDPR_WST (1 << 12) /* KSZ8851-16MLL */ +#define RXFDPR_EMS (1 << 11) /* KSZ8851-16MLL */ +#define RXFDPR_RXFP_MASK (0x7ff << 0) +#define RXFDPR_RXFP_SHIFT (0) #define KS_RXDTTR 0x8C #define KS_RXDBCTR 0x8E @@ -184,7 +196,7 @@ #define IRQ_RXMPDI (1 << 4) #define IRQ_LDI (1 << 3) #define IRQ_EDI (1 << 2) -#define IRQ_SPIBEI (1 << 1) +#define IRQ_SPIBEI (1 << 1) /* KSZ8851SNL */ #define IRQ_DEDI (1 << 0) #define KS_RXFCTR 0x9C @@ -257,42 +269,37 @@ #define KS_P1ANLPR 0xEE #define KS_P1SCLMD 0xF4 -#define P1SCLMD_LEDOFF (1 << 15) -#define P1SCLMD_TXIDS (1 << 14) -#define P1SCLMD_RESTARTAN (1 << 13) -#define P1SCLMD_DISAUTOMDIX (1 << 10) -#define P1SCLMD_FORCEMDIX (1 << 9) -#define P1SCLMD_AUTONEGEN (1 << 7) -#define P1SCLMD_FORCE100 (1 << 6) -#define P1SCLMD_FORCEFDX (1 << 5) -#define P1SCLMD_ADV_FLOW (1 << 4) -#define P1SCLMD_ADV_100BT_FDX (1 << 3) -#define P1SCLMD_ADV_100BT_HDX (1 << 2) -#define P1SCLMD_ADV_10BT_FDX (1 << 1) -#define P1SCLMD_ADV_10BT_HDX (1 << 0) #define KS_P1CR 0xF6 -#define P1CR_HP_MDIX (1 << 15) -#define P1CR_REV_POL (1 << 13) -#define P1CR_OP_100M (1 << 10) -#define P1CR_OP_FDX (1 << 9) -#define P1CR_OP_MDI (1 << 7) -#define P1CR_AN_DONE (1 << 6) -#define P1CR_LINK_GOOD (1 << 5) -#define P1CR_PNTR_FLOW (1 << 4) -#define P1CR_PNTR_100BT_FDX (1 << 3) -#define P1CR_PNTR_100BT_HDX (1 << 2) -#define P1CR_PNTR_10BT_FDX (1 << 1) -#define P1CR_PNTR_10BT_HDX (1 << 0) +#define P1CR_LEDOFF (1 << 15) +#define P1CR_TXIDS (1 << 14) +#define P1CR_RESTARTAN (1 << 13) +#define P1CR_DISAUTOMDIX (1 << 10) +#define P1CR_FORCEMDIX (1 << 9) +#define P1CR_AUTONEGEN (1 << 7) +#define P1CR_FORCE100 (1 << 6) +#define P1CR_FORCEFDX (1 << 5) +#define P1CR_ADV_FLOW (1 << 4) +#define P1CR_ADV_100BT_FDX (1 << 3) +#define P1CR_ADV_100BT_HDX (1 << 2) +#define P1CR_ADV_10BT_FDX (1 << 1) +#define P1CR_ADV_10BT_HDX (1 << 0) + +#define KS_P1SR 0xF8 +#define P1SR_HP_MDIX (1 << 15) +#define P1SR_REV_POL (1 << 13) +#define P1SR_OP_100M (1 << 10) +#define P1SR_OP_FDX (1 << 9) 
+#define P1SR_OP_MDI (1 << 7) +#define P1SR_AN_DONE (1 << 6) +#define P1SR_LINK_GOOD (1 << 5) +#define P1SR_PNTR_FLOW (1 << 4) +#define P1SR_PNTR_100BT_FDX (1 << 3) +#define P1SR_PNTR_100BT_HDX (1 << 2) +#define P1SR_PNTR_10BT_FDX (1 << 1) +#define P1SR_PNTR_10BT_HDX (1 << 0) /* TX Frame control */ - #define TXFR_TXIC (1 << 15) #define TXFR_TXFID_MASK (0x3f << 0) #define TXFR_TXFID_SHIFT (0) - -/* SPI frame opcodes */ -#define KS_SPIOP_RD (0x00) -#define KS_SPIOP_WR (0x40) -#define KS_SPIOP_RXFIFO (0x80) -#define KS_SPIOP_TXFIFO (0xC0) diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 35f8c9ef204d..c946841c0a06 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -40,6 +40,8 @@ #include <linux/of_device.h> #include <linux/of_net.h> +#include "ks8851.h" + #define DRV_NAME "ks8851_mll" static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; @@ -48,319 +50,10 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; #define TX_BUF_SIZE 2000 #define RX_BUF_SIZE 2000 -#define KS_CCR 0x08 -#define CCR_EEPROM (1 << 9) -#define CCR_SPI (1 << 8) -#define CCR_8BIT (1 << 7) -#define CCR_16BIT (1 << 6) -#define CCR_32BIT (1 << 5) -#define CCR_SHARED (1 << 4) -#define CCR_32PIN (1 << 0) - -/* MAC address registers */ -#define KS_MARL 0x10 -#define KS_MARM 0x12 -#define KS_MARH 0x14 - -#define KS_OBCR 0x20 -#define OBCR_ODS_16MA (1 << 6) - -#define KS_EEPCR 0x22 -#define EEPCR_EESA (1 << 4) -#define EEPCR_EESB (1 << 3) -#define EEPCR_EEDO (1 << 2) -#define EEPCR_EESCK (1 << 1) -#define EEPCR_EECS (1 << 0) - -#define KS_MBIR 0x24 -#define MBIR_TXMBF (1 << 12) -#define MBIR_TXMBFA (1 << 11) -#define MBIR_RXMBF (1 << 4) -#define MBIR_RXMBFA (1 << 3) - -#define KS_GRR 0x26 -#define GRR_QMU (1 << 1) -#define GRR_GSR (1 << 0) - -#define KS_WFCR 0x2A -#define WFCR_MPRXE (1 << 7) -#define WFCR_WF3E (1 << 3) -#define WFCR_WF2E (1 << 2) -#define WFCR_WF1E (1 << 1) -#define WFCR_WF0E (1 << 0) - -#define KS_WF0CRC0 0x30 -#define KS_WF0CRC1 0x32 -#define KS_WF0BM0 0x34 -#define KS_WF0BM1 0x36 -#define KS_WF0BM2 0x38 -#define KS_WF0BM3 0x3A - -#define KS_WF1CRC0 0x40 -#define KS_WF1CRC1 0x42 -#define KS_WF1BM0 0x44 -#define KS_WF1BM1 0x46 -#define KS_WF1BM2 0x48 -#define KS_WF1BM3 0x4A - -#define KS_WF2CRC0 0x50 -#define KS_WF2CRC1 0x52 -#define KS_WF2BM0 0x54 -#define KS_WF2BM1 0x56 -#define KS_WF2BM2 0x58 -#define KS_WF2BM3 0x5A - -#define KS_WF3CRC0 0x60 -#define KS_WF3CRC1 0x62 -#define KS_WF3BM0 0x64 -#define KS_WF3BM1 0x66 -#define KS_WF3BM2 0x68 -#define KS_WF3BM3 0x6A - -#define KS_TXCR 0x70 -#define TXCR_TCGICMP (1 << 8) -#define TXCR_TCGUDP (1 << 7) -#define TXCR_TCGTCP (1 << 6) -#define TXCR_TCGIP (1 << 5) -#define TXCR_FTXQ (1 << 4) -#define TXCR_TXFCE (1 << 3) -#define TXCR_TXPE (1 << 2) -#define TXCR_TXCRC (1 << 1) -#define TXCR_TXE (1 << 0) - -#define KS_TXSR 0x72 -#define TXSR_TXLC (1 << 13) -#define TXSR_TXMC (1 << 12) -#define TXSR_TXFID_MASK (0x3f << 0) -#define TXSR_TXFID_SHIFT (0) -#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f) - - -#define KS_RXCR1 0x74 -#define RXCR1_FRXQ (1 << 15) -#define RXCR1_RXUDPFCC (1 << 14) -#define RXCR1_RXTCPFCC (1 << 13) -#define RXCR1_RXIPFCC (1 << 12) -#define RXCR1_RXPAFMA (1 << 11) -#define RXCR1_RXFCE (1 << 10) -#define RXCR1_RXEFE (1 << 9) -#define RXCR1_RXMAFMA (1 << 8) -#define RXCR1_RXBE (1 << 7) -#define RXCR1_RXME (1 << 6) -#define RXCR1_RXUE (1 << 5) -#define RXCR1_RXAE (1 << 4) -#define RXCR1_RXINVF (1 << 1) 
-#define RXCR1_RXE (1 << 0) #define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \ RXCR1_RXMAFMA | RXCR1_RXPAFMA) - -#define KS_RXCR2 0x76 -#define RXCR2_SRDBL_MASK (0x7 << 5) -#define RXCR2_SRDBL_SHIFT (5) -#define RXCR2_SRDBL_4B (0x0 << 5) -#define RXCR2_SRDBL_8B (0x1 << 5) -#define RXCR2_SRDBL_16B (0x2 << 5) -#define RXCR2_SRDBL_32B (0x3 << 5) -/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */ -#define RXCR2_IUFFP (1 << 4) -#define RXCR2_RXIUFCEZ (1 << 3) -#define RXCR2_UDPLFE (1 << 2) -#define RXCR2_RXICMPFCC (1 << 1) -#define RXCR2_RXSAF (1 << 0) - -#define KS_TXMIR 0x78 - -#define KS_RXFHSR 0x7C -#define RXFSHR_RXFV (1 << 15) -#define RXFSHR_RXICMPFCS (1 << 13) -#define RXFSHR_RXIPFCS (1 << 12) -#define RXFSHR_RXTCPFCS (1 << 11) -#define RXFSHR_RXUDPFCS (1 << 10) -#define RXFSHR_RXBF (1 << 7) -#define RXFSHR_RXMF (1 << 6) -#define RXFSHR_RXUF (1 << 5) -#define RXFSHR_RXMR (1 << 4) -#define RXFSHR_RXFT (1 << 3) -#define RXFSHR_RXFTL (1 << 2) -#define RXFSHR_RXRF (1 << 1) -#define RXFSHR_RXCE (1 << 0) -#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\ - RXFSHR_RXFTL | RXFSHR_RXMR |\ - RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\ - RXFSHR_RXTCPFCS) -#define KS_RXFHBCR 0x7E -#define RXFHBCR_CNT_MASK 0x0FFF - -#define KS_TXQCR 0x80 -#define TXQCR_AETFE (1 << 2) -#define TXQCR_TXQMAM (1 << 1) -#define TXQCR_METFE (1 << 0) - -#define KS_RXQCR 0x82 -#define RXQCR_RXDTTS (1 << 12) -#define RXQCR_RXDBCTS (1 << 11) -#define RXQCR_RXFCTS (1 << 10) -#define RXQCR_RXIPHTOE (1 << 9) -#define RXQCR_RXDTTE (1 << 7) -#define RXQCR_RXDBCTE (1 << 6) -#define RXQCR_RXFCTE (1 << 5) -#define RXQCR_ADRFE (1 << 4) -#define RXQCR_SDA (1 << 3) -#define RXQCR_RRXEF (1 << 0) #define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE) -#define KS_TXFDPR 0x84 -#define TXFDPR_TXFPAI (1 << 14) -#define TXFDPR_TXFP_MASK (0x7ff << 0) -#define TXFDPR_TXFP_SHIFT (0) - -#define KS_RXFDPR 0x86 -#define RXFDPR_RXFPAI (1 << 14) - -#define KS_RXDTTR 0x8C -#define KS_RXDBCTR 0x8E - -#define KS_IER 0x90 -#define KS_ISR 0x92 -#define IRQ_LCI (1 << 15) -#define IRQ_TXI (1 << 14) -#define IRQ_RXI (1 << 13) -#define IRQ_RXOI (1 << 11) -#define IRQ_TXPSI (1 << 9) -#define IRQ_RXPSI (1 << 8) -#define IRQ_TXSAI (1 << 6) -#define IRQ_RXWFDI (1 << 5) -#define IRQ_RXMPDI (1 << 4) -#define IRQ_LDI (1 << 3) -#define IRQ_EDI (1 << 2) -#define IRQ_SPIBEI (1 << 1) -#define IRQ_DEDI (1 << 0) - -#define KS_RXFCTR 0x9C -#define RXFCTR_THRESHOLD_MASK 0x00FF - -#define KS_RXFC 0x9D -#define RXFCTR_RXFC_MASK (0xff << 8) -#define RXFCTR_RXFC_SHIFT (8) -#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff) -#define RXFCTR_RXFCT_MASK (0xff << 0) -#define RXFCTR_RXFCT_SHIFT (0) - -#define KS_TXNTFSR 0x9E - -#define KS_MAHTR0 0xA0 -#define KS_MAHTR1 0xA2 -#define KS_MAHTR2 0xA4 -#define KS_MAHTR3 0xA6 - -#define KS_FCLWR 0xB0 -#define KS_FCHWR 0xB2 -#define KS_FCOWR 0xB4 - -#define KS_CIDER 0xC0 -#define CIDER_ID 0x8870 -#define CIDER_REV_MASK (0x7 << 1) -#define CIDER_REV_SHIFT (1) -#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7) - -#define KS_CGCR 0xC6 -#define KS_IACR 0xC8 -#define IACR_RDEN (1 << 12) -#define IACR_TSEL_MASK (0x3 << 10) -#define IACR_TSEL_SHIFT (10) -#define IACR_TSEL_MIB (0x3 << 10) -#define IACR_ADDR_MASK (0x1f << 0) -#define IACR_ADDR_SHIFT (0) - -#define KS_IADLR 0xD0 -#define KS_IAHDR 0xD2 - -#define KS_PMECR 0xD4 -#define PMECR_PME_DELAY (1 << 14) -#define PMECR_PME_POL (1 << 12) -#define PMECR_WOL_WAKEUP (1 << 11) -#define PMECR_WOL_MAGICPKT (1 << 10) -#define PMECR_WOL_LINKUP (1 << 9) -#define PMECR_WOL_ENERGY (1 << 8) -#define PMECR_AUTO_WAKE_EN (1 << 7) 
-#define PMECR_WAKEUP_NORMAL (1 << 6) -#define PMECR_WKEVT_MASK (0xf << 2) -#define PMECR_WKEVT_SHIFT (2) -#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf) -#define PMECR_WKEVT_ENERGY (0x1 << 2) -#define PMECR_WKEVT_LINK (0x2 << 2) -#define PMECR_WKEVT_MAGICPKT (0x4 << 2) -#define PMECR_WKEVT_FRAME (0x8 << 2) -#define PMECR_PM_MASK (0x3 << 0) -#define PMECR_PM_SHIFT (0) -#define PMECR_PM_NORMAL (0x0 << 0) -#define PMECR_PM_ENERGY (0x1 << 0) -#define PMECR_PM_SOFTDOWN (0x2 << 0) -#define PMECR_PM_POWERSAVE (0x3 << 0) - -/* Standard MII PHY data */ -#define KS_P1MBCR 0xE4 -#define P1MBCR_FORCE_FDX (1 << 8) - -#define KS_P1MBSR 0xE6 -#define P1MBSR_AN_COMPLETE (1 << 5) -#define P1MBSR_AN_CAPABLE (1 << 3) -#define P1MBSR_LINK_UP (1 << 2) - -#define KS_PHY1ILR 0xE8 -#define KS_PHY1IHR 0xEA -#define KS_P1ANAR 0xEC -#define KS_P1ANLPR 0xEE - -#define KS_P1SCLMD 0xF4 -#define P1SCLMD_LEDOFF (1 << 15) -#define P1SCLMD_TXIDS (1 << 14) -#define P1SCLMD_RESTARTAN (1 << 13) -#define P1SCLMD_DISAUTOMDIX (1 << 10) -#define P1SCLMD_FORCEMDIX (1 << 9) -#define P1SCLMD_AUTONEGEN (1 << 7) -#define P1SCLMD_FORCE100 (1 << 6) -#define P1SCLMD_FORCEFDX (1 << 5) -#define P1SCLMD_ADV_FLOW (1 << 4) -#define P1SCLMD_ADV_100BT_FDX (1 << 3) -#define P1SCLMD_ADV_100BT_HDX (1 << 2) -#define P1SCLMD_ADV_10BT_FDX (1 << 1) -#define P1SCLMD_ADV_10BT_HDX (1 << 0) - -#define KS_P1CR 0xF6 -#define P1CR_HP_MDIX (1 << 15) -#define P1CR_REV_POL (1 << 13) -#define P1CR_OP_100M (1 << 10) -#define P1CR_OP_FDX (1 << 9) -#define P1CR_OP_MDI (1 << 7) -#define P1CR_AN_DONE (1 << 6) -#define P1CR_LINK_GOOD (1 << 5) -#define P1CR_PNTR_FLOW (1 << 4) -#define P1CR_PNTR_100BT_FDX (1 << 3) -#define P1CR_PNTR_100BT_HDX (1 << 2) -#define P1CR_PNTR_10BT_FDX (1 << 1) -#define P1CR_PNTR_10BT_HDX (1 << 0) - -/* TX Frame control */ - -#define TXFR_TXIC (1 << 15) -#define TXFR_TXFID_MASK (0x3f << 0) -#define TXFR_TXFID_SHIFT (0) - -#define KS_P1SR 0xF8 -#define P1SR_HP_MDIX (1 << 15) -#define P1SR_REV_POL (1 << 13) -#define P1SR_OP_100M (1 << 10) -#define P1SR_OP_FDX (1 << 9) -#define P1SR_OP_MDI (1 << 7) -#define P1SR_AN_DONE (1 << 6) -#define P1SR_LINK_GOOD (1 << 5) -#define P1SR_PNTR_FLOW (1 << 4) -#define P1SR_PNTR_100BT_FDX (1 << 3) -#define P1SR_PNTR_100BT_HDX (1 << 2) -#define P1SR_PNTR_10BT_FDX (1 << 1) -#define P1SR_PNTR_10BT_HDX (1 << 0) - #define ENUM_BUS_NONE 0 #define ENUM_BUS_8BIT 1 #define ENUM_BUS_16BIT 2 @@ -1475,7 +1168,7 @@ static void ks_setup(struct ks_net *ks) ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */ - ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK); + ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK); /* Setup RxQ Command Control (RXQCR) */ ks->rc_rxqcr = RXQCR_CMD_CNTL; @@ -1488,7 +1181,7 @@ static void ks_setup(struct ks_net *ks) */ w = ks_rdreg16(ks, KS_P1MBCR); - w &= ~P1MBCR_FORCE_FDX; + w &= ~BMCR_FULLDPLX; ks_wrreg16(ks, KS_P1MBCR, w); w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP; @@ -1629,7 +1322,7 @@ static int ks8851_probe(struct platform_device *pdev) ks_setup_int(ks); data = ks_rdreg16(ks, KS_OBCR); - ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA); + ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA); /* overwriting the default MAC address */ if (pdev->dev.of_node) { diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index a4a35e27e33f..a8ca26c2ae0c 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -678,6 +678,7 @@ struct rtl8169_private { struct work_struct work; } wk; + 
unsigned irq_enabled:1; unsigned supports_gmii:1; dma_addr_t counters_phys_addr; struct rtl8169_counters *counters; @@ -1293,6 +1294,7 @@ static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) static void rtl_irq_disable(struct rtl8169_private *tp) { RTL_W16(tp, IntrMask, 0); + tp->irq_enabled = 0; } #define RTL_EVENT_NAPI_RX (RxOK | RxErr) @@ -1301,6 +1303,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp) static void rtl_irq_enable(struct rtl8169_private *tp) { + tp->irq_enabled = 1; RTL_W16(tp, IntrMask, tp->irq_mask); } @@ -6520,9 +6523,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) { struct rtl8169_private *tp = dev_instance; u16 status = RTL_R16(tp, IntrStatus); - u16 irq_mask = RTL_R16(tp, IntrMask); - if (status == 0xffff || !(status & irq_mask)) + if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask)) return IRQ_NONE; if (unlikely(status & SYSErr)) { @@ -6540,7 +6542,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); } - if (status & RTL_EVENT_NAPI) { + if (status & (RTL_EVENT_NAPI | LinkChg)) { rtl_irq_disable(tp); napi_schedule_irqoff(&tp->napi); } diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index f936166d8910..4d9bcb4d0378 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -113,10 +113,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc) static void refill_desc3(void *priv_ptr, struct dma_desc *p) { - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; + struct stmmac_rx_queue *rx_q = priv_ptr; + struct stmmac_priv *priv = rx_q->priv_data; /* Fill DES3 in case of RING mode */ - if (priv->dma_buf_sz >= BUF_SIZE_8KiB) + if (priv->dma_buf_sz == BUF_SIZE_16KiB) p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); } diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 5174d318901e..0a920c5936b2 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3657,12 +3657,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); - if (ret) + if (ret) { + of_node_put(interfaces); return ret; + } ret = netcp_txpipe_open(&gbe_dev->tx_pipe); - if (ret) + if (ret) { + of_node_put(interfaces); return ret; + } /* Create network interfaces */ INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index ec7e7ec24ff9..4041c75997ba 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev) ret = of_address_to_resource(np, 0, &dmares); if (ret) { dev_err(&pdev->dev, "unable to get DMA resource\n"); + of_node_put(np); goto free_netdev; } lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); if (IS_ERR(lp->dma_regs)) { dev_err(&pdev->dev, "could not map DMA regs\n"); ret = PTR_ERR(lp->dma_regs); + of_node_put(np); goto free_netdev; } lp->rx_irq = irq_of_parse_and_map(np, 1); diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index cd1d8faccca5..cd6b95e673a5 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c 
@@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi)
 	INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
 	lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
 					     WQ_MEM_RECLAIM);
+	if (unlikely(!lp->wqueue)) {
+		ret = -ENOMEM;
+		goto err_hw_init;
+	}
 
 	ret = adf7242_hw_init(lp);
 	if (ret)
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 49866737f138..707285953750 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -324,7 +324,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
 			goto out_err;
 		}
 
-		genlmsg_reply(skb, info);
+		res = genlmsg_reply(skb, info);
 		break;
 	}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index e493db5e8744..1c66e92c717c 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -7,6 +7,8 @@ menuconfig MDIO_DEVICE
 	help
 	  MDIO devices and driver infrastructure code.
 
+if MDIO_DEVICE
+
 config MDIO_BUS
 	tristate
 	default m if PHYLIB=m
@@ -179,6 +181,7 @@ config MDIO_XGENE
 	  APM X-Gene SoC's.
 
 endif
+endif
 
 config PHYLINK
 	tristate
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 9605d4fe540b..cb86a3e90c7d 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -323,6 +323,19 @@ static int bcm54xx_config_init(struct phy_device *phydev)
 
 	bcm54xx_phydsp_config(phydev);
 
+	/* Encode link speed into LED1 and LED3 pair (green/amber).
+	 * Also flash these two LEDs on activity. This means configuring
+	 * them for MULTICOLOR and encoding link/activity into them.
+	 */
+	val = BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_MULTICOLOR1) |
+		BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_MULTICOLOR1);
+	bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1, val);
+
+	val = BCM_LED_MULTICOLOR_IN_PHASE |
+		BCM5482_SHD_LEDS1_LED1(BCM_LED_MULTICOLOR_LINK_ACT) |
+		BCM5482_SHD_LEDS1_LED3(BCM_LED_MULTICOLOR_LINK_ACT);
+	bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
+
 	return 0;
 }
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index bbd8c22067f3..97d45bd5b38e 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -15,6 +15,8 @@
 #include <linux/netdevice.h>
 
 #define DP83822_PHY_ID		0x2000a240
+#define DP83825I_PHY_ID		0x2000a150
+
 #define DP83822_DEVADDR		0x1f
 
 #define MII_DP83822_PHYSCR	0x11
@@ -304,26 +306,30 @@ static int dp83822_resume(struct phy_device *phydev)
 	return 0;
 }
 
+#define DP83822_PHY_DRIVER(_id, _name)				\
+	{							\
+		PHY_ID_MATCH_MODEL(_id),			\
+		.name		= (_name),			\
+		.features	= PHY_BASIC_FEATURES,		\
+		.soft_reset	= dp83822_phy_reset,		\
+		.config_init	= dp83822_config_init,		\
+		.get_wol	= dp83822_get_wol,		\
+		.set_wol	= dp83822_set_wol,		\
+		.ack_interrupt	= dp83822_ack_interrupt,	\
+		.config_intr	= dp83822_config_intr,		\
+		.suspend	= dp83822_suspend,		\
+		.resume		= dp83822_resume,		\
+	}
+
 static struct phy_driver dp83822_driver[] = {
-	{
-		.phy_id = DP83822_PHY_ID,
-		.phy_id_mask = 0xfffffff0,
-		.name = "TI DP83822",
-		.features = PHY_BASIC_FEATURES,
-		.config_init = dp83822_config_init,
-		.soft_reset = dp83822_phy_reset,
-		.get_wol = dp83822_get_wol,
-		.set_wol = dp83822_set_wol,
-		.ack_interrupt = dp83822_ack_interrupt,
-		.config_intr = dp83822_config_intr,
-		.suspend = dp83822_suspend,
-		.resume = dp83822_resume,
-	},
+	DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
+	DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
 };
 
 module_phy_driver(dp83822_driver);
 
 static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
 	{ DP83822_PHY_ID, 0xfffffff0 },
+	{ DP83825I_PHY_ID, 0xfffffff0 },
 	{ },
 };
 MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 49fdd1ee798e..77068c545de0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1831,7 +1831,7 @@ int genphy_soft_reset(struct phy_device *phydev)
 {
 	int ret;
 
-	ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+	ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
 	if (ret < 0)
 		return ret;
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 820a2fe7d027..aff995be2a31 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1301,6 +1301,20 @@ static const struct driver_info trendnet_info = {
 	.tx_fixup	= aqc111_tx_fixup,
 };
 
+static const struct driver_info qnap_info = {
+	.description	= "QNAP QNA-UC5G1T USB to 5GbE Adapter",
+	.bind		= aqc111_bind,
+	.unbind		= aqc111_unbind,
+	.status		= aqc111_status,
+	.link_reset	= aqc111_link_reset,
+	.reset		= aqc111_reset,
+	.stop		= aqc111_stop,
+	.flags		= FLAG_ETHER | FLAG_FRAMING_AX |
+			  FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+	.rx_fixup	= aqc111_rx_fixup,
+	.tx_fixup	= aqc111_tx_fixup,
+};
+
 static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
 {
 	struct usbnet *dev = usb_get_intfdata(intf);
@@ -1455,6 +1469,7 @@ static const struct usb_device_id products[] = {
 	{AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
 	{AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
 	{AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
+	{AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)},
 	{ },/* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 5512a1038721..3e9b2c319e45 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -851,6 +851,14 @@ static const struct usb_device_id products[] = {
 	.driver_info = 0,
 },
 
+/* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+
 /* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
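For context on the genphy_soft_reset() hunk above: phy_set_bits() performs a read-modify-write of MII_BMCR, so bits that are already set (for example BMCR_ANENABLE) survive the reset request instead of the whole register being overwritten with BMCR_RESET alone. The sketch below is illustrative only -- it is not part of this merge and not the in-tree implementation (upstream phy_set_bits() is a small phy_modify() wrapper declared in include/linux/phy.h) -- but it shows the semantics the patch relies on:

#include <linux/phy.h>

/* Illustrative sketch of phy_set_bits() behaviour: read the register,
 * OR in the requested bits, write the result back. MDIO read errors
 * are propagated as negative return values. */
static int phy_set_bits_sketch(struct phy_device *phydev, u32 regnum, u16 bits)
{
	int val = phy_read(phydev, regnum);

	if (val < 0)
		return val;

	return phy_write(phydev, regnum, val | bits);
}

For genphy_soft_reset() this means BMCR is written with its current contents plus BMCR_RESET, which matters on PHYs that do not reload BMCR defaults as part of a soft reset.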
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index e9822a3ec373..94132cfd1f56 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -460,9 +460,7 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id, static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, struct cfg80211_pmsr_result *res) { - s64 rtt_avg = res->ftm.rtt_avg * 100; - - do_div(rtt_avg, 6666); + s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666); IWL_DEBUG_INFO(mvm, "entry %d\n", index); IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status); diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 6eedc0ec7661..76629b98c78d 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -130,6 +130,8 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, static void mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) { + iowrite32(q->desc_dma, &q->regs->desc_base); + iowrite32(q->ndesc, &q->regs->ring_size); q->head = ioread32(&q->regs->dma_idx); q->tail = q->head; iowrite32(q->head, &q->regs->cpu_idx); @@ -180,7 +182,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) else mt76_dma_sync_idx(dev, q); - wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; + wake = wake && q->stopped && + qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; + if (wake) + q->stopped = false; if (!q->queued) wake_up(&dev->tx_wait); diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index a033745adb2f..316167404729 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -679,19 +679,15 @@ out: return ret; } -static void -mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; - int idx = wcid->idx; - int i; + int i, idx = wcid->idx; rcu_assign_pointer(dev->wcid[idx], NULL); synchronize_rcu(); - mutex_lock(&dev->mutex); - if (dev->drv->sta_remove) dev->drv->sta_remove(dev, vif, sta); @@ -699,7 +695,15 @@ mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, for (i = 0; i < ARRAY_SIZE(sta->txq); i++) mt76_txq_remove(dev, sta->txq[i]); mt76_wcid_free(dev->wcid_mask, idx); +} +EXPORT_SYMBOL_GPL(__mt76_sta_remove); +static void +mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + mutex_lock(&dev->mutex); + __mt76_sta_remove(dev, vif, sta); mutex_unlock(&dev->mutex); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 5dfb0601f101..bcbfd3c4a44b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -126,6 +126,7 @@ struct mt76_queue { int ndesc; int queued; int buf_size; + bool stopped; u8 buf_offset; u8 hw_idx; @@ -143,6 +144,7 @@ struct mt76_mcu_ops { const struct mt76_reg_pair *rp, int len); int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *rp, int len); + int (*mcu_restart)(struct mt76_dev *dev); }; struct mt76_queue_ops { @@ -693,6 +695,8 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif 
*vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state); +void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c index afcd86f735b4..4dcb465095d1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c @@ -135,8 +135,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg) out: mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false); - if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > - __sw_hweight8(dev->beacon_mask)) + if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask)) dev->beacon_check++; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index d69e82c66ab2..b3ae0aaea62a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -27,12 +27,16 @@ static void mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) { __le32 *txd = (__le32 *)skb->data; + struct ieee80211_hdr *hdr; + struct ieee80211_sta *sta; struct mt7603_sta *msta; struct mt76_wcid *wcid; + void *priv; int idx; u32 val; + u8 tid; - if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr)) + if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr)) goto free; val = le32_to_cpu(txd[1]); @@ -46,10 +50,19 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) if (!wcid) goto free; - msta = container_of(wcid, struct mt7603_sta, wcid); + priv = msta = container_of(wcid, struct mt7603_sta, wcid); val = le32_to_cpu(txd[0]); skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val)); + val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX); + val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT); + txd[0] = cpu_to_le32(val); + + sta = container_of(priv, struct ieee80211_sta, drv_priv); + hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE]; + tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + ieee80211_sta_set_buffered(sta, tid, true); + spin_lock_bh(&dev->ps_lock); __skb_queue_tail(&msta->psq, skb); if (skb_queue_len(&msta->psq) >= 64) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index 15cc8f33b34d..d54dda67d036 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c @@ -112,7 +112,7 @@ static void mt7603_phy_init(struct mt7603_dev *dev) { int rx_chains = dev->mt76.antenna_mask; - int tx_chains = __sw_hweight8(rx_chains) - 1; + int tx_chains = hweight8(rx_chains) - 1; mt76_rmw(dev, MT_WF_RMAC_RMCR, (MT_WF_RMAC_RMCR_SMPS_MODE | diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index 0a0115861b51..5e31d7da96fc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -1072,7 +1072,7 @@ out: case MT_PHY_TYPE_HT: final_rate_flags |= IEEE80211_TX_RC_MCS; final_rate &= GENMASK(5, 0); - if (i > 15) + if (final_rate > 15) return false; break; default: diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index b10775ed92e6..cc0fe0933b2d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -5,6 +5,7 @@ #include <linux/pci.h> #include <linux/module.h> #include "mt7603.h" +#include "mac.h" #include "eeprom.h" static int @@ -386,6 +387,15 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) } static void +mt7603_ps_set_more_data(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + + hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE]; + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); +} + +static void mt7603_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int nframes, @@ -399,6 +409,8 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw, __skb_queue_head_init(&list); + mt7603_wtbl_set_ps(dev, msta, false); + spin_lock_bh(&dev->ps_lock); skb_queue_walk_safe(&msta->psq, skb, tmp) { if (!nframes) @@ -409,11 +421,15 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw, skb_set_queue_mapping(skb, MT_TXQ_PSD); __skb_unlink(skb, &msta->psq); + mt7603_ps_set_more_data(skb); __skb_queue_tail(&list, skb); nframes--; } spin_unlock_bh(&dev->ps_lock); + if (!skb_queue_empty(&list)) + ieee80211_sta_eosp(sta); + mt7603_ps_tx_list(dev, &list); if (nframes) diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c index 4b0713f1fd5e..d06905ea8cc6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c @@ -433,7 +433,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev) { struct cfg80211_chan_def *chandef = &dev->mt76.chandef; struct ieee80211_hw *hw = mt76_hw(dev); - int n_chains = __sw_hweight8(dev->mt76.antenna_mask); + int n_chains = hweight8(dev->mt76.antenna_mask); struct { u8 control_chan; u8 center_chan; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c index e13fea80d970..b920be1f5718 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c @@ -23,9 +23,9 @@ mt76_wmac_probe(struct platform_device *pdev) } mem_base = devm_ioremap_resource(&pdev->dev, res); - if (!mem_base) { + if (IS_ERR(mem_base)) { dev_err(&pdev->dev, "Failed to get memory resource\n"); - return -EINVAL; + return PTR_ERR(mem_base); } mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h index 0290ba5869a5..736f81752b5b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h @@ -46,7 +46,7 @@ static const struct mt76_reg_pair common_mac_reg_table[] = { { MT_MM20_PROT_CFG, 0x01742004 }, { MT_MM40_PROT_CFG, 0x03f42084 }, { MT_TXOP_CTRL_CFG, 0x0000583f }, - { MT_TX_RTS_CFG, 0x00092b20 }, + { MT_TX_RTS_CFG, 0x00ffff20 }, { MT_EXP_ACK_TIME, 0x002400ca }, { MT_TXOP_HLDR_ET, 0x00000002 }, { MT_XIFS_TIME_CFG, 0x33a41010 }, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 91718647da02..e5a06f74a6f7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -229,7 +229,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, struct usb_device *usb_dev = interface_to_usbdev(usb_intf); struct mt76x02_dev *dev; struct mt76_dev *mdev; - u32 asic_rev, mac_rev; + u32 mac_rev; int ret; mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), 
&mt76x0u_ops, @@ -262,10 +262,14 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, goto err; } - asic_rev = mt76_rr(dev, MT_ASIC_VERSION); + mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); mac_rev = mt76_rr(dev, MT_MAC_CSR0); dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n", - asic_rev, mac_rev); + mdev->rev, mac_rev); + if (!is_mt76x0(dev)) { + ret = -ENODEV; + goto err; + } /* Note: vendor driver skips this check for MT76X0U */ if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index 6915cce5def9..07061eb4d1e1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h @@ -51,6 +51,7 @@ struct mt76x02_calibration { u16 false_cca; s8 avg_rssi_all; s8 agc_gain_adjust; + s8 agc_lowest_gain; s8 low_gain; s8 temp_vco; @@ -114,8 +115,11 @@ struct mt76x02_dev { struct mt76x02_dfs_pattern_detector dfs_pd; /* edcca monitor */ + unsigned long ed_trigger_timeout; bool ed_tx_blocked; bool ed_monitor; + u8 ed_monitor_enabled; + u8 ed_monitor_learning; u8 ed_trigger; u8 ed_silent; ktime_t ed_time; @@ -188,6 +192,13 @@ void mt76x02_mac_start(struct mt76x02_dev *dev); void mt76x02_init_debugfs(struct mt76x02_dev *dev); +static inline bool is_mt76x0(struct mt76x02_dev *dev) +{ + return mt76_chip(&dev->mt76) == 0x7610 || + mt76_chip(&dev->mt76) == 0x7630 || + mt76_chip(&dev->mt76) == 0x7650; +} + static inline bool is_mt76x2(struct mt76x02_dev *dev) { return mt76_chip(&dev->mt76) == 0x7612 || diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c index 7580c5c986ff..b1d6fd4861e3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c @@ -116,6 +116,32 @@ static int read_agc(struct seq_file *file, void *data) return 0; } +static int +mt76_edcca_set(void *data, u64 val) +{ + struct mt76x02_dev *dev = data; + enum nl80211_dfs_regions region = dev->dfs_pd.region; + + dev->ed_monitor_enabled = !!val; + dev->ed_monitor = dev->ed_monitor_enabled && + region == NL80211_DFS_ETSI; + mt76x02_edcca_init(dev, true); + + return 0; +} + +static int +mt76_edcca_get(void *data, u64 *val) +{ + struct mt76x02_dev *dev = data; + + *val = dev->ed_monitor_enabled; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set, + "%lld\n"); + void mt76x02_init_debugfs(struct mt76x02_dev *dev) { struct dentry *dir; @@ -127,6 +153,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev) debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp); debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc); + debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca); debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat); debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c index e4649103efd4..17d12d212d1b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c @@ -885,7 +885,8 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev, if (dfs_pd->region != region) { tasklet_disable(&dfs_pd->dfs_tasklet); - dev->ed_monitor = region == NL80211_DFS_ETSI; + dev->ed_monitor = dev->ed_monitor_enabled && + region == NL80211_DFS_ETSI; mt76x02_edcca_init(dev, true); 
dfs_pd->region = region; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 91ff6598eccf..9ed231abe916 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -67,12 +67,39 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, } EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup); +void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx, + struct ieee80211_key_conf *key) +{ + enum mt76x02_cipher_type cipher; + u8 key_data[32]; + u32 iv, eiv; + u64 pn; + + cipher = mt76x02_mac_get_key_info(key, key_data); + iv = mt76_rr(dev, MT_WCID_IV(idx)); + eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4); + + pn = (u64)eiv << 16; + if (cipher == MT_CIPHER_TKIP) { + pn |= (iv >> 16) & 0xff; + pn |= (iv & 0xff) << 8; + } else if (cipher >= MT_CIPHER_AES_CCMP) { + pn |= iv & 0xffff; + } else { + return; + } + + atomic64_set(&key->tx_pn, pn); +} + + int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, struct ieee80211_key_conf *key) { enum mt76x02_cipher_type cipher; u8 key_data[32]; u8 iv_data[8]; + u64 pn; cipher = mt76x02_mac_get_key_info(key, key_data); if (cipher == MT_CIPHER_NONE && key) @@ -85,9 +112,22 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, if (key) { mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); + + pn = atomic64_read(&key->tx_pn); + iv_data[3] = key->keyidx << 6; - if (cipher >= MT_CIPHER_TKIP) + if (cipher >= MT_CIPHER_TKIP) { iv_data[3] |= 0x20; + put_unaligned_le32(pn >> 16, &iv_data[4]); + } + + if (cipher == MT_CIPHER_TKIP) { + iv_data[0] = (pn >> 8) & 0xff; + iv_data[1] = (iv_data[0] | 0x20) & 0x7f; + iv_data[2] = pn & 0xff; + } else if (cipher >= MT_CIPHER_AES_CCMP) { + put_unaligned_le16((pn & 0xffff), &iv_data[0]); + } } mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); @@ -920,6 +960,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable) } } mt76x02_edcca_tx_enable(dev, true); + dev->ed_monitor_learning = true; /* clear previous CCA timer value */ mt76_rr(dev, MT_ED_CCA_TIMER); @@ -929,6 +970,10 @@ EXPORT_SYMBOL_GPL(mt76x02_edcca_init); #define MT_EDCCA_TH 92 #define MT_EDCCA_BLOCK_TH 2 +#define MT_EDCCA_LEARN_TH 50 +#define MT_EDCCA_LEARN_CCA 180 +#define MT_EDCCA_LEARN_TIMEOUT (20 * HZ) + static void mt76x02_edcca_check(struct mt76x02_dev *dev) { ktime_t cur_time; @@ -951,11 +996,23 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev) dev->ed_trigger = 0; } - if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && - !dev->ed_tx_blocked) + if (dev->cal.agc_lowest_gain && + dev->cal.false_cca > MT_EDCCA_LEARN_CCA && + dev->ed_trigger > MT_EDCCA_LEARN_TH) { + dev->ed_monitor_learning = false; + dev->ed_trigger_timeout = jiffies + 20 * HZ; + } else if (!dev->ed_monitor_learning && + time_is_after_jiffies(dev->ed_trigger_timeout)) { + dev->ed_monitor_learning = true; + mt76x02_edcca_tx_enable(dev, true); + } + + if (dev->ed_monitor_learning) + return; + + if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked) mt76x02_edcca_tx_enable(dev, false); - else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && - dev->ed_tx_blocked) + else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked) mt76x02_edcca_tx_enable(dev, true); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h index 6b1f25d2f64c..caeeef96c42f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h 
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h @@ -177,6 +177,8 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, u8 key_idx, struct ieee80211_key_conf *key); int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, struct ieee80211_key_conf *key); +void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx, + struct ieee80211_key_conf *key); void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx, u8 *mac); void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index 1229f19f2b02..daaed1220147 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -19,6 +19,7 @@ #include <linux/irq.h> #include "mt76x02.h" +#include "mt76x02_mcu.h" #include "mt76x02_trace.h" struct beacon_bc_data { @@ -418,9 +419,66 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev) return i < 4; } +static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, void *data) +{ + struct mt76x02_dev *dev = hw->priv; + struct mt76_wcid *wcid; + + if (!sta) + return; + + wcid = (struct mt76_wcid *) sta->drv_priv; + + if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv) + return; + + mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key); +} + +static void mt76x02_reset_state(struct mt76x02_dev *dev) +{ + int i; + + lockdep_assert_held(&dev->mt76.mutex); + + clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); + + rcu_read_lock(); + ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL); + rcu_read_unlock(); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) { + struct ieee80211_sta *sta; + struct ieee80211_vif *vif; + struct mt76x02_sta *msta; + struct mt76_wcid *wcid; + void *priv; + + wcid = rcu_dereference_protected(dev->mt76.wcid[i], + lockdep_is_held(&dev->mt76.mutex)); + if (!wcid) + continue; + + priv = msta = container_of(wcid, struct mt76x02_sta, wcid); + sta = container_of(priv, struct ieee80211_sta, drv_priv); + + priv = msta->vif; + vif = container_of(priv, struct ieee80211_vif, drv_priv); + + __mt76_sta_remove(&dev->mt76, vif, sta); + memset(msta, 0, sizeof(*msta)); + } + + dev->vif_mask = 0; + dev->beacon_mask = 0; +} + static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) { u32 mask = dev->mt76.mmio.irqmask; + bool restart = dev->mt76.mcu_ops->mcu_restart; int i; ieee80211_stop_queues(dev->mt76.hw); @@ -434,6 +492,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) mutex_lock(&dev->mt76.mutex); + if (restart) + mt76x02_reset_state(dev); + if (dev->beacon_mask) mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_BEACON_TX | @@ -452,20 +513,21 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) /* let fw reset DMA */ mt76_set(dev, 0x734, 0x3); + if (restart) + dev->mt76.mcu_ops->mcu_restart(&dev->mt76); + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++) mt76_queue_tx_cleanup(dev, i, true); for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) mt76_queue_rx_reset(dev, i); - mt76_wr(dev, MT_MAC_SYS_CTRL, - MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); - mt76_set(dev, MT_WPDMA_GLO_CFG, - MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN); + mt76x02_mac_start(dev); + if (dev->ed_monitor) mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); - if (dev->beacon_mask) + if (dev->beacon_mask && !restart) mt76_set(dev, MT_BEACON_TIME_CFG, 
MT_BEACON_TIME_CFG_BEACON_TX | MT_BEACON_TIME_CFG_TBTT_EN); @@ -486,9 +548,13 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) napi_schedule(&dev->mt76.napi[i]); } - ieee80211_wake_queues(dev->mt76.hw); - - mt76_txq_schedule_all(&dev->mt76); + if (restart) { + mt76x02_mcu_function_select(dev, Q_SELECT, 1); + ieee80211_restart_hw(dev->mt76.hw); + } else { + ieee80211_wake_queues(dev->mt76.hw); + mt76_txq_schedule_all(&dev->mt76); + } } static void mt76x02_check_tx_hang(struct mt76x02_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c index a020c757ba5c..a54b63a96eae 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c @@ -194,6 +194,8 @@ bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev) ret = true; } + dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit; + return ret; } EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index 43f07461c8d3..6fb52b596d42 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -85,8 +85,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, mt76x02_insert_hdr_pad(skb); - txwi = skb_push(skb, sizeof(struct mt76x02_txwi)); + txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi)); mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len); + skb_push(skb, sizeof(struct mt76x02_txwi)); pid = mt76_tx_status_skb_add(mdev, wcid, skb); txwi->pktid = pid; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index a48c261b0c63..cd072ac614f7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -237,6 +237,8 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; int idx = 0; + memset(msta, 0, sizeof(*msta)); + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid)); if (idx < 0) return -ENOSPC; @@ -274,6 +276,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; struct mt76_txq *mtxq; + memset(mvif, 0, sizeof(*mvif)); + mvif->idx = idx; mvif->group_wcid.idx = MT_VIF_WCID(idx); mvif->group_wcid.hw_key_idx = -1; @@ -289,6 +293,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) struct mt76x02_dev *dev = hw->priv; unsigned int idx = 0; + /* Allow to change address in HW if we create first interface. */ + if (!dev->vif_mask && + (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) || + memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1))) + mt76x02_mac_setaddr(dev, vif->addr); + if (vif->addr[0] & BIT(1)) idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7); @@ -311,10 +321,6 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) if (dev->vif_mask & BIT(idx)) return -EBUSY; - /* Allow to change address in HW if we create first interface. 
*/ - if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr)) - mt76x02_mac_setaddr(dev, vif->addr); - dev->vif_mask |= BIT(idx); mt76x02_vif_init(dev, vif, idx); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c index f8534362e2c8..a30ef2c5a9db 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c @@ -106,7 +106,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev) { MT_TX_SW_CFG1, 0x00010000 }, { MT_TX_SW_CFG2, 0x00000000 }, { MT_TXOP_CTRL_CFG, 0x0400583f }, - { MT_TX_RTS_CFG, 0x00100020 }, + { MT_TX_RTS_CFG, 0x00ffff20 }, { MT_TX_TIMEOUT_CFG, 0x000a2290 }, { MT_TX_RETRY_CFG, 0x47f01f0f }, { MT_EXP_ACK_TIME, 0x002c00dc }, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h index 6c619f1c65c9..d7abe3d73bad 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h @@ -71,6 +71,7 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level, void mt76x2_cleanup(struct mt76x02_dev *dev); +int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard); void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable); void mt76x2_init_txpower(struct mt76x02_dev *dev, struct ieee80211_supported_band *sband); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index 984d9c4c2e1a..d3927a13e92e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c @@ -77,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev) } } -static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) +int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) { const u8 *macaddr = dev->mt76.macaddr; u32 val; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c index 03e24ae7f66c..605dc66ae83b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c @@ -165,9 +165,30 @@ error: return -ENOENT; } +static int +mt76pci_mcu_restart(struct mt76_dev *mdev) +{ + struct mt76x02_dev *dev; + int ret; + + dev = container_of(mdev, struct mt76x02_dev, mt76); + + mt76x02_mcu_cleanup(dev); + mt76x2_mac_reset(dev, true); + + ret = mt76pci_load_firmware(dev); + if (ret) + return ret; + + mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); + + return 0; +} + int mt76x2_mcu_init(struct mt76x02_dev *dev) { static const struct mt76_mcu_ops mt76x2_mcu_ops = { + .mcu_restart = mt76pci_mcu_restart, .mcu_send_msg = mt76x02_mcu_msg_send, }; int ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c index 1848e8ab2e21..769a9b972044 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c @@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev) gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust; - if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40) + val = 0x1836 << 16; + if (!mt76x2_has_ext_lna(dev) && + dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40) val = 0x1e42 << 16; - else - val = 0x1836 << 16; + + if (mt76x2_has_ext_lna(dev) && + dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ && + dev->mt76.chandef.width < 
NL80211_CHAN_WIDTH_40) + val = 0x0f36 << 16; val |= 0xf8; @@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) { u8 *gain = dev->cal.agc_gain_init; u8 low_gain_delta, gain_delta; + u32 agc_35, agc_37; bool gain_change; int low_gain; u32 val; @@ -318,6 +324,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) else low_gain_delta = 14; + agc_37 = 0x2121262c; + if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) + agc_35 = 0x11111516; + else if (low_gain == 2) + agc_35 = agc_37 = 0x08080808; + else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) + agc_35 = 0x10101014; + else + agc_35 = 0x11111116; + if (low_gain == 2) { mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); @@ -326,15 +342,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) dev->cal.agc_gain_adjust = 0; } else { mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); - if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) - mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014); - else - mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116); - mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C); gain_delta = 0; dev->cal.agc_gain_adjust = low_gain_delta; } + mt76_wr(dev, MT_BBP(AGC, 35), agc_35); + mt76_wr(dev, MT_BBP(AGC, 37), agc_37); + dev->cal.agc_gain_cur[0] = gain[0] - gain_delta; dev->cal.agc_gain_cur[1] = gain[1] - gain_delta; mt76x2_phy_set_gain_val(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index ddb6b2c48e01..ac0f13d46299 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -21,11 +21,10 @@ #include "mt76x2u.h" static const struct usb_device_id mt76x2u_device_table[] = { - { USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */ { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */ { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */ { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */ - { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */ + { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */ { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */ { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ @@ -66,6 +65,10 @@ static int mt76x2u_probe(struct usb_interface *intf, mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev); + if (!is_mt76x2(dev)) { + err = -ENODEV; + goto err; + } err = mt76x2u_register_device(dev); if (err < 0) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c index 5e84b4535cb1..3b82345756ea 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c @@ -93,7 +93,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev) mt76_wr(dev, MT_TX_LINK_CFG, 0x1020); mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13); mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00); - mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20); mt76_wr(dev, MT_WMM_AIFSN, 0x2273); mt76_wr(dev, MT_WMM_CWMIN, 0x2344); diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 5a349fe3e576..2585df512335 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -289,8 +289,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta); dev->queue_ops->kick(dev, q); - if (q->queued > q->ndesc - 8) + if (q->queued > 
q->ndesc - 8 && !q->stopped) { ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); + q->stopped = true; + } + spin_unlock_bh(&q->lock); } EXPORT_SYMBOL_GPL(mt76_tx); @@ -374,7 +377,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, if (last_skb) { mt76_queue_ps_skb(dev, sta, last_skb, true); dev->queue_ops->kick(dev, hwq); + } else { + ieee80211_sta_eosp(sta); } + spin_unlock_bh(&hwq->lock); } EXPORT_SYMBOL_GPL(mt76_release_buffered_frames); @@ -577,6 +583,9 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; struct mt76_queue *hwq = mtxq->hwq; + if (!test_bit(MT76_STATE_RUNNING, &dev->state)) + return; + spin_lock_bh(&hwq->lock); if (list_empty(&mtxq->list)) list_add_tail(&mtxq->list, &hwq->swq); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index ae6ada370597..4c1abd492405 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -655,7 +655,11 @@ static void mt76u_tx_tasklet(unsigned long data) spin_lock_bh(&q->lock); } mt76_txq_schedule(dev, q); - wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; + + wake = q->stopped && q->queued < q->ndesc - 8; + if (wake) + q->stopped = false; + if (!q->queued) wake_up(&dev->tx_wait); diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c index d8b7863f7926..6ae7f14dc9bf 100644 --- a/drivers/net/wireless/mediatek/mt7601u/usb.c +++ b/drivers/net/wireless/mediatek/mt7601u/usb.c @@ -303,6 +303,10 @@ static int mt7601u_probe(struct usb_interface *usb_intf, mac_rev = mt7601u_rr(dev, MT_MAC_CSR0); dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n", asic_rev, mac_rev); + if ((asic_rev >> 16) != 0x7601) { + ret = -ENODEV; + goto err; + } /* Note: vendor driver skips this check for MT7601U */ if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c index 2ad263f708da..bb57ec239029 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen1.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c @@ -180,7 +180,7 @@ int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx) return ndev->reg->mw_bar[idx]; } -static inline int ndev_db_addr(struct intel_ntb_dev *ndev, +void ndev_db_addr(struct intel_ntb_dev *ndev, phys_addr_t *db_addr, resource_size_t *db_size, phys_addr_t reg_addr, unsigned long reg) { @@ -196,8 +196,6 @@ static inline int ndev_db_addr(struct intel_ntb_dev *ndev, *db_size = ndev->reg->db_size; dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size); } - - return 0; } u64 ndev_db_read(struct intel_ntb_dev *ndev, @@ -1111,13 +1109,28 @@ int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) ndev->self_reg->db_mask); } -int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, - resource_size_t *db_size) +static int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, + resource_size_t *db_size, u64 *db_data, int db_bit) { + u64 db_bits; struct intel_ntb_dev *ndev = ntb_ndev(ntb); - return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr, + if (unlikely(db_bit >= BITS_PER_LONG_LONG)) + return -EINVAL; + + db_bits = BIT_ULL(db_bit); + + if (unlikely(db_bits & ~ntb_ndev(ntb)->db_valid_mask)) + return -EINVAL; + + ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr, ndev->peer_reg->db_bell); + + if (db_data) + *db_data = db_bits; + + + return 0; } static int 
intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.h b/drivers/ntb/hw/intel/ntb_hw_gen1.h index ad8ec1444436..544cf5c06f4d 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen1.h +++ b/drivers/ntb/hw/intel/ntb_hw_gen1.h @@ -147,6 +147,9 @@ extern struct intel_b2b_addr xeon_b2b_dsd_addr; int ndev_init_isr(struct intel_ntb_dev *ndev, int msix_min, int msix_max, int msix_shift, int total_shift); enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd); +void ndev_db_addr(struct intel_ntb_dev *ndev, + phys_addr_t *db_addr, resource_size_t *db_size, + phys_addr_t reg_addr, unsigned long reg); u64 ndev_db_read(struct intel_ntb_dev *ndev, void __iomem *mmio); int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits, void __iomem *mmio); @@ -166,8 +169,6 @@ int intel_ntb_db_vector_count(struct ntb_dev *ntb); u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector); int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits); int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits); -int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, - resource_size_t *db_size); int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb); int intel_ntb_spad_count(struct ntb_dev *ntb); u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx); diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c index b3fa24778f94..f475b56a3f49 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen3.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c @@ -532,6 +532,37 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, return 0; } +int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, + resource_size_t *db_size, + u64 *db_data, int db_bit) +{ + phys_addr_t db_addr_base; + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + if (unlikely(db_bit >= BITS_PER_LONG_LONG)) + return -EINVAL; + + if (unlikely(BIT_ULL(db_bit) & ~ntb_ndev(ntb)->db_valid_mask)) + return -EINVAL; + + ndev_db_addr(ndev, &db_addr_base, db_size, ndev->peer_addr, + ndev->peer_reg->db_bell); + + if (db_addr) { + *db_addr = db_addr_base + (db_bit * 4); + dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx db bit %d\n", + *db_addr, db_bit); + } + + if (db_data) { + *db_data = 1; + dev_dbg(&ndev->ntb.pdev->dev, "Peer db data %llx db bit %d\n", + *db_data, db_bit); + } + + return 0; +} + static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits) { struct intel_ntb_dev *ndev = ntb_ndev(ntb); @@ -584,7 +615,7 @@ const struct ntb_dev_ops intel_ntb3_ops = { .db_clear = intel_ntb3_db_clear, .db_set_mask = intel_ntb_db_set_mask, .db_clear_mask = intel_ntb_db_clear_mask, - .peer_db_addr = intel_ntb_peer_db_addr, + .peer_db_addr = intel_ntb3_peer_db_addr, .peer_db_set = intel_ntb3_peer_db_set, .spad_is_unsafe = intel_ntb_spad_is_unsafe, .spad_count = intel_ntb_spad_count, diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c index f2df2d39c65b..d905d368d28c 100644 --- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c @@ -236,6 +236,7 @@ static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx) ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN; iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); iowrite32(0, &ctl->bar_entry[bar].win_size); + iowrite32(0, &ctl->bar_ext_entry[bar].win_size); iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr); } @@ -258,7 +259,9 @@ static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx, ctl_val |= 
NTB_CTRL_BAR_DIR_WIN_EN; iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); - iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size); + iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000), + &ctl->bar_entry[bar].win_size); + iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size); iowrite64(sndev->self_partition | addr, &ctl->bar_entry[bar].xlate_addr); } @@ -679,11 +682,16 @@ static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb) static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, - resource_size_t *db_size) + resource_size_t *db_size, + u64 *db_data, + int db_bit) { struct switchtec_ntb *sndev = ntb_sndev(ntb); unsigned long offset; + if (unlikely(db_bit >= BITS_PER_LONG_LONG)) + return -EINVAL; + offset = (unsigned long)sndev->mmio_peer_dbmsg->odb - (unsigned long)sndev->stdev->mmio; @@ -693,6 +701,8 @@ static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb, *db_addr = pci_resource_start(ntb->pdev, 0) + offset; if (db_size) *db_size = sizeof(u32); + if (db_data) + *db_data = BIT_ULL(db_bit) << sndev->db_peer_shift; return 0; } @@ -1025,7 +1035,9 @@ static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx, ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN; iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); - iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size); + iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000), + &ctl->bar_entry[bar].win_size); + iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size); iowrite64(sndev->peer_partition | addr, &ctl->bar_entry[bar].xlate_addr); } @@ -1092,7 +1104,7 @@ static int crosslink_enum_partition(struct switchtec_ntb *sndev, dev_dbg(&sndev->stdev->dev, "Crosslink BAR%d addr: %llx\n", - i, bar_addr); + i*2, bar_addr); if (bar_addr != bar_space * i) continue; diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 3bfdb4562408..d4f39ba1d976 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -144,7 +144,9 @@ struct ntb_transport_qp { struct list_head tx_free_q; spinlock_t ntb_tx_free_q_lock; void __iomem *tx_mw; - dma_addr_t tx_mw_phys; + phys_addr_t tx_mw_phys; + size_t tx_mw_size; + dma_addr_t tx_mw_dma_addr; unsigned int tx_index; unsigned int tx_max_entry; unsigned int tx_max_frame; @@ -862,6 +864,9 @@ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt) if (!nt->link_is_up) cancel_delayed_work_sync(&nt->link_work); + for (i = 0; i < nt->mw_count; i++) + ntb_free_mw(nt, i); + /* The scratchpad registers keep the values if the remote side * goes down, blast them now to give them a sane value the next * time they are accessed @@ -1049,6 +1054,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, tx_size = (unsigned int)mw_size / num_qps_mw; qp_offset = tx_size * (qp_num / mw_count); + qp->tx_mw_size = tx_size; qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset; if (!qp->tx_mw) return -EINVAL; @@ -1644,7 +1650,7 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp, dma_cookie_t cookie; device = chan->device; - dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; + dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index; buff_off = (size_t)buf & ~PAGE_MASK; dest_off = (size_t)dest & ~PAGE_MASK; @@ -1863,6 +1869,18 @@ ntb_transport_create_queue(void *data, struct device *client_dev, qp->rx_dma_chan = NULL; } + if (qp->tx_dma_chan) { + qp->tx_mw_dma_addr = + dma_map_resource(qp->tx_dma_chan->device->dev, + qp->tx_mw_phys, qp->tx_mw_size, + DMA_FROM_DEVICE, 0); + if 
(dma_mapping_error(qp->tx_dma_chan->device->dev, + qp->tx_mw_dma_addr)) { + qp->tx_mw_dma_addr = 0; + goto err1; + } + } + dev_dbg(&pdev->dev, "Using %s memcpy for TX\n", qp->tx_dma_chan ? "DMA" : "CPU"); @@ -1904,6 +1922,10 @@ err1: qp->rx_alloc_entry = 0; while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) kfree(entry); + if (qp->tx_mw_dma_addr) + dma_unmap_resource(qp->tx_dma_chan->device->dev, + qp->tx_mw_dma_addr, qp->tx_mw_size, + DMA_FROM_DEVICE, 0); if (qp->tx_dma_chan) dma_release_channel(qp->tx_dma_chan); if (qp->rx_dma_chan) @@ -1945,6 +1967,11 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp) */ dma_sync_wait(chan, qp->last_cookie); dmaengine_terminate_all(chan); + + dma_unmap_resource(chan->device->dev, + qp->tx_mw_dma_addr, qp->tx_mw_size, + DMA_FROM_DEVICE, 0); + dma_release_channel(chan); } diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c index 521eaf53a52a..36be9b619187 100644 --- a/drivers/nvdimm/e820.c +++ b/drivers/nvdimm/e820.c @@ -47,6 +47,7 @@ static int e820_register_one(struct resource *res, void *data) ndr_desc.res = res; ndr_desc.attr_groups = e820_pmem_region_attribute_groups; ndr_desc.numa_node = e820_range_to_nid(res->start); + ndr_desc.target_node = ndr_desc.numa_node; set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc)) return -ENXIO; diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 379bf4305e61..a5ac3b240293 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -153,7 +153,7 @@ struct nd_region { u16 ndr_mappings; u64 ndr_size; u64 ndr_start; - int id, num_lanes, ro, numa_node; + int id, num_lanes, ro, numa_node, target_node; void *provider_data; struct kernfs_node *bb_state; struct badblocks bb; diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 11b9821eba85..a0c8dcfa0bf9 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c @@ -68,6 +68,7 @@ static int of_pmem_region_probe(struct platform_device *pdev) memset(&ndr_desc, 0, sizeof(ndr_desc)); ndr_desc.attr_groups = region_attr_groups; ndr_desc.numa_node = dev_to_node(&pdev->dev); + ndr_desc.target_node = ndr_desc.numa_node; ndr_desc.res = &pdev->resource[i]; ndr_desc.of_node = np; set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 3b58baa44b5c..b4ef7d9ff22e 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -1072,6 +1072,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, nd_region->flags = ndr_desc->flags; nd_region->ro = ro; nd_region->numa_node = ndr_desc->numa_node; + nd_region->target_node = ndr_desc->target_node; ida_init(&nd_region->ns_ida); ida_init(&nd_region->btt_ida); ida_init(&nd_region->pfn_ida); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 07bf2bff3a76..470601980794 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -179,8 +179,8 @@ static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) int ret = 0; /* - * Keep a reference until the work is flushed since ->delete_ctrl - * can free the controller. + * Keep a reference until nvme_do_delete_ctrl() complete, + * since ->delete_ctrl can free the controller. 
*/ nvme_get_ctrl(ctrl); if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) @@ -1250,7 +1250,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, if (ns) { if (ctrl->effects) effects = le32_to_cpu(ctrl->effects->iocs[opcode]); - if (effects & ~NVME_CMD_EFFECTS_CSUPP) + if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC)) dev_warn(ctrl->device, "IO command:%02x has unhandled effects:%08x\n", opcode, effects); @@ -1495,10 +1495,10 @@ static void nvme_set_chunk_size(struct nvme_ns *ns) blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size)); } -static void nvme_config_discard(struct nvme_ns *ns) +static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) { struct nvme_ctrl *ctrl = ns->ctrl; - struct request_queue *queue = ns->queue; + struct request_queue *queue = disk->queue; u32 size = queue_logical_block_size(queue); if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) { @@ -1526,12 +1526,13 @@ static void nvme_config_discard(struct nvme_ns *ns) blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); } -static inline void nvme_config_write_zeroes(struct nvme_ns *ns) +static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns) { u32 max_sectors; unsigned short bs = 1 << ns->lba_shift; - if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES)) + if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) || + (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) return; /* * Even though NVMe spec explicitly states that MDTS is not @@ -1548,13 +1549,7 @@ static inline void nvme_config_write_zeroes(struct nvme_ns *ns) else max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9; - blk_queue_max_write_zeroes_sectors(ns->queue, max_sectors); -} - -static inline void nvme_ns_config_oncs(struct nvme_ns *ns) -{ - nvme_config_discard(ns); - nvme_config_write_zeroes(ns); + blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors); } static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, @@ -1610,7 +1605,9 @@ static void nvme_update_disk_info(struct gendisk *disk, capacity = 0; set_capacity(disk, capacity); - nvme_ns_config_oncs(ns); + + nvme_config_discard(disk, ns); + nvme_config_write_zeroes(disk, ns); if (id->nsattr & (1 << 0)) set_disk_ro(disk, true); @@ -3304,6 +3301,7 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) mutex_lock(&ctrl->subsys->lock); list_del_rcu(&ns->siblings); mutex_unlock(&ctrl->subsys->lock); + nvme_put_ns_head(ns->head); out_free_id: kfree(id); out_free_queue: diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index b29b12498a1a..f3b9d91ba0df 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -2107,7 +2107,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, freq->sg_cnt = 0; - if (!blk_rq_payload_bytes(rq)) + if (!blk_rq_nr_phys_segments(rq)) return 0; freq->sg_table.sgl = freq->first_sgl; @@ -2304,12 +2304,23 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, if (ret) return ret; - data_len = blk_rq_payload_bytes(rq); - if (data_len) + /* + * nvme core doesn't quite treat the rq opaquely. Commands such + * as WRITE ZEROES will return a non-zero rq payload_bytes yet + * there is no actual payload to be transferred. + * To get it right, key data transmission on there being 1 or + * more physical segments in the sg list. If there is no + * physical segments, there is no payload. + */ + if (blk_rq_nr_phys_segments(rq)) { + data_len = blk_rq_payload_bytes(rq); io_dir = ((rq_data_dir(rq) == WRITE) ? 
NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); - else + } else { + data_len = 0; io_dir = NVMEFC_FCP_NODATA; + } + return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); } @@ -2464,6 +2475,7 @@ static int nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) { struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; + u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1; unsigned int nr_io_queues; int ret; @@ -2476,6 +2488,13 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) return ret; } + if (!nr_io_queues && prior_ioq_cnt) { + dev_info(ctrl->ctrl.device, + "Fail Reconnect: At least 1 io queue " + "required (was %d)\n", prior_ioq_cnt); + return -ENOSPC; + } + ctrl->ctrl.queue_count = nr_io_queues + 1; /* check for io queues existing */ if (ctrl->ctrl.queue_count == 1) @@ -2489,6 +2508,10 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) if (ret) goto out_delete_hw_queues; + if (prior_ioq_cnt != nr_io_queues) + dev_info(ctrl->ctrl.device, + "reconnect: revising io queue count from %d to %d\n", + prior_ioq_cnt, nr_io_queues); blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); return 0; @@ -3006,7 +3029,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, ctrl->ctrl.opts = opts; ctrl->ctrl.nr_reconnects = 0; - ctrl->ctrl.numa_node = dev_to_node(lport->dev); + if (lport->dev) + ctrl->ctrl.numa_node = dev_to_node(lport->dev); + else + ctrl->ctrl.numa_node = NUMA_NO_NODE; INIT_LIST_HEAD(&ctrl->ctrl_list); ctrl->lport = lport; ctrl->rport = rport; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index b91f1838bbd5..527d64545023 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -87,6 +87,11 @@ enum nvme_quirks { * Ignore device provided subnqn. */ NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8), + + /* + * Broken Write Zeroes. 
+ */ + NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9), }; /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 92bad1c810ac..a90cf5d63aac 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2937,7 +2937,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ - .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, + .driver_data = NVME_QUIRK_IDENTIFY_CNS | + NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 208ee518af65..e7e08889865e 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -463,6 +463,15 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue, queue->data_remaining = le32_to_cpu(pdu->data_length); + if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS && + unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) { + dev_err(queue->ctrl->ctrl.device, + "queue %d tag %#x SUCCESS set but not last PDU\n", + nvme_tcp_queue_id(queue), rq->tag); + nvme_tcp_error_recovery(&queue->ctrl->ctrl); + return -EPROTO; + } + return 0; } @@ -618,6 +627,14 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, return ret; } +static inline void nvme_tcp_end_request(struct request *rq, __le16 status) +{ + union nvme_result res = {}; + + nvme_end_request(rq, cpu_to_le16(status << 1), res); +} + + static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, unsigned int *offset, size_t *len) { @@ -685,6 +702,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; } else { + if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) + nvme_tcp_end_request(rq, NVME_SC_SUCCESS); nvme_tcp_init_recv_ctx(queue); } } @@ -695,6 +714,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue, struct sk_buff *skb, unsigned int *offset, size_t *len) { + struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; char *ddgst = (char *)&queue->recv_ddgst; size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; @@ -718,6 +738,13 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue, return -EIO; } + if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { + struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), + pdu->command_id); + + nvme_tcp_end_request(rq, NVME_SC_SUCCESS); + } + nvme_tcp_init_recv_ctx(queue); return 0; } @@ -815,10 +842,7 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) static void nvme_tcp_fail_request(struct nvme_tcp_request *req) { - union nvme_result res = {}; - - nvme_end_request(blk_mq_rq_from_pdu(req), - cpu_to_le16(NVME_SC_DATA_XFER_ERROR), res); + nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR); } static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c index 58456de78bb2..5f24ea7a28eb 100644 --- a/drivers/nvme/host/trace.c +++ b/drivers/nvme/host/trace.c @@ -50,7 +50,19 @@ static const char *nvme_trace_admin_identify(struct 
trace_seq *p, u8 *cdw10) return ret; } +static const char *nvme_trace_admin_get_features(struct trace_seq *p, + u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u8 fid = cdw10[0]; + u8 sel = cdw10[1] & 0x7; + u32 cdw11 = get_unaligned_le32(cdw10 + 4); + + trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11); + trace_seq_putc(p, 0); + return ret; +} static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10) { @@ -101,6 +113,8 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, return nvme_trace_create_cq(p, cdw10); case nvme_admin_identify: return nvme_trace_admin_identify(p, cdw10); + case nvme_admin_get_features: + return nvme_trace_admin_get_features(p, cdw10); default: return nvme_trace_common(p, cdw10); } diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h index 244d7c177e5a..97d3c77365b8 100644 --- a/drivers/nvme/host/trace.h +++ b/drivers/nvme/host/trace.h @@ -108,7 +108,7 @@ TRACE_EVENT(nvme_setup_cmd, __entry->metadata = le64_to_cpu(cmd->common.metadata); __assign_disk_name(__entry->disk, req->rq_disk); memcpy(__entry->cdw10, &cmd->common.cdw10, - 6 * sizeof(__entry->cdw10)); + sizeof(__entry->cdw10)); ), TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)", __entry->ctrl_id, __print_disk_name(__entry->disk), diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index d44ede147263..2d73b66e3686 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -1163,6 +1163,15 @@ static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl) put_device(ctrl->p2p_client); } +static void nvmet_fatal_error_handler(struct work_struct *work) +{ + struct nvmet_ctrl *ctrl = + container_of(work, struct nvmet_ctrl, fatal_err_work); + + pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid); + ctrl->ops->delete_ctrl(ctrl); +} + u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp) { @@ -1205,6 +1214,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work); INIT_LIST_HEAD(&ctrl->async_events); INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL); + INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); @@ -1308,21 +1318,11 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl) kref_put(&ctrl->ref, nvmet_ctrl_free); } -static void nvmet_fatal_error_handler(struct work_struct *work) -{ - struct nvmet_ctrl *ctrl = - container_of(work, struct nvmet_ctrl, fatal_err_work); - - pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid); - ctrl->ops->delete_ctrl(ctrl); -} - void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) { mutex_lock(&ctrl->lock); if (!(ctrl->csts & NVME_CSTS_CFS)) { ctrl->csts |= NVME_CSTS_CFS; - INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); schedule_work(&ctrl->fatal_err_work); } mutex_unlock(&ctrl->lock); diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 1e9654f04c60..98b7b1f4ee96 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1143,10 +1143,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) &tgtport->assoc_list, a_list) { if (!nvmet_fc_tgt_a_get(assoc)) continue; - spin_unlock_irqrestore(&tgtport->lock, flags); - nvmet_fc_delete_target_assoc(assoc); - nvmet_fc_tgt_a_put(assoc); - spin_lock_irqsave(&tgtport->lock, flags); + if 
(!schedule_work(&assoc->del_work)) + nvmet_fc_tgt_a_put(assoc); } spin_unlock_irqrestore(&tgtport->lock, flags); } @@ -1185,7 +1183,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) nvmet_fc_tgtport_put(tgtport); if (found_ctrl) { - schedule_work(&assoc->del_work); + if (!schedule_work(&assoc->del_work)) + nvmet_fc_tgt_a_put(assoc); return; } @@ -1503,10 +1502,8 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf; struct fcnvme_ls_disconnect_acc *acc = (struct fcnvme_ls_disconnect_acc *)iod->rspbuf; - struct nvmet_fc_tgt_queue *queue = NULL; struct nvmet_fc_tgt_assoc *assoc; int ret = 0; - bool del_assoc = false; memset(acc, 0, sizeof(*acc)); @@ -1537,18 +1534,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, assoc = nvmet_fc_find_target_assoc(tgtport, be64_to_cpu(rqst->associd.association_id)); iod->assoc = assoc; - if (assoc) { - if (rqst->discon_cmd.scope == - FCNVME_DISCONN_CONNECTION) { - queue = nvmet_fc_find_target_queue(tgtport, - be64_to_cpu( - rqst->discon_cmd.id)); - if (!queue) { - nvmet_fc_tgt_a_put(assoc); - ret = VERR_NO_CONN; - } - } - } else + if (!assoc) ret = VERR_NO_ASSOC; } @@ -1576,26 +1562,10 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, sizeof(struct fcnvme_ls_disconnect_acc)), FCNVME_LS_DISCONNECT); - - /* are we to delete a Connection ID (queue) */ - if (queue) { - int qid = queue->qid; - - nvmet_fc_delete_target_queue(queue); - - /* release the get taken by find_target_queue */ - nvmet_fc_tgt_q_put(queue); - - /* tear association down if io queue terminated */ - if (!qid) - del_assoc = true; - } - /* release get taken in nvmet_fc_find_target_assoc */ nvmet_fc_tgt_a_put(iod->assoc); - if (del_assoc) - nvmet_fc_delete_target_assoc(iod->assoc); + nvmet_fc_delete_target_assoc(iod->assoc); } diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 71dfedbadc26..a065dbfc43b1 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -194,11 +194,11 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req, le64_to_cpu(range->slba) << (ns->blksize_shift - 9), le32_to_cpu(range->nlb) << (ns->blksize_shift - 9), GFP_KERNEL, 0, bio); - - if (ret) + if (ret && ret != -EOPNOTSUPP) { req->error_slba = le64_to_cpu(range->slba); - - return blk_to_nvme_status(req, errno_to_blk_status(ret)); + return blk_to_nvme_status(req, errno_to_blk_status(ret)); + } + return NVME_SC_SUCCESS; } static void nvmet_bdev_execute_discard(struct nvmet_req *req) diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 517522305e5c..3e43212d3c1c 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -297,7 +297,7 @@ static void nvmet_file_execute_discard(struct nvmet_req *req) } ret = vfs_fallocate(req->ns->file, mode, offset, len); - if (ret) { + if (ret && ret != -EOPNOTSUPP) { req->error_slba = le64_to_cpu(range.slba); status = errno_to_nvme_status(req, ret); break; diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index 56dd83a45e55..5484a46dafda 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c @@ -213,12 +213,10 @@ void parport_daisy_fini(struct parport *port) struct pardevice *parport_open(int devnum, const char *name) { struct daisydev *p = topology; - struct pardev_cb par_cb; struct parport *port; struct pardevice *dev; int daisy; - memset(&par_cb, 0, sizeof(par_cb)); spin_lock(&topology_lock); while (p && p->devnum != devnum) p = p->next; @@ -232,7 
+230,7 @@ struct pardevice *parport_open(int devnum, const char *name) port = parport_get_port(p->port); spin_unlock(&topology_lock); - dev = parport_register_dev_model(port, name, &par_cb, devnum); + dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL); parport_put_port(port); if (!dev) return NULL; @@ -482,31 +480,3 @@ static int assign_addrs(struct parport *port) kfree(deviceid); return detected; } - -static int daisy_drv_probe(struct pardevice *par_dev) -{ - struct device_driver *drv = par_dev->dev.driver; - - if (strcmp(drv->name, "daisy_drv")) - return -ENODEV; - if (strcmp(par_dev->name, daisy_dev_name)) - return -ENODEV; - - return 0; -} - -static struct parport_driver daisy_driver = { - .name = "daisy_drv", - .probe = daisy_drv_probe, - .devmodel = true, -}; - -int daisy_drv_init(void) -{ - return parport_register_driver(&daisy_driver); -} - -void daisy_drv_exit(void) -{ - parport_unregister_driver(&daisy_driver); -} diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c index e5e6a463a941..e035174ba205 100644 --- a/drivers/parport/probe.c +++ b/drivers/parport/probe.c @@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer, ssize_t parport_device_id (int devnum, char *buffer, size_t count) { ssize_t retval = -ENXIO; - struct pardevice *dev = parport_open(devnum, daisy_dev_name); + struct pardevice *dev = parport_open (devnum, "Device ID probe"); if (!dev) return -ENXIO; diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 0171b8dbcdcd..5dc53d420ca8 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -137,19 +137,11 @@ static struct bus_type parport_bus_type = { int parport_bus_init(void) { - int retval; - - retval = bus_register(&parport_bus_type); - if (retval) - return retval; - daisy_drv_init(); - - return 0; + return bus_register(&parport_bus_type); } void parport_bus_exit(void) { - daisy_drv_exit(); bus_unregister(&parport_bus_type); } diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c index 900c7073c46f..71308766e891 100644 --- a/drivers/platform/chrome/cros_ec_debugfs.c +++ b/drivers/platform/chrome/cros_ec_debugfs.c @@ -440,7 +440,7 @@ static int cros_ec_debugfs_probe(struct platform_device *pd) ret = cros_ec_create_pdinfo(debug_info); if (ret) - goto remove_debugfs; + goto remove_log; ec->debug_info = debug_info; @@ -448,6 +448,8 @@ static int cros_ec_debugfs_probe(struct platform_device *pd) return 0; +remove_log: + cros_ec_cleanup_console_log(debug_info); remove_debugfs: debugfs_remove_recursive(debug_info->dir); return ret; @@ -467,7 +469,8 @@ static int __maybe_unused cros_ec_debugfs_suspend(struct device *dev) { struct cros_ec_dev *ec = dev_get_drvdata(dev); - cancel_delayed_work_sync(&ec->debug_info->log_poll_work); + if (ec->debug_info->log_buffer.buf) + cancel_delayed_work_sync(&ec->debug_info->log_poll_work); return 0; } @@ -476,7 +479,8 @@ static int __maybe_unused cros_ec_debugfs_resume(struct device *dev) { struct cros_ec_dev *ec = dev_get_drvdata(dev); - schedule_delayed_work(&ec->debug_info->log_poll_work, 0); + if (ec->debug_info->log_buffer.buf) + schedule_delayed_work(&ec->debug_info->log_poll_work, 0); return 0; } diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c index f6ff29a11f1a..14355668ddfa 100644 --- a/drivers/platform/chrome/wilco_ec/mailbox.c +++ b/drivers/platform/chrome/wilco_ec/mailbox.c @@ -223,11 +223,11 @@ int wilco_ec_mailbox(struct 
wilco_ec_device *ec, struct wilco_ec_message *msg) msg->command, msg->type, msg->flags, msg->response_size, msg->request_size); + mutex_lock(&ec->mailbox_lock); /* Prepare request packet */ rq = ec->data_buffer; wilco_ec_prepare(msg, rq); - mutex_lock(&ec->mailbox_lock); ret = wilco_ec_transfer(ec, msg, rq); mutex_unlock(&ec->mailbox_lock); diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index a0baee25134c..4159c63a5fd2 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -1382,3 +1382,40 @@ int chsc_pnso_brinfo(struct subchannel_id schid, return chsc_error_from_response(brinfo_area->response.code); } EXPORT_SYMBOL_GPL(chsc_pnso_brinfo); + +int chsc_sgib(u32 origin) +{ + struct { + struct chsc_header request; + u16 op; + u8 reserved01[2]; + u8 reserved02:4; + u8 fmt:4; + u8 reserved03[7]; + /* operation data area begin */ + u8 reserved04[4]; + u32 gib_origin; + u8 reserved05[10]; + u8 aix; + u8 reserved06[4029]; + struct chsc_header response; + u8 reserved07[4]; + } *sgib_area; + int ret; + + spin_lock_irq(&chsc_page_lock); + memset(chsc_page, 0, PAGE_SIZE); + sgib_area = chsc_page; + sgib_area->request.length = 0x0fe0; + sgib_area->request.code = 0x0021; + sgib_area->op = 0x1; + sgib_area->gib_origin = origin; + + ret = chsc(sgib_area); + if (ret == 0) + ret = chsc_error_from_response(sgib_area->response.code); + spin_unlock_irq(&chsc_page_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(chsc_sgib); diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 78aba8d94eec..e57d68e325a3 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -164,6 +164,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp); int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd); int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, u64 summary_indicator_addr, u64 subchannel_indicator_addr); +int chsc_sgib(u32 origin); int chsc_error_from_response(int response); int chsc_siosl(struct subchannel_id schid); diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index a45f81ec80ce..8e28a505f7e8 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -413,13 +413,16 @@ static int aac_slave_configure(struct scsi_device *sdev) if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) { devtype = aac->hba_map[chn][tid].devtype; - if (devtype == AAC_DEVTYPE_NATIVE_RAW) + if (devtype == AAC_DEVTYPE_NATIVE_RAW) { depth = aac->hba_map[chn][tid].qd_limit; - else if (devtype == AAC_DEVTYPE_ARC_RAW) + set_timeout = 1; + goto common_config; + } + if (devtype == AAC_DEVTYPE_ARC_RAW) { set_qd_dev_type = true; - - set_timeout = 1; - goto common_config; + set_timeout = 1; + goto common_config; + } } if (aac->jbod && (sdev->type == TYPE_DISK)) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 6c87bd34509a..9bfa9f12d81e 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -18,6 +18,7 @@ #include <linux/dmapool.h> #include <linux/iopoll.h> #include <linux/lcm.h> +#include <linux/libata.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_address.h> @@ -94,6 +95,11 @@ enum { PORT_TYPE_SATA = (1U << 0), }; +enum dev_status { + HISI_SAS_DEV_INIT, + HISI_SAS_DEV_NORMAL, +}; + enum { HISI_SAS_INT_ABT_CMD = 0, HISI_SAS_INT_ABT_DEV = 1, @@ -161,6 +167,7 @@ struct hisi_sas_phy { u8 in_reset; u8 reserved[2]; u32 phy_type; + u32 code_violation_err_count; enum sas_linkrate minimum_linkrate; 
enum sas_linkrate maximum_linkrate; }; @@ -194,6 +201,7 @@ struct hisi_sas_device { struct hisi_sas_dq *dq; struct list_head list; enum sas_device_type dev_type; + enum dev_status dev_status; int device_id; int sata_idx; spinlock_t lock; /* For protecting slots */ diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 13ca5a0bdf6b..14bac4966c87 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -10,6 +10,7 @@ */ #include "hisi_sas.h" +#include "../libsas/sas_internal.h" #define DRV_NAME "hisi_sas" #define DEV_IS_GONE(dev) \ @@ -707,6 +708,7 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) hisi_hba->devices[i].device_id = i; sas_dev = &hisi_hba->devices[i]; + sas_dev->dev_status = HISI_SAS_DEV_INIT; sas_dev->dev_type = device->dev_type; sas_dev->hisi_hba = hisi_hba; sas_dev->sas_device = device; @@ -731,6 +733,8 @@ static int hisi_sas_init_device(struct domain_device *device) struct hisi_sas_tmf_task tmf_task; int retry = HISI_SAS_SRST_ATA_DISK_CNT; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct device *dev = hisi_hba->dev; + struct sas_phy *local_phy; switch (device->dev_type) { case SAS_END_DEVICE: @@ -746,6 +750,31 @@ static int hisi_sas_init_device(struct domain_device *device) case SAS_SATA_PM: case SAS_SATA_PM_PORT: case SAS_SATA_PENDING: + /* + * send HARD RESET to clear previous affiliation of + * STP target port + */ + local_phy = sas_get_local_phy(device); + if (!scsi_is_sas_phy_local(local_phy)) { + unsigned long deadline = ata_deadline(jiffies, 20000); + struct sata_device *sata_dev = &device->sata_dev; + struct ata_host *ata_host = sata_dev->ata_host; + struct ata_port_operations *ops = ata_host->ops; + struct ata_port *ap = sata_dev->ap; + struct ata_link *link; + unsigned int classes; + + ata_for_each_link(link, ap, EDGE) + rc = ops->hardreset(link, &classes, + deadline); + } + sas_put_local_phy(local_phy); + if (rc) { + dev_warn(dev, "SATA disk hardreset fail: 0x%x\n", + rc); + return rc; + } + while (retry-- > 0) { rc = hisi_sas_softreset_ata_disk(device); if (!rc) @@ -808,6 +837,7 @@ static int hisi_sas_dev_found(struct domain_device *device) rc = hisi_sas_init_device(device); if (rc) goto err_out; + sas_dev->dev_status = HISI_SAS_DEV_NORMAL; return 0; err_out: @@ -980,7 +1010,8 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags &= ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); - task->task_state_flags |= SAS_TASK_STATE_DONE; + if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) + task->task_state_flags |= SAS_TASK_STATE_DONE; spin_unlock_irqrestore(&task->task_state_lock, flags); } @@ -1713,20 +1744,23 @@ static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun) static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) { struct sas_phy *local_phy = sas_get_local_phy(device); - int rc, reset_type = (device->dev_type == SAS_SATA_DEV || - (device->tproto & SAS_PROTOCOL_STP)) ? 
0 : 1; + struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct sas_ha_struct *sas_ha = &hisi_hba->sha; struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number]; struct hisi_sas_phy *phy = container_of(sas_phy, struct hisi_sas_phy, sas_phy); DECLARE_COMPLETION_ONSTACK(phyreset); + int rc, reset_type; if (scsi_is_sas_phy_local(local_phy)) { phy->in_reset = 1; phy->reset_completion = &phyreset; } + reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || + !dev_is_sata(device)) ? 1 : 0; + rc = sas_phy_reset(local_phy, reset_type); sas_put_local_phy(local_phy); @@ -1742,8 +1776,13 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) /* report PHY down if timed out */ if (!ret) hisi_sas_phy_down(hisi_hba, sas_phy->id, 0); - } else + } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) { + /* + * If in init state, we rely on caller to wait for link to be + * ready; otherwise, delay. + */ msleep(2000); + } return rc; } @@ -1762,6 +1801,12 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device) } hisi_sas_dereg_device(hisi_hba, device); + if (dev_is_sata(device)) { + rc = hisi_sas_softreset_ata_disk(device); + if (rc) + return TMF_RESP_FUNC_FAILED; + } + rc = hisi_sas_debug_I_T_nexus_reset(device); if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) @@ -2125,9 +2170,18 @@ static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) { + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_phy *sphy = sas_phy->phy; + struct sas_phy_data *d = sphy->hostdata; + phy->phy_attached = 0; phy->phy_type = 0; phy->port = NULL; + + if (d->enable) + sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; + else + sphy->negotiated_linkrate = SAS_PHY_DISABLED; } void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) @@ -2253,6 +2307,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba) for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; hisi_hba->devices[i].device_id = i; + hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; } for (i = 0; i < hisi_hba->queue_count; i++) { diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index e40cc6b3b67b..89160ab3efb0 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -868,6 +868,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) hisi_hba->devices[i].device_id = i; sas_dev = &hisi_hba->devices[i]; + sas_dev->dev_status = HISI_SAS_DEV_INIT; sas_dev->dev_type = device->dev_type; sas_dev->hisi_hba = hisi_hba; sas_dev->sas_device = device; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 9ec8848ec541..086695a4099f 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -129,6 +129,7 @@ #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) #define CMD_HDR_PIR_OFF 8 #define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF) +#define SERDES_CFG (PORT_BASE + 0x1c) #define SL_CFG (PORT_BASE + 0x84) #define AIP_LIMIT (PORT_BASE + 0x90) #define SL_CONTROL (PORT_BASE + 0x94) @@ -181,6 +182,8 @@ #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22 #define CHL_INT2 (PORT_BASE + 0x1bc) #define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0 +#define CHL_INT2_RX_DISP_ERR_OFF 28 +#define CHL_INT2_RX_CODE_ERR_OFF 29 #define CHL_INT2_RX_INVLD_DW_OFF 30 #define CHL_INT2_STP_LINK_TIMEOUT_OFF 31 
#define CHL_INT0_MSK (PORT_BASE + 0x1c0) @@ -523,6 +526,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) } hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, prog_phy_link_rate); + hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00); hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80); hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); @@ -544,6 +548,8 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01); hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32); + hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME, + 0x30f4240); /* used for 12G negotiate */ hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff); @@ -1344,7 +1350,8 @@ static void prep_abort_v3_hw(struct hisi_hba *hisi_hba, static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { - int i, res; + int i; + irqreturn_t res; u32 context, port_id, link_rate; struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; @@ -1575,6 +1582,39 @@ static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value); } +static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_phy *sphy = sas_phy->phy; + unsigned long flags; + u32 reg_value; + + spin_lock_irqsave(&phy->lock, flags); + + /* loss dword sync */ + reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST); + sphy->loss_of_dword_sync_count += reg_value; + + /* phy reset problem */ + reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB); + sphy->phy_reset_problem_count += reg_value; + + /* invalid dword */ + reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW); + sphy->invalid_dword_count += reg_value; + + /* disparity err */ + reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR); + sphy->running_disparity_error_count += reg_value; + + /* code violation error */ + reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR); + phy->code_violation_err_count += reg_value; + + spin_unlock_irqrestore(&phy->lock, flags); +} + static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK); @@ -1582,6 +1622,9 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct pci_dev *pci_dev = hisi_hba->pci_dev; struct device *dev = hisi_hba->dev; + static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) | + BIT(CHL_INT2_RX_CODE_ERR_OFF) | + BIT(CHL_INT2_RX_INVLD_DW_OFF); irq_value &= ~irq_msk; if (!irq_value) @@ -1602,6 +1645,25 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); } + if (pci_dev->revision > 0x20 && (irq_value & msk)) { + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_phy *sphy = sas_phy->phy; + + phy_get_events_v3_hw(hisi_hba, phy_no); + + if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) + dev_info(dev, "phy%d invalid dword cnt: %u\n", phy_no, + sphy->invalid_dword_count); + + if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF)) + dev_info(dev, "phy%d code violation 
cnt: %u\n", phy_no, + phy->code_violation_err_count); + + if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF)) + dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no, + sphy->running_disparity_error_count); + } + if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) && (pci_dev->revision == 0x20)) { u32 reg_value; @@ -2230,31 +2292,6 @@ static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba) return hisi_sas_read32(hisi_hba, PHY_STATE); } -static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no) -{ - struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - struct sas_phy *sphy = sas_phy->phy; - u32 reg_value; - - /* loss dword sync */ - reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST); - sphy->loss_of_dword_sync_count += reg_value; - - /* phy reset problem */ - reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB); - sphy->phy_reset_problem_count += reg_value; - - /* invalid dword */ - reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW); - sphy->invalid_dword_count += reg_value; - - /* disparity err */ - reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR); - sphy->running_disparity_error_count += reg_value; - -} - static int disable_host_v3_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 1135e74646e2..8cec5230fe31 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -96,6 +96,7 @@ static int client_reserve = 1; static char partition_name[96] = "UNKNOWN"; static unsigned int partition_number = -1; static LIST_HEAD(ibmvscsi_head); +static DEFINE_SPINLOCK(ibmvscsi_driver_lock); static struct scsi_transport_template *ibmvscsi_transport_template; @@ -2270,7 +2271,9 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) } dev_set_drvdata(&vdev->dev, hostdata); + spin_lock(&ibmvscsi_driver_lock); list_add_tail(&hostdata->host_list, &ibmvscsi_head); + spin_unlock(&ibmvscsi_driver_lock); return 0; add_srp_port_failed: @@ -2292,15 +2295,27 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) static int ibmvscsi_remove(struct vio_dev *vdev) { struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); - list_del(&hostdata->host_list); - unmap_persist_bufs(hostdata); + unsigned long flags; + + srp_remove_host(hostdata->host); + scsi_remove_host(hostdata->host); + + purge_requests(hostdata, DID_ERROR); + + spin_lock_irqsave(hostdata->host->host_lock, flags); release_event_pool(&hostdata->pool, hostdata); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events); kthread_stop(hostdata->work_thread); - srp_remove_host(hostdata->host); - scsi_remove_host(hostdata->host); + unmap_persist_bufs(hostdata); + + spin_lock(&ibmvscsi_driver_lock); + list_del(&hostdata->host_list); + spin_unlock(&ibmvscsi_driver_lock); + scsi_host_put(hostdata->host); return 0; diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 21309d5b456d..e893949a3d11 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -798,7 +798,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); * @datalen: len of buffer * * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and - * then completes the command and task. + * then completes the command and task. 
called under back_lock **/ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, struct iscsi_task *task, char *data, @@ -894,6 +894,9 @@ out: * @conn: iscsi connection * @hdr: iscsi pdu * @task: scsi command task + * + * iscsi_data_in_rsp sets up the scsi_cmnd fields based on the data received + * then completes the command and task. called under back_lock **/ static void iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, @@ -978,6 +981,16 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) return 0; } +/** + * iscsi_nop_out_rsp - SCSI NOP Response processing + * @task: scsi command task + * @nop: the nop structure + * @data: where to put the data + * @datalen: length of data + * + * iscsi_nop_out_rsp handles nop response from use or + * from user space. called under back_lock + **/ static int iscsi_nop_out_rsp(struct iscsi_task *task, struct iscsi_nopin *nop, char *data, int datalen) { @@ -1750,7 +1763,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) return 0; prepd_reject: + spin_lock_bh(&session->back_lock); iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); + spin_unlock_bh(&session->back_lock); reject: spin_unlock_bh(&session->frwd_lock); ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", @@ -1758,7 +1773,9 @@ reject: return SCSI_MLQUEUE_TARGET_BUSY; prepd_fault: + spin_lock_bh(&session->back_lock); iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); + spin_unlock_bh(&session->back_lock); fault: spin_unlock_bh(&session->frwd_lock); ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", @@ -3075,8 +3092,9 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn) state = ISCSI_TASK_ABRT_SESS_RECOV; if (task->state == ISCSI_TASK_PENDING) state = ISCSI_TASK_COMPLETED; + spin_lock_bh(&session->back_lock); iscsi_complete_task(task, state); - + spin_unlock_bh(&session->back_lock); } } diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 9923e9e3b884..c3fe3f3a78f5 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -129,12 +129,17 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv) BUG_ON(sg->length == 0); /* + * We always map for the recv path. + * * If the page count is greater than one it is ok to send * to the network layer's zero copy send path. If not we - * have to go the slow sendmsg path. We always map for the - * recv path. + * have to go the slow sendmsg path. + * + * Same goes for slab pages: skb_can_coalesce() allows + * coalescing neighboring slab objects into a single frag which + * triggers one of hardened usercopy checks. 
*/ - if (page_count(sg_page(sg)) >= 1 && !recv) + if (!recv && page_count(sg_page(sg)) >= 1 && !PageSlab(sg_page(sg))) return; if (recv) { diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 3b5873f6751e..7fcdaed3fa94 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4090,7 +4090,7 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) /* Sanity check to ensure our sizing is right for both SCSI and NVME */ if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "6426 Common buffer size %ld exceeds %d\n", + "6426 Common buffer size %zd exceeds %d\n", sizeof(struct lpfc_io_buf), LPFC_COMMON_IO_BUF_SZ); return 0; @@ -10052,7 +10052,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) { struct pci_dev *pdev = phba->pcidev; unsigned long bar0map_len, bar1map_len, bar2map_len; - int error = -ENODEV; + int error; uint32_t if_type; if (!pdev) @@ -10071,7 +10071,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) */ if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &phba->sli4_hba.sli_intf.word0)) { - return error; + return -ENODEV; } /* There is no SLI3 failback for SLI4 devices. */ @@ -10081,7 +10081,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) "2894 SLI_INTF reg contents invalid " "sli_intf reg 0x%x\n", phba->sli4_hba.sli_intf.word0); - return error; + return -ENODEV; } if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); @@ -10105,7 +10105,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLI4 PCI config " "registers.\n"); - goto out; + return -ENODEV; } phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; /* Set up BAR0 PCI config space register memory map */ @@ -10116,7 +10116,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { dev_printk(KERN_ERR, &pdev->dev, "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); - goto out; + return -ENODEV; } phba->sli4_hba.conf_regs_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); @@ -10124,7 +10124,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLI4 PCI config " "registers.\n"); - goto out; + return -ENODEV; } lpfc_sli4_bar0_register_memmap(phba, if_type); } @@ -10170,6 +10170,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) if (!phba->sli4_hba.drbl_regs_memmap_p) { dev_err(&pdev->dev, "ioremap failed for SLI4 HBA doorbell registers.\n"); + error = -ENOMEM; goto out_iounmap_conf; } phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; @@ -10219,6 +10220,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) if (!phba->sli4_hba.dpp_regs_memmap_p) { dev_err(&pdev->dev, "ioremap failed for SLI4 HBA dpp registers.\n"); + error = -ENOMEM; goto out_iounmap_ctrl; } phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; @@ -10249,7 +10251,7 @@ out_iounmap_ctrl: iounmap(phba->sli4_hba.ctrl_regs_memmap_p); out_iounmap_conf: iounmap(phba->sli4_hba.conf_regs_memmap_p); -out: + return error; } @@ -11137,7 +11139,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) lpfc_sli4_ras_dma_free(phba); /* Stop the SLI4 device port */ - phba->pport->work_port_events = 0; + if (phba->pport) + phba->pport->work_port_events = 0; } /** diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 55ab9d3ee4ba..1aa00d2c3f74 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -965,7 +965,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, 
struct lpfc_iocbq *pwqeIn, struct lpfc_nodelist *ndlp; struct lpfc_nvme_fcpreq_priv *freqpriv; struct lpfc_nvme_lport *lport; - uint32_t code, status, idx, cpu; + uint32_t code, status, idx; uint16_t cid, sqhd, data; uint32_t *ptr; @@ -1138,6 +1138,7 @@ out_err: lpfc_nvme_ktime(phba, lpfc_ncmd); } if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { + uint32_t cpu; idx = lpfc_ncmd->cur_iocbq.hba_wqidx; cpu = smp_processor_id(); if (cpu < LPFC_CHECK_CPU_CNT) { diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index d0817facdae3..57b4a463b589 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -9881,7 +9881,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, * The WQE can be either 64 or 128 bytes, */ - lockdep_assert_held(&phba->hbalock); + lockdep_assert_held(&pring->ring_lock); if (piocb->sli4_xritag == NO_XRI) { if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index dace907744a5..293f5cf524d7 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3924,12 +3924,12 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) /* * The cur_state should not last for more than max_wait secs */ - for (i = 0; i < max_wait; i++) { + for (i = 0; i < max_wait * 50; i++) { curr_abs_state = instance->instancet-> read_fw_status_reg(instance); if (abs_state == curr_abs_state) { - msleep(1000); + msleep(20); } else break; } diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 2eb1ae721a7d..f928c4d3a1ef 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1652,6 +1652,8 @@ qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr, } rval = kstrtol(buf, 10, &type); + if (rval) + return rval; speed = type; if (type == 40 || type == 80 || type == 160 || type == 320) { diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index ead17288e2a7..5819a45ac5ef 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -193,6 +193,8 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused) for (i = 0; i < vha->hw->max_qpairs; i++) { qpair = vha->hw->queue_pair_map[i]; + if (!qpair) + continue; qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd; core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf; qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 420045155ba0..0c700b140ce7 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -4991,6 +4991,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) if ((domain & 0xf0) == 0xf0) continue; + /* Bypass if not same domain and area of adapter. */ + if (area && domain && ((area != vha->d_id.b.area) || + (domain != vha->d_id.b.domain)) && + (ha->current_topology == ISP_CFG_NL)) + continue; + + /* Bypass invalid local loop ID. 
*/ if (loop_id > LAST_LOCAL_LOOP_ID) continue; diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 63f8e3c19841..456a41d2e2c6 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1132,7 +1132,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, /* if initiator doing write or target doing read */ if (direction_to_device) { for_each_sg(sgl, sg, tot_dsds, i) { - dma_addr_t sle_phys = sg_phys(sg); + u64 sle_phys = sg_phys(sg); /* If SGE addr + len flips bits in upper 32-bits */ if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) { @@ -1178,7 +1178,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023, "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n", - __func__, i, sg_phys(sg), sglen, ldma_sg_len, + __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len, difctx->dif_bundl_len, ldma_needed); while (sglen) { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 677f82fdf56f..91f576d743fe 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1517,7 +1517,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, goto eh_reset_failed; } err = 2; - if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1) + if (do_reset(fcport, cmd->device->lun, 1) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x800c, "do_reset failed for cmd=%p.\n", cmd); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 20189675677a..601b9f1de267 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -585,10 +585,17 @@ static bool scsi_end_request(struct request *req, blk_status_t error, if (!blk_rq_is_scsi(req)) { WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); cmd->flags &= ~SCMD_INITIALIZED; - destroy_rcu_head(&cmd->rcu); } /* + * Calling rcu_barrier() is not necessary here because the + * SCSI error handler guarantees that the function called by + * call_rcu() has been called before scsi_end_request() is + * called. + */ + destroy_rcu_head(&cmd->rcu); + + /* * In the MQ case the command gets freed by __blk_mq_end_request, * so we have to do all cleanup that depends on it earlier. 
* @@ -2541,8 +2548,10 @@ void scsi_device_resume(struct scsi_device *sdev) * device deleted during suspend) */ mutex_lock(&sdev->state_mutex); - sdev->quiesced_by = NULL; - blk_clear_pm_only(sdev->request_queue); + if (sdev->quiesced_by) { + sdev->quiesced_by = NULL; + blk_clear_pm_only(sdev->request_queue); + } if (sdev->sdev_state == SDEV_QUIESCE) scsi_device_set_state(sdev, SDEV_RUNNING); mutex_unlock(&sdev->state_mutex); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 0508831d6fb9..0a82e93566dc 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2200,6 +2200,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session) scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); /* flush running scans then delete devices */ flush_work(&session->scan_work); + /* flush running unbind operations */ + flush_work(&session->unbind_work); __iscsi_unbind_session(&session->unbind_work); /* hw iscsi may not have removed all connections from session */ diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 5d9ccbab7581..75ec43aa8df3 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -2764,6 +2764,12 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request) sshdr.sense_key == HARDWARE_ERROR && sshdr.asc == 0x3e && sshdr.ascq == 0x1) { + struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); + struct pqi_scsi_dev *device = scmd->device->hostdata; + + if (printk_ratelimit()) + scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); pqi_take_device_offline(scmd->device, "RAID"); host_byte = DID_NO_CONNECT; } diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c index f2d3df357a97..0e855b5afe82 100644 --- a/drivers/scsi/ufs/ufs-hisi.c +++ b/drivers/scsi/ufs/ufs-hisi.c @@ -640,7 +640,7 @@ static int ufs_hi3670_init(struct ufs_hba *hba) return 0; } -static struct ufs_hba_variant_ops ufs_hba_hi3660_vops = { +static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = { .name = "hi3660", .init = ufs_hi3660_init, .link_startup_notify = ufs_hisi_link_startup_notify, @@ -649,7 +649,7 @@ static struct ufs_hba_variant_ops ufs_hba_hi3660_vops = { .resume = ufs_hisi_resume, }; -static struct ufs_hba_variant_ops ufs_hba_hi3670_vops = { +static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = { .name = "hi3670", .init = ufs_hi3670_init, .link_startup_notify = ufs_hisi_link_startup_notify, @@ -669,13 +669,10 @@ MODULE_DEVICE_TABLE(of, ufs_hisi_of_match); static int ufs_hisi_probe(struct platform_device *pdev) { const struct of_device_id *of_id; - struct ufs_hba_variant_ops *vops; - struct device *dev = &pdev->dev; - of_id = of_match_node(ufs_hisi_of_match, dev->of_node); - vops = (struct ufs_hba_variant_ops *)of_id->data; + of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node); - return ufshcd_pltfrm_init(pdev, vops); + return ufshcd_pltfrm_init(pdev, of_id->data); } static int ufs_hisi_remove(struct platform_device *pdev) diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 895a9b5ac989..27213676329c 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -297,7 +297,7 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba) * Returns 0 on success, non-zero value on failure */ int 
ufshcd_pltfrm_init(struct platform_device *pdev, - struct ufs_hba_variant_ops *vops) + const struct ufs_hba_variant_ops *vops) { struct ufs_hba *hba; void __iomem *mmio_base; diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h index df64c4180340..1f29e1fd6d52 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.h +++ b/drivers/scsi/ufs/ufshcd-pltfrm.h @@ -17,7 +17,7 @@ #include "ufshcd.h" int ufshcd_pltfrm_init(struct platform_device *pdev, - struct ufs_hba_variant_ops *vops); + const struct ufs_hba_variant_ops *vops); void ufshcd_pltfrm_shutdown(struct platform_device *pdev); #ifdef CONFIG_PM diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 69ba7445d2b3..ecfa898b9ccc 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -546,7 +546,7 @@ struct ufs_hba { int nutrs; int nutmrs; u32 ufs_version; - struct ufs_hba_variant_ops *vops; + const struct ufs_hba_variant_ops *vops; void *priv; unsigned int irq; bool is_irq_enabled; diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 1a6f150cd2d8..8af01777d09c 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -586,7 +586,6 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc) return FAILED; memset(cmd, 0, sizeof(*cmd)); - cmd->sc = sc; cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ .type = VIRTIO_SCSI_T_TMF, .subtype = cpu_to_virtio32(vscsi->vdev, @@ -645,7 +644,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc) return FAILED; memset(cmd, 0, sizeof(*cmd)); - cmd->sc = sc; cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ .type = VIRTIO_SCSI_T_TMF, .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK, diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 845b32eaff36..481d371c4b01 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1660,7 +1660,7 @@ static void tcmu_dev_kref_release(struct kref *kref) WARN_ON(!all_expired); tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1); - kfree(udev->data_bitmap); + bitmap_free(udev->data_bitmap); mutex_unlock(&udev->cmdr_lock); call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); @@ -1791,11 +1791,12 @@ static int tcmu_netlink_event_send(struct tcmu_dev *udev, ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, TCMU_MCGRP_CONFIG, GFP_KERNEL); - /* We don't care if no one is listening */ - if (ret == -ESRCH) - ret = 0; - if (!ret) - ret = tcmu_wait_genl_cmd_reply(udev); + + /* Wait during an add as the listener may not be up yet */ + if (ret == 0 || + (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE)) + return tcmu_wait_genl_cmd_reply(udev); + return ret; } @@ -1867,9 +1868,7 @@ static int tcmu_configure_device(struct se_device *dev) info = &udev->uio_info; mutex_lock(&udev->cmdr_lock); - udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks), - sizeof(unsigned long), - GFP_KERNEL); + udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL); mutex_unlock(&udev->cmdr_lock); if (!udev->data_bitmap) { ret = -ENOMEM; @@ -1956,7 +1955,7 @@ err_register: vfree(udev->mb_addr); udev->mb_addr = NULL; err_vzalloc: - kfree(udev->data_bitmap); + bitmap_free(udev->data_bitmap); udev->data_bitmap = NULL; err_bitmap_alloc: kfree(info->name); diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index 720760cd493f..ba39647a690c 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -119,8 +119,7 @@ static const struct debugfs_reg32 
bcm2835_thermal_regs[] = { static void bcm2835_thermal_debugfs(struct platform_device *pdev) { - struct thermal_zone_device *tz = platform_get_drvdata(pdev); - struct bcm2835_thermal_data *data = tz->devdata; + struct bcm2835_thermal_data *data = platform_get_drvdata(pdev); struct debugfs_regset32 *regset; data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL); @@ -266,7 +265,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) data->tz = tz; - platform_set_drvdata(pdev, tz); + platform_set_drvdata(pdev, data); /* * Thermal_zone doesn't enable hwmon as default, @@ -290,8 +289,8 @@ err_clk: static int bcm2835_thermal_remove(struct platform_device *pdev) { - struct thermal_zone_device *tz = platform_get_drvdata(pdev); - struct bcm2835_thermal_data *data = tz->devdata; + struct bcm2835_thermal_data *data = platform_get_drvdata(pdev); + struct thermal_zone_device *tz = data->tz; debugfs_remove_recursive(data->debugfsdir); thermal_zone_of_sensor_unregister(&pdev->dev, tz); diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 6fff16113628..f7c1f49ec87f 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -536,12 +536,11 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev, struct thermal_zone_device *tz, u32 power, unsigned long *state) { - unsigned int cur_freq, target_freq; + unsigned int target_freq; u32 last_load, normalised_power; struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; struct cpufreq_policy *policy = cpufreq_cdev->policy; - cur_freq = cpufreq_quick_get(policy->cpu); power = power > 0 ? power : 0; last_load = cpufreq_cdev->last_load ?: 1; normalised_power = (power * 100) / last_load; diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 61ca7ce3624e..5f3ed24e26ec 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -22,6 +22,13 @@ enum int3400_thermal_uuid { INT3400_THERMAL_PASSIVE_1, INT3400_THERMAL_ACTIVE, INT3400_THERMAL_CRITICAL, + INT3400_THERMAL_ADAPTIVE_PERFORMANCE, + INT3400_THERMAL_EMERGENCY_CALL_MODE, + INT3400_THERMAL_PASSIVE_2, + INT3400_THERMAL_POWER_BOSS, + INT3400_THERMAL_VIRTUAL_SENSOR, + INT3400_THERMAL_COOLING_MODE, + INT3400_THERMAL_HARDWARE_DUTY_CYCLING, INT3400_THERMAL_MAXIMUM_UUID, }; @@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = { "42A441D6-AE6A-462b-A84B-4A8CE79027D3", "3A95C389-E4B8-4629-A526-C52C88626BAE", "97C68AE7-15FA-499c-B8C9-5DA81D606E0A", + "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D", + "5349962F-71E6-431D-9AE8-0A635B710AEE", + "9E04115A-AE87-4D1C-9500-0F3E340BFE75", + "F5A35014-C209-46A4-993A-EB56DE7530A1", + "6ED722A7-9240-48A5-B479-31EEF723D7CF", + "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531", + "BE84BABF-C4D4-403D-B495-3128FD44dAC1", }; struct int3400_thermal_priv { @@ -299,10 +313,9 @@ static int int3400_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); - if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) { - int3400_thermal_ops.get_mode = int3400_thermal_get_mode; - int3400_thermal_ops.set_mode = int3400_thermal_set_mode; - } + int3400_thermal_ops.get_mode = int3400_thermal_get_mode; + int3400_thermal_ops.set_mode = int3400_thermal_set_mode; + priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0, priv, &int3400_thermal_ops, &int3400_thermal_params, 0, 0); diff --git 
a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index 7571f7c2e7c9..ac7256b5f020 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -101,7 +101,7 @@ struct powerclamp_worker_data { bool clamping; }; -static struct powerclamp_worker_data * __percpu worker_data; +static struct powerclamp_worker_data __percpu *worker_data; static struct thermal_cooling_device *cooling_dev; static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu * clamping kthread worker @@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu) struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu); struct kthread_worker *worker; - worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu); + worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu); if (IS_ERR(worker)) return; diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index 5c07a61447d3..e4ea7f6aef20 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -199,6 +199,9 @@ enum { #define MT7622_TS1 0 #define MT7622_NUM_CONTROLLER 1 +/* The maximum number of banks */ +#define MAX_NUM_ZONES 8 + /* The calibration coefficient of sensor */ #define MT7622_CALIBRATION 165 @@ -249,7 +252,7 @@ struct mtk_thermal_data { const int num_controller; const int *controller_offset; bool need_switch_bank; - struct thermal_bank_cfg bank_data[]; + struct thermal_bank_cfg bank_data[MAX_NUM_ZONES]; }; struct mtk_thermal { @@ -268,7 +271,7 @@ struct mtk_thermal { s32 vts[MAX_NUM_VTS]; const struct mtk_thermal_data *conf; - struct mtk_thermal_bank banks[]; + struct mtk_thermal_bank banks[MAX_NUM_ZONES]; }; /* MT8183 thermal sensor data */ diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 48eef552cba4..fc9399d9c082 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp) struct exynos_tmu_data *data = p; int value, ret = 0; - if (!data || !data->tmu_read || !data->enabled) + if (!data || !data->tmu_read) return -EINVAL; else if (!data->enabled) /* diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c index e695adb0e573..2dc5703eac51 100644 --- a/drivers/video/fbdev/aty/radeon_pm.c +++ b/drivers/video/fbdev/aty/radeon_pm.c @@ -2844,8 +2844,8 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlis * in some desktop G4s), Via (M9+ chip on iBook G4) and * Snowy (M11 chip on iBook G4 manufactured after July 2005) */ - if (!strcmp(rinfo->of_node->name, "ATY,JasperParent") || - !strcmp(rinfo->of_node->name, "ATY,SnowyParent")) { + if (of_node_name_eq(rinfo->of_node, "ATY,JasperParent") || + of_node_name_eq(rinfo->of_node, "ATY,SnowyParent")) { rinfo->reinit_func = radeon_reinitialize_M10; rinfo->pm_mode |= radeon_pm_off; } @@ -2855,7 +2855,7 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlis rinfo->pm_mode |= radeon_pm_off; } #endif - if (!strcmp(rinfo->of_node->name, "ATY,ViaParent")) { + if (of_node_name_eq(rinfo->of_node, "ATY,ViaParent")) { rinfo->reinit_func = radeon_reinitialize_M9P; rinfo->pm_mode |= radeon_pm_off; } diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c index 9af54c2368fd..a6dce1a78490 100644 --- a/drivers/video/fbdev/cg14.c +++ b/drivers/video/fbdev/cg14.c @@ -486,8 +486,8 @@ static int cg14_probe(struct 
platform_device *op) info->var.xres); info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres); - if (!strcmp(dp->parent->name, "sbus") || - !strcmp(dp->parent->name, "sbi")) { + if (of_node_name_eq(dp->parent, "sbus") || + of_node_name_eq(dp->parent, "sbi")) { info->fix.smem_start = op->resource[0].start; par->iospace = op->resource[0].flags & IORESOURCE_BITS; } else { diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c index 1bd95b02f3aa..6d42def8436b 100644 --- a/drivers/video/fbdev/cg3.c +++ b/drivers/video/fbdev/cg3.c @@ -369,7 +369,7 @@ static int cg3_probe(struct platform_device *op) info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; - if (!strcmp(dp->name, "cgRDI")) + if (of_node_name_eq(dp, "cgRDI")) par->flags |= CG3_FLAG_RDI; if (par->flags & CG3_FLAG_RDI) cg3_rdi_maybe_fixup_var(&info->var, dp); diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c index 40182ed85648..ca549e1532e6 100644 --- a/drivers/video/fbdev/chipsfb.c +++ b/drivers/video/fbdev/chipsfb.c @@ -349,7 +349,7 @@ static void init_chips(struct fb_info *p, unsigned long addr) static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) { struct fb_info *p; - unsigned long addr, size; + unsigned long addr; unsigned short cmd; int rc = -ENODEV; @@ -361,7 +361,6 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) goto err_disable; addr = pci_resource_start(dp, 0); - size = pci_resource_len(dp, 0); if (addr == 0) goto err_disable; diff --git a/drivers/video/fbdev/core/fb_cmdline.c b/drivers/video/fbdev/core/fb_cmdline.c index 39509ccd92f1..3b5bd666b952 100644 --- a/drivers/video/fbdev/core/fb_cmdline.c +++ b/drivers/video/fbdev/core/fb_cmdline.c @@ -75,36 +75,33 @@ EXPORT_SYMBOL(fb_get_options); * NOTE: This function is a __setup and __init function. * It only stores the options. Drivers have to call * fb_get_options() as necessary. - * - * Returns zero. 
- * */ static int __init video_setup(char *options) { - int i, global = 0; - if (!options || !*options) - global = 1; + goto out; - if (!global && !strncmp(options, "ofonly", 6)) { + if (!strncmp(options, "ofonly", 6)) { ofonly = 1; - global = 1; + goto out; } - if (!global && !strchr(options, ':')) { - fb_mode_option = options; - global = 1; - } + if (strchr(options, ':')) { + /* named */ + int i; - if (!global) { for (i = 0; i < FB_MAX; i++) { if (video_options[i] == NULL) { video_options[i] = options; break; } } + } else { + /* global */ + fb_mode_option = options; } +out: return 1; } __setup("video=", video_setup); diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index bfa1360ec750..cd059a801662 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -656,11 +656,14 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, kfree(save); } + if (logo_shown == FBCON_LOGO_DONTSHOW) + return; + if (logo_lines > vc->vc_bottom) { logo_shown = FBCON_LOGO_CANSHOW; printk(KERN_INFO "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n"); - } else if (logo_shown != FBCON_LOGO_DONTSHOW) { + } else { logo_shown = FBCON_LOGO_DRAW; vc->vc_top = logo_lines; } @@ -999,7 +1002,7 @@ static const char *fbcon_startup(void) if (!softback_buf) { softback_buf = (unsigned long) - kmalloc(fbcon_softback_size, + kvmalloc(fbcon_softback_size, GFP_KERNEL); if (!softback_buf) { fbcon_softback_size = 0; @@ -1008,7 +1011,7 @@ static const char *fbcon_startup(void) } } else { if (softback_buf) { - kfree((void *) softback_buf); + kvfree((void *) softback_buf); softback_buf = 0; softback_top = 0; } @@ -1066,6 +1069,9 @@ static void fbcon_init(struct vc_data *vc, int init) cap = info->flags; + if (console_loglevel <= CONSOLE_LOGLEVEL_QUIET) + logo_shown = FBCON_LOGO_DONTSHOW; + if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW || (info->fix.type == FB_TYPE_TEXT)) logo = 0; @@ -3672,7 +3678,7 @@ static void fbcon_exit(void) } #endif - kfree((void *)softback_buf); + kvfree((void *)softback_buf); softback_buf = 0UL; for_each_registered_fb(i) { diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index cb43a2258c51..4721491e6c8c 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -431,6 +431,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image, { unsigned int x; + if (image->width > info->var.xres || image->height > info->var.yres) + return; + if (rotate == FB_ROTATE_UR) { for (x = 0; x < num && image->dx + image->width <= info->var.xres; diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index dd3128990776..3558a70a6664 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c @@ -978,6 +978,8 @@ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs) get_monspecs(edid, specs); specs->modedb = fb_create_modedb(edid, &specs->modedb_len, specs); + if (!specs->modedb) + return; /* * Workaround for buggy EDIDs that sets that the first diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c index 6b1915872af1..b7aee0c427a8 100644 --- a/drivers/video/fbdev/ffb.c +++ b/drivers/video/fbdev/ffb.c @@ -944,7 +944,7 @@ static int ffb_probe(struct platform_device *op) info->var.accel_flags = FB_ACCELF_TEXT; - if (!strcmp(dp->name, "SUNW,afb")) + if (of_node_name_eq(dp, "SUNW,afb")) par->flags |= FFB_FLAG_AFB; par->board_type = of_getintprop_default(dp, "board_type", 0); 
diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c index f4f76373b2a8..b1906cf5a8f0 100644 --- a/drivers/video/fbdev/geode/gxfb_core.c +++ b/drivers/video/fbdev/geode/gxfb_core.c @@ -33,6 +33,8 @@ #include <linux/pci.h> #include <linux/cs5535.h> +#include <asm/olpc.h> + #include "gxfb.h" static char *mode_option; @@ -107,9 +109,6 @@ static struct fb_videomode gx_modedb[] = { FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, }; -#ifdef CONFIG_OLPC -#include <asm/olpc.h> - static struct fb_videomode gx_dcon_modedb[] = { /* The only mode the DCON has is 1200x900 */ { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3, @@ -128,14 +127,6 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size) } } -#else -static void get_modedb(struct fb_videomode **modedb, unsigned int *size) -{ - *modedb = (struct fb_videomode *) gx_modedb; - *size = ARRAY_SIZE(gx_modedb); -} -#endif - static int gxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { if (var->xres > 1600 || var->yres > 1200) diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c index 138da6cb6cbc..17ab905811b1 100644 --- a/drivers/video/fbdev/geode/lxfb_core.c +++ b/drivers/video/fbdev/geode/lxfb_core.c @@ -23,6 +23,8 @@ #include <linux/pci.h> #include <linux/uaccess.h> +#include <asm/olpc.h> + #include "lxfb.h" static char *mode_option; @@ -216,9 +218,6 @@ static struct fb_videomode geode_modedb[] = { 0, FB_VMODE_NONINTERLACED, 0 }, }; -#ifdef CONFIG_OLPC -#include <asm/olpc.h> - static struct fb_videomode olpc_dcon_modedb[] = { /* The only mode the DCON has is 1200x900 */ { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3, @@ -237,14 +236,6 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size) } } -#else -static void get_modedb(struct fb_videomode **modedb, unsigned int *size) -{ - *modedb = (struct fb_videomode *) geode_modedb; - *size = ARRAY_SIZE(geode_modedb); -} -#endif - static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { if (var->xres > 1920 || var->yres > 1440) diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index 5d9670daf60e..4b9615e4ce74 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1497,8 +1497,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) switch (pdev->device) { case PCI_DEVICE_ID_IMS_TT128: /* IMS,tt128mbA */ par->ramdac = IBM; - if (dp && ((strcmp(dp->name, "IMS,tt128mb8") == 0) || - (strcmp(dp->name, "IMS,tt128mb8A") == 0))) + if (of_node_name_eq(dp, "IMS,tt128mb8") || + of_node_name_eq(dp, "IMS,tt128mb8A")) par->ramdac = TVP; break; case PCI_DEVICE_ID_IMS_TT3D: /* IMS,tt3d */ diff --git a/drivers/video/fbdev/mbx/mbxdebugfs.c b/drivers/video/fbdev/mbx/mbxdebugfs.c index 2bd328883178..09af721638fb 100644 --- a/drivers/video/fbdev/mbx/mbxdebugfs.c +++ b/drivers/video/fbdev/mbx/mbxdebugfs.c @@ -211,36 +211,22 @@ static const struct file_operations misc_fops = { static void mbxfb_debugfs_init(struct fb_info *fbi) { struct mbxfb_info *mfbi = fbi->par; - struct mbxfb_debugfs_data *dbg; - - dbg = kzalloc(sizeof(struct mbxfb_debugfs_data), GFP_KERNEL); - mfbi->debugfs_data = dbg; - - dbg->dir = debugfs_create_dir("mbxfb", NULL); - dbg->sysconf = debugfs_create_file("sysconf", 0444, dbg->dir, - fbi, &sysconf_fops); - dbg->clock = debugfs_create_file("clock", 0444, dbg->dir, - fbi, &clock_fops); - dbg->display = debugfs_create_file("display", 0444, dbg->dir, - fbi, &display_fops); - 
dbg->gsctl = debugfs_create_file("gsctl", 0444, dbg->dir, - fbi, &gsctl_fops); - dbg->sdram = debugfs_create_file("sdram", 0444, dbg->dir, - fbi, &sdram_fops); - dbg->misc = debugfs_create_file("misc", 0444, dbg->dir, - fbi, &misc_fops); + struct dentry *dir; + + dir = debugfs_create_dir("mbxfb", NULL); + mfbi->debugfs_dir = dir; + + debugfs_create_file("sysconf", 0444, dir, fbi, &sysconf_fops); + debugfs_create_file("clock", 0444, dir, fbi, &clock_fops); + debugfs_create_file("display", 0444, dir, fbi, &display_fops); + debugfs_create_file("gsctl", 0444, dir, fbi, &gsctl_fops); + debugfs_create_file("sdram", 0444, dir, fbi, &sdram_fops); + debugfs_create_file("misc", 0444, dir, fbi, &misc_fops); } static void mbxfb_debugfs_remove(struct fb_info *fbi) { struct mbxfb_info *mfbi = fbi->par; - struct mbxfb_debugfs_data *dbg = mfbi->debugfs_data; - - debugfs_remove(dbg->misc); - debugfs_remove(dbg->sdram); - debugfs_remove(dbg->gsctl); - debugfs_remove(dbg->display); - debugfs_remove(dbg->clock); - debugfs_remove(dbg->sysconf); - debugfs_remove(dbg->dir); + + debugfs_remove_recursive(mfbi->debugfs_dir); } diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c index 539b85da0897..6ded480a69b4 100644 --- a/drivers/video/fbdev/mbx/mbxfb.c +++ b/drivers/video/fbdev/mbx/mbxfb.c @@ -74,7 +74,7 @@ struct mbxfb_info { u32 pseudo_palette[MAX_PALETTES]; #ifdef CONFIG_FB_MBX_DEBUG - void *debugfs_data; + struct dentry *debugfs_dir; #endif }; diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index 057d3cdef92e..fbc6eafb63c7 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c @@ -141,6 +141,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, /* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */ out_le32(par->cmap_adr + 0x58, in_le32(par->cmap_adr + 0x58) & ~0x20); + /* fall through */ case cmap_r128: /* Set palette index & data */ out_8(par->cmap_adr + 0xb0, regno); @@ -210,6 +211,7 @@ static int offb_blank(int blank, struct fb_info *info) /* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */ out_le32(par->cmap_adr + 0x58, in_le32(par->cmap_adr + 0x58) & ~0x20); + /* fall through */ case cmap_r128: /* Set palette index & data */ out_8(par->cmap_adr + 0xb0, i); @@ -646,7 +648,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) } #endif /* kludge for valkyrie */ - if (strcmp(dp->name, "valkyrie") == 0) + if (of_node_name_eq(dp, "valkyrie")) address += 0x1000; offb_init_fb(no_real_node ? 
"bootx" : NULL, width, height, depth, pitch, address, diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c index b4bcf3a4a647..b5956a1a30d4 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/core.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c @@ -110,19 +110,12 @@ DEFINE_SHOW_ATTRIBUTE(dss); static struct dentry *dss_debugfs_dir; -static int dss_initialize_debugfs(void) +static void dss_initialize_debugfs(void) { dss_debugfs_dir = debugfs_create_dir("omapdss", NULL); - if (IS_ERR(dss_debugfs_dir)) { - int err = PTR_ERR(dss_debugfs_dir); - dss_debugfs_dir = NULL; - return err; - } debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir, &dss_debug_dump_clocks, &dss_fops); - - return 0; } static void dss_uninitialize_debugfs(void) @@ -130,26 +123,19 @@ static void dss_uninitialize_debugfs(void) debugfs_remove_recursive(dss_debugfs_dir); } -int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) +void dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) { - struct dentry *d; - - d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir, - write, &dss_fops); - - return PTR_ERR_OR_ZERO(d); + debugfs_create_file(name, S_IRUGO, dss_debugfs_dir, write, &dss_fops); } #else /* CONFIG_FB_OMAP2_DSS_DEBUGFS */ -static inline int dss_initialize_debugfs(void) +static inline void dss_initialize_debugfs(void) { - return 0; } static inline void dss_uninitialize_debugfs(void) { } -int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) +void dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) { - return 0; } #endif /* CONFIG_FB_OMAP2_DSS_DEBUGFS */ @@ -182,15 +168,11 @@ static struct notifier_block omap_dss_pm_notif_block = { static int __init omap_dss_probe(struct platform_device *pdev) { - int r; - core.pdev = pdev; dss_features_init(omapdss_get_version()); - r = dss_initialize_debugfs(); - if (r) - goto err_debugfs; + dss_initialize_debugfs(); if (def_disp_name) core.default_display_name = def_disp_name; @@ -198,10 +180,6 @@ static int __init omap_dss_probe(struct platform_device *pdev) register_pm_notifier(&omap_dss_pm_notif_block); return 0; - -err_debugfs: - - return r; } static int omap_dss_remove(struct platform_device *pdev) diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c index f1eb8b0f8a2a..5ce893c1923d 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c @@ -60,7 +60,7 @@ omapdss_of_get_next_port(const struct device_node *parent, return NULL; } prev = port; - } while (of_node_cmp(port->name, "port") != 0); + } while (!of_node_name_eq(port, "port")); of_node_put(ports); } @@ -83,7 +83,7 @@ omapdss_of_get_next_endpoint(const struct device_node *parent, if (!ep) return NULL; prev = ep; - } while (of_node_cmp(ep->name, "endpoint") != 0); + } while (!of_node_name_eq(ep, "endpoint")); return ep; } diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h index a3cc0ca8f9d2..b1a354494144 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h @@ -214,7 +214,7 @@ struct platform_device *dss_get_core_pdev(void); int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask); void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask); int dss_set_min_bus_tput(struct device *dev, unsigned long tput); -int dss_debugfs_create_file(const char *name, void (*write)(struct 
seq_file *)); +void dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)); /* display */ int dss_suspend_all_devices(void); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c index fa72e735dad2..d146793dd044 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c @@ -712,7 +712,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, else acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT; /* - * The I2S input word length is twice the lenght given in the IEC-60958 + * The I2S input word length is twice the length given in the IEC-60958 * status word. If the word size is greater than * 20 bits, increment by one. */ diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index 4061a20cfe24..3b361bc9feb8 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -667,10 +667,10 @@ static int ssd1307fb_probe(struct i2c_client *client, if (par->reset) { /* Reset the screen */ - gpiod_set_value_cansleep(par->reset, 0); - udelay(4); gpiod_set_value_cansleep(par->reset, 1); udelay(4); + gpiod_set_value_cansleep(par->reset, 0); + udelay(4); } if (par->vbat_reg) { diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c index 7bb7e90b8f00..bdf5a0ea876d 100644 --- a/drivers/video/fbdev/via/viafbdev.c +++ b/drivers/video/fbdev/via/viafbdev.c @@ -2110,7 +2110,7 @@ MODULE_PARM_DESC(viafb_lcd_panel_id, module_param(viafb_lcd_dsp_method, int, S_IRUSR); MODULE_PARM_DESC(viafb_lcd_dsp_method, - "Set Flat Panel display scaling method.(Default=Expandsion)"); + "Set Flat Panel display scaling method.(Default=Expansion)"); module_param(viafb_SAMM_ON, int, S_IRUSR); MODULE_PARM_DESC(viafb_SAMM_ON, diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 39b229f9e256..d37dd5bb7a8f 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -604,6 +604,7 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages) while (pgno < nr_pages) { page = balloon_retrieve(true); if (page) { + __ClearPageOffline(page); pages[pgno++] = page; #ifdef CONFIG_XEN_HAVE_PVMMU /* @@ -645,8 +646,10 @@ void free_xenballooned_pages(int nr_pages, struct page **pages) mutex_lock(&balloon_mutex); for (i = 0; i < nr_pages; i++) { - if (pages[i]) + if (pages[i]) { + __SetPageOffline(pages[i]); balloon_append(pages[i]); + } } balloon_stats.target_unpopulated -= nr_pages;