Diffstat (limited to 'drivers')
245 files changed, 2727 insertions, 1335 deletions
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 40e816419f48..9601fa92950a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2010,16 +2010,16 @@ retry:
 	return err_mask;
 }
 
-static bool ata_log_supported(struct ata_device *dev, u8 log)
+static int ata_log_supported(struct ata_device *dev, u8 log)
 {
 	struct ata_port *ap = dev->link->ap;
 
 	if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
-		return false;
+		return 0;
 
 	if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
-		return false;
-	return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
+		return 0;
+	return get_unaligned_le16(&ap->sector_buf[log * 2]);
 }
 
 static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
@@ -2455,15 +2455,20 @@ static void ata_dev_config_cpr(struct ata_device *dev)
 	struct ata_cpr_log *cpr_log = NULL;
 	u8 *desc, *buf = NULL;
 
-	if (ata_id_major_version(dev->id) < 11 ||
-	    !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES))
+	if (ata_id_major_version(dev->id) < 11)
+		goto out;
+
+	buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
+	if (buf_len == 0)
 		goto out;
 
 	/*
 	 * Read the concurrent positioning ranges log (0x47). We can have at
-	 * most 255 32B range descriptors plus a 64B header.
+	 * most 255 32B range descriptors plus a 64B header. This log varies in
+	 * size, so use the size reported in the GPL directory. Reading beyond
+	 * the supported length will result in an error.
 	 */
-	buf_len = (64 + 255 * 32 + 511) & ~511;
+	buf_len <<= 9;
 	buf = kzalloc(buf_len, GFP_KERNEL);
 	if (!buf)
 		goto out;
@@ -5462,7 +5467,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
 				      const struct ata_port_info * const * ppi,
 				      int n_ports)
 {
-	const struct ata_port_info *pi;
+	const struct ata_port_info *pi = &ata_dummy_port_info;
 	struct ata_host *host;
 	int i, j;
 
@@ -5470,7 +5475,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
 	if (!host)
 		return NULL;
 
-	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
+	for (i = 0, j = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
 
 		if (ppi[j])
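The `buf_len <<= 9` above works because ata_log_supported() now returns the log size exactly as the General Purpose Logging directory reports it: a count of 512-byte sectors. A minimal sketch of the arithmetic, with illustrative values that are not part of the driver:

	/* One little-endian u16 per log id in the GPL directory,
	 * giving that log's length in 512-byte sectors. */
	u16 n_sectors = get_unaligned_le16(&ap->sector_buf[log * 2]); /* e.g. 3 */
	size_t n_bytes = (size_t)n_sectors << 9;                      /* 3 * 512 = 1536 */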
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 42cecf95a4e5..86dbb1cdfabd 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2125,7 +2125,7 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
 
 	/* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
 	rbuf[1] = 0xb9;
-	put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[3]);
+	put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);
 
 	for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
 		desc[0] = cpr_log->cpr[i].num;
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index ca129854a88c..c38027887499 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -196,7 +196,7 @@ static struct {
 	{ XFER_PIO_0,		"XFER_PIO_0" },
 	{ XFER_PIO_SLOW,	"XFER_PIO_SLOW" }
 };
-ata_bitfield_name_match(xfer,ata_xfer_names)
+ata_bitfield_name_search(xfer, ata_xfer_names)
 
 /*
  * ATA Port attributes
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 6b5ed3046b44..35608a0cf552 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -856,12 +856,14 @@ static int octeon_cf_probe(struct platform_device *pdev)
 			int i;
 
 			res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
 			if (!res_dma) {
+				put_device(&dma_dev->dev);
 				of_node_put(dma_node);
 				return -EINVAL;
 			}
 			cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
							      resource_size(res_dma));
 			if (!cf_port->dma_base) {
+				put_device(&dma_dev->dev);
 				of_node_put(dma_node);
 				return -EINVAL;
 			}
@@ -871,6 +873,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
 				irq = i;
 				irq_handler = octeon_cf_interrupt;
 			}
+			put_device(&dma_dev->dev);
 		}
 		of_node_put(dma_node);
 	}
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 2ef23fce0860..a97776ea9d99 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -564,6 +564,12 @@ ssize_t __weak cpu_show_srbds(struct device *dev,
 	return sysfs_emit(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
+					struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -573,6 +579,7 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
 static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
 static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
 static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
+static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
@@ -584,6 +591,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_tsx_async_abort.attr,
 	&dev_attr_itlb_multihit.attr,
 	&dev_attr_srbds.attr,
+	&dev_attr_mmio_stale_data.attr,
 	NULL
 };
diff --git a/drivers/base/init.c b/drivers/base/init.c
index d8d0fe687111..397eb9880cec 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -8,6 +8,7 @@
 #include <linux/init.h>
 #include <linux/memory.h>
 #include <linux/of.h>
+#include <linux/backing-dev.h>
 
 #include "base.h"
 
@@ -20,6 +21,7 @@ void __init driver_init(void)
 {
 	/* These are the core pieces */
+	bdi_init(&noop_backing_dev_info);
 	devtmpfs_init();
 	devices_init();
 	buses_init();
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index e81a9700cfd0..6143dbf31f31 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -1239,14 +1239,14 @@ error_cleanup_mc_io:
 static int fsl_mc_bus_remove(struct platform_device *pdev)
 {
 	struct fsl_mc *mc = platform_get_drvdata(pdev);
+	struct fsl_mc_io *mc_io;
 
 	if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
 		return -EINVAL;
 
+	mc_io = mc->root_mc_bus_dev->mc_io;
 	fsl_mc_device_remove(mc->root_mc_bus_dev);
-
-	fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
-	mc->root_mc_bus_dev->mc_io = NULL;
+	fsl_destroy_mc_io(mc_io);
 
 	bus_unregister_notifier(&fsl_mc_bus_type, &fsl_mc_nb);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 69fd31ffb847..0b6c03643ddc 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -429,28 +429,40 @@ config ADI
 	  driver include crash and makedumpfile.
 
 config RANDOM_TRUST_CPU
-	bool "Trust the CPU manufacturer to initialize Linux's CRNG"
+	bool "Initialize RNG using CPU RNG instructions"
+	default y
 	depends on ARCH_RANDOM
-	default n
 	help
-	  Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
-	  RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
-	  for the purposes of initializing Linux's CRNG. Since this is not
-	  something that can be independently audited, this amounts to trusting
-	  that CPU manufacturer (perhaps with the insistence or mandate
-	  of a Nation State's intelligence or law enforcement agencies)
-	  has not installed a hidden back door to compromise the CPU's
-	  random number generation facilities. This can also be configured
-	  at boot with "random.trust_cpu=on/off".
+	  Initialize the RNG using random numbers supplied by the CPU's
+	  RNG instructions (e.g. RDRAND), if supported and available. These
+	  random numbers are never used directly, but are rather hashed into
+	  the main input pool, and this happens regardless of whether or not
+	  this option is enabled. Instead, this option controls whether they
+	  are credited and hence can initialize the RNG. Additionally, other
+	  sources of randomness are always used, regardless of this setting.
+	  Enabling this implies trusting that the CPU can supply high
+	  quality and non-backdoored random numbers.
+
+	  Say Y here unless you have reason to mistrust your CPU or believe
+	  its RNG facilities may be faulty. This may also be configured at
+	  boot time with "random.trust_cpu=on/off".
 
 config RANDOM_TRUST_BOOTLOADER
-	bool "Trust the bootloader to initialize Linux's CRNG"
-	help
-	  Some bootloaders can provide entropy to increase the kernel's initial
-	  device randomness. Say Y here to assume the entropy provided by the
-	  booloader is trustworthy so it will be added to the kernel's entropy
-	  pool. Otherwise, say N here so it will be regarded as device input that
-	  only mixes the entropy pool. This can also be configured at boot with
-	  "random.trust_bootloader=on/off".
+	bool "Initialize RNG using bootloader-supplied seed"
+	default y
+	help
+	  Initialize the RNG using a seed supplied by the bootloader or boot
+	  environment (e.g. EFI or a bootloader-generated device tree). This
+	  seed is not used directly, but is rather hashed into the main input
+	  pool, and this happens regardless of whether or not this option is
+	  enabled. Instead, this option controls whether the seed is credited
+	  and hence can initialize the RNG. Additionally, other sources of
+	  randomness are always used, regardless of this setting. Enabling
+	  this implies trusting that the bootloader can supply high quality and
+	  non-backdoored seeds.
+
+	  Say Y here unless you have reason to mistrust your bootloader or
+	  believe its RNG facilities may be faulty. This may also be configured
+	  at boot time with "random.trust_bootloader=on/off".
 
 endmenu
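Both new help texts describe the same mix-always / credit-if-trusted split. A simplified sketch of that behavior, mirroring add_bootloader_randomness() later in this diff (the helper name and trust flag here stand in for the Kconfig option or boot parameter, they are not actual kernel symbols):

	/* Seed material is always mixed into the input pool; the trust
	 * knob only decides whether it counts toward initialization. */
	static void seed_rng(const void *buf, size_t len, bool trusted)
	{
		mix_pool_bytes(buf, len);           /* unconditional */
		if (trusted)
			credit_init_bits(len * 8);  /* gates init credit only */
	}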
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index e856df7e285c..a6f3a8a2aca6 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -159,6 +159,8 @@ static int probe_common(struct virtio_device *vdev)
 		goto err_find;
 	}
 
+	virtio_device_ready(vdev);
+
 	/* we always have a pending entropy request */
 	request_entropy(vi);
 
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 0e22e3b0a04e..38aad99ebb61 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -1019,7 +1019,7 @@ static struct parport_driver lp_driver = {
 
 static int __init lp_init(void)
 {
-	int i, err = 0;
+	int i, err;
 
 	if (parport_nr[0] == LP_PARPORT_OFF)
 		return 0;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b691b9d59503..655e327d425e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -650,7 +650,8 @@ static void __cold _credit_init_bits(size_t bits)
 	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
 		crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
-		execute_in_process_context(crng_set_ready, &set_ready);
+		if (static_key_initialized)
+			execute_in_process_context(crng_set_ready, &set_ready);
 		wake_up_interruptible(&crng_init_wait);
 		kill_fasync(&fasync, SIGIO, POLL_IN);
 		pr_notice("crng init done\n");
@@ -724,9 +725,8 @@
 *
 **********************************************************************/
 
-static bool used_arch_random;
-static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
-static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
+static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
 static int __init parse_trust_cpu(char *arg)
 {
 	return kstrtobool(arg, &trust_cpu);
@@ -776,7 +776,7 @@ static struct notifier_block pm_notifier = { .notifier_call = random_pm_notifica
 int __init random_init(const char *command_line)
 {
 	ktime_t now = ktime_get_real();
-	unsigned int i, arch_bytes;
+	unsigned int i, arch_bits;
 	unsigned long entropy;
 
 #if defined(LATENT_ENTROPY_PLUGIN)
@@ -784,12 +784,12 @@ int __init random_init(const char *command_line)
 	_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
 #endif
 
-	for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE;
+	for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8;
 	     i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
 		if (!arch_get_random_seed_long_early(&entropy) &&
 		    !arch_get_random_long_early(&entropy)) {
 			entropy = random_get_entropy();
-			arch_bytes -= sizeof(entropy);
+			arch_bits -= sizeof(entropy) * 8;
 		}
 		_mix_pool_bytes(&entropy, sizeof(entropy));
 	}
@@ -798,11 +798,18 @@ int __init random_init(const char *command_line)
 	_mix_pool_bytes(command_line, strlen(command_line));
 	add_latent_entropy();
 
+	/*
+	 * If we were initialized by the bootloader before jump labels are
+	 * initialized, then we should enable the static branch here, where
+	 * it's guaranteed that jump labels have been initialized.
+	 */
+	if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
+		crng_set_ready(NULL);
+
 	if (crng_ready())
 		crng_reseed();
 	else if (trust_cpu)
-		credit_init_bits(arch_bytes * 8);
-	used_arch_random = arch_bytes * 8 >= POOL_READY_BITS;
+		_credit_init_bits(arch_bits);
 
 	WARN_ON(register_pm_notifier(&pm_notifier));
 
@@ -812,17 +819,6 @@ int __init random_init(const char *command_line)
 }
 
 /*
- * Returns whether arch randomness has been mixed into the initial
- * state of the RNG, regardless of whether or not that randomness
- * was credited. Knowing this is only good for a very limited set
- * of uses, such as early init printk pointer obfuscation.
- */
-bool rng_has_arch_random(void)
-{
-	return used_arch_random;
-}
-
-/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
@@ -865,13 +861,12 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
 * Handle random seed passed by bootloader, and credit it if
 * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 */
-void __cold add_bootloader_randomness(const void *buf, size_t len)
+void __init add_bootloader_randomness(const void *buf, size_t len)
 {
 	mix_pool_bytes(buf, len);
 	if (trust_bootloader)
 		credit_init_bits(len * 8);
 }
-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
 
 #if IS_ENABLED(CONFIG_VMGENID)
 static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
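The arch_bytes-to-arch_bits rename above can be checked with concrete numbers: on a 64-bit machine BLAKE2S_BLOCK_SIZE is 64 bytes, so the loop runs eight times in sizeof(long) steps. A sketch of the bookkeeping, with illustrative values:

	/* Start by assuming every word will come from the CPU RNG. */
	unsigned int arch_bits = 64 * 8;          /* BLAKE2S_BLOCK_SIZE * 8 = 512 */
	/* Each word that falls back to random_get_entropy() forfeits credit: */
	arch_bits -= sizeof(unsigned long) * 8;   /* 512 - 64 = 448 after one miss */
	/* Two misses would leave 384 creditable bits, and so on. */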
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index ff188ab68496..bb47610bbd1c 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -565,4 +565,3 @@ void __init hv_init_clocksource(void)
 	hv_sched_clock_offset = hv_read_reference_counter();
 	hv_setup_sched_clock(read_hv_sched_clock_msr);
 }
-EXPORT_SYMBOL_GPL(hv_init_clocksource);
diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
index 46023adc5395..4536ed43f65b 100644
--- a/drivers/comedi/drivers/vmk80xx.c
+++ b/drivers/comedi/drivers/vmk80xx.c
@@ -684,7 +684,7 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
 	if (!devpriv->usb_rx_buf)
 		return -ENOMEM;
 
-	size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE);
+	size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE);
 	devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
 	if (!devpriv->usb_tx_buf)
 		return -ENOMEM;
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index e7330684d3b8..9631f2fd2faf 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -32,8 +32,11 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct udmabuf *ubuf = vma->vm_private_data;
+	pgoff_t pgoff = vmf->pgoff;
 
-	vmf->page = ubuf->pages[vmf->pgoff];
+	if (pgoff >= ubuf->pagecount)
+		return VM_FAULT_SIGBUS;
+	vmf->page = ubuf->pages[pgoff];
 	get_page(vmf->page);
 	return 0;
 }
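The udmabuf change closes a straightforward out-of-bounds read; a hedged sketch of the failure mode it guards against:

	/* Before the fix, vmf->pgoff came straight from the faulting address.
	 * A mapping larger than the buffer could fault with
	 * pgoff >= ubuf->pagecount, reading past the end of ubuf->pages[].
	 * Such faults now get VM_FAULT_SIGBUS instead. */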
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index b55c74a5e064..1ee62cd58582 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/seq_file.h>
+#include <linux/types.h>
 
 #define CRYSTALCOVE_GPIO_NUM	16
 #define CRYSTALCOVE_VGPIO_NUM	95
@@ -110,8 +111,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
 	return reg + gpio % 8;
 }
 
-static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg,
-					int gpio)
+static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg, int gpio)
 {
 	u8 mirqs0 = gpio < 8 ? MGPIO0IRQS0 : MGPIO1IRQS0;
 	int mask = BIT(gpio % 8);
@@ -140,8 +140,7 @@ static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned int gpio)
 	return regmap_write(cg->regmap, reg, CTLO_INPUT_SET);
 }
 
-static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio,
-				    int value)
+static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio, int value)
 {
 	struct crystalcove_gpio *cg = gpiochip_get_data(chip);
 	int reg = to_reg(gpio, CTRL_OUT);
@@ -168,8 +167,7 @@ static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
 	return val & 0x1;
 }
 
-static void crystalcove_gpio_set(struct gpio_chip *chip,
-				 unsigned int gpio, int value)
+static void crystalcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
 {
 	struct crystalcove_gpio *cg = gpiochip_get_data(chip);
 	int reg = to_reg(gpio, CTRL_OUT);
@@ -185,10 +183,10 @@ static void crystalcove_gpio_set(struct gpio_chip *chip,
 
 static int crystalcove_irq_type(struct irq_data *data, unsigned int type)
 {
-	struct crystalcove_gpio *cg =
-		gpiochip_get_data(irq_data_get_irq_chip_data(data));
+	struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data));
+	irq_hw_number_t hwirq = irqd_to_hwirq(data);
 
-	if (data->hwirq >= CRYSTALCOVE_GPIO_NUM)
+	if (hwirq >= CRYSTALCOVE_GPIO_NUM)
 		return 0;
 
 	switch (type) {
@@ -215,22 +213,20 @@ static int crystalcove_irq_type(struct irq_data *data, unsigned int type)
 
 static void crystalcove_bus_lock(struct irq_data *data)
 {
-	struct crystalcove_gpio *cg =
-		gpiochip_get_data(irq_data_get_irq_chip_data(data));
+	struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data));
 
 	mutex_lock(&cg->buslock);
 }
 
 static void crystalcove_bus_sync_unlock(struct irq_data *data)
 {
-	struct crystalcove_gpio *cg =
-		gpiochip_get_data(irq_data_get_irq_chip_data(data));
-	int gpio = data->hwirq;
+	struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data));
+	irq_hw_number_t hwirq = irqd_to_hwirq(data);
 
 	if (cg->update & UPDATE_IRQ_TYPE)
-		crystalcove_update_irq_ctrl(cg, gpio);
+		crystalcove_update_irq_ctrl(cg, hwirq);
 	if (cg->update & UPDATE_IRQ_MASK)
-		crystalcove_update_irq_mask(cg, gpio);
+		crystalcove_update_irq_mask(cg, hwirq);
 	cg->update = 0;
 
 	mutex_unlock(&cg->buslock);
@@ -238,34 +234,43 @@ static void crystalcove_bus_sync_unlock(struct irq_data *data)
 
 static void crystalcove_irq_unmask(struct irq_data *data)
 {
-	struct crystalcove_gpio *cg =
-		gpiochip_get_data(irq_data_get_irq_chip_data(data));
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+	struct crystalcove_gpio *cg = gpiochip_get_data(gc);
+	irq_hw_number_t hwirq = irqd_to_hwirq(data);
 
-	if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
-		cg->set_irq_mask = false;
-		cg->update |= UPDATE_IRQ_MASK;
-	}
+	if (hwirq >= CRYSTALCOVE_GPIO_NUM)
+		return;
+
+	gpiochip_enable_irq(gc, hwirq);
+
+	cg->set_irq_mask = false;
+	cg->update |= UPDATE_IRQ_MASK;
 }
 
 static void crystalcove_irq_mask(struct irq_data *data)
 {
-	struct crystalcove_gpio *cg =
-		gpiochip_get_data(irq_data_get_irq_chip_data(data));
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+	struct crystalcove_gpio *cg = gpiochip_get_data(gc);
+	irq_hw_number_t hwirq = irqd_to_hwirq(data);
 
-	if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
-		cg->set_irq_mask = true;
-		cg->update |= UPDATE_IRQ_MASK;
-	}
+	if (hwirq >= CRYSTALCOVE_GPIO_NUM)
+		return;
+
+	cg->set_irq_mask = true;
+	cg->update |= UPDATE_IRQ_MASK;
+
+	gpiochip_disable_irq(gc, hwirq);
 }
 
-static struct irq_chip crystalcove_irqchip = {
+static const struct irq_chip crystalcove_irqchip = {
 	.name = "Crystal Cove",
 	.irq_mask = crystalcove_irq_mask,
 	.irq_unmask = crystalcove_irq_unmask,
 	.irq_set_type = crystalcove_irq_type,
 	.irq_bus_lock = crystalcove_bus_lock,
 	.irq_bus_sync_unlock = crystalcove_bus_sync_unlock,
-	.flags = IRQCHIP_SKIP_SET_WAKE,
+	.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+	GPIOCHIP_IRQ_RESOURCE_HELPERS,
 };
 
 static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
@@ -293,8 +298,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static void crystalcove_gpio_dbg_show(struct seq_file *s,
-				      struct gpio_chip *chip)
+static void crystalcove_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 {
 	struct crystalcove_gpio *cg = gpiochip_get_data(chip);
 	int gpio, offset;
@@ -353,7 +357,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
 	cg->regmap = pmic->regmap;
 
 	girq = &cg->chip.irq;
-	girq->chip = &crystalcove_irqchip;
+	gpio_irq_chip_set_chip(girq, &crystalcove_irqchip);
 	/* This will let us handle the parent IRQ in the driver */
 	girq->parent_handler = NULL;
 	girq->num_parents = 0;
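This is the first of several identical conversions in this series (gpio-dln2, gpio-merrifield, gpio-sch and gpio-wcove below follow the same shape): the irq_chip becomes const and IRQCHIP_IMMUTABLE, and mask/unmask call the gpiochip resource helpers themselves. A condensed sketch of the pattern, with illustrative names:

	static void example_irq_mask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		/* ... program the hardware mask bit ... */
		gpiochip_disable_irq(gc, irqd_to_hwirq(d));  /* release the line */
	}

	static void example_irq_unmask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		gpiochip_enable_irq(gc, irqd_to_hwirq(d));   /* claim the line first */
		/* ... clear the hardware mask bit ... */
	}

	static const struct irq_chip example_irqchip = {
		.name = "example",
		.irq_mask = example_irq_mask,
		.irq_unmask = example_irq_unmask,
		.flags = IRQCHIP_IMMUTABLE,
		GPIOCHIP_IRQ_RESOURCE_HELPERS,	/* request/release resource callbacks */
	};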
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 08b9e2cf4f2d..71fa437b491f 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -46,7 +46,6 @@
 struct dln2_gpio {
 	struct platform_device *pdev;
 	struct gpio_chip gpio;
-	struct irq_chip irqchip;
 
 	/*
 	 * Cache pin direction to save us one transfer, since the hardware has
@@ -306,6 +305,7 @@ static void dln2_irq_unmask(struct irq_data *irqd)
 	struct dln2_gpio *dln2 = gpiochip_get_data(gc);
 	int pin = irqd_to_hwirq(irqd);
 
+	gpiochip_enable_irq(gc, pin);
 	set_bit(pin, dln2->unmasked_irqs);
 }
 
@@ -316,6 +316,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
 	int pin = irqd_to_hwirq(irqd);
 
 	clear_bit(pin, dln2->unmasked_irqs);
+	gpiochip_disable_irq(gc, pin);
 }
 
 static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
@@ -384,6 +385,17 @@ static void dln2_irq_bus_unlock(struct irq_data *irqd)
 	mutex_unlock(&dln2->irq_lock);
 }
 
+static const struct irq_chip dln2_irqchip = {
+	.name = "dln2-irq",
+	.irq_mask = dln2_irq_mask,
+	.irq_unmask = dln2_irq_unmask,
+	.irq_set_type = dln2_irq_set_type,
+	.irq_bus_lock = dln2_irq_bus_lock,
+	.irq_bus_sync_unlock = dln2_irq_bus_unlock,
+	.flags = IRQCHIP_IMMUTABLE,
+	GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
 static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
 			    const void *data, int len)
 {
@@ -465,15 +477,8 @@ static int dln2_gpio_probe(struct platform_device *pdev)
 	dln2->gpio.direction_output = dln2_gpio_direction_output;
 	dln2->gpio.set_config = dln2_gpio_set_config;
 
-	dln2->irqchip.name = "dln2-irq",
-	dln2->irqchip.irq_mask = dln2_irq_mask,
-	dln2->irqchip.irq_unmask = dln2_irq_unmask,
-	dln2->irqchip.irq_set_type = dln2_irq_set_type,
-	dln2->irqchip.irq_bus_lock = dln2_irq_bus_lock,
-	dln2->irqchip.irq_bus_sync_unlock = dln2_irq_bus_unlock,
-
 	girq = &dln2->gpio.irq;
-	girq->chip = &dln2->irqchip;
+	gpio_irq_chip_set_chip(girq, &dln2_irqchip);
 	/* The event comes from the outside so no parent handler */
 	girq->parent_handler = NULL;
 	girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 04afe728e187..c22fcaa44a61 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -662,10 +662,9 @@ static int dwapb_get_clks(struct dwapb_gpio *gpio)
 	gpio->clks[1].id = "db";
 	err = devm_clk_bulk_get_optional(gpio->dev, DWAPB_NR_CLOCKS,
 					 gpio->clks);
-	if (err) {
-		dev_err(gpio->dev, "Cannot get APB/Debounce clocks\n");
-		return err;
-	}
+	if (err)
+		return dev_err_probe(gpio->dev, err,
+				     "Cannot get APB/Debounce clocks\n");
 
 	err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
 	if (err) {
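The dwapb change is the standard deferred-probe logging idiom: dev_err_probe() returns the error it is given, stays quiet for -EPROBE_DEFER (recording the reason for debugfs instead), and logs real failures, which collapses the old four-line pattern. A sketch of equivalent call sites (the `dev` variable is illustrative):

	/* Before: always logs, even on a harmless -EPROBE_DEFER */
	if (err) {
		dev_err(dev, "Cannot get APB/Debounce clocks\n");
		return err;
	}

	/* After: one statement; -EPROBE_DEFER is not printed as an error */
	if (err)
		return dev_err_probe(dev, err, "Cannot get APB/Debounce clocks\n");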
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index f3d1baeacbe9..72ac09a59702 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -220,10 +220,8 @@ static void mrfld_irq_ack(struct irq_data *d)
 	raw_spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask)
+static void mrfld_irq_unmask_mask(struct mrfld_gpio *priv, u32 gpio, bool unmask)
 {
-	struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
-	u32 gpio = irqd_to_hwirq(d);
 	void __iomem *gimr = gpio_reg(&priv->chip, gpio, GIMR);
 	unsigned long flags;
 	u32 value;
@@ -241,12 +239,20 @@ static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask)
 
 static void mrfld_irq_mask(struct irq_data *d)
 {
-	mrfld_irq_unmask_mask(d, false);
+	struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
+	u32 gpio = irqd_to_hwirq(d);
+
+	mrfld_irq_unmask_mask(priv, gpio, false);
+	gpiochip_disable_irq(&priv->chip, gpio);
 }
 
 static void mrfld_irq_unmask(struct irq_data *d)
 {
-	mrfld_irq_unmask_mask(d, true);
+	struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
+	u32 gpio = irqd_to_hwirq(d);
+
+	gpiochip_enable_irq(&priv->chip, gpio);
+	mrfld_irq_unmask_mask(priv, gpio, true);
 }
 
 static int mrfld_irq_set_type(struct irq_data *d, unsigned int type)
@@ -329,13 +335,15 @@ static int mrfld_irq_set_wake(struct irq_data *d, unsigned int on)
 	return 0;
 }
 
-static struct irq_chip mrfld_irqchip = {
+static const struct irq_chip mrfld_irqchip = {
 	.name = "gpio-merrifield",
 	.irq_ack = mrfld_irq_ack,
 	.irq_mask = mrfld_irq_mask,
 	.irq_unmask = mrfld_irq_unmask,
 	.irq_set_type = mrfld_irq_set_type,
 	.irq_set_wake = mrfld_irq_set_wake,
+	.flags = IRQCHIP_IMMUTABLE,
+	GPIOCHIP_IRQ_RESOURCE_HELPERS,
 };
 
 static void mrfld_irq_handler(struct irq_desc *desc)
@@ -482,7 +490,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return retval;
 
 	girq = &priv->chip.irq;
-	girq->chip = &mrfld_irqchip;
+	gpio_irq_chip_set_chip(girq, &mrfld_irqchip);
 	girq->init_hw = mrfld_irq_init_hw;
 	girq->parent_handler = mrfld_irq_handler;
 	girq->num_parents = 1;
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index acda4c5052d3..8a83f7bf4382 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -38,7 +38,6 @@
 
 struct sch_gpio {
 	struct gpio_chip chip;
-	struct irq_chip irqchip;
 	spinlock_t lock;
 	unsigned short iobase;
 	unsigned short resume_base;
@@ -218,11 +217,9 @@ static void sch_irq_ack(struct irq_data *d)
 	spin_unlock_irqrestore(&sch->lock, flags);
 }
 
-static void sch_irq_mask_unmask(struct irq_data *d, int val)
+static void sch_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t gpio_num, int val)
 {
-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
 	struct sch_gpio *sch = gpiochip_get_data(gc);
-	irq_hw_number_t gpio_num = irqd_to_hwirq(d);
 	unsigned long flags;
 
 	spin_lock_irqsave(&sch->lock, flags);
@@ -232,14 +229,32 @@ static void sch_irq_mask_unmask(struct irq_data *d, int val)
 
 static void sch_irq_mask(struct irq_data *d)
 {
-	sch_irq_mask_unmask(d, 0);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+
+	sch_irq_mask_unmask(gc, gpio_num, 0);
+	gpiochip_disable_irq(gc, gpio_num);
 }
 
 static void sch_irq_unmask(struct irq_data *d)
 {
-	sch_irq_mask_unmask(d, 1);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+
+	gpiochip_enable_irq(gc, gpio_num);
+	sch_irq_mask_unmask(gc, gpio_num, 1);
 }
 
+static const struct irq_chip sch_irqchip = {
+	.name = "sch_gpio",
+	.irq_ack = sch_irq_ack,
+	.irq_mask = sch_irq_mask,
+	.irq_unmask = sch_irq_unmask,
+	.irq_set_type = sch_irq_type,
+	.flags = IRQCHIP_IMMUTABLE,
+	GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
 static u32 sch_gpio_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
 {
 	struct sch_gpio *sch = context;
@@ -367,14 +382,8 @@ static int sch_gpio_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, sch);
 
-	sch->irqchip.name = "sch_gpio";
-	sch->irqchip.irq_ack = sch_irq_ack;
-	sch->irqchip.irq_mask = sch_irq_mask;
-	sch->irqchip.irq_unmask = sch_irq_unmask;
-	sch->irqchip.irq_set_type = sch_irq_type;
-
 	girq = &sch->chip.irq;
-	girq->chip = &sch->irqchip;
+	gpio_irq_chip_set_chip(girq, &sch_irqchip);
 	girq->num_parents = 0;
 	girq->parents = NULL;
 	girq->parent_handler = NULL;
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 16a0fae1e32e..c18b6b47384f 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -299,6 +299,8 @@ static void wcove_irq_unmask(struct irq_data *data)
 	if (gpio >= WCOVE_GPIO_NUM)
 		return;
 
+	gpiochip_enable_irq(chip, gpio);
+
 	wg->set_irq_mask = false;
 	wg->update |= UPDATE_IRQ_MASK;
 }
@@ -314,15 +316,19 @@ static void wcove_irq_mask(struct irq_data *data)
 
 	wg->set_irq_mask = true;
 	wg->update |= UPDATE_IRQ_MASK;
+
+	gpiochip_disable_irq(chip, gpio);
 }
 
-static struct irq_chip wcove_irqchip = {
+static const struct irq_chip wcove_irqchip = {
 	.name = "Whiskey Cove",
 	.irq_mask = wcove_irq_mask,
 	.irq_unmask = wcove_irq_unmask,
 	.irq_set_type = wcove_irq_type,
 	.irq_bus_lock = wcove_bus_lock,
 	.irq_bus_sync_unlock = wcove_bus_sync_unlock,
+	.flags = IRQCHIP_IMMUTABLE,
+	GPIOCHIP_IRQ_RESOURCE_HELPERS,
 };
 
 static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
@@ -452,7 +458,7 @@ static int wcove_gpio_probe(struct platform_device *pdev)
 	}
 
 	girq = &wg->chip.irq;
-	girq->chip = &wcove_irqchip;
+	gpio_irq_chip_set_chip(girq, &wcove_irqchip);
 	/* This will let us handle the parent IRQ in the driver */
 	girq->parent_handler = NULL;
 	girq->num_parents = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 67abf8dcd30a..6b6d46e29e6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1918,9 +1918,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
 		return -EINVAL;
 	}
 
-	/* delete kgd_mem from kfd_bo_list to avoid re-validating
-	 * this BO in BO's restoring after eviction.
-	 */
 	mutex_lock(&mem->process_info->lock);
 
 	ret = amdgpu_bo_reserve(bo, true);
@@ -1943,7 +1940,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
 	amdgpu_amdkfd_remove_eviction_fence(
 		bo, mem->process_info->eviction_fence);
 
-	list_del_init(&mem->validate_list.head);
 
 	if (size)
 		*size = amdgpu_bo_size(bo);
@@ -2512,12 +2508,15 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	process_info->eviction_fence = new_fence;
 	*ef = dma_fence_get(&new_fence->base);
 
-	/* Attach new eviction fence to all BOs */
+	/* Attach new eviction fence to all BOs except pinned ones */
 	list_for_each_entry(mem, &process_info->kfd_bo_list,
-		validate_list.head)
+		validate_list.head) {
+		if (mem->bo->tbo.pin_count)
+			continue;
+
 		amdgpu_bo_fence(mem->bo,
 			&process_info->eviction_fence->base, true);
-
+	}
 	/* Attach eviction fence to PD / PT BOs */
 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
 			    vm_list_node) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index ede2fa56f6c9..16699158e00d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -594,17 +594,20 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	int r;
-	r = amdgpu_ras_block_late_init(adev, ras_block);
-	if (r)
-		return r;
 
 	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
 		if (!amdgpu_persistent_edc_harvesting_supported(adev))
 			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
 
+		r = amdgpu_ras_block_late_init(adev, ras_block);
+		if (r)
+			return r;
+
 		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
 		if (r)
 			goto late_fini;
+	} else {
+		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 798c56214a23..aebc384531ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -518,6 +518,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 	case IP_VERSION(9, 1, 0):
 	/* RENOIR looks like RAVEN */
 	case IP_VERSION(9, 3, 0):
+	/* GC 10.3.7 */
+	case IP_VERSION(10, 3, 7):
 		if (amdgpu_tmz == 0) {
 			adev->gmc.tmz_enabled = false;
 			dev_info(adev->dev,
@@ -540,8 +542,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 	case IP_VERSION(10, 3, 1):
 	/* YELLOW_CARP*/
 	case IP_VERSION(10, 3, 3):
-	/* GC 10.3.7 */
-	case IP_VERSION(10, 3, 7):
 		/* Don't enable it by default yet.
 		 */
 		if (amdgpu_tmz < 1) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 801f6fa692e9..6de63ea6687e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -642,7 +642,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			    atomic64_read(&adev->visible_pin_size),
 			    vram_gtt.vram_size);
 		vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
-		vram_gtt.gtt_size *= PAGE_SIZE;
 		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
 		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
@@ -675,7 +674,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
 
 		mem.gtt.total_heap_size = gtt_man->size;
-		mem.gtt.total_heap_size *= PAGE_SIZE;
 		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
 			atomic64_read(&adev->gart_pin_size);
 		mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 2de9309a4193..dac202ae864d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -197,6 +197,13 @@ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
 	if (amdgpu_ras_query_error_status(obj->adev, &info))
 		return -EINVAL;
 
+	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
+	if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+	    obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
+			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
+	}
+
 	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
 			"ue", info.ue_count,
 			"ce", info.ce_count);
@@ -550,9 +557,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
 	if (amdgpu_ras_query_error_status(obj->adev, &info))
 		return -EINVAL;
 
-	if (obj->adev->asic_type == CHIP_ALDEBARAN) {
+	if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+	    obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
-			DRM_WARN("Failed to reset error counter and error status");
+			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
 	}
 
 	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
@@ -1027,9 +1035,6 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
 		}
 	}
 
-	if (!amdgpu_persistent_edc_harvesting_supported(adev))
-		amdgpu_ras_reset_error_status(adev, info->head.block);
-
 	return 0;
 }
 
@@ -1149,6 +1154,12 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 		if (res)
 			return res;
 
+		if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+			if (amdgpu_ras_reset_error_status(adev, info.head.block))
+				dev_warn(adev->dev, "Failed to reset error counter and error status");
+		}
+
 		ce += info.ce_count;
 		ue += info.ue_count;
 	}
@@ -1792,6 +1803,12 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
 			continue;
 
 		amdgpu_ras_query_error_status(adev, &info);
+
+		if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+			if (amdgpu_ras_reset_error_status(adev, info.head.block))
+				dev_warn(adev->dev, "Failed to reset error counter and error status");
+		}
 	}
 }
 
@@ -2278,8 +2295,9 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
 	    !amdgpu_ras_asic_supported(adev))
 		return;
 
-	if (!(amdgpu_sriov_vf(adev) &&
-		(adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2))))
+	/* If driver run on sriov guest side, only enable ras for aldebaran */
+	if (amdgpu_sriov_vf(adev) &&
+	    adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2))
 		return;
 
 	if (!adev->gmc.xgmi.connected_to_cpu) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 2ceeaa4c793a..dc76d2b3ce52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -679,6 +679,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 {
 	struct amdgpu_vm_update_params params;
 	struct amdgpu_vm_bo_base *entry;
+	bool flush_tlb_needed = false;
 	int r, idx;
 
 	if (list_empty(&vm->relocated))
@@ -697,6 +698,9 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 		goto error;
 
 	list_for_each_entry(entry, &vm->relocated, vm_status) {
+		/* vm_flush_needed after updating moved PDEs */
+		flush_tlb_needed |= entry->moved;
+
 		r = amdgpu_vm_pde_update(&params, entry);
 		if (r)
 			goto error;
@@ -706,8 +710,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 	if (r)
 		goto error;
 
-	/* vm_flush_needed after updating PDEs */
-	atomic64_inc(&vm->tlb_seq);
+	if (flush_tlb_needed)
+		atomic64_inc(&vm->tlb_seq);
 
 	while (!list_empty(&vm->relocated)) {
 		entry = list_first_entry(&vm->relocated,
@@ -789,6 +793,11 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
 		     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0);
 
+	/*
+	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
+	 */
+	flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0);
+
 	memset(&params, 0, sizeof(params));
 	params.adev = adev;
 	params.vm = vm;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 8c0a3fc7aaa6..a4a6751b1e44 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1096,6 +1096,7 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
+	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
 }
 
 static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
@@ -1316,7 +1317,7 @@ static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev)
 		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
 
 	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
-		*(uint64_t *)fw_autoload_mask |= 1 << id;
+		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
 }
 
 static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
@@ -1983,7 +1984,7 @@ static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
 	return 0;
 }
 
-void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
 {
 	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);
 
@@ -6028,6 +6029,7 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
 		break;
 	default:
 		BUG();
+		break;
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index a0c0b7d9f444..7f4b480ae66e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -638,6 +638,12 @@ static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
 	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
+#ifdef CONFIG_X86_64
+	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
+		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
+		adev->gmc.aper_size = adev->gmc.real_vram_size;
+	}
+#endif
 	/* In case the PCI BAR is larger than the actual amount of vram */
 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
 	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
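The amdgpu_vm change above only bumps tlb_seq when a moved PDE was actually rewritten; consumers of the sequence compare a cached value to decide whether a flush is due. A hedged sketch of that consumer pattern (illustrative only, not the exact amdgpu code):

	/* Flush only when the VM's TLB sequence has advanced. */
	static bool need_tlb_flush(struct amdgpu_vm *vm, uint64_t *cached_seq)
	{
		uint64_t seq = atomic64_read(&vm->tlb_seq);

		if (seq == *cached_seq)
			return false;	/* page tables unchanged since last flush */
		*cached_seq = seq;
		return true;
	}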
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 5d2dfeff8fe5..d63d3f2b8a16 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -299,7 +299,7 @@ static const struct imu_rlc_ram_golden imu_rlc_ram_golden_11_0_2[] =
 	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPG_PSP_DEBUG, CPG_PSP_DEBUG__GPA_OVERRIDE_MASK, 0)
 };
 
-void program_imu_rlc_ram(struct amdgpu_device *adev,
+static void program_imu_rlc_ram(struct amdgpu_device *adev,
 				const struct imu_rlc_ram_golden *regs,
 				const u32 array_size)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index d2722adabd1b..f3c1af5130ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -535,6 +535,10 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, (vmid << JPEG_IH_CTRL__IH_VMID__SHIFT));
+
 	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
 		0, 0, PACKETJ_TYPE0));
 	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
@@ -768,7 +772,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
 		8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */
 		18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */
 		8 + 16,
-	.emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */
+	.emit_ib_size = 24, /* jpeg_v2_0_dec_ring_emit_ib */
 	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
 	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
 	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
index 1a03baa59755..654e43e83e2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -41,6 +41,7 @@
 #define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET		0x4084
 #define mmUVD_JRBC_STATUS_INTERNAL_OFFSET		0x4089
 #define mmUVD_JPEG_PITCH_INTERNAL_OFFSET		0x401f
+#define mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET		0x4149
 
 #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR		0x18000
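The emit_ib_size bump from 22 to 24 follows directly from the new writes above: each amdgpu_ring_write() emits one dword, and the IH_CTRL programming adds a PACKETJ header dword plus a VMID payload dword. A hedged check of the arithmetic:

	/* One dword for PACKETJ(mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET, ...),
	 * one dword for vmid << JPEG_IH_CTRL__IH_VMID__SHIFT. */
	_Static_assert(22 + 2 == 24, "emit_ib_size covers the new IH_CTRL write");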
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index fcf51947bb18..7eee004cf3ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -541,7 +541,7 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
 
 /* This function is for backdoor MES firmware */
 static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
-				    enum admgpu_mes_pipe pipe)
+				    enum admgpu_mes_pipe pipe, bool prime_icache)
 {
 	int r;
 	uint32_t data;
@@ -593,16 +593,18 @@ static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
 	/* Set 0x3FFFF (256K-1) to CP_MES_MDBOUND_LO */
 	WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x3FFFF);
 
-	/* invalidate ICACHE */
-	data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
-	data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
-	data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
-	WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
-
-	/* prime the ICACHE. */
-	data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
-	data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
-	WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
+	if (prime_icache) {
+		/* invalidate ICACHE */
+		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
+		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
+		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
+		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
+
+		/* prime the ICACHE. */
+		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
+		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
+		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
+	}
 
 	soc21_grbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
@@ -1044,17 +1046,19 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
 	int r = 0;
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
-		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE);
+
+		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false);
 		if (r) {
-			DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
+			DRM_ERROR("failed to load MES fw, r=%d\n", r);
 			return r;
 		}
 
-		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE);
+		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true);
 		if (r) {
-			DRM_ERROR("failed to load MES fw, r=%d\n", r);
+			DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
 			return r;
 		}
+
 	}
 
 	mes_v11_0_enable(adev, true);
@@ -1086,7 +1090,7 @@ static int mes_v11_0_hw_init(void *handle)
 	if (!adev->enable_mes_kiq) {
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
 			r = mes_v11_0_load_microcode(adev,
-					     AMDGPU_MES_SCHED_PIPE);
+					     AMDGPU_MES_SCHED_PIPE, true);
 			if (r) {
 				DRM_ERROR("failed to MES fw, r=%d\n", r);
 				return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index d016e3c3e221..b3fba8dea63c 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -170,6 +170,7 @@ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };
 
 static const struct amdgpu_video_codecs yc_video_codecs_decode = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 06b2635b142a..83c6ccaaa9e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -469,6 +469,7 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
 	}
 }
 
+
 /**
 * sdma_v5_2_gfx_stop - stop the gfx async dma engines
 *
@@ -514,21 +515,17 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
 }
 
 /**
- * sdma_v5_2_ctx_switch_enable_for_instance - start the async dma engines
- * context switch for an instance
+ * sdma_v5_2_ctx_switch_enable - stop the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
- * @instance_idx: the index of the SDMA instance
+ * @enable: enable/disable the DMA MEs context switch.
 *
- * Unhalt the async dma engines context switch.
+ * Halt or unhalt the async dma engines context switch.
 */
-static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, int instance_idx)
+static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 {
 	u32 f32_cntl, phase_quantum = 0;
-
-	if (WARN_ON(instance_idx >= adev->sdma.num_instances)) {
-		return;
-	}
+	int i;
 
 	if (amdgpu_sdma_phase_quantum) {
 		unsigned value = amdgpu_sdma_phase_quantum;
@@ -552,68 +549,50 @@
 		phase_quantum =
 			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
 			unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
-
-		WREG32_SOC15_IP(GC,
-			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE0_QUANTUM),
-			phase_quantum);
-		WREG32_SOC15_IP(GC,
-			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE1_QUANTUM),
-			phase_quantum);
-		WREG32_SOC15_IP(GC,
-			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE2_QUANTUM),
-			phase_quantum);
 	}
 
-	if (!amdgpu_sriov_vf(adev)) {
-		f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL));
-		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
-				AUTO_CTXSW_ENABLE, 1);
-		WREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL), f32_cntl);
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (enable && amdgpu_sdma_phase_quantum) {
+			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
+			       phase_quantum);
+			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
+			       phase_quantum);
+			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
+			       phase_quantum);
+		}
+
+		if (!amdgpu_sriov_vf(adev)) {
+			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+					AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+		}
 	}
+
 }
 
 /**
- * sdma_v5_2_ctx_switch_disable_all - stop the async dma engines context switch
+ * sdma_v5_2_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs.
 *
- * Halt the async dma engines context switch.
+ * Halt or unhalt the async dma engines.
 */
-static void sdma_v5_2_ctx_switch_disable_all(struct amdgpu_device *adev)
+static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
 {
 	u32 f32_cntl;
 	int i;
 
-	if (amdgpu_sriov_vf(adev))
-		return;
-
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
-		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
-				AUTO_CTXSW_ENABLE, 0);
-		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+	if (!enable) {
+		sdma_v5_2_gfx_stop(adev);
+		sdma_v5_2_rlc_stop(adev);
 	}
-}
-
-/**
- * sdma_v5_2_halt - stop the async dma engines
- *
- * @adev: amdgpu_device pointer
- *
- * Halt the async dma engines.
- */
-static void sdma_v5_2_halt(struct amdgpu_device *adev)
-{
-	int i;
-	u32 f32_cntl;
-
-	sdma_v5_2_gfx_stop(adev);
-	sdma_v5_2_rlc_stop(adev);
 
 	if (!amdgpu_sriov_vf(adev)) {
 		for (i = 0; i < adev->sdma.num_instances; i++) {
 			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
-			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
+			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
 			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
 		}
 	}
@@ -625,9 +604,6 @@
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them.
- * It assumes that the dma engine is stopped for each instance.
- * The function enables the engine and preemptions sequentially for each instance.
- *
 * Returns 0 for success, error for failure.
 */
 static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
@@ -769,7 +745,10 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
 
 		ring->sched.ready = true;
 
-		sdma_v5_2_ctx_switch_enable_for_instance(adev, i);
+		if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
+			sdma_v5_2_ctx_switch_enable(adev, true);
+			sdma_v5_2_enable(adev, true);
+		}
 
 		r = amdgpu_ring_test_ring(ring);
 		if (r) {
@@ -813,7 +792,7 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
 	int i, j;
 
 	/* halt the MEs */
-	sdma_v5_2_halt(adev);
+	sdma_v5_2_enable(adev, false);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		if (!adev->sdma.instance[i].fw)
@@ -885,8 +864,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
 	int r = 0;
 
 	if (amdgpu_sriov_vf(adev)) {
-		sdma_v5_2_ctx_switch_disable_all(adev);
-		sdma_v5_2_halt(adev);
+		sdma_v5_2_ctx_switch_enable(adev, false);
+		sdma_v5_2_enable(adev, false);
 
 		/* set RB registers */
 		r = sdma_v5_2_gfx_resume(adev);
@@ -910,10 +889,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
 		amdgpu_gfx_off_ctrl(adev, false);
 
 	sdma_v5_2_soft_reset(adev);
+	/* unhalt the MEs */
+	sdma_v5_2_enable(adev, true);
+	/* enable sdma ring preemption */
+	sdma_v5_2_ctx_switch_enable(adev, true);
 
-	/* Soft reset supposes to disable the dma engine and preemption.
-	 * Now start the gfx rings and rlc compute queues.
-	 */
+	/* start the gfx rings and rlc compute queues */
 	r = sdma_v5_2_gfx_resume(adev);
 	if (adev->in_s0ix)
 		amdgpu_gfx_off_ctrl(adev, true);
@@ -1447,8 +1428,8 @@ static int sdma_v5_2_hw_fini(void *handle)
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	sdma_v5_2_ctx_switch_disable_all(adev);
-	sdma_v5_2_halt(adev);
+	sdma_v5_2_ctx_switch_enable(adev, false);
+	sdma_v5_2_enable(adev, false);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 3cabceee5f57..39405f0db824 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -1761,23 +1761,21 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
-static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
-				struct amdgpu_job *job)
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
 {
 	struct drm_gpu_scheduler **scheds;
 
 	/* The create msg must be in the first IB submitted */
-	if (atomic_read(&job->base.entity->fence_seq))
+	if (atomic_read(&p->entity->fence_seq))
 		return -EINVAL;
 
 	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
 		[AMDGPU_RING_PRIO_DEFAULT].sched;
-	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
+	drm_sched_entity_modify_sched(p->entity, scheds, 1);
 	return 0;
 }
 
-static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
-			    uint64_t addr)
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
 {
 	struct ttm_operation_ctx ctx = { false, false };
 	struct amdgpu_bo_va_mapping *map;
@@ -1848,7 +1846,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
 		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
 			continue;
 
-		r = vcn_v3_0_limit_sched(p, job);
+		r = vcn_v3_0_limit_sched(p);
 		if (r)
 			goto out;
 	}
@@ -1862,7 +1860,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
 					   struct amdgpu_job *job,
 					   struct amdgpu_ib *ib)
 {
-	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
+	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
 	uint32_t msg_lo = 0, msg_hi = 0;
 	unsigned i;
 	int r;
@@ -1881,8 +1879,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
 			msg_hi = val;
 		} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
 			   val == 0) {
-			r = vcn_v3_0_dec_msg(p, job,
-					     ((u64)msg_hi) << 32 | msg_lo);
+			r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
 			if (r)
 				return r;
 		}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 5e9adbc71bbd..cbfb32b3d235 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1516,6 +1516,8 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
 		num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
 		break;
 	case IP_VERSION(10, 3, 3):
+	case IP_VERSION(10, 3, 6): /* TODO: Double check these on production silicon */
+	case IP_VERSION(10, 3, 7): /* TODO: Double check these on production silicon */
 		pcache_info = yellow_carp_cache_info;
 		num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
 		break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 8667e3df2d0b..bf4200457772 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -73,6 +73,8 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
 	case IP_VERSION(4, 1, 2):/* RENOIR */
 	case IP_VERSION(5, 2, 1):/* VANGOGH */
 	case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
+	case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
+	case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
 	case IP_VERSION(6, 0, 1):
 		kfd->device_info.num_sdma_queues_per_engine = 2;
 		break;
@@ -127,6 +129,8 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
 	case IP_VERSION(9, 4, 2): /* ALDEBARAN */
 	case IP_VERSION(10, 3, 1): /* VANGOGH */
 	case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
+	case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
+	case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
 	case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
 	case IP_VERSION(10, 1, 4):
 	case IP_VERSION(10, 1, 10): /* NAVI10 */
@@ -178,7 +182,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
 
 		if (gc_version < IP_VERSION(11, 0, 0)) {
 			/* Navi2x+, Navi1x+ */
-			if (gc_version >= IP_VERSION(10, 3, 0))
+			if (gc_version == IP_VERSION(10, 3, 6))
+				kfd->device_info.no_atomic_fw_version = 14;
+			else if (gc_version >= IP_VERSION(10, 3, 0))
 				kfd->device_info.no_atomic_fw_version = 92;
 			else if (gc_version >= IP_VERSION(10, 1, 1))
 				kfd->device_info.no_atomic_fw_version = 145;
@@ -368,6 +374,16 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 			if (!vf)
 				f2g = &gfx_v10_3_kfd2kgd;
 			break;
+		case IP_VERSION(10, 3, 6):
+			gfx_target_version = 100306;
+			if (!vf)
+				f2g = &gfx_v10_3_kfd2kgd;
+			break;
+		case IP_VERSION(10, 3, 7):
+			gfx_target_version = 100307;
+			if (!vf)
+				f2g = &gfx_v10_3_kfd2kgd;
+			break;
 		case IP_VERSION(11, 0, 0):
 			gfx_target_version = 110000;
 			f2g = &gfx_v11_kfd2kgd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 997650d597ec..e44376c2ecdc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -296,7 +296,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			 struct migrate_vma *migrate, struct dma_fence **mfence,
 			 dma_addr_t *scratch)
 {
-	uint64_t npages = migrate->cpages;
+	uint64_t npages = migrate->npages;
 	struct device *dev = adev->dev;
 	struct amdgpu_res_cursor cursor;
 	dma_addr_t *src;
@@ -344,7 +344,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 						mfence);
 			if (r)
 				goto out_free_vram_pages;
-			amdgpu_res_next(&cursor, j << PAGE_SHIFT);
+			amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
 			j = 0;
 		} else {
 			amdgpu_res_next(&cursor, PAGE_SIZE);
@@ -590,7 +590,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 			continue;
 		}
 		src[i] = svm_migrate_addr(adev, spage);
-		if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
+		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
 			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
npages = migrate->npages; struct device *dev = adev->dev; struct amdgpu_res_cursor cursor; dma_addr_t *src; @@ -344,7 +344,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, mfence); if (r) goto out_free_vram_pages; - amdgpu_res_next(&cursor, j << PAGE_SHIFT); + amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT); j = 0; } else { amdgpu_res_next(&cursor, PAGE_SIZE); @@ -590,7 +590,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, continue; } src[i] = svm_migrate_addr(adev, spage); - if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) { + if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) { r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j, FROM_VRAM_TO_RAM, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 2ebf0132c25b..7b332246eda3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1295,7 +1295,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL, last_start, prange->start + i, pte_flags, - last_start - prange->start, + (last_start - prange->start) << PAGE_SHIFT, bo_adev ? bo_adev->vm_manager.vram_base_offset : 0, NULL, dma_addr, &vm->last_update); @@ -2307,6 +2307,8 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, if (range->event == MMU_NOTIFY_RELEASE) return true; + if (!mmget_not_zero(mni->mm)) + return true; start = mni->interval_tree.start; last = mni->interval_tree.last; @@ -2333,6 +2335,7 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, } svm_range_unlock(prange); + mmput(mni->mm); return true; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 70be67a56673..39b425d83bb1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2812,7 +2812,7 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) { - u32 max_cll, min_cll, max, min, q, r; + u32 max_avg, min_cll, max, min, q, r; struct amdgpu_dm_backlight_caps *caps; struct amdgpu_display_manager *dm; struct drm_connector *conn_base; @@ -2842,7 +2842,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps = &dm->backlight_caps[i]; caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; caps->aux_support = false; - max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll; + max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall; min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll; if (caps->ext_caps->bits.oled == 1 /*|| @@ -2870,8 +2870,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) * The results of the above expressions can be verified at * pre_computed_values. 
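As an aside, the max_cll to max_avg rename in this hunk feeds the luminance decode the code comment describes: an 8-bit code value CV encodes 50 * 2^(CV/32) nits, evaluated as a power-of-two shift (CV / 32) times a 32-entry fractional table (CV % 32). A self-contained sketch of that decode, using pow() where the driver uses its pre_computed_values[] lookup table (illustrative, not the driver's code):

#include <stdint.h>
#include <stdio.h>
#include <math.h>

/* 50 * 2^(r/32): the fractional factor the driver keeps in its
 * pre_computed_values[] table; computed with pow() here for brevity. */
static uint32_t lum_frac(unsigned int r)
{
	return (uint32_t)(50.0 * pow(2.0, r / 32.0) + 0.5);
}

/* Decode an 8-bit luminance code value CV into cd/m^2:
 * 50 * 2^(CV/32) == (50 * 2^((CV % 32) / 32)) << (CV / 32) */
static uint32_t decode_max_luminance(uint8_t cv)
{
	unsigned int q = cv >> 5;	/* CV / 32: whole power of two */
	unsigned int r = cv % 32;	/* CV % 32: fractional exponent */

	return lum_frac(r) << q;
}

int main(void)
{
	printf("CV 100 -> %u nits\n", decode_max_luminance(100));
	return 0;
}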
*/ - q = max_cll >> 5; - r = max_cll % 32; + q = max_avg >> 5; + r = max_avg % 32; max = (1 << q) * pre_computed_values[r]; // min luminance: maxLum * (CV/255)^2 / 100 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index ceb34376decb..bca5f01da763 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -287,8 +287,11 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base) void dcn31_init_clocks(struct clk_mgr *clk_mgr) { + uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); // Assumption is that boot state always supports pstate + clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk clk_mgr->clks.p_state_change_support = true; clk_mgr->clks.prev_p_state_change_support = true; clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; @@ -638,8 +641,14 @@ static void dcn31_set_low_power_state(struct clk_mgr *clk_mgr_base) } } +int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base) +{ + return clk_mgr_base->clks.ref_dtbclk_khz; +} + static struct clk_mgr_funcs dcn31_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn31_update_clocks, .init_clocks = dcn31_init_clocks, .enable_pme_wa = dcn31_enable_pme_wa, @@ -719,7 +728,7 @@ void dcn31_clk_mgr_construct( } clk_mgr->base.base.dprefclk_khz = 600000; - clk_mgr->base.dccg->ref_dtbclk_khz = 600000; + clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; dce_clock_read_ss_info(&clk_mgr->base); /*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/ //clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h index 961b10a49486..be06fdbd0c22 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h @@ -51,6 +51,8 @@ void dcn31_clk_mgr_construct(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg); +int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base); + void dcn31_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int); #endif //__DCN31_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index a2ade6e93f5e..fb4ae800e919 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -41,9 +41,7 @@ #include "dc_dmub_srv.h" -#if defined (CONFIG_DRM_AMD_DC_DP2_0) #include "dc_link_dp.h" -#endif #define TO_CLK_MGR_DCN315(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn315, base) @@ -580,6 +578,7 @@ static void dcn315_enable_pme_wa(struct clk_mgr *clk_mgr_base) static struct clk_mgr_funcs dcn315_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn315_update_clocks, .init_clocks = dcn31_init_clocks, .enable_pme_wa = dcn315_enable_pme_wa, @@ -656,9 +655,9 @@ void dcn315_clk_mgr_construct( clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base); - 
clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; + clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; dce_clock_read_ss_info(&clk_mgr->base); - clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); + clk_mgr->base.base.clks.ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); clk_mgr->base.base.bw_params = &dcn315_bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index fc3af81ed6c6..e4bb9c6193b5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -571,6 +571,7 @@ static void dcn316_clk_mgr_helper_populate_bw_params( static struct clk_mgr_funcs dcn316_funcs = { .enable_pme_wa = dcn316_enable_pme_wa, .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn316_update_clocks, .init_clocks = dcn31_init_clocks, .are_clock_states_equal = dcn31_are_clock_states_equal, @@ -685,7 +686,7 @@ void dcn316_clk_mgr_construct( clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base); - clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; + clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; dce_clock_read_ss_info(&clk_mgr->base); /*clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index dc30ac366a50..cbc47aecd00f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -114,8 +114,8 @@ static const struct dc_link_settings fail_safe_link_settings = { static bool decide_fallback_link_setting( struct dc_link *link, - struct dc_link_settings initial_link_settings, - struct dc_link_settings *current_link_setting, + struct dc_link_settings *max, + struct dc_link_settings *cur, enum link_training_result training_result); static void maximize_lane_settings(const struct link_training_settings *lt_settings, struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); @@ -2784,6 +2784,7 @@ bool perform_link_training_with_retries( enum dp_panel_mode panel_mode = dp_get_panel_mode(link); enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; struct dc_link_settings cur_link_settings = *link_setting; + struct dc_link_settings max_link_settings = *link_setting; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); int fail_count = 0; bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */ @@ -2793,7 +2794,6 @@ bool perform_link_training_with_retries( dp_trace_commit_lt_init(link); - if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) /* We need to do this before the link training to ensure the idle * pattern in SST mode will be sent right after the link training @@ -2909,19 +2909,15 @@ bool perform_link_training_with_retries( uint32_t req_bw; uint32_t link_bw; - decide_fallback_link_setting(link, *link_setting, &cur_link_settings, status); - /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to - * minimum link bandwidth. 
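The loop these hunks adjust is a standard retry-with-fallback shape: attempt training, let a fallback policy lower the settings, and stop once the reduced link can no longer carry the stream (which this patch turns from a logged warning into a hard stop). A compilable toy of that control flow, with simplified types and a made-up policy rather than DC's API:

#include <stdbool.h>
#include <stdio.h>

struct link_cfg { int rate_khz; int lanes; };

/* Toy fallback policy: halve the rate down to a floor, then shed lanes.
 * Returns false once nothing lower is left to try. */
static bool fallback(struct link_cfg *cur)
{
	if (cur->rate_khz > 162000) {
		cur->rate_khz /= 2;
		return true;
	}
	if (cur->lanes > 1) {
		cur->lanes /= 2;
		return true;
	}
	return false;
}

/* Stand-in for the hardware training sequence. */
static bool train(const struct link_cfg *cur)
{
	return cur->rate_khz <= 270000;	/* pretend higher rates fail */
}

static bool train_with_retries(struct link_cfg cfg, long req_bw)
{
	int attempts = 4;

	while (attempts--) {
		if (train(&cfg))
			return true;
		if (!fallback(&cfg))
			return false;	/* already at the minimum */
		/* stop once the reduced link can't carry the stream */
		if ((long)cfg.rate_khz * cfg.lanes < req_bw)
			return false;
	}
	return false;
}

int main(void)
{
	struct link_cfg max = { 540000, 4 };

	printf("trained: %d\n", train_with_retries(max, 600000L));
	return 0;
}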
+ decide_fallback_link_setting(link, &max_link_settings, + &cur_link_settings, status); + /* Fail link training if reduced link bandwidth no longer meets + * stream requirements. */ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings); - is_link_bw_low = (req_bw > link_bw); - is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && - (cur_link_settings.lane_count <= LANE_COUNT_ONE)); - - if (is_link_bw_low) - DC_LOG_WARNING("%s: Link bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n", - __func__, req_bw, link_bw); + if (req_bw > link_bw) + break; } msleep(delay_between_attempts); @@ -3309,7 +3305,7 @@ static bool dp_verify_link_cap( int *fail_count) { struct dc_link_settings cur_link_settings = {0}; - struct dc_link_settings initial_link_settings = *known_limit_link_setting; + struct dc_link_settings max_link_settings = *known_limit_link_setting; bool success = false; bool skip_video_pattern; enum clock_source_id dp_cs_id = get_clock_source_id(link); @@ -3318,7 +3314,7 @@ static bool dp_verify_link_cap( struct link_resource link_res; memset(&irq_data, 0, sizeof(irq_data)); - cur_link_settings = initial_link_settings; + cur_link_settings = max_link_settings; /* Grant extended timeout request */ if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) { @@ -3361,7 +3357,7 @@ static bool dp_verify_link_cap( dp_trace_lt_result_update(link, status, true); dp_disable_link_phy(link, &link_res, link->connector_signal); } while (!success && decide_fallback_link_setting(link, - initial_link_settings, &cur_link_settings, status)); + &max_link_settings, &cur_link_settings, status)); link->verified_link_cap = success ? cur_link_settings : fail_safe_link_settings; @@ -3596,16 +3592,19 @@ static bool decide_fallback_link_setting_max_bw_policy( */ static bool decide_fallback_link_setting( struct dc_link *link, - struct dc_link_settings initial_link_settings, - struct dc_link_settings *current_link_setting, + struct dc_link_settings *max, + struct dc_link_settings *cur, enum link_training_result training_result) { - if (!current_link_setting) + if (!cur) return false; - if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING || + if (!max) + return false; + + if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING || link->dc->debug.force_dp2_lt_fallback_method) - return decide_fallback_link_setting_max_bw_policy(link, &initial_link_settings, - current_link_setting, training_result); + return decide_fallback_link_setting_max_bw_policy(link, max, cur, + training_result); switch (training_result) { case LINK_TRAINING_CR_FAIL_LANE0: @@ -3613,28 +3612,18 @@ static bool decide_fallback_link_setting( case LINK_TRAINING_CR_FAIL_LANE23: case LINK_TRAINING_LQA_FAIL: { - if (!reached_minimum_link_rate - (current_link_setting->link_rate)) { - current_link_setting->link_rate = - reduce_link_rate( - current_link_setting->link_rate); - } else if (!reached_minimum_lane_count - (current_link_setting->lane_count)) { - current_link_setting->link_rate = - initial_link_settings.link_rate; + if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + } else if (!reached_minimum_lane_count(cur->lane_count)) { + cur->link_rate = max->link_rate; if (training_result == LINK_TRAINING_CR_FAIL_LANE0) return false; else if (training_result == LINK_TRAINING_CR_FAIL_LANE1) - current_link_setting->lane_count = - LANE_COUNT_ONE; - 
else if (training_result == - LINK_TRAINING_CR_FAIL_LANE23) - current_link_setting->lane_count = - LANE_COUNT_TWO; + cur->lane_count = LANE_COUNT_ONE; + else if (training_result == LINK_TRAINING_CR_FAIL_LANE23) + cur->lane_count = LANE_COUNT_TWO; else - current_link_setting->lane_count = - reduce_lane_count( - current_link_setting->lane_count); + cur->lane_count = reduce_lane_count(cur->lane_count); } else { return false; } @@ -3642,17 +3631,17 @@ static bool decide_fallback_link_setting( } case LINK_TRAINING_EQ_FAIL_EQ: { - if (!reached_minimum_lane_count - (current_link_setting->lane_count)) { - current_link_setting->lane_count = - reduce_lane_count( - current_link_setting->lane_count); - } else if (!reached_minimum_link_rate - (current_link_setting->link_rate)) { - current_link_setting->link_rate = - reduce_link_rate( - current_link_setting->link_rate); - current_link_setting->lane_count = initial_link_settings.lane_count; + if (!reached_minimum_lane_count(cur->lane_count)) { + cur->lane_count = reduce_lane_count(cur->lane_count); + } else if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. + */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; } else { return false; } @@ -3660,12 +3649,15 @@ static bool decide_fallback_link_setting( } case LINK_TRAINING_EQ_FAIL_CR: { - if (!reached_minimum_link_rate - (current_link_setting->link_rate)) { - current_link_setting->link_rate = - reduce_link_rate( - current_link_setting->link_rate); - current_link_setting->lane_count = initial_link_settings.lane_count; + if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. 
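The comment being added here is the heart of the change: when an EQ failure lowers the link rate, the ceiling that later CR failures reset from must be lowered too, or the two paths can keep undoing each other. In toy form (illustrative types; lower_rate() stands in for reduce_link_rate()):

struct link_cfg { int rate_khz; int lanes; };

static int lower_rate(int rate_khz)
{
	return rate_khz > 162000 ? rate_khz / 2 : rate_khz;
}

/* Without the clamp, EQ fail lowers the rate, CR fail resets it from max
 * while shedding a lane, EQ fail lowers it again, and so on forever.
 * Clamping max after an EQ fallback makes every reset non-increasing. */
static void eq_fallback(struct link_cfg *max, struct link_cfg *cur)
{
	cur->rate_khz = lower_rate(cur->rate_khz);
	max->rate_khz = cur->rate_khz;	/* later CR resets can't exceed this */
	cur->lanes = max->lanes;	/* retry EQ with the full lane count */
}

static void cr_fallback(const struct link_cfg *max, struct link_cfg *cur)
{
	cur->rate_khz = max->rate_khz;	/* reset rate from the clamped max */
	if (cur->lanes > 1)
		cur->lanes /= 2;	/* and try fewer lanes */
}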
+ */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; } else { return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3960c74482be..817028d3c4a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.186" +#define DC_VER "3.2.187" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -416,6 +416,7 @@ struct dc_clocks { bool p_state_change_support; enum dcn_zstate_support_state zstate_support; bool dtbclk_en; + int ref_dtbclk_khz; enum dcn_pwr_state pwr_state; /* * Elements below are not compared for the purposes of @@ -719,6 +720,8 @@ struct dc_debug_options { bool apply_vendor_specific_lttpr_wa; bool extended_blank_optimization; union aux_wake_wa_options aux_wake_wa; + /* uses value at boot and disables switch */ + bool disable_dtb_ref_clk_switch; uint8_t psr_power_use_phy_fsm; enum dml_hostvm_override_opts dml_hostvm_override; }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 287a1066b547..bbc58d167c63 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -513,12 +513,10 @@ void dccg31_set_physymclk( /* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */ static void dccg31_set_dtbclk_dto( struct dccg *dccg, - int dtbclk_inst, - int req_dtbclk_khz, - int num_odm_segments, - const struct dc_crtc_timing *timing) + struct dtbclk_dto_params *params) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + int req_dtbclk_khz = params->pixclk_khz; uint32_t dtbdto_div; /* Mode DTBDTO Rate DTBCLK_DTO<x>_DIV Register @@ -529,57 +527,53 @@ static void dccg31_set_dtbclk_dto( * DSC native 4:2:2 pixel rate/2 4 * Other modes pixel rate 8 */ - if (num_odm_segments == 4) { + if (params->num_odm_segments == 4) { dtbdto_div = 2; - req_dtbclk_khz = req_dtbclk_khz / 4; - } else if ((num_odm_segments == 2) || - (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) || - (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 - && !timing->dsc_cfg.ycbcr422_simple)) { + req_dtbclk_khz = params->pixclk_khz / 4; + } else if ((params->num_odm_segments == 2) || + (params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) || + (params->timing->flags.DSC && params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 + && !params->timing->dsc_cfg.ycbcr422_simple)) { dtbdto_div = 4; - req_dtbclk_khz = req_dtbclk_khz / 2; + req_dtbclk_khz = params->pixclk_khz / 2; } else dtbdto_div = 8; - if (dccg->ref_dtbclk_khz && req_dtbclk_khz) { + if (params->ref_dtbclk_khz && req_dtbclk_khz) { uint32_t modulo, phase; // phase / modulo = dtbclk / dtbclk ref - modulo = dccg->ref_dtbclk_khz * 1000; - phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + dccg->ref_dtbclk_khz - 1), - dccg->ref_dtbclk_khz); + modulo = params->ref_dtbclk_khz * 1000; + phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + params->ref_dtbclk_khz - 1), + params->ref_dtbclk_khz); - REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div); + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div); - REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], modulo); - REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], phase); + REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo); + 
REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase); - REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLK_DTO_ENABLE[dtbclk_inst], 1); + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_ENABLE[params->otg_inst], 1); - REG_WAIT(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLKDTO_ENABLE_STATUS[dtbclk_inst], 1, + REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1, 1, 100); /* The recommended programming sequence to enable DTBCLK DTO to generate * valid pixel HPO DPSTREAM ENCODER, specifies that DTO source select should * be set only after DTO is enabled */ - REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - PIPE_DTO_SRC_SEL[dtbclk_inst], 1); - - dccg->dtbclk_khz[dtbclk_inst] = req_dtbclk_khz; + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + PIPE_DTO_SRC_SEL[params->otg_inst], 1); } else { - REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLK_DTO_ENABLE[dtbclk_inst], 0, - PIPE_DTO_SRC_SEL[dtbclk_inst], 0, - DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div); + REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_ENABLE[params->otg_inst], 0, + PIPE_DTO_SRC_SEL[params->otg_inst], 0, + DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div); - REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], 0); - REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], 0); - - dccg->dtbclk_khz[dtbclk_inst] = 0; + REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0); + REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0); } } @@ -606,16 +600,12 @@ void dccg31_set_audio_dtbclk_dto( REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, 4); // 04 - DCCG_AUDIO_DTO_SEL_AUDIO_DTO_DTBCLK - - dccg->audio_dtbclk_khz = req_audio_dtbclk_khz; } else { REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_PHASE, 0); REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_MODULO, 0); REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, 3); // 03 - DCCG_AUDIO_DTO_SEL_NO_AUDIO_DTO - - dccg->audio_dtbclk_khz = 0; } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index d94fd1010deb..8b12b4111c88 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -230,9 +230,7 @@ static void enc31_hw_init(struct link_encoder *enc) AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3 AUX_RX_DETECTION_THRESHOLD [30:28] = 1 */ - AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110); - - AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a); + // dmub will read AUX_DPHY_RX_CONTROL0/AUX_DPHY_TX_CONTROL from vbios table in dp_aux_init //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32; // Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c index 789f7562cdc7..d2273674e872 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c @@ -1284,10 +1284,8 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; -#if defined (CONFIG_DRM_AMD_DC_DP2_0) if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) return true; -#endif } return false; } diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 46ce5a0ee4ec..b5570aa8e39d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ 
-237,6 +237,7 @@ struct clk_mgr_funcs { bool safe_to_lower); int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr); + int (*get_dtb_ref_clk_frequency)(struct clk_mgr *clk_mgr); void (*set_low_power_state)(struct clk_mgr *clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index b2fa4de47734..c7021915bac8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -60,8 +60,17 @@ struct dccg { const struct dccg_funcs *funcs; int pipe_dppclk_khz[MAX_PIPES]; int ref_dppclk; - int dtbclk_khz[MAX_PIPES]; - int audio_dtbclk_khz; + //int dtbclk_khz[MAX_PIPES];/* TODO needs to be removed */ + //int audio_dtbclk_khz;/* TODO needs to be removed */ + int ref_dtbclk_khz;/* TODO needs to be removed */ +}; + +struct dtbclk_dto_params { + const struct dc_crtc_timing *timing; + int otg_inst; + int pixclk_khz; + int req_audio_dtbclk_khz; + int num_odm_segments; int ref_dtbclk_khz; }; @@ -111,10 +120,7 @@ struct dccg_funcs { void (*set_dtbclk_dto)( struct dccg *dccg, - int dtbclk_inst, - int req_dtbclk_khz, - int num_odm_segments, - const struct dc_crtc_timing *timing); + struct dtbclk_dto_params *dto_params); void (*set_audio_dtbclk_dto)( struct dccg *dccg, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c index 87972dc8443d..ea6cf8bfce30 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c @@ -27,6 +27,7 @@ #include "core_types.h" #include "dccg.h" #include "dc_link_dp.h" +#include "clk_mgr.h" static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) { @@ -106,14 +107,18 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc; struct dccg *dccg = dc->res_pool->dccg; struct timing_generator *tg = pipe_ctx->stream_res.tg; - int odm_segment_count = get_odm_segment_count(pipe_ctx); + struct dtbclk_dto_params dto_params = {0}; enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link); + dto_params.otg_inst = tg->inst; + dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk; + dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); + dto_params.timing = &pipe_ctx->stream->timing; + dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); + dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst); dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk); - dccg->funcs->set_dtbclk_dto(dccg, tg->inst, pipe_ctx->stream->phy_pix_clk, - odm_segment_count, - &pipe_ctx->stream->timing); + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); stream_enc->funcs->enable_stream(stream_enc); stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst); } @@ -124,9 +129,13 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; struct dccg *dccg = dc->res_pool->dccg; struct timing_generator *tg = pipe_ctx->stream_res.tg; + struct dtbclk_dto_params dto_params = {0}; + + dto_params.otg_inst = tg->inst; + dto_params.timing = &pipe_ctx->stream->timing; stream_enc->funcs->disable(stream_enc); - dccg->funcs->set_dtbclk_dto(dccg, tg->inst, 0, 0, &pipe_ctx->stream->timing); + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst); 
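Worth noting about the set_dtbclk_dto() rework visible in these call sites: folding the long argument list into struct dtbclk_dto_params lets the enable path fill in the clock fields while the disable path zero-initializes the struct and sets only the OTG instance and timing, so zeroed clocks select the off branch and new fields no longer ripple through every hook signature. The pattern in miniature (hypothetical names, not DC's API):

#include <stdio.h>

struct dto_params {
	int otg_inst;
	int pixclk_khz;		/* zero means: turn the DTO off */
	int ref_clk_khz;
	int num_odm_segments;
};

/* One entry point; sparse zero-initialized structs select the off path. */
static void set_dto(const struct dto_params *p)
{
	if (p->ref_clk_khz && p->pixclk_khz)
		printf("OTG%d: program DTO for %d kHz\n",
		       p->otg_inst, p->pixclk_khz);
	else
		printf("OTG%d: disable DTO\n", p->otg_inst);
}

int main(void)
{
	struct dto_params on = { .otg_inst = 0, .pixclk_khz = 594000,
				 .ref_clk_khz = 600000 };
	struct dto_params off = { .otg_inst = 0 };	/* all clocks zero */

	set_dto(&on);
	set_dto(&off);
	return 0;
}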
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst); } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 7c9330a61ac1..c7bd7e216710 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -84,7 +84,7 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; const uint32_t timeout = 100; - uint32_t in_reset, scratch, i; + uint32_t in_reset, scratch, i, pwait_mode; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); @@ -115,6 +115,13 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) udelay(1); } + for (i = 0; i < timeout; ++i) { + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode); + if (pwait_mode & (1 << 0)) + break; + + udelay(1); + } /* Force reset in case we timed out, DMCUB is likely hung. */ } @@ -125,6 +132,8 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); REG_WRITE(DMCUB_OUTBOX1_WPTR, 0); + REG_WRITE(DMCUB_OUTBOX0_RPTR, 0); + REG_WRITE(DMCUB_OUTBOX0_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); /* Clear the GPINT command manually so we don't send anything during boot. */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index 59ddc81b5a0e..f6db6f89d45d 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -151,7 +151,8 @@ struct dmub_srv; DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \ DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \ DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \ - DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) + DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \ + DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS) struct dmub_srv_dcn31_reg_offset { #define DMUB_SR(reg) uint32_t reg; diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index 73b9e0a87e54..20a3d4e23f66 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -127,6 +127,8 @@ struct av_sync_data { static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0}; static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0}; +static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u"; + /*MST Dock*/ static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA"; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h index 247c6e9632ba..1cb399dbc7cc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h @@ -22,6 +22,7 @@ #ifndef SMU_11_0_7_PPTABLE_H #define SMU_11_0_7_PPTABLE_H +#pragma pack(push, 1) #define SMU_11_0_7_TABLE_FORMAT_REVISION 15 @@ -139,7 +140,7 @@ struct smu_11_0_7_overdrive_table uint32_t max[SMU_11_0_7_MAX_ODSETTING]; //default maximum settings uint32_t min[SMU_11_0_7_MAX_ODSETTING]; //default minimum settings int16_t pm_setting[SMU_11_0_7_MAX_PMSETTING]; //Optimized power mode feature settings -} __attribute__((packed)); +}; enum SMU_11_0_7_PPCLOCK_ID { SMU_11_0_7_PPCLOCK_GFXCLK = 0, @@ -166,7 +167,7 @@ struct smu_11_0_7_power_saving_clock_table uint32_t count; //power_saving_clock_count = SMU_11_0_7_PPCLOCK_COUNT uint32_t max[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz uint32_t 
min[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz -} __attribute__((packed)); +}; struct smu_11_0_7_powerplay_table { @@ -191,6 +192,8 @@ struct smu_11_0_7_powerplay_table struct smu_11_0_7_overdrive_table overdrive_table; PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h -} __attribute__((packed)); +}; + +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h index 7a63cf8e85ed..0116e3d04fad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h @@ -22,6 +22,7 @@ #ifndef SMU_11_0_PPTABLE_H #define SMU_11_0_PPTABLE_H +#pragma pack(push, 1) #define SMU_11_0_TABLE_FORMAT_REVISION 12 @@ -109,7 +110,7 @@ struct smu_11_0_overdrive_table uint8_t cap[SMU_11_0_MAX_ODFEATURE]; //OD feature support flags uint32_t max[SMU_11_0_MAX_ODSETTING]; //default maximum settings uint32_t min[SMU_11_0_MAX_ODSETTING]; //default minimum settings -} __attribute__((packed)); +}; enum SMU_11_0_PPCLOCK_ID { SMU_11_0_PPCLOCK_GFXCLK = 0, @@ -133,7 +134,7 @@ struct smu_11_0_power_saving_clock_table uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT uint32_t max[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz uint32_t min[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz -} __attribute__((packed)); +}; struct smu_11_0_powerplay_table { @@ -162,6 +163,8 @@ struct smu_11_0_powerplay_table #ifndef SMU_11_0_PARTIAL_PPTABLE PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h #endif -} __attribute__((packed)); +}; + +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h index 3f29f4327378..478862ded0bd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h @@ -22,6 +22,8 @@ #ifndef SMU_13_0_7_PPTABLE_H #define SMU_13_0_7_PPTABLE_H +#pragma pack(push, 1) + #define SMU_13_0_7_TABLE_FORMAT_REVISION 15 //// POWERPLAYTABLE::ulPlatformCaps @@ -194,7 +196,8 @@ struct smu_13_0_7_powerplay_table struct smu_13_0_7_overdrive_table overdrive_table; uint8_t padding1; PPTable_t smc_pptable; //PPTable_t in driver_if.h -} __attribute__((packed)); +}; +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h index 1f311396b706..043307485528 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h @@ -22,6 +22,8 @@ #ifndef SMU_13_0_PPTABLE_H #define SMU_13_0_PPTABLE_H +#pragma pack(push, 1) + #define SMU_13_0_TABLE_FORMAT_REVISION 1 //// POWERPLAYTABLE::ulPlatformCaps @@ -109,7 +111,7 @@ struct smu_13_0_overdrive_table { uint8_t cap[SMU_13_0_MAX_ODFEATURE]; //OD feature support flags uint32_t max[SMU_13_0_MAX_ODSETTING]; //default maximum settings uint32_t min[SMU_13_0_MAX_ODSETTING]; //default minimum settings -} __attribute__((packed)); +}; enum SMU_13_0_PPCLOCK_ID { SMU_13_0_PPCLOCK_GFXCLK = 0, @@ -132,7 +134,7 @@ struct smu_13_0_power_saving_clock_table { uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT uint32_t max[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz uint32_t min[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz -} __attribute__((packed)); +}; struct smu_13_0_powerplay_table { 
struct atom_common_table_header header; @@ -160,6 +162,8 @@ struct smu_13_0_powerplay_table { #ifndef SMU_13_0_PARTIAL_PPTABLE PPTable_t smc_pptable; //PPTable_t in driver_if.h #endif -} __attribute__((packed)); +}; + +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index 4551bc8a3ecf..f573d582407e 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -160,13 +160,12 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower) } if (bDPExecute) - ast->tx_chip_type = AST_TX_ASTDP; + ast->tx_chip_types |= BIT(AST_TX_ASTDP); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK, ASTDP_HOST_EDID_READ_DONE); - } else - ast->tx_chip_type = AST_TX_NONE; + } } diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c index 204c926a18ea..4f75a9efb610 100644 --- a/drivers/gpu/drm/ast/ast_dp501.c +++ b/drivers/gpu/drm/ast/ast_dp501.c @@ -450,7 +450,7 @@ void ast_init_3rdtx(struct drm_device *dev) ast_init_dvo(dev); break; default: - if (ast->tx_chip_type == AST_TX_SIL164) + if (ast->tx_chip_types & BIT(AST_TX_SIL164)) ast_init_dvo(dev); else ast_init_analog(dev); } diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index afebe35f205e..a34db4380f68 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -73,6 +73,11 @@ enum ast_tx_chip { AST_TX_ASTDP, }; +#define AST_TX_NONE_BIT BIT(AST_TX_NONE) +#define AST_TX_SIL164_BIT BIT(AST_TX_SIL164) +#define AST_TX_DP501_BIT BIT(AST_TX_DP501) +#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP) + #define AST_DRAM_512Mx16 0 #define AST_DRAM_1Gx16 1 #define AST_DRAM_512Mx32 2 @@ -173,7 +178,7 @@ struct ast_private { struct drm_plane primary_plane; struct ast_cursor_plane cursor_plane; struct drm_crtc crtc; - union { + struct { struct { struct drm_encoder encoder; struct ast_vga_connector vga_connector; @@ -199,7 +204,7 @@ struct ast_private { ast_use_defaults } config_mode; - enum ast_tx_chip tx_chip_type; + unsigned long tx_chip_types; /* bitfield of enum ast_tx_chip */ u8 *dp501_fw_addr; const struct firmware *dp501_fw; /* dp501 fw */ }; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index d770d5a23c1a..067453266897 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -216,7 +216,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } /* Check 3rd Tx option (digital output afaik) */ - ast->tx_chip_type = AST_TX_NONE; + ast->tx_chip_types |= AST_TX_NONE_BIT; /* * VGACRA3 Enhanced Color Mode Register, check if DVO is already @@ -229,7 +229,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) if (!*need_post) { jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff); if (jreg & 0x80) - ast->tx_chip_type = AST_TX_SIL164; + ast->tx_chip_types = AST_TX_SIL164_BIT; } if ((ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST2500)) { @@ -241,7 +241,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); switch (jreg) { case 0x04: - ast->tx_chip_type = AST_TX_SIL164; + ast->tx_chip_types = AST_TX_SIL164_BIT; break; case 0x08: ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL); @@ -254,22 +254,19 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } fallthrough; case 0x0c: - ast->tx_chip_type = AST_TX_DP501; + ast->tx_chip_types = AST_TX_DP501_BIT; } } else if (ast->chip ==
AST2600) ast_dp_launch(&ast->base, 0); /* Print stuff for diagnostic purposes */ - switch(ast->tx_chip_type) { - case AST_TX_SIL164: + if (ast->tx_chip_types & AST_TX_NONE_BIT) + drm_info(dev, "Using analog VGA\n"); + if (ast->tx_chip_types & AST_TX_SIL164_BIT) drm_info(dev, "Using Sil164 TMDS transmitter\n"); - break; - case AST_TX_DP501: + if (ast->tx_chip_types & AST_TX_DP501_BIT) drm_info(dev, "Using DP501 DisplayPort transmitter\n"); - break; - default: - drm_info(dev, "Analog VGA only\n"); - } + return 0; } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 323af2746aa9..db2010a55674 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -997,10 +997,10 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_ON: ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, 0); - if (ast->tx_chip_type == AST_TX_DP501) + if (ast->tx_chip_types & AST_TX_DP501_BIT) ast_set_dp501_video_output(crtc->dev, 1); - if (ast->tx_chip_type == AST_TX_ASTDP) { + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { ast_dp_power_on_off(crtc->dev, AST_DP_POWER_ON); ast_wait_for_vretrace(ast); ast_dp_set_on_off(crtc->dev, 1); @@ -1012,17 +1012,17 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: ch = mode; - if (ast->tx_chip_type == AST_TX_DP501) + if (ast->tx_chip_types & AST_TX_DP501_BIT) ast_set_dp501_video_output(crtc->dev, 0); - break; - if (ast->tx_chip_type == AST_TX_ASTDP) { + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { ast_dp_set_on_off(crtc->dev, 0); ast_dp_power_on_off(crtc->dev, AST_DP_POWER_OFF); } ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0x20); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, ch); + break; } } @@ -1155,7 +1155,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, ast_crtc_load_lut(ast, crtc); //Set Aspeed Display-Port - if (ast->tx_chip_type == AST_TX_ASTDP) + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) ast_dp_set_mode(crtc, vbios_mode_info); mutex_unlock(&ast->ioregs_lock); @@ -1739,22 +1739,26 @@ int ast_mode_config_init(struct ast_private *ast) ast_crtc_init(dev); - switch (ast->tx_chip_type) { - case AST_TX_NONE: + if (ast->tx_chip_types & AST_TX_NONE_BIT) { ret = ast_vga_output_init(ast); - break; - case AST_TX_SIL164: + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_SIL164_BIT) { ret = ast_sil164_output_init(ast); - break; - case AST_TX_DP501: + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_DP501_BIT) { ret = ast_dp501_output_init(ast); - break; - case AST_TX_ASTDP: + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { ret = ast_astdp_output_init(ast); - break; + if (ret) + return ret; } - if (ret) - return ret; drm_mode_config_reset(dev); diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 0aa9cf0fb5c3..82fd3c8adee1 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -391,7 +391,7 @@ void ast_post_gpu(struct drm_device *dev) ast_init_3rdtx(dev); } else { - if (ast->tx_chip_type != AST_TX_NONE) + if (ast->tx_chip_types & AST_TX_SIL164_BIT) ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ } } diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index b97f6e8f0f6b..01c8b80e34ec 100644 --- 
a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1267,6 +1267,25 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge, } static +struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp, + struct drm_atomic_state *state) +{ + struct drm_encoder *encoder = dp->encoder; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + + connector = drm_atomic_get_old_connector_for_encoder(state, encoder); + if (!connector) + return NULL; + + conn_state = drm_atomic_get_old_connector_state(state, connector); + if (!conn_state) + return NULL; + + return conn_state->crtc; +} + +static struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp, struct drm_atomic_state *state) { @@ -1446,14 +1465,16 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, { struct drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; - struct drm_crtc *crtc; + struct drm_crtc *old_crtc, *new_crtc; + struct drm_crtc_state *old_crtc_state = NULL; struct drm_crtc_state *new_crtc_state = NULL; + int ret; - crtc = analogix_dp_get_new_crtc(dp, old_state); - if (!crtc) + new_crtc = analogix_dp_get_new_crtc(dp, old_state); + if (!new_crtc) goto out; - new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc); + new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc); if (!new_crtc_state) goto out; @@ -1462,6 +1483,19 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, return; out: + old_crtc = analogix_dp_get_old_crtc(dp, old_state); + if (old_crtc) { + old_crtc_state = drm_atomic_get_old_crtc_state(old_state, + old_crtc); + + /* When moving from PSR to fully disabled, exit PSR first. */ + if (old_crtc_state && old_crtc_state->self_refresh_active) { + ret = analogix_dp_disable_psr(dp); + if (ret) + DRM_ERROR("Failed to disable psr (%d)\n", ret); + } + } + analogix_dp_bridge_disable(bridge); } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index 2831f0813c3a..ac66f408b40c 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -577,7 +577,7 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model) ctx->host_node = of_graph_get_remote_port_parent(endpoint); of_node_put(endpoint); - if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4) { + if (ctx->dsi_lanes <= 0 || ctx->dsi_lanes > 4) { ret = -EINVAL; goto err_put_node; } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 9603193d2fa1..987e4b212e9f 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1011,9 +1011,19 @@ crtc_needs_disable(struct drm_crtc_state *old_state, return drm_atomic_crtc_effectively_active(old_state); /* - * We need to run through the crtc_funcs->disable() function if the CRTC - * is currently on, if it's transitioning to self refresh mode, or if - * it's in self refresh mode and needs to be fully disabled. + * We need to disable bridge(s) and CRTC if we're transitioning out of + * self-refresh and changing CRTCs at the same time, because the + * bridge tracks self-refresh status via CRTC state. 
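Spelled out as a standalone predicate, the disable conditions this comment (and the one after it) describe look roughly like the sketch below, a paraphrase with toy types rather than the DRM helper itself:

#include <stdbool.h>
#include <stddef.h>

struct crtc_state {
	const void *crtc;	/* identity of the CRTC this state binds to */
	bool active;
	bool self_refresh_active;
};

static bool needs_disable(const struct crtc_state *old_s,
			  const struct crtc_state *new_s)
{
	/* CRTC going away entirely: disable if it was effectively on. */
	if (!new_s)
		return old_s->active || old_s->self_refresh_active;

	/* Leaving self-refresh while switching CRTCs: only a full
	 * disable tells the bridge that self-refresh has ended. */
	if (old_s->self_refresh_active && old_s->crtc != new_s->crtc)
		return true;

	/* On, leaving self-refresh for full off, or entering it. */
	return old_s->active ||
	       (old_s->self_refresh_active && !new_s->active) ||
	       new_s->self_refresh_active;
}

int main(void)
{
	struct crtc_state old_s = { "crtc0", false, true };
	struct crtc_state new_s = { "crtc1", true, false };

	return needs_disable(&old_s, &new_s) ? 0 : 1;
}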
+ */ + if (old_state->self_refresh_active && + old_state->crtc != new_state->crtc) + return true; + + /* + * We also need to run through the crtc_funcs->disable() function if + * the CRTC is currently on, if it's transitioning to self refresh + * mode, or if it's in self refresh mode and needs to be fully + * disabled. */ return old_state->active || (old_state->self_refresh_active && !new_state->active) || diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 424ea23eec32..16c539657f73 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -177,15 +177,15 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = { DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER), DRM_COMPONENT_DRIVER }, { - DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC), - DRM_COMPONENT_DRIVER - }, { DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP), DRM_COMPONENT_DRIVER }, { DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI), DRM_COMPONENT_DRIVER }, { + DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC), + DRM_COMPONENT_DRIVER + }, { DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI), DRM_COMPONENT_DRIVER }, { diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index 9e06f8e2a863..09ce28ee08d9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -26,6 +26,7 @@ #include <drm/drm_print.h> #include "exynos_drm_drv.h" +#include "exynos_drm_crtc.h" /* Sysreg registers for MIC */ #define DSD_CFG_MUX 0x1004 @@ -100,9 +101,7 @@ struct exynos_mic { bool i80_mode; struct videomode vm; - struct drm_encoder *encoder; struct drm_bridge bridge; - struct drm_bridge *next_bridge; bool enabled; }; @@ -229,8 +228,6 @@ static void mic_set_reg_on(struct exynos_mic *mic, bool enable) writel(reg, mic->reg + MIC_OP); } -static void mic_disable(struct drm_bridge *bridge) { } - static void mic_post_disable(struct drm_bridge *bridge) { struct exynos_mic *mic = bridge->driver_private; @@ -297,34 +294,30 @@ unlock: mutex_unlock(&mic_mutex); } -static void mic_enable(struct drm_bridge *bridge) { } - -static int mic_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) -{ - struct exynos_mic *mic = bridge->driver_private; - - return drm_bridge_attach(bridge->encoder, mic->next_bridge, - &mic->bridge, flags); -} - static const struct drm_bridge_funcs mic_bridge_funcs = { - .disable = mic_disable, .post_disable = mic_post_disable, .mode_set = mic_mode_set, .pre_enable = mic_pre_enable, - .enable = mic_enable, - .attach = mic_attach, }; static int exynos_mic_bind(struct device *dev, struct device *master, void *data) { struct exynos_mic *mic = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct exynos_drm_crtc *crtc = exynos_drm_crtc_get_by_type(drm_dev, + EXYNOS_DISPLAY_TYPE_LCD); + struct drm_encoder *e, *encoder = NULL; + + drm_for_each_encoder(e, drm_dev) + if (e->possible_crtcs == drm_crtc_mask(&crtc->base)) + encoder = e; + if (!encoder) + return -ENODEV; mic->bridge.driver_private = mic; - return 0; + return drm_bridge_attach(encoder, &mic->bridge, NULL, 0); } static void exynos_mic_unbind(struct device *dev, struct device *master, @@ -388,7 +381,6 @@ static int exynos_mic_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct exynos_mic *mic; - struct device_node *remote; struct resource res; int ret, i; @@ -432,16 +424,6 @@ static int exynos_mic_probe(struct platform_device *pdev) } } - remote = of_graph_get_remote_node(dev->of_node, 1, 
0); - mic->next_bridge = of_drm_find_bridge(remote); - if (IS_ERR(mic->next_bridge)) { - DRM_DEV_ERROR(dev, "mic: Failed to find next bridge\n"); - ret = PTR_ERR(mic->next_bridge); - goto err; - } - - of_node_put(remote); - platform_set_drvdata(pdev, mic); mic->bridge.funcs = &mic_bridge_funcs; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index c326bd2b444f..30fe847c6664 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -999,7 +999,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb) } } - err = dma_resv_reserve_fences(vma->obj->base.resv, 1); + /* Reserve enough slots to accommodate composite fences */ + err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches); if (err) return err; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 53307ca0eed0..51a0fe60c050 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -785,6 +785,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt) { intel_wakeref_t wakeref; + intel_gt_sysfs_unregister(gt); intel_rps_driver_unregister(&gt->rps); intel_gsc_fini(&gt->gsc); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c index 8ec8bc660c8c..9e4ebf53379b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c @@ -24,7 +24,7 @@ bool is_object_gt(struct kobject *kobj) static struct intel_gt *kobj_to_gt(struct kobject *kobj) { - return container_of(kobj, struct kobj_gt, base)->gt; + return container_of(kobj, struct intel_gt, sysfs_gt); } struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev, @@ -72,9 +72,9 @@ static struct attribute *id_attrs[] = { }; ATTRIBUTE_GROUPS(id); +/* A kobject needs a release() method even if it does nothing */ static void kobj_gt_release(struct kobject *kobj) { - kfree(kobj); } static struct kobj_type kobj_gt_type = { @@ -85,8 +85,6 @@ static struct kobj_type kobj_gt_type = { void intel_gt_sysfs_register(struct intel_gt *gt) { - struct kobj_gt *kg; - /* * We need to make things right with the * ABI compatibility.
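The mechanical core of this sysfs rework: replace a heap-allocated wrapper (struct kobj_gt holding a kobject plus a back-pointer) with a kobject embedded in intel_gt, so the lookup becomes plain container_of() and release() has nothing left to free; unregister then simply drops the reference. The idiom reduced to standalone C (toy kobject, not the kernel's):

#include <stddef.h>
#include <stdio.h>

struct kobject { const char *name; };	/* toy stand-in */

struct gt {
	int id;
	struct kobject sysfs;	/* embedded: memory owned by struct gt */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* No wrapper struct, no extra allocation: the lookup is arithmetic. */
static struct gt *kobj_to_gt(struct kobject *kobj)
{
	return container_of(kobj, struct gt, sysfs);
}

/* A kobject type still needs a release(); with embedded storage there
 * is simply nothing to free here. */
static void gt_release(struct kobject *kobj)
{
	printf("gt%d released\n", kobj_to_gt(kobj)->id);
}

int main(void)
{
	struct gt gt = { .id = 0, .sysfs = { "gt0" } };

	gt_release(&gt.sysfs);
	return 0;
}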
The files were originally @@ -98,25 +96,22 @@ void intel_gt_sysfs_register(struct intel_gt *gt) if (gt_is_root(gt)) intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt)); - kg = kzalloc(sizeof(*kg), GFP_KERNEL); - if (!kg) + /* init and xfer ownership to sysfs tree */ + if (kobject_init_and_add(&gt->sysfs_gt, &kobj_gt_type, + gt->i915->sysfs_gt, "gt%d", gt->info.id)) goto exit_fail; - kobject_init(&kg->base, &kobj_gt_type); - kg->gt = gt; - - /* xfer ownership to sysfs tree */ - if (kobject_add(&kg->base, gt->i915->sysfs_gt, "gt%d", gt->info.id)) - goto exit_kobj_put; - - intel_gt_sysfs_pm_init(gt, &kg->base); + intel_gt_sysfs_pm_init(gt, &gt->sysfs_gt); return; -exit_kobj_put: - kobject_put(&kg->base); - exit_fail: + kobject_put(&gt->sysfs_gt); drm_warn(&gt->i915->drm, "failed to initialize gt%d sysfs root\n", gt->info.id); } + +void intel_gt_sysfs_unregister(struct intel_gt *gt) +{ + kobject_put(&gt->sysfs_gt); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h index 9471b26752cf..a99aa7e8b01a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h @@ -13,11 +13,6 @@ struct intel_gt; -struct kobj_gt { - struct kobject base; - struct intel_gt *gt; -}; - bool is_object_gt(struct kobject *kobj); struct drm_i915_private *kobj_to_i915(struct kobject *kobj); @@ -28,6 +23,7 @@ intel_gt_create_kobj(struct intel_gt *gt, const char *name); void intel_gt_sysfs_register(struct intel_gt *gt); +void intel_gt_sysfs_unregister(struct intel_gt *gt); struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev, const char *name); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index b06611c1d4ad..edd7a3cf5f5f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -224,6 +224,9 @@ struct intel_gt { } mocs; struct intel_pxp pxp; + + /* gt/gtN sysfs */ + struct kobject sysfs_gt; }; enum intel_gt_scratch_field { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index d078f884b5e3..f0d7b57b741e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -156,7 +156,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) [INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) }, [INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) }, }; - static const struct uc_fw_platform_requirement *fw_blobs; + const struct uc_fw_platform_requirement *fw_blobs; enum intel_platform p = INTEL_INFO(i915)->platform; u32 fw_count; u8 rev = INTEL_REVID(i915); diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 8521daba212a..1e2750210831 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -166,7 +166,14 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, struct device *kdev = kobj_to_dev(kobj); struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); struct i915_gpu_coredump *gpu; - ssize_t ret; + ssize_t ret = 0; + + /* + * FIXME: Concurrent clients triggering resets and reading + clearing
 * dumps can cause inconsistent sysfs reads when a user calls in with a + * non-zero offset to complete a prior partial read but the + * gpu_coredump has been cleared or replaced.
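The second hunk in error_state_read() just below fixes a classic fixed-size read bug: once the offset reaches the message length, the handler must return 0 (EOF) rather than compute len - off past the end. A userspace-compilable sketch of the corrected (buf, count, off) handling, with a hypothetical helper rather than the sysfs API:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Serve a fixed message honoring (buf, count, off) read semantics. */
static ssize_t read_fixed_msg(char *buf, size_t count, off_t off)
{
	static const char str[] = "No error state collected\n";
	size_t len = sizeof(str) - 1;
	ssize_t ret = 0;

	if (off >= 0 && (size_t)off < len) {
		size_t n = len - (size_t)off;

		ret = (ssize_t)(count < n ? count : n);
		memcpy(buf, str + off, (size_t)ret);
	}
	return ret;	/* 0 past the end: reader sees EOF, not garbage */
}

int main(void)
{
	char buf[64];
	ssize_t n = read_fixed_msg(buf, sizeof(buf), 0);

	printf("first read: %zd bytes, second: %zd\n",
	       n, read_fixed_msg(buf, sizeof(buf), n));
	return 0;
}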
+ */ gpu = i915_first_error_state(i915); if (IS_ERR(gpu)) { @@ -178,8 +185,10 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, const char *str = "No error state collected\n"; size_t len = strlen(str); - ret = min_t(size_t, count, len - off); - memcpy(buf, str + off, ret); + if (off < len) { + ret = min_t(size_t, count, len - off); + memcpy(buf, str + off, ret); + } } return ret; @@ -259,4 +268,6 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv) device_remove_bin_file(kdev, &dpf_attrs_1); device_remove_bin_file(kdev, &dpf_attrs); + + kobject_put(dev_priv->sysfs_gt); } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 4f6db539571a..0bffb70b3c5f 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -23,6 +23,7 @@ */ #include <linux/sched/mm.h> +#include <linux/dma-fence-array.h> #include <drm/drm_gem.h> #include "display/intel_frontbuffer.h" @@ -1823,6 +1824,21 @@ int _i915_vma_move_to_active(struct i915_vma *vma, if (unlikely(err)) return err; + /* + * Reserve fence slots early to prevent an allocation after preparing + * the workload and associating fences with dma_resv. + */ + if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) { + struct dma_fence *curr; + int idx; + + dma_fence_array_for_each(curr, idx, fence) + ; + err = dma_resv_reserve_fences(vma->obj->base.resv, idx); + if (unlikely(err)) + return err; + } + if (flags & EXEC_OBJECT_WRITE) { struct intel_frontbuffer *front; @@ -1832,31 +1848,23 @@ int _i915_vma_move_to_active(struct i915_vma *vma, i915_active_add_request(&front->write, rq); intel_frontbuffer_put(front); } + } - if (!(flags & __EXEC_OBJECT_NO_RESERVE)) { - err = dma_resv_reserve_fences(vma->obj->base.resv, 1); - if (unlikely(err)) - return err; - } + if (fence) { + struct dma_fence *curr; + enum dma_resv_usage usage; + int idx; - if (fence) { - dma_resv_add_fence(vma->obj->base.resv, fence, - DMA_RESV_USAGE_WRITE); + obj->read_domains = 0; + if (flags & EXEC_OBJECT_WRITE) { + usage = DMA_RESV_USAGE_WRITE; obj->write_domain = I915_GEM_DOMAIN_RENDER; - obj->read_domains = 0; - } - } else { - if (!(flags & __EXEC_OBJECT_NO_RESERVE)) { - err = dma_resv_reserve_fences(vma->obj->base.resv, 1); - if (unlikely(err)) - return err; + } else { + usage = DMA_RESV_USAGE_READ; } - if (fence) { - dma_resv_add_fence(vma->obj->base.resv, fence, - DMA_RESV_USAGE_READ); - obj->write_domain = 0; - } + dma_fence_array_for_each(curr, idx, fence) + dma_resv_add_fence(vma->obj->base.resv, curr, usage); } if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence) diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 9c8829f945b2..f7863d6dea80 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -69,7 +69,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc, drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) { if (plane == &ipu_crtc->plane[0]->base) disable_full = true; - if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base) + if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base) disable_partial = true; } diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 7fcbc2a5b6cd..087e69b98d06 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -233,6 +233,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file) { struct panfrost_device *pfdev = dev->dev_private; +
struct panfrost_file_priv *file_priv = file->driver_priv; struct drm_panfrost_submit *args = data; struct drm_syncobj *sync_out = NULL; struct panfrost_job *job; @@ -262,12 +263,12 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, job->jc = args->jc; job->requirements = args->requirements; job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev); - job->file_priv = file->driver_priv; + job->mmu = file_priv->mmu; slot = panfrost_job_get_slot(job); ret = drm_sched_job_init(&job->base, - &job->file_priv->sched_entity[slot], + &file_priv->sched_entity[slot], NULL); if (ret) goto out_put_job; diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index fda5871aebe3..7c4208476fbd 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -201,7 +201,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) return; } - cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu); + cfg = panfrost_mmu_as_get(pfdev, job->mmu); job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head)); job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head)); @@ -435,7 +435,7 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev, job->jc = 0; } - panfrost_mmu_as_put(pfdev, job->file_priv->mmu); + panfrost_mmu_as_put(pfdev, job->mmu); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); if (signal_fence) @@ -456,7 +456,7 @@ static void panfrost_job_handle_done(struct panfrost_device *pfdev, * happen when we receive the DONE interrupt while doing a GPU reset). */ job->jc = 0; - panfrost_mmu_as_put(pfdev, job->file_priv->mmu); + panfrost_mmu_as_put(pfdev, job->mmu); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); dma_fence_signal_locked(job->done_fence); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 77e6d0e6f612..8becc1ba0eb9 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -17,7 +17,7 @@ struct panfrost_job { struct kref refcount; struct panfrost_device *pfdev; - struct panfrost_file_priv *file_priv; + struct panfrost_mmu *mmu; /* Fence to be signaled by IRQ handler when the job is complete. 
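The panfrost change above is a lifetime fix in miniature: a queued job used to keep a pointer to the whole per-file context, which is torn down on fd close while jobs may still be in flight; holding a reference to just the MMU context the job actually dereferences removes that window. Schematically, with toy refcounting and illustrative names:

#include <stdlib.h>

struct mmu_ctx {
	int refcount;
	/* ... address-space state ... */
};

struct file_ctx {
	struct mmu_ctx *mmu;
	/* ... scheduler entities, freed on fd close ... */
};

struct job {
	struct mmu_ctx *mmu;	/* was: struct file_ctx *file_priv */
};

static struct mmu_ctx *mmu_get(struct mmu_ctx *m)
{
	m->refcount++;
	return m;
}

static void mmu_put(struct mmu_ctx *m)
{
	if (--m->refcount == 0)
		free(m);
}

/* The job pins only what it will touch later; the file context can
 * disappear underneath it without consequence. */
static void job_init(struct job *job, struct file_ctx *file)
{
	job->mmu = mmu_get(file->mmu);
}

static void job_done(struct job *job)
{
	mmu_put(job->mmu);
}

int main(void)
{
	struct mmu_ctx *m = calloc(1, sizeof(*m));
	struct file_ctx file = { .mmu = mmu_get(m) };
	struct job job;

	job_init(&job, &file);
	mmu_put(file.mmu);	/* fd closed: the file's reference is gone */
	job_done(&job);		/* the job still holds its own reference */
	return 0;
}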
*/ struct dma_fence *done_fence; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 75d308ec173d..406e9c324e76 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -109,11 +109,11 @@ void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo, return; spin_lock(&bo->bdev->lru_lock); - if (bo->bulk_move && bo->resource) - ttm_lru_bulk_move_del(bo->bulk_move, bo->resource); + if (bo->resource) + ttm_resource_del_bulk_move(bo->resource, bo); bo->bulk_move = bulk; - if (bo->bulk_move && bo->resource) - ttm_lru_bulk_move_add(bo->bulk_move, bo->resource); + if (bo->resource) + ttm_resource_add_bulk_move(bo->resource, bo); spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_bo_set_bulk_move); @@ -689,8 +689,11 @@ void ttm_bo_pin(struct ttm_buffer_object *bo) { dma_resv_assert_held(bo->base.resv); WARN_ON_ONCE(!kref_read(&bo->kref)); - if (!(bo->pin_count++) && bo->bulk_move && bo->resource) - ttm_lru_bulk_move_del(bo->bulk_move, bo->resource); + spin_lock(&bo->bdev->lru_lock); + if (bo->resource) + ttm_resource_del_bulk_move(bo->resource, bo); + ++bo->pin_count; + spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_bo_pin); @@ -707,8 +710,11 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo) if (WARN_ON_ONCE(!bo->pin_count)) return; - if (!(--bo->pin_count) && bo->bulk_move && bo->resource) - ttm_lru_bulk_move_add(bo->bulk_move, bo->resource); + spin_lock(&bo->bdev->lru_lock); + --bo->pin_count; + if (bo->resource) + ttm_resource_add_bulk_move(bo->resource, bo); + spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_bo_unpin); diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index a0562ab386f5..e7147e304637 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -156,8 +156,12 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, ttm_resource_manager_for_each_res(man, &cursor, res) { struct ttm_buffer_object *bo = res->bo; - uint32_t num_pages = PFN_UP(bo->base.size); + uint32_t num_pages; + if (!bo) + continue; + + num_pages = PFN_UP(bo->base.size); ret = ttm_bo_swapout(bo, ctx, gfp_flags); /* ttm_bo_swapout has dropped the lru_lock */ if (!ret) diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 65889b3caf50..20f9adcc3235 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -91,8 +91,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos, } /* Add the resource to a bulk_move cursor */ -void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk, - struct ttm_resource *res) +static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk, + struct ttm_resource *res) { struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res); @@ -105,8 +105,8 @@ void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk, } /* Remove the resource from a bulk_move range */ -void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, - struct ttm_resource *res) +static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, + struct ttm_resource *res) { struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res); @@ -122,6 +122,22 @@ void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, } } +/* Add the resource to a bulk move if the BO is configured for it */ +void ttm_resource_add_bulk_move(struct ttm_resource *res, + struct ttm_buffer_object *bo) +{ + if (bo->bulk_move && !bo->pin_count) + ttm_lru_bulk_move_add(bo->bulk_move, res); +} + +/* 
Remove the resource from a bulk move if the BO is configured for it */ +void ttm_resource_del_bulk_move(struct ttm_resource *res, + struct ttm_buffer_object *bo) +{ + if (bo->bulk_move && !bo->pin_count) + ttm_lru_bulk_move_del(bo->bulk_move, res); +} + /* Move a resource to the LRU or bulk tail */ void ttm_resource_move_to_lru_tail(struct ttm_resource *res) { @@ -169,15 +185,14 @@ void ttm_resource_init(struct ttm_buffer_object *bo, res->bus.is_iomem = false; res->bus.caching = ttm_cached; res->bo = bo; - INIT_LIST_HEAD(&res->lru); man = ttm_manager_type(bo->bdev, place->mem_type); spin_lock(&bo->bdev->lru_lock); - man->usage += res->num_pages << PAGE_SHIFT; - if (bo->bulk_move) - ttm_lru_bulk_move_add(bo->bulk_move, res); + if (bo->pin_count) + list_add_tail(&res->lru, &bo->bdev->pinned); else - ttm_resource_move_to_lru_tail(res); + list_add_tail(&res->lru, &man->lru[bo->priority]); + man->usage += res->num_pages << PAGE_SHIFT; spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_resource_init); @@ -210,8 +225,16 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo, { struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, place->mem_type); + int ret; + + ret = man->func->alloc(man, bo, place, res_ptr); + if (ret) + return ret; - return man->func->alloc(man, bo, place, res_ptr); + spin_lock(&bo->bdev->lru_lock); + ttm_resource_add_bulk_move(*res_ptr, bo); + spin_unlock(&bo->bdev->lru_lock); + return 0; } void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res) @@ -221,12 +244,9 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res) if (!*res) return; - if (bo->bulk_move) { - spin_lock(&bo->bdev->lru_lock); - ttm_lru_bulk_move_del(bo->bulk_move, *res); - spin_unlock(&bo->bdev->lru_lock); - } - + spin_lock(&bo->bdev->lru_lock); + ttm_resource_del_bulk_move(*res, bo); + spin_unlock(&bo->bdev->lru_lock); man = ttm_manager_type(bo->bdev, (*res)->mem_type); man->func->free(man, *res); *res = NULL; diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index 978ee2aab2d4..e0bc73124196 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c @@ -199,7 +199,8 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device, if (!input_device->hid_desc) goto cleanup; - input_device->report_desc_size = desc->desc[0].wDescriptorLength; + input_device->report_desc_size = le16_to_cpu( + desc->desc[0].wDescriptorLength); if (input_device->report_desc_size == 0) { input_device->dev_info_status = -EINVAL; goto cleanup; @@ -217,7 +218,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device, memcpy(input_device->report_desc, ((unsigned char *)desc) + desc->bLength, - desc->desc[0].wDescriptorLength); + le16_to_cpu(desc->desc[0].wDescriptorLength)); /* Send the ack */ memset(&ack, 0, sizeof(struct mousevsc_prt_msg)); diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index b60f13481bdc..5b120402d405 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -21,6 +21,7 @@ #include <linux/cpu.h> #include <linux/hyperv.h> #include <asm/mshyperv.h> +#include <linux/sched/isolation.h> #include "hyperv_vmbus.h" @@ -638,6 +639,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) */ if (newchannel->offermsg.offer.sub_channel_index == 0) { mutex_unlock(&vmbus_connection.channel_mutex); + cpus_read_unlock(); /* * Don't call free_channel(), because newchannel->kobj * is not initialized yet. 
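The vmbus_process_offer() fix above is a classic unbalanced-lock bug: the sub-channel early return sat inside a cpus_read_lock() section but only dropped the channel mutex, leaking the CPU hotplug read lock. A minimal sketch of the invariant, with hypothetical stub helpers standing in for the real VMBus channel handling:

#include <linux/cpu.h>

/* Hypothetical stand-ins for the real channel operations. */
static int bind_channel_to_cpu(void *chan) { return 0; }
static int register_channel_kobj(void *chan) { return 0; }

static int process_offer(void *chan)
{
	int err;

	cpus_read_lock();	/* CPUs must not come up or go down here */

	err = bind_channel_to_cpu(chan);
	if (err)
		goto out_unlock;	/* every early exit drops the lock */

	err = register_channel_kobj(chan);

out_unlock:
	cpus_read_unlock();
	return err;
}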
@@ -728,16 +730,20 @@ static void init_vp_index(struct vmbus_channel *channel) u32 i, ncpu = num_online_cpus(); cpumask_var_t available_mask; struct cpumask *allocated_mask; + const struct cpumask *hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ); u32 target_cpu; int numa_node; if (!perf_chn || - !alloc_cpumask_var(&available_mask, GFP_KERNEL)) { + !alloc_cpumask_var(&available_mask, GFP_KERNEL) || + cpumask_empty(hk_mask)) { /* * If the channel is not a performance critical * channel, bind it to VMBUS_CONNECT_CPU. * In case alloc_cpumask_var() fails, bind it to * VMBUS_CONNECT_CPU. + * If all the cpus are isolated, bind it to + * VMBUS_CONNECT_CPU. */ channel->target_cpu = VMBUS_CONNECT_CPU; if (perf_chn) @@ -758,17 +764,19 @@ static void init_vp_index(struct vmbus_channel *channel) } allocated_mask = &hv_context.hv_numa_map[numa_node]; - if (cpumask_equal(allocated_mask, cpumask_of_node(numa_node))) { +retry: + cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node)); + cpumask_and(available_mask, available_mask, hk_mask); + + if (cpumask_empty(available_mask)) { /* * We have cycled through all the CPUs in the node; * reset the allocated map. */ cpumask_clear(allocated_mask); + goto retry; } - cpumask_xor(available_mask, allocated_mask, - cpumask_of_node(numa_node)); - target_cpu = cpumask_first(available_mask); cpumask_set_cpu(target_cpu, allocated_mask); diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index c698592b83e4..d35b60c06114 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -394,7 +394,7 @@ kvp_send_key(struct work_struct *dummy) in_msg = kvp_transaction.kvp_msg; /* - * The key/value strings sent from the host are encoded in + * The key/value strings sent from the host are encoded * in utf16; convert it to utf8 strings. * The host assures us that the utf16 strings will not exceed * the max lengths specified. We will however, reserve room diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 714d549b7b46..547ae334e5cd 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -21,6 +21,7 @@ #include <linux/kernel_stat.h> #include <linux/clockchips.h> #include <linux/cpu.h> +#include <linux/sched/isolation.h> #include <linux/sched/task_stack.h> #include <linux/delay.h> @@ -1770,6 +1771,9 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel, if (target_cpu >= nr_cpumask_bits) return -EINVAL; + if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ))) + return -EINVAL; + /* No CPUs should come up or down during this. 
*/ cpus_read_lock(); diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c index 57e11b2bab74..3633ab691662 100644 --- a/drivers/hwmon/asus-ec-sensors.c +++ b/drivers/hwmon/asus-ec-sensors.c @@ -259,7 +259,7 @@ static const struct ec_board_info board_info[] = { }, { .board_names = { - "ROG CROSSHAIR VIII FORMULA" + "ROG CROSSHAIR VIII FORMULA", "ROG CROSSHAIR VIII HERO", "ROG CROSSHAIR VIII HERO (WI-FI)", }, diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index d78f4bebc718..ea070b91e5b9 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -1228,10 +1228,15 @@ EXPORT_SYMBOL_GPL(occ_setup); void occ_shutdown(struct occ *occ) { + mutex_lock(&occ->lock); + occ_shutdown_sysfs(occ); if (occ->hwmon) hwmon_device_unregister(occ->hwmon); + occ->hwmon = NULL; + + mutex_unlock(&occ->lock); } EXPORT_SYMBOL_GPL(occ_shutdown); diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index e7d316b1401a..c023b691441e 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -477,9 +477,6 @@ int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare) { int ret; - if (IS_ERR(dev->clk)) - return PTR_ERR(dev->clk); - if (prepare) { /* Optional interface clock */ ret = clk_prepare_enable(dev->pclk); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 70ade5306e45..ba043b547393 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -320,8 +320,17 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) goto exit_reset; } - dev->clk = devm_clk_get(&pdev->dev, NULL); - if (!i2c_dw_prepare_clk(dev, true)) { + dev->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(dev->clk)) { + ret = PTR_ERR(dev->clk); + goto exit_reset; + } + + ret = i2c_dw_prepare_clk(dev, true); + if (ret) + goto exit_reset; + + if (dev->clk) { u64 clk_khz; dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz; diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index bdecb78bfc26..8e6985354fd5 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -1420,17 +1420,22 @@ static int mtk_i2c_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "Request I2C IRQ %d fail\n", irq); - return ret; + goto err_bulk_unprepare; } i2c_set_adapdata(&i2c->adap, i2c); ret = i2c_add_adapter(&i2c->adap); if (ret) - return ret; + goto err_bulk_unprepare; platform_set_drvdata(pdev, i2c); return 0; + +err_bulk_unprepare: + clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks); + + return ret; } static int mtk_i2c_remove(struct platform_device *pdev) diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 5960ccde6574..aede9d551130 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -2372,8 +2372,7 @@ static struct platform_driver npcm_i2c_bus_driver = { static int __init npcm_i2c_init(void) { npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL); - platform_driver_register(&npcm_i2c_bus_driver); - return 0; + return platform_driver_register(&npcm_i2c_bus_driver); } module_init(npcm_i2c_init); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index b9bb94bd0f67..424ef470223d 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -115,6 +115,18 @@ static unsigned int mwait_substates __initdata; #define 
flg2MWAIT(flags) (((flags) >> 24) & 0xFF) #define MWAIT2flg(eax) ((eax & 0xFF) << 24) +static __always_inline int __intel_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + struct cpuidle_state *state = &drv->states[index]; + unsigned long eax = flg2MWAIT(state->flags); + unsigned long ecx = 1; /* break on interrupt flag */ + + mwait_idle_with_hints(eax, ecx); + + return index; +} + /** * intel_idle - Ask the processor to enter the given idle state. * @dev: cpuidle device of the target CPU. @@ -132,16 +144,19 @@ static unsigned int mwait_substates __initdata; static __cpuidle int intel_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - struct cpuidle_state *state = &drv->states[index]; - unsigned long eax = flg2MWAIT(state->flags); - unsigned long ecx = 1; /* break on interrupt flag */ + return __intel_idle(dev, drv, index); +} - if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) - local_irq_enable(); +static __cpuidle int intel_idle_irq(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + int ret; - mwait_idle_with_hints(eax, ecx); + raw_local_irq_enable(); + ret = __intel_idle(dev, drv, index); + raw_local_irq_disable(); - return index; + return ret; } /** @@ -1801,6 +1816,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) /* Structure copy. */ drv->states[drv->state_count] = cpuidle_state_table[cstate]; + if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE) + drv->states[drv->state_count].enter = intel_idle_irq; + if ((disabled_states_mask & BIT(drv->state_count)) || ((icpu->use_acpi || force_use_acpi) && intel_idle_off_by_default(mwait_hint) && diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 505a032e2786..9dcf3f51f2dd 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -402,6 +402,7 @@ config JOYSTICK_N64 config JOYSTICK_SENSEHAT tristate "Raspberry Pi Sense HAT joystick" depends on INPUT && I2C + depends on HAS_IOMEM select MFD_SIMPLE_MFD_I2C help Say Y here if you want to enable the driver for the diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index cbb1599a520e..480476121c01 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -85,13 +85,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = { }, { /* - * Lenovo Yoga Tab2 1051L, something messes with the home-button + * Lenovo Yoga Tab2 1051F/1051L, something messes with the home-button * IRQ settings, leading to a non-working home-button.
*/ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "60073"), - DMI_MATCH(DMI_PRODUCT_VERSION, "1051L"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1051"), }, }, {} /* Terminating entry */ diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 59a14505b9cd..ca150618d32f 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -942,17 +942,22 @@ static int bcm5974_probe(struct usb_interface *iface, if (!dev->tp_data) goto err_free_bt_buffer; - if (dev->bt_urb) + if (dev->bt_urb) { usb_fill_int_urb(dev->bt_urb, udev, usb_rcvintpipe(udev, cfg->bt_ep), dev->bt_data, dev->cfg.bt_datalen, bcm5974_irq_button, dev, 1); + dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + } + usb_fill_int_urb(dev->tp_urb, udev, usb_rcvintpipe(udev, cfg->tp_ep), dev->tp_data, dev->cfg.tp_datalen, bcm5974_irq_trackpad, dev, 1); + dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + /* create bcm5974 device */ usb_make_path(udev, dev->phys, sizeof(dev->phys)); strlcat(dev->phys, "/input0", sizeof(dev->phys)); diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 4ab1038b5482..1f23a6be7d88 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -298,7 +298,7 @@ config XTENSA_MX config XILINX_INTC bool "Xilinx Interrupt Controller IP" - depends on MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP + depends on OF select IRQ_DOMAIN help Support for the Xilinx Interrupt Controller IP core. diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c index 12dd48727a15..5ac83185ff47 100644 --- a/drivers/irqchip/irq-apple-aic.c +++ b/drivers/irqchip/irq-apple-aic.c @@ -1035,6 +1035,7 @@ static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff) continue; cpu = of_cpu_node_to_id(cpu_node); + of_node_put(cpu_node); if (WARN_ON(cpu < 0)) continue; @@ -1143,6 +1144,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p for_each_child_of_node(affs, chld) build_fiq_affinity(irqc, chld); } + of_node_put(affs); set_handle_irq(aic_handle_irq); set_handle_fiq(aic_handle_fiq); diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c index b4c1924f0255..38fab02ffe9d 100644 --- a/drivers/irqchip/irq-gic-realview.c +++ b/drivers/irqchip/irq-gic-realview.c @@ -57,6 +57,7 @@ realview_gic_of_init(struct device_node *node, struct device_node *parent) /* The PB11MPCore GIC needs to be configured in the syscon */ map = syscon_node_to_regmap(np); + of_node_put(np); if (!IS_ERR(map)) { /* new irq mode with no DCC */ regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 2be8dea6b6b0..5c1cf907ee68 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1932,7 +1932,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); if (!gic_data.ppi_descs) - return; + goto out_put_node; nr_parts = of_get_child_count(parts_node); @@ -1973,12 +1973,15 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) continue; cpu = of_cpu_node_to_id(cpu_node); - if (WARN_ON(cpu < 0)) + if (WARN_ON(cpu < 0)) { + of_node_put(cpu_node); continue; + } pr_cont("%pOF[%d] ", cpu_node, cpu); cpumask_set_cpu(cpu, &part->mask); + of_node_put(cpu_node); } pr_cont("}\n"); diff --git a/drivers/irqchip/irq-loongson-liointc.c 
b/drivers/irqchip/irq-loongson-liointc.c index aed88857d90f..8d05d8bcf56f 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -39,6 +39,12 @@ #define LIOINTC_ERRATA_IRQ 10 +#if defined(CONFIG_MIPS) +#define liointc_core_id get_ebase_cpunum() +#else +#define liointc_core_id get_csr_cpuid() +#endif + struct liointc_handler_data { struct liointc_priv *priv; u32 parent_int_map; @@ -57,7 +63,7 @@ static void liointc_chained_handle_irq(struct irq_desc *desc) struct liointc_handler_data *handler = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_chip_generic *gc = handler->priv->gc; - int core = cpu_logical_map(smp_processor_id()) % LIOINTC_NUM_CORES; + int core = liointc_core_id % LIOINTC_NUM_CORES; u32 pending; chained_irq_enter(chip, desc); diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c index 50a56820c99b..56bf502d9c67 100644 --- a/drivers/irqchip/irq-realtek-rtl.c +++ b/drivers/irqchip/irq-realtek-rtl.c @@ -134,9 +134,9 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do if (!cpu_ictl) return -EINVAL; ret = of_property_read_u32(cpu_ictl, "#interrupt-cells", &tmp); + of_node_put(cpu_ictl); if (ret || tmp != 1) return -EINVAL; - of_node_put(cpu_ictl); cpu_int = be32_to_cpup(imap + 2); if (cpu_int > 7 || cpu_int < 2) diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c index 89121b39be26..716b1bb88bf2 100644 --- a/drivers/irqchip/irq-uniphier-aidet.c +++ b/drivers/irqchip/irq-uniphier-aidet.c @@ -237,6 +237,7 @@ static const struct of_device_id uniphier_aidet_match[] = { { .compatible = "socionext,uniphier-ld11-aidet" }, { .compatible = "socionext,uniphier-ld20-aidet" }, { .compatible = "socionext,uniphier-pxs3-aidet" }, + { .compatible = "socionext,uniphier-nx1-aidet" }, { /* sentinel */ } }; diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index d21648a923ea..54c0473a51dd 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -33,6 +33,14 @@ struct dm_kobject_holder { * access their members! */ +/* + * For mempools pre-allocation at the table loading time. + */ +struct dm_md_mempools { + struct bio_set bs; + struct bio_set io_bs; +}; + struct mapped_device { struct mutex suspend_lock; @@ -110,8 +118,7 @@ struct mapped_device { /* * io objects are allocated from here. */ - struct bio_set io_bs; - struct bio_set bs; + struct dm_md_mempools *mempools; /* kobject and completion */ struct dm_kobject_holder kobj_holder; diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 06f328928a7f..2dda05aada23 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -415,8 +415,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, /* * Work out how many "unsigned long"s we need to hold the bitset. 
*/ - bitset_size = dm_round_up(region_count, - sizeof(*lc->clean_bits) << BYTE_SHIFT); + bitset_size = dm_round_up(region_count, BITS_PER_LONG); bitset_size >>= BYTE_SHIFT; lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5e41fbae3f6b..9526ccbedafb 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3725,7 +3725,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv, if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) { if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev, false); + md_reap_sync_thread(mddev); } } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle) return -EBUSY; diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 6087cdcaad46..a83b98a8d2a9 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -319,7 +319,7 @@ static int setup_clone(struct request *clone, struct request *rq, { int r; - r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask, + r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask, dm_rq_bio_constructor, tio); if (r) return r; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 0e833a154b31..bd539afbfe88 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1038,17 +1038,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * return 0; } -void dm_table_free_md_mempools(struct dm_table *t) -{ - dm_free_md_mempools(t->mempools); - t->mempools = NULL; -} - -struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t) -{ - return t->mempools; -} - static int setup_indexes(struct dm_table *t) { int i; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index dfb0a551bd88..b6b25d319ef7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -136,14 +136,6 @@ static int get_swap_bios(void) return latch; } -/* - * For mempools pre-allocation at the table loading time. 
- */ -struct dm_md_mempools { - struct bio_set bs; - struct bio_set io_bs; -}; - struct table_device { struct list_head list; refcount_t count; @@ -563,6 +555,10 @@ static void dm_start_io_acct(struct dm_io *io, struct bio *clone) unsigned long flags; /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */ spin_lock_irqsave(&io->lock, flags); + if (dm_io_flagged(io, DM_IO_ACCOUNTED)) { + spin_unlock_irqrestore(&io->lock, flags); + return; + } dm_io_set_flag(io, DM_IO_ACCOUNTED); spin_unlock_irqrestore(&io->lock, flags); } @@ -581,7 +577,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) struct dm_target_io *tio; struct bio *clone; - clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->io_bs); + clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); /* Set default bdev, but target must bio_set_dev() before issuing IO */ clone->bi_bdev = md->disk->part0; @@ -628,7 +624,8 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, } else { struct mapped_device *md = ci->io->md; - clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, &md->bs); + clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, + &md->mempools->bs); if (!clone) return NULL; /* Set default bdev, but target must bio_set_dev() before issuing IO */ @@ -718,18 +715,18 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) } static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md, - int *srcu_idx, struct bio *bio) + int *srcu_idx, unsigned bio_opf) { - if (bio->bi_opf & REQ_NOWAIT) + if (bio_opf & REQ_NOWAIT) return dm_get_live_table_fast(md); else return dm_get_live_table(md, srcu_idx); } static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx, - struct bio *bio) + unsigned bio_opf) { - if (bio->bi_opf & REQ_NOWAIT) + if (bio_opf & REQ_NOWAIT) dm_put_live_table_fast(md); else dm_put_live_table(md, srcu_idx); @@ -1023,23 +1020,19 @@ static void clone_endio(struct bio *bio) struct dm_io *io = tio->io; struct mapped_device *md = io->md; - if (likely(bio->bi_bdev != md->disk->part0)) { - struct request_queue *q = bdev_get_queue(bio->bi_bdev); - - if (unlikely(error == BLK_STS_TARGET)) { - if (bio_op(bio) == REQ_OP_DISCARD && - !bdev_max_discard_sectors(bio->bi_bdev)) - disable_discard(md); - else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && - !q->limits.max_write_zeroes_sectors) - disable_write_zeroes(md); - } - - if (static_branch_unlikely(&zoned_enabled) && - unlikely(blk_queue_is_zoned(q))) - dm_zone_endio(io, bio); + if (unlikely(error == BLK_STS_TARGET)) { + if (bio_op(bio) == REQ_OP_DISCARD && + !bdev_max_discard_sectors(bio->bi_bdev)) + disable_discard(md); + else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && + !bdev_write_zeroes_sectors(bio->bi_bdev)) + disable_write_zeroes(md); } + if (static_branch_unlikely(&zoned_enabled) && + unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev)))) + dm_zone_endio(io, bio); + if (endio) { int r = endio(ti, bio, &error); switch (r) { @@ -1620,7 +1613,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) ti = dm_table_find_target(ci->map, ci->sector); if (unlikely(!ti)) return BLK_STS_IOERR; - else if (unlikely(ci->is_abnormal_io)) + + if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) && + unlikely(!dm_target_supports_nowait(ti->type))) + return BLK_STS_NOTSUPP; + + if (unlikely(ci->is_abnormal_io)) return __process_abnormal_io(ci, ti); /* @@ -1722,8 +1720,9 @@ static void dm_submit_bio(struct bio *bio) struct mapped_device *md = 
bio->bi_bdev->bd_disk->private_data; int srcu_idx; struct dm_table *map; + unsigned bio_opf = bio->bi_opf; - map = dm_get_live_table_bio(md, &srcu_idx, bio); + map = dm_get_live_table_bio(md, &srcu_idx, bio_opf); /* If suspended, or map not yet available, queue this IO for later */ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || @@ -1739,7 +1738,7 @@ static void dm_submit_bio(struct bio *bio) dm_split_and_process_bio(md, map, bio); out: - dm_put_live_table_bio(md, srcu_idx, bio); + dm_put_live_table_bio(md, srcu_idx, bio_opf); } static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob, @@ -1876,8 +1875,7 @@ static void cleanup_mapped_device(struct mapped_device *md) { if (md->wq) destroy_workqueue(md->wq); - bioset_exit(&md->bs); - bioset_exit(&md->io_bs); + dm_free_md_mempools(md->mempools); if (md->dax_dev) { dax_remove_host(md->disk); @@ -2049,48 +2047,6 @@ static void free_dev(struct mapped_device *md) kvfree(md); } -static int __bind_mempools(struct mapped_device *md, struct dm_table *t) -{ - struct dm_md_mempools *p = dm_table_get_md_mempools(t); - int ret = 0; - - if (dm_table_bio_based(t)) { - /* - * The md may already have mempools that need changing. - * If so, reload bioset because front_pad may have changed - * because a different table was loaded. - */ - bioset_exit(&md->bs); - bioset_exit(&md->io_bs); - - } else if (bioset_initialized(&md->bs)) { - /* - * There's no need to reload with request-based dm - * because the size of front_pad doesn't change. - * Note for future: If you are to reload bioset, - * prep-ed requests in the queue may refer - * to bio from the old bioset, so you must walk - * through the queue to unprep. - */ - goto out; - } - - BUG_ON(!p || - bioset_initialized(&md->bs) || - bioset_initialized(&md->io_bs)); - - ret = bioset_init_from_src(&md->bs, &p->bs); - if (ret) - goto out; - ret = bioset_init_from_src(&md->io_bs, &p->io_bs); - if (ret) - bioset_exit(&md->bs); -out: - /* mempool bind completed, no longer need any mempools in the table */ - dm_table_free_md_mempools(t); - return ret; -} - /* * Bind a table to the device. */ @@ -2144,12 +2100,28 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, * immutable singletons - used to optimize dm_mq_queue_rq. */ md->immutable_target = dm_table_get_immutable_target(t); - } - ret = __bind_mempools(md, t); - if (ret) { - old_map = ERR_PTR(ret); - goto out; + /* + * There is no need to reload with request-based dm because the + * size of front_pad doesn't change. + * + * Note for future: If you are to reload bioset, prep-ed + * requests in the queue may refer to bio from the old bioset, + * so you must walk through the queue to unprep. + */ + if (!md->mempools) { + md->mempools = t->mempools; + t->mempools = NULL; + } + } else { + /* + * The md may already have mempools that need changing. + * If so, reload bioset because front_pad may have changed + * because a different table was loaded. 
+ */ + dm_free_md_mempools(md->mempools); + md->mempools = t->mempools; + t->mempools = NULL; } ret = dm_table_set_restrictions(t, md->queue, limits); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 3f89664fea01..a8405ce305a9 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -71,8 +71,6 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t); struct dm_target *dm_table_get_wildcard_target(struct dm_table *t); bool dm_table_bio_based(struct dm_table *t); bool dm_table_request_based(struct dm_table *t); -void dm_table_free_md_mempools(struct dm_table *t); -struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); void dm_lock_md_type(struct mapped_device *md); void dm_unlock_md_type(struct mapped_device *md); diff --git a/drivers/md/md.c b/drivers/md/md.c index 8273ac5eef06..c7ecb0bffda0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4831,7 +4831,7 @@ action_store(struct mddev *mddev, const char *page, size_t len) flush_workqueue(md_misc_wq); if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev, true); + md_reap_sync_thread(mddev); } mddev_unlock(mddev); } @@ -6197,7 +6197,7 @@ static void __md_stop_writes(struct mddev *mddev) flush_workqueue(md_misc_wq); if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev, true); + md_reap_sync_thread(mddev); } del_timer_sync(&mddev->safemode_timer); @@ -9303,7 +9303,7 @@ void md_check_recovery(struct mddev *mddev) * ->spare_active and clear saved_raid_disk */ set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev, true); + md_reap_sync_thread(mddev); clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); @@ -9338,7 +9338,7 @@ void md_check_recovery(struct mddev *mddev) goto unlock; } if (mddev->sync_thread) { - md_reap_sync_thread(mddev, true); + md_reap_sync_thread(mddev); goto unlock; } /* Set RUNNING before clearing NEEDED to avoid @@ -9411,18 +9411,14 @@ void md_check_recovery(struct mddev *mddev) } EXPORT_SYMBOL(md_check_recovery); -void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held) +void md_reap_sync_thread(struct mddev *mddev) { struct md_rdev *rdev; sector_t old_dev_sectors = mddev->dev_sectors; bool is_reshaped = false; - if (reconfig_mutex_held) - mddev_unlock(mddev); /* resync has finished, collect result */ md_unregister_thread(&mddev->sync_thread); - if (reconfig_mutex_held) - mddev_lock_nointr(mddev); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && mddev->degraded != mddev->raid_disks) { diff --git a/drivers/md/md.h b/drivers/md/md.h index 5f62c46ac2d3..cf2cbb17acbd 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -719,7 +719,7 @@ extern struct md_thread *md_register_thread( extern void md_unregister_thread(struct md_thread **threadp); extern void md_wakeup_thread(struct md_thread *thread); extern void md_check_recovery(struct mddev *mddev); -extern void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held); +extern void md_reap_sync_thread(struct mddev *mddev); extern int mddev_init_writes_pending(struct mddev *mddev); extern bool md_write_start(struct mddev *mddev, struct bio *bi); extern void md_write_inc(struct mddev *mddev, struct bio *bi); diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 973e2e06f19c..0a2e4806b1ec 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c 
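The __bind() rework above (dm.c) replaces bioset_init_from_src() copies with a plain ownership transfer: the table keeps the mempools it pre-allocated at load time until bind, then hands the pointer to the mapped_device. A minimal sketch of that pattern under the diff's own rules (request-based devices keep their first mempools forever, bio-based devices swap per table); the wrapper function is illustrative, dm_free_md_mempools() is the existing dm helper:

static void bind_mempools(struct mapped_device *md, struct dm_table *t,
			  bool request_based)
{
	if (request_based) {
		/*
		 * Never reload for request-based dm: prep'ed requests in
		 * the queue may still reference bios from the old bioset.
		 */
		if (md->mempools)
			return;
	} else {
		/* Bio-based: front_pad may differ, so drop the old set. */
		dm_free_md_mempools(md->mempools);
	}

	md->mempools = t->mempools;
	t->mempools = NULL;	/* ownership moved; the table must not free it */
}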
@@ -629,9 +629,9 @@ static void ppl_do_flush(struct ppl_io_unit *io) if (bdev) { struct bio *bio; - bio = bio_alloc_bioset(bdev, 0, GFP_NOIO, + bio = bio_alloc_bioset(bdev, 0, REQ_OP_WRITE | REQ_PREFLUSH, - &ppl_conf->flush_bs); + GFP_NOIO, &ppl_conf->flush_bs); bio->bi_private = io; bio->bi_end_io = ppl_flush_endio; diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index d6cd5537126c..69f9b0336410 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -232,9 +232,9 @@ static int ssc_probe(struct platform_device *pdev) clk_disable_unprepare(ssc->clk); ssc->irq = platform_get_irq(pdev, 0); - if (!ssc->irq) { + if (ssc->irq < 0) { dev_dbg(&pdev->dev, "could not get irq\n"); - return -ENXIO; + return ssc->irq; } mutex_lock(&user_lock); diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index 749cc5a46d13..b1e76030cafd 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -407,6 +407,8 @@ static void rts5261_init_from_hw(struct rtsx_pcr *pcr) // default setting_reg1 = PCR_SETTING_REG1; setting_reg2 = PCR_SETTING_REG2; + } else { + return; } pci_read_config_dword(pdev, setting_reg2, &lval2); diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 8d169a35cf13..c9c56fd194c1 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -79,6 +79,11 @@ static int at25_ee_read(void *priv, unsigned int offset, { struct at25_data *at25 = priv; char *buf = val; + size_t max_chunk = spi_max_transfer_size(at25->spi); + size_t num_msgs = DIV_ROUND_UP(count, max_chunk); + size_t nr_bytes = 0; + unsigned int msg_offset; + size_t msg_count; u8 *cp; ssize_t status; struct spi_transfer t[2]; @@ -92,54 +97,59 @@ static int at25_ee_read(void *priv, unsigned int offset, if (unlikely(!count)) return -EINVAL; - cp = at25->command; + msg_offset = (unsigned int)offset; + msg_count = min(count, max_chunk); + while (num_msgs) { + cp = at25->command; - instr = AT25_READ; - if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) - if (offset >= BIT(at25->addrlen * 8)) - instr |= AT25_INSTR_BIT3; + instr = AT25_READ; + if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) + if (msg_offset >= BIT(at25->addrlen * 8)) + instr |= AT25_INSTR_BIT3; - mutex_lock(&at25->lock); + mutex_lock(&at25->lock); - *cp++ = instr; - - /* 8/16/24-bit address is written MSB first */ - switch (at25->addrlen) { - default: /* case 3 */ - *cp++ = offset >> 16; - fallthrough; - case 2: - *cp++ = offset >> 8; - fallthrough; - case 1: - case 0: /* can't happen: for better code generation */ - *cp++ = offset >> 0; - } + *cp++ = instr; - spi_message_init(&m); - memset(t, 0, sizeof(t)); + /* 8/16/24-bit address is written MSB first */ + switch (at25->addrlen) { + default: /* case 3 */ + *cp++ = msg_offset >> 16; + fallthrough; + case 2: + *cp++ = msg_offset >> 8; + fallthrough; + case 1: + case 0: /* can't happen: for better code generation */ + *cp++ = msg_offset >> 0; + } - t[0].tx_buf = at25->command; - t[0].len = at25->addrlen + 1; - spi_message_add_tail(&t[0], &m); + spi_message_init(&m); + memset(t, 0, sizeof(t)); - t[1].rx_buf = buf; - t[1].len = count; - spi_message_add_tail(&t[1], &m); + t[0].tx_buf = at25->command; + t[0].len = at25->addrlen + 1; + spi_message_add_tail(&t[0], &m); - /* - * Read it all at once. - * - * REVISIT that's potentially a problem with large chips, if - * other devices on the bus need to be accessed regularly or - * this chip is clocked very slowly. 
- */ - status = spi_sync(at25->spi, &m); - dev_dbg(&at25->spi->dev, "read %zu bytes at %d --> %zd\n", - count, offset, status); + t[1].rx_buf = buf + nr_bytes; + t[1].len = msg_count; + spi_message_add_tail(&t[1], &m); - mutex_unlock(&at25->lock); - return status; + status = spi_sync(at25->spi, &m); + + mutex_unlock(&at25->lock); + + if (status) + return status; + + --num_msgs; + msg_offset += msg_count; + nr_bytes += msg_count; + } + + dev_dbg(&at25->spi->dev, "read %zu bytes at %d\n", + count, offset); + return 0; } /* Read extra registers as ID or serial number */ @@ -190,6 +200,7 @@ ATTRIBUTE_GROUPS(sernum); static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) { struct at25_data *at25 = priv; + size_t maxsz = spi_max_transfer_size(at25->spi); const char *buf = val; int status = 0; unsigned buf_size; @@ -253,6 +264,8 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) segment = buf_size - (offset % buf_size); if (segment > count) segment = count; + if (segment > maxsz) + segment = maxsz; memcpy(cp, buf, segment); status = spi_write(at25->spi, bounce, segment + at25->addrlen + 1); diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index cebcca6d6d3e..cf2b8261da14 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1351,7 +1351,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) if (dev->dev_state != MEI_DEV_INIT_CLIENTS || dev->hbm_state != MEI_HBM_CAP_SETUP) { - if (dev->dev_state == MEI_DEV_POWER_DOWN) { + if (dev->dev_state == MEI_DEV_POWER_DOWN || + dev->dev_state == MEI_DEV_POWERING_DOWN) { dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n"); return 0; } diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 64ce3f830262..15e8e2b322b1 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -109,6 +109,8 @@ #define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */ #define MEI_DEV_ID_ADP_N 0x54E0 /* Alder Lake Point N */ +#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 9870bf717979..befa491e3344 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1154,6 +1154,8 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) ret = mei_me_d0i3_exit_sync(dev); if (ret) return ret; + } else { + hw->pg_state = MEI_PG_OFF; } } diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 33e58821e478..5435604327a7 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -116,6 +116,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 1259ca22d625..f4a1281658db 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1499,8 +1499,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq) err = mmc_cqe_recovery(host); if (err) mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); - else - mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); + mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); pr_debug("%s: CQE recovery done\n", mmc_hostname(host)); } diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index 
1499a64ec3aa..f13c08db3da5 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -982,6 +982,9 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip) struct sdhci_host *host = slot->host; u16 clock; + if (host->mmc->ios.power_mode != MMC_POWER_ON) + return 0; + clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL); clock |= SDHCI_CLOCK_PLL_EN; diff --git a/drivers/net/amt.c b/drivers/net/amt.c index ebee5f07a208..be2719a3ba70 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -51,6 +51,7 @@ static char *status_str[] = { }; static char *type_str[] = { + "", /* Type 0 is not defined */ "AMT_MSG_DISCOVERY", "AMT_MSG_ADVERTISEMENT", "AMT_MSG_REQUEST", @@ -2220,8 +2221,7 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb) struct amt_header_advertisement *amta; int hdr_size; - hdr_size = sizeof(*amta) - sizeof(struct amt_header); - + hdr_size = sizeof(*amta) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; @@ -2251,19 +2251,27 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb) struct ethhdr *eth; struct iphdr *iph; + hdr_size = sizeof(*amtmd) + sizeof(struct udphdr); + if (!pskb_may_pull(skb, hdr_size)) + return true; + amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1); if (amtmd->reserved || amtmd->version) return true; - hdr_size = sizeof(*amtmd) + sizeof(struct udphdr); if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false)) return true; + skb_reset_network_header(skb); skb_push(skb, sizeof(*eth)); skb_reset_mac_header(skb); skb_pull(skb, sizeof(*eth)); eth = eth_hdr(skb); + + if (!pskb_may_pull(skb, sizeof(*iph))) + return true; iph = ip_hdr(skb); + if (iph->version == 4) { if (!ipv4_is_multicast(iph->daddr)) return true; @@ -2274,6 +2282,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb) } else if (iph->version == 6) { struct ipv6hdr *ip6h; + if (!pskb_may_pull(skb, sizeof(*ip6h))) + return true; + ip6h = ipv6_hdr(skb); if (!ipv6_addr_is_multicast(&ip6h->daddr)) return true; @@ -2306,8 +2317,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt, struct iphdr *iph; int hdr_size, len; - hdr_size = sizeof(*amtmq) - sizeof(struct amt_header); - + hdr_size = sizeof(*amtmq) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; @@ -2315,22 +2325,27 @@ static bool amt_membership_query_handler(struct amt_dev *amt, if (amtmq->reserved || amtmq->version) return true; - hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth); + hdr_size -= sizeof(*eth); if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false)) return true; + oeth = eth_hdr(skb); skb_reset_mac_header(skb); skb_pull(skb, sizeof(*eth)); skb_reset_network_header(skb); eth = eth_hdr(skb); + if (!pskb_may_pull(skb, sizeof(*iph))) + return true; + iph = ip_hdr(skb); if (iph->version == 4) { - if (!ipv4_is_multicast(iph->daddr)) - return true; if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3))) return true; + if (!ipv4_is_multicast(iph->daddr)) + return true; + ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS); skb_reset_transport_header(skb); skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS); @@ -2345,15 +2360,17 @@ static bool amt_membership_query_handler(struct amt_dev *amt, ip_eth_mc_map(iph->daddr, eth->h_dest); #if IS_ENABLED(CONFIG_IPV6) } else if (iph->version == 6) { - struct ipv6hdr *ip6h = ipv6_hdr(skb); struct mld2_query *mld2q; + struct ipv6hdr *ip6h; - if 
(!ipv6_addr_is_multicast(&ip6h->daddr)) - return true; if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS + sizeof(*mld2q))) return true; + ip6h = ipv6_hdr(skb); + if (!ipv6_addr_is_multicast(&ip6h->daddr)) + return true; + mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); skb_reset_transport_header(skb); skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); @@ -2389,23 +2406,23 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb) { struct amt_header_membership_update *amtmu; struct amt_tunnel_list *tunnel; - struct udphdr *udph; struct ethhdr *eth; struct iphdr *iph; - int len; + int len, hdr_size; iph = ip_hdr(skb); - udph = udp_hdr(skb); - if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol, - false, false)) + hdr_size = sizeof(*amtmu) + sizeof(struct udphdr); + if (!pskb_may_pull(skb, hdr_size)) return true; - amtmu = (struct amt_header_membership_update *)skb->data; + amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1); if (amtmu->reserved || amtmu->version) return true; - skb_pull(skb, sizeof(*amtmu)); + if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false)) + return true; + skb_reset_network_header(skb); list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) { @@ -2426,6 +2443,9 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb) return true; report: + if (!pskb_may_pull(skb, sizeof(*iph))) + return true; + iph = ip_hdr(skb); if (iph->version == 4) { if (ip_mc_check_igmp(skb)) { @@ -2679,7 +2699,8 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb) amt = rcu_dereference_sk_user_data(sk); if (!amt) { err = true; - goto drop; + kfree_skb(skb); + goto out; } skb->dev = amt->dev; diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 8af4def38a98..e531b93f3cb2 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -2070,8 +2070,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv, for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) { err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i], gphy_fw_np, i); - if (err) + if (err) { + of_node_put(gphy_fw_np); goto remove_gphy; + } i++; } diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 7b37d45bc9fb..d94150d8f3f4 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -50,22 +50,25 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip, } static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, - u16 ctrl, u16 status, u16 lpa, + u16 bmsr, u16 lpa, u16 status, struct phylink_link_state *state) { + state->link = false; + + /* If the BMSR reports that the link had failed, report this to + * phylink. + */ + if (!(bmsr & BMSR_LSTATUS)) + return 0; + state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK); + state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE); if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) { /* The Speed and Duplex Resolved register is 1 if AN is enabled * and complete, or if AN is disabled. So with disabled AN we - * still get here on link up. But we want to set an_complete - * only if AN was enabled, thus we look at BMCR_ANENABLE. - * (According to 802.3-2008 section 22.2.4.2.10, we should be - * able to get this same value from BMSR_ANEGCAPABLE, but tests - * show that these Marvell PHYs don't conform to this part of - * the specification - BMSR_ANEGCAPABLE is simply always 1.) + * still get here on link up.
*/ - state->an_complete = !!(ctrl & BMCR_ANENABLE); state->duplex = status & MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ? DUPLEX_FULL : DUPLEX_HALF; @@ -191,12 +194,12 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status, ctrl; + u16 bmsr, lpa, status; int err; - err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl); + err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr); if (err) { - dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err); return err; } @@ -212,7 +215,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, return err; } - return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); + return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state); } int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, @@ -918,13 +921,13 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status, ctrl; + u16 bmsr, lpa, status; int err; err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMCR, &ctrl); + MV88E6390_SGMII_BMSR, &bmsr); if (err) { - dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err); return err; } @@ -942,7 +945,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, return err; } - return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); + return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state); } static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index 3bb42a9f236d..769f672e9128 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -955,35 +955,21 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port, return 0; } -static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port, - phy_interface_t interface) -{ - int ext_int; - - ext_int = rtl8365mb_extint_port_map[port]; - - if (ext_int < 0 && - (interface == PHY_INTERFACE_MODE_NA || - interface == PHY_INTERFACE_MODE_INTERNAL || - interface == PHY_INTERFACE_MODE_GMII)) - /* Internal PHY */ - return true; - else if ((ext_int >= 1) && - phy_interface_mode_is_rgmii(interface)) - /* Extension MAC */ - return true; - - return false; -} - static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port, struct phylink_config *config) { - if (dsa_is_user_port(ds, port)) + if (dsa_is_user_port(ds, port)) { __set_bit(PHY_INTERFACE_MODE_INTERNAL, config->supported_interfaces); - else if (dsa_is_cpu_port(ds, port)) + + /* GMII is the default interface mode for phylib, so + * we have to support it for ports with integrated PHY. 
+ */ + __set_bit(PHY_INTERFACE_MODE_GMII, + config->supported_interfaces); + } else if (dsa_is_cpu_port(ds, port)) { phy_interface_set_rgmii(config->supported_interfaces); + } config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; @@ -996,12 +982,6 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port, struct realtek_priv *priv = ds->priv; int ret; - if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) { - dev_err(priv->dev, "phy mode %s is unsupported on port %d\n", - phy_modes(state->interface), port); - return; - } - if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) { dev_err(priv->dev, "port %d supports only conventional PHY or fixed-link\n", diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index a3816264c35c..8c5828582c21 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) mdio = mdiobus_alloc(); if (mdio == NULL) { netdev_err(dev, "Error allocating MDIO bus\n"); - return -ENOMEM; + ret = -ENOMEM; + goto put_node; } mdio->name = ALTERA_TSE_RESOURCE_NAME; @@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) mdio->id); goto out_free_mdio; } + of_node_put(mdio_node); if (netif_msg_drv(priv)) netdev_info(dev, "MDIO bus %s: created\n", mdio->id); @@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) out_free_mdio: mdiobus_free(mdio); mdio = NULL; +put_node: + of_node_put(mdio_node); return ret; } diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index c6f003975621..d5f2c6989221 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -820,7 +820,7 @@ static int au1000_rx(struct net_device *dev) pr_cont("\n"); } } - prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); + prxd->buff_stat = lower_32_bits(pDB->dma_addr) | RX_DMA_ENABLE; aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); wmb(); /* drain writebuffer */ @@ -996,7 +996,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) ps->tx_packets++; ps->tx_bytes += ptxd->len; - ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE; + ptxd->buff_stat = lower_32_bits(pDB->dma_addr) | TX_DMA_ENABLE; wmb(); /* drain writebuffer */ dev_kfree_skb(skb); aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1); @@ -1131,9 +1131,9 @@ static int au1000_probe(struct platform_device *pdev) /* Allocate the data buffers * Snooping works fine with eth on all au1xxx */ - aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE * - (NUM_TX_BUFFS + NUM_RX_BUFFS), - &aup->dma_addr, 0); + aup->vaddr = dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + &aup->dma_addr, 0); if (!aup->vaddr) { dev_err(&pdev->dev, "failed to allocate data buffers\n"); err = -ENOMEM; @@ -1234,8 +1234,8 @@ static int au1000_probe(struct platform_device *pdev) for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { pDB->pnext = pDBfree; pDBfree = pDB; - pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); - pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); + pDB->vaddr = aup->vaddr + MAX_BUF_SIZE * i; + pDB->dma_addr = aup->dma_addr + MAX_BUF_SIZE * i; pDB++; } aup->pDBfree = pDBfree; @@ -1246,7 +1246,7 @@ static int au1000_probe(struct platform_device *pdev) if (!pDB) goto err_out; - 
aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->rx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr); aup->rx_db_inuse[i] = pDB; } @@ -1255,7 +1255,7 @@ static int au1000_probe(struct platform_device *pdev) if (!pDB) goto err_out; - aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->tx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr); aup->tx_dma_ring[i]->len = 0; aup->tx_db_inuse[i] = pDB; } @@ -1310,7 +1310,7 @@ err_remap2: iounmap(aup->mac); err_remap1: dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); + aup->vaddr, aup->dma_addr); err_vaddr: free_netdev(dev); err_alloc: @@ -1343,7 +1343,7 @@ static int au1000_remove(struct platform_device *pdev) au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); + aup->vaddr, aup->dma_addr); iounmap(aup->macdma); iounmap(aup->mac); diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h index e3a3ed29db61..2489c2f4fd8a 100644 --- a/drivers/net/ethernet/amd/au1000_eth.h +++ b/drivers/net/ethernet/amd/au1000_eth.h @@ -106,8 +106,8 @@ struct au1000_private { struct mac_reg *mac; /* mac registers */ u32 *enable; /* address of MAC Enable Register */ void __iomem *macdma; /* base of MAC DMA port */ - u32 vaddr; /* virtual address of rx/tx buffers */ - dma_addr_t dma_addr; /* dma address of rx/tx buffers */ + void *vaddr; /* virtual address of rx/tx buffers */ + dma_addr_t dma_addr; /* dma address of rx/tx buffers */ spinlock_t lock; /* Serialise access to device */ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index a3593290886f..4d46780fad13 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2784,7 +2784,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest); netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source); - netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto)); + netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto)); for (i = 0; i < skb->len; i += 32) { unsigned int len = min(skb->len - i, 32U); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c index 4ebd2410185a..4d790a89fe77 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c @@ -338,7 +338,7 @@ static int xgbe_platform_probe(struct platform_device *pdev) * the PHY resources listed last */ phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3; - phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1; + phy_irqnum = platform_irq_count(pdev) - 1; dma_irqnum = 1; dma_irqend = phy_irqnum; } else { @@ -348,7 +348,7 @@ static int xgbe_platform_probe(struct platform_device *pdev) phy_memnum = 0; phy_irqnum = 0; dma_irqnum = 1; - dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ); + dma_irqend = platform_irq_count(pdev); } /* Obtain the mmio areas for the device */ diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c index 086739e4f40a..9b83d5361699 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c @@ -234,6 +234,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac) np = 
of_get_child_by_name(core->dev.of_node, "mdio"); err = of_mdiobus_register(mii_bus, np); + of_node_put(np); if (err) { dev_err(&core->dev, "Registration of mii bus failed\n"); goto err_free_bus; diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index e6f48786949c..02bd3cf9a260 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -332,7 +332,6 @@ static void bgmac_remove(struct bcma_device *core) bcma_mdio_mii_unregister(bgmac->mii_bus); bgmac_enet_remove(bgmac); bcma_set_drvdata(core, NULL); - kfree(bgmac); } static struct bcma_driver bgmac_bcma_driver = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 8a3a446219f7..94f80e1c4020 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -769,6 +769,7 @@ struct hnae3_tc_info { u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ u16 tqp_count[HNAE3_MAX_TC]; u16 tqp_offset[HNAE3_MAX_TC]; + u8 max_tc; /* Total number of TCs */ u8 num_tc; /* Total number of enabled TCs */ bool mqprio_active; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 6d20974519fe..4c7988e308a2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -1129,7 +1129,7 @@ hns3_is_ringparam_changed(struct net_device *ndev, if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num && old_ringparam->rx_desc_num == new_ringparam->rx_desc_num && old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) { - netdev_info(ndev, "ringparam not changed\n"); + netdev_info(ndev, "descriptor number and rx buffer length not changed\n"); return false; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 1ebad0e50e6a..fae79764dc44 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3268,7 +3268,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev) static int hclge_update_port_info(struct hclge_dev *hdev) { struct hclge_mac *mac = &hdev->hw.mac; - int speed = HCLGE_MAC_SPEED_UNKNOWN; + int speed; int ret; /* get the port info from SFP cmd if not copper port */ @@ -3279,10 +3279,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev) if (!hdev->support_sfp_query) return 0; - if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + speed = mac->speed; ret = hclge_get_sfp_info(hdev, mac); - else + } else { + speed = HCLGE_MAC_SPEED_UNKNOWN; ret = hclge_get_sfp_speed(hdev, &speed); + } if (ret == -EOPNOTSUPP) { hdev->support_sfp_query = false; @@ -3294,6 +3297,8 @@ static int hclge_update_port_info(struct hclge_dev *hdev) if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { if (mac->speed_type == QUERY_ACTIVE_SPEED) { hclge_update_port_capability(hdev, mac); + if (mac->speed != speed) + (void)hclge_tm_port_shaper_cfg(hdev); return 0; } return hclge_cfg_mac_speed_dup(hdev, mac->speed, @@ -3376,6 +3381,12 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, link_state_old = vport->vf_info.link_state; vport->vf_info.link_state = link_state; + /* return success directly if the VF is unalive, VF will + * query link state itself when it starts work. 
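+ *
+ * (Editorial aside - a hedged sketch of the general pattern used here,
+ * "push state only to a live peer, let a dead peer pull it on
+ * start-up". peer_alive()/push_state() are illustrative names only;
+ * in this hunk they correspond to the HCLGE_VPORT_STATE_ALIVE bit and
+ * hclge_push_vf_link_status():
+ *
+ *	old_state = cached_state;
+ *	cached_state = new_state;
+ *	if (!peer_alive(peer))
+ *		return 0;
+ *	if (push_state(peer, new_state))
+ *		cached_state = old_state;
+ *
+ * Reporting success for a dead peer is safe because the cached state
+ * is exactly what the peer reads back when it comes up.)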
+ */ + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + return 0; + ret = hclge_push_vf_link_status(vport); if (ret) { vport->vf_info.link_state = link_state_old; @@ -10117,6 +10128,7 @@ static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, if (ret) return ret; + vport->port_base_vlan_cfg.tbl_sta = false; /* remove old VLAN tag */ if (old_info->vlan_tag == 0) ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 1f87a8a3fe32..2f33b036a47a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -282,8 +282,8 @@ static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, - u16 qs_id, u8 pri) +static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri, + bool link_vld) { struct hclge_qs_to_pri_link_cmd *map; struct hclge_desc desc; @@ -294,7 +294,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, map->qs_id = cpu_to_le16(qs_id); map->priority = pri; - map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK; + map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0; return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -420,7 +420,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) { struct hclge_port_shapping_cmd *shap_cfg_cmd; struct hclge_shaper_ir_para ir_para; @@ -642,11 +642,13 @@ static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport) * one tc for VF for simplicity. VF's vport_id is non zero. */ if (vport->vport_id) { + kinfo->tc_info.max_tc = 1; kinfo->tc_info.num_tc = 1; vport->qs_offset = HNAE3_MAX_TC + vport->vport_id - HCLGE_VF_VPORT_START_NUM; vport_max_rss_size = hdev->vf_rss_size_max; } else { + kinfo->tc_info.max_tc = hdev->tc_max; kinfo->tc_info.num_tc = min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc); vport->qs_offset = 0; @@ -679,7 +681,9 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) kinfo->num_tqps = hclge_vport_get_tqp_num(vport); vport->dwrr = 100; /* 100 percent as init */ vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; - hdev->rss_cfg.rss_size = kinfo->rss_size; + + if (vport->vport_id == PF_VPORT_ID) + hdev->rss_cfg.rss_size = kinfo->rss_size; /* when enable mqprio, the tc_info has been updated. 
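 *
 * (Editorial aside, hedged: the hclge_tm.c hunks below all apply the
 * same split that the max_tc field added to hnae3_tc_info above
 * enables - TCs below num_tc keep DWRR scheduling with a real
 * bandwidth and weight, while the remaining TCs up to max_tc are
 * parked in strict-priority mode with zero limits:
 *
 *	for (i = 0; i < tc_max; i++) {
 *		mode  = i < num_tc ? HCLGE_SCH_MODE_DWRR : HCLGE_SCH_MODE_SP;
 *		limit = i < num_tc ? pg_bw_limit : 0;
 *	}
 *
 * pg_bw_limit stands in for hdev->tm_info.pg_info[0].bw_limit.)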
*/ if (kinfo->tc_info.mqprio_active) @@ -714,14 +718,22 @@ static void hclge_tm_vport_info_update(struct hclge_dev *hdev) static void hclge_tm_tc_info_init(struct hclge_dev *hdev) { - u8 i; + u8 i, tc_sch_mode; + u32 bw_limit; + + for (i = 0; i < hdev->tc_max; i++) { + if (i < hdev->tm_info.num_tc) { + tc_sch_mode = HCLGE_SCH_MODE_DWRR; + bw_limit = hdev->tm_info.pg_info[0].bw_limit; + } else { + tc_sch_mode = HCLGE_SCH_MODE_SP; + bw_limit = 0; + } - for (i = 0; i < hdev->tm_info.num_tc; i++) { hdev->tm_info.tc_info[i].tc_id = i; - hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR; + hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode; hdev->tm_info.tc_info[i].pgid = 0; - hdev->tm_info.tc_info[i].bw_limit = - hdev->tm_info.pg_info[0].bw_limit; + hdev->tm_info.tc_info[i].bw_limit = bw_limit; } for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) @@ -926,10 +938,13 @@ static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev) for (k = 0; k < hdev->num_alloc_vport; k++) { struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; - for (i = 0; i < kinfo->tc_info.num_tc; i++) { + for (i = 0; i < kinfo->tc_info.max_tc; i++) { + u8 pri = i < kinfo->tc_info.num_tc ? i : 0; + bool link_vld = i < kinfo->tc_info.num_tc; + ret = hclge_tm_qs_to_pri_map_cfg(hdev, vport[k].qs_offset + i, - i); + pri, link_vld); if (ret) return ret; } @@ -949,7 +964,7 @@ static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev) for (i = 0; i < HNAE3_MAX_TC; i++) { ret = hclge_tm_qs_to_pri_map_cfg(hdev, vport[k].qs_offset + i, - k); + k, true); if (ret) return ret; } @@ -989,33 +1004,39 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) { u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate; struct hclge_shaper_ir_para ir_para; - u32 shaper_para; + u32 shaper_para_c, shaper_para_p; int ret; u32 i; - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { u32 rate = hdev->tm_info.tc_info[i].bw_limit; - ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI, - &ir_para, max_tm_rate); - if (ret) - return ret; + if (rate) { + ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI, + &ir_para, max_tm_rate); + if (ret) + return ret; + + shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b, + ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + } else { + shaper_para_c = 0; + shaper_para_p = 0; + } - shaper_para = hclge_tm_get_shapping_para(0, 0, 0, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i, - shaper_para, rate); + shaper_para_c, rate); if (ret) return ret; - shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, - ir_para.ir_u, - ir_para.ir_s, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, - shaper_para, rate); + shaper_para_p, rate); if (ret) return ret; } @@ -1125,7 +1146,7 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) int ret; u32 i, k; - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; dwrr = pg_info->tc_dwrr[i]; @@ -1135,9 +1156,15 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) return ret; for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; + + if (i >= kinfo->tc_info.max_tc) + continue; + 
+ dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0; ret = hclge_tm_qs_weight_cfg( hdev, vport[k].qs_offset + i, - vport[k].dwrr); + dwrr); if (ret) return ret; } @@ -1303,6 +1330,7 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) { struct hclge_vport *vport = hdev->vport; int ret; + u8 mode; u16 i; ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id); @@ -1310,9 +1338,16 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) return ret; for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo; + + if (pri_id >= kinfo->tc_info.max_tc) + continue; + + mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR : + HCLGE_SCH_MODE_SP; ret = hclge_tm_qs_schd_mode_cfg(hdev, vport[i].qs_offset + pri_id, - HCLGE_SCH_MODE_DWRR); + mode); if (ret) return ret; } @@ -1353,7 +1388,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) u8 i; if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 619cc30a2dfc..d943943912f7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -237,6 +237,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats); int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate); +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev); int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num); int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num); int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 610f00cbaff9..19704f5c8291 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2586,15 +2586,16 @@ static void i40e_diag_test(struct net_device *netdev, set_bit(__I40E_TESTING, pf->state); + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { + dev_warn(&pf->pdev->dev, + "Cannot start offline testing when PF is in reset state.\n"); + goto skip_ol_tests; + } + if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) { dev_warn(&pf->pdev->dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n"); - data[I40E_ETH_TEST_REG] = 1; - data[I40E_ETH_TEST_EEPROM] = 1; - data[I40E_ETH_TEST_INTR] = 1; - data[I40E_ETH_TEST_LINK] = 1; - eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__I40E_TESTING, pf->state); goto skip_ol_tests; } @@ -2641,9 +2642,17 @@ static void i40e_diag_test(struct net_device *netdev, data[I40E_ETH_TEST_INTR] = 0; } -skip_ol_tests: - netif_info(pf, drv, netdev, "testing finished\n"); + return; + +skip_ol_tests: + data[I40E_ETH_TEST_REG] = 1; + data[I40E_ETH_TEST_EEPROM] = 1; + data[I40E_ETH_TEST_INTR] = 1; + data[I40E_ETH_TEST_LINK] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__I40E_TESTING, pf->state); + netif_info(pf, drv, netdev, "testing failed\n"); } static void i40e_get_wol(struct net_device *netdev, diff 
--git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 332a608dbaa6..72576bb3e94d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8542,6 +8542,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, return -EOPNOTSUPP; } + if (!tc) { + dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination"); + return -EINVAL; + } + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) return -EBUSY; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 2606e8f0f19b..033ea71763e3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2282,7 +2282,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) } if (vf->adq_enabled) { - for (i = 0; i < I40E_MAX_VF_VSI; i++) + for (i = 0; i < vf->num_tc; i++) num_qps_all += vf->ch[i].num_qps; if (num_qps_all != qci->num_queue_pairs) { aq_ret = I40E_ERR_PARAM; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 7dfcf78b57fb..f3ecb3bca33d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -984,7 +984,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, list_add_tail(&f->list, &adapter->mac_filter_list); f->add = true; f->is_new_mac = true; - f->is_primary = false; + f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr); adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; } else { f->remove = false; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index e1cae253412c..c1ac2f746714 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5763,25 +5763,38 @@ static netdev_features_t ice_fix_features(struct net_device *netdev, netdev_features_t features) { struct ice_netdev_priv *np = netdev_priv(netdev); - netdev_features_t supported_vlan_filtering; - netdev_features_t requested_vlan_filtering; - struct ice_vsi *vsi = np->vsi; - - requested_vlan_filtering = features & NETIF_VLAN_FILTERING_FEATURES; - - /* make sure supported_vlan_filtering works for both SVM and DVM */ - supported_vlan_filtering = NETIF_F_HW_VLAN_CTAG_FILTER; - if (ice_is_dvm_ena(&vsi->back->hw)) - supported_vlan_filtering |= NETIF_F_HW_VLAN_STAG_FILTER; - - if (requested_vlan_filtering && - requested_vlan_filtering != supported_vlan_filtering) { - if (requested_vlan_filtering & NETIF_F_HW_VLAN_CTAG_FILTER) { - netdev_warn(netdev, "cannot support requested VLAN filtering settings, enabling all supported VLAN filtering settings\n"); - features |= supported_vlan_filtering; + netdev_features_t req_vlan_fltr, cur_vlan_fltr; + bool cur_ctag, cur_stag, req_ctag, req_stag; + + cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; + cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; + cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; + + req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES; + req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; + req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; + + if (req_vlan_fltr != cur_vlan_fltr) { + if (ice_is_dvm_ena(&np->vsi->back->hw)) { + if (req_ctag && req_stag) { + features |= NETIF_VLAN_FILTERING_FEATURES; + } else if (!req_ctag && !req_stag) { 
+ features &= ~NETIF_VLAN_FILTERING_FEATURES; + } else if ((!cur_ctag && req_ctag && !cur_stag) || + (!cur_stag && req_stag && !cur_ctag)) { + features |= NETIF_VLAN_FILTERING_FEATURES; + netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n"); + } else if ((cur_ctag && !req_ctag && cur_stag) || + (cur_stag && !req_stag && cur_ctag)) { + features &= ~NETIF_VLAN_FILTERING_FEATURES; + netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n"); + } } else { - netdev_warn(netdev, "cannot support requested VLAN filtering settings, clearing all supported VLAN filtering settings\n"); - features &= ~supported_vlan_filtering; + if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER) + netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n"); + + if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER) + features |= NETIF_F_HW_VLAN_CTAG_FILTER; } } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 662947c882e8..ef9344ef0d8e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -2271,7 +2271,7 @@ static int ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) { tx->quad = port / ICE_PORTS_PER_QUAD; - tx->quad_offset = tx->quad * INDEX_PER_PORT; + tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT; tx->len = INDEX_PER_PORT; return ice_ptp_alloc_tx_tracker(tx); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index afd048d69959..10e396abf130 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -49,6 +49,37 @@ struct ice_perout_channel { * To allow multiple ports to access the shared register block independently, * the blocks are split up so that indexes are assigned to each port based on * hardware logical port number. + * + * The timestamp blocks are handled differently for E810- and E822-based + * devices. In E810 devices, each port has its own block of timestamps, while in + * E822 there is a need to logically break the block of registers into smaller + * chunks based on the port number to avoid collisions. 
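+ *
+ * (Editorial aside: a sketch of the index math this layout implies;
+ * the two formulas below mirror the ice_ptp.c fix above:
+ *
+ *	quad        = port / ICE_PORTS_PER_QUAD;
+ *	quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
+ *
+ * On E810 every port owns a whole register block, so the offset
+ * inside the quad is always 0; the worked diagrams for port 5
+ * follow.)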
+ * + * Example for port 5 in E810: + * +--------+--------+--------+--------+--------+--------+--------+--------+ + * |register|register|register|register|register|register|register|register| + * | block | block | block | block | block | block | block | block | + * | for | for | for | for | for | for | for | for | + * | port 0 | port 1 | port 2 | port 3 | port 4 | port 5 | port 6 | port 7 | + * +--------+--------+--------+--------+--------+--------+--------+--------+ + * ^^ + * || + * |--- quad offset is always 0 + * ---- quad number + * + * Example for port 5 in E822: + * +-----------------------------+-----------------------------+ + * | register block for quad 0 | register block for quad 1 | + * |+------+------+------+------+|+------+------+------+------+| + * ||port 0|port 1|port 2|port 3|||port 0|port 1|port 2|port 3|| + * |+------+------+------+------+|+------+------+------+------+| + * +-----------------------------+-------^---------------------+ + * ^ | + * | --- quad offset* + * ---- quad number + * + * * PHY port 5 is port 1 in quad 1 + * */ /** diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index cd8e6b50968c..7adf9ddf129e 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -504,6 +504,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) } if (ice_is_vf_disabled(vf)) { + vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) + return -EINVAL; + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); + ice_vsi_stop_all_rx_rings(vsi); dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", vf->vf_id); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 1d9b84c3937a..4547bc1f7cee 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -1569,35 +1569,27 @@ error_param: */ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) { - enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; - int i, q_idx; + int i = -1, q_idx; - if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) goto error_param; - } - if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) goto error_param; - } vsi = ice_get_vf_vsi(vf); - if (!vsi) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (!vsi) goto error_param; - } if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF || qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n", vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1610,7 +1602,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) !ice_vc_isvalid_ring_len(qpi->txq.ring_len) || !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) || !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1620,7 +1611,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) * for selected "vsi" */ if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) { - v_ret = 
VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1630,14 +1620,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->tx_rings[i]->count = qpi->txq.ring_len; /* Disable any existing queue first */ - if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) goto error_param; - } /* Configure a queue with the requested settings */ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n", + vf->vf_id, i); goto error_param; } } @@ -1651,17 +1640,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) if (qpi->rxq.databuffer_size != 0 && (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || - qpi->rxq.databuffer_size < 1024)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + qpi->rxq.databuffer_size < 1024)) goto error_param; - } vsi->rx_buf_len = qpi->rxq.databuffer_size; vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len; if (qpi->rxq.max_pkt_size > max_frame_size || - qpi->rxq.max_pkt_size < 64) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + qpi->rxq.max_pkt_size < 64) goto error_param; - } vsi->max_frame = qpi->rxq.max_pkt_size; /* add space for the port VLAN since the VF driver is @@ -1672,16 +1657,30 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->max_frame += VLAN_HLEN; if (ice_vsi_cfg_single_rxq(vsi, q_idx)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n", + vf->vf_id, i); goto error_param; } } } + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); error_param: + /* disable whatever we can */ + for (; i >= 0; i--) { + if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true)) + dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n", + vf->vf_id, i); + if (ice_vf_vsi_dis_single_txq(vf, vsi, i)) + dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n", + vf->vf_id, i); + } + /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret, - NULL, 0); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + VIRTCHNL_STATUS_ERR_PARAM, NULL, 0); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 7f11c0a8e7a9..d4e63f0644c3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1184,9 +1184,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, switch (xcast_mode) { case IXGBEVF_XCAST_MODE_NONE: - disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + disable = IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; - enable = 0; + enable = IXGBE_VMOLR_BAM; break; case IXGBEVF_XCAST_MODE_MULTI: disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; @@ -1208,9 +1208,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, return -EPERM; } - disable = 0; + disable = IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | - IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE; break; default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index bc614a4def9e..3f60a80e34c8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -1390,7 +1390,8 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev, static const struct ethtool_ops otx2vf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES, + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE, .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN | ETHTOOL_RING_USE_CQE_SIZE, .get_link = otx2_get_link, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b3b3c079a0fa..59c9a10f83ba 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -899,6 +899,17 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, return true; } +static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask) +{ + unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH); + unsigned long data; + + data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN, + get_order(size)); + + return (void *)data; +} + /* the qdma core needs scratch memory to be setup */ static int mtk_init_fq_dma(struct mtk_eth *eth) { @@ -1467,7 +1478,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto release_desc; /* alloc new buffer */ - new_data = napi_alloc_frag(ring->frag_size); + if (ring->frag_size <= PAGE_SIZE) + new_data = napi_alloc_frag(ring->frag_size); + else + new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC); if (unlikely(!new_data)) { netdev->stats.rx_dropped++; goto release_desc; @@ -1914,7 +1928,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return -ENOMEM; for (i = 0; i < rx_dma_size; i++) { - ring->data[i] = netdev_alloc_frag(ring->frag_size); + if (ring->frag_size <= PAGE_SIZE) + ring->data[i] = netdev_alloc_frag(ring->frag_size); + else + ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL); if (!ring->data[i]) return -ENOMEM; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ed5038d98ef6..6400a827173c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -2110,7 +2110,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev, en_err(priv, "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", i, offset, ee->len - i, ret); - return 0; + return ret; } i += ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 0eb9d74547f8..50422b56a64d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -579,17 +579,6 @@ static void *pci_get_other_drvdata(struct device *this, struct device *other) return pci_get_drvdata(to_pci_dev(other)); } -static int next_phys_dev(struct device *dev, const void *data) -{ - struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data; - - mdev = pci_get_other_drvdata(this->device, dev); - if (!mdev) - return 0; - - return _next_phys_dev(mdev, data); -} - static int next_phys_dev_lag(struct device *dev, const void *data) { struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data; @@ -624,13 +613,6 @@ static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev, } /* Must be called with intf_mutex held */ -struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) -{ - lockdep_assert_held(&mlx5_intf_mutex); - return mlx5_get_next_dev(dev, &next_phys_dev); -} - -/* Must 
be called with intf_mutex held */ struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev) { lockdep_assert_held(&mlx5_intf_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index eae9aa9c0811..978a2bb8e122 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) if (!tracer->owner) return; + if (unlikely(!tracer->str_db.loaded)) + goto arm; + block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE; start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; @@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) &tmp_trace_block[TRACES_PER_BLOCK - 1]); } +arm: mlx5_fw_tracer_arm(dev); } @@ -1136,8 +1140,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void queue_work(tracer->work_queue, &tracer->ownership_change_work); break; case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE: - if (likely(tracer->str_db.loaded)) - queue_work(tracer->work_queue, &tracer->handle_traces_work); + queue_work(tracer->work_queue, &tracer->handle_traces_work); break; default: mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 68364484a435..3c1edfa33aa7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -565,7 +565,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev, static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO; - bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write); + bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) && + MLX5_CAP_GEN(mdev, relaxed_ordering_write); return ro && lro_en ? 
MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 43a536cb81db..c0f409c195bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -38,11 +38,12 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc) { + bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev); bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write); bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read); - MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read); - MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write); + MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read); + MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write); } static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index eb90e79388f1..f797fd97d305 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -950,6 +950,13 @@ err_event_reg: return err; } +static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) +{ + mlx5e_rep_tc_netdevice_event_unregister(rpriv); + mlx5e_rep_bond_cleanup(rpriv); + mlx5e_rep_tc_cleanup(rpriv); +} + static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -961,42 +968,36 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) return err; } - err = mlx5e_tc_ht_init(&rpriv->tc_ht); - if (err) - goto err_ht_init; - if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { err = mlx5e_init_uplink_rep_tx(rpriv); if (err) goto err_init_tx; } + err = mlx5e_tc_ht_init(&rpriv->tc_ht); + if (err) + goto err_ht_init; + return 0; -err_init_tx: - mlx5e_tc_ht_cleanup(&rpriv->tc_ht); err_ht_init: + if (rpriv->rep->vport == MLX5_VPORT_UPLINK) + mlx5e_cleanup_uplink_rep_tx(rpriv); +err_init_tx: mlx5e_destroy_tises(priv); return err; } -static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) -{ - mlx5e_rep_tc_netdevice_event_unregister(rpriv); - mlx5e_rep_bond_cleanup(rpriv); - mlx5e_rep_tc_cleanup(rpriv); -} - static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; - mlx5e_destroy_tises(priv); + mlx5e_tc_ht_cleanup(&rpriv->tc_ht); if (rpriv->rep->vport == MLX5_VPORT_UPLINK) mlx5e_cleanup_uplink_rep_tx(rpriv); - mlx5e_tc_ht_cleanup(&rpriv->tc_ht); + mlx5e_destroy_tises(priv); } static void mlx5e_rep_enable(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 217cac29057f..2ce3728576d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2690,9 +2690,6 @@ static int mlx5_esw_offloads_devcom_event(int event, switch (event) { case ESW_OFFLOADS_DEVCOM_PAIR: - if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev) - break; - if (mlx5_eswitch_vport_match_metadata_enabled(esw) != mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) break; @@ -2744,6 +2741,9 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) return; + if (!mlx5_is_lag_supported(esw->dev)) + return; + mlx5_devcom_register_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS, 
mlx5_esw_offloads_devcom_event, @@ -2761,6 +2761,9 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) return; + if (!mlx5_is_lag_supported(esw->dev)) + return; + mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, ESW_OFFLOADS_DEVCOM_UNPAIR, esw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index fdcf7f529330..21e5c709b2d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1574,9 +1574,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte, return NULL; } -static bool check_conflicting_actions(u32 action1, u32 action2) +static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0, + const struct mlx5_fs_vlan *vlan1) { - u32 xored_actions = action1 ^ action2; + return vlan0->ethtype != vlan1->ethtype || + vlan0->vid != vlan1->vid || + vlan0->prio != vlan1->prio; +} + +static bool check_conflicting_actions(const struct mlx5_flow_act *act1, + const struct mlx5_flow_act *act2) +{ + u32 action1 = act1->action; + u32 action2 = act2->action; + u32 xored_actions; + + xored_actions = action1 ^ action2; /* if one rule only wants to count, it's ok */ if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT || @@ -1593,6 +1606,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2) MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2)) return true; + if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT && + act1->pkt_reformat != act2->pkt_reformat) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && + act1->modify_hdr != act2->modify_hdr) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH && + check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0])) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 && + check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1])) + return true; + return false; } @@ -1600,7 +1629,7 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_context *flow_context, const struct mlx5_flow_act *flow_act) { - if (check_conflicting_actions(flow_act->action, fte->action.action)) { + if (check_conflicting_actions(flow_act, &fte->action)) { mlx5_core_warn(get_dev(&fte->node), "Found two FTEs with conflicting actions\n"); return -EEXIST; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 552b6e26e701..2a8fc547eb37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -783,7 +783,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; - struct lag_tracker tracker; + struct lag_tracker tracker = { }; bool do_bond, roce_lag; int err; int i; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 72f70fad4641..c81b173156d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -74,6 +74,16 @@ struct mlx5_lag { struct lag_mpesw lag_mpesw; }; +static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev) +{ + if (!MLX5_CAP_GEN(dev, vport_group_manager) || + !MLX5_CAP_GEN(dev, lag_master) || + MLX5_CAP_GEN(dev, num_lag_ports) < 2 || + MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS) + return 
false; + return true; +} + static inline struct mlx5_lag * mlx5_lag_dev(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 484cb1e4fc7f..9cc7afea2758 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -209,7 +209,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev); void mlx5_detach_device(struct mlx5_core_dev *dev); int mlx5_register_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev); -struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev); struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev); void mlx5_dev_list_lock(void); void mlx5_dev_list_unlock(void); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h index a68d931090dd..15c8d4de8350 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h @@ -8,8 +8,8 @@ #include "spectrum.h" enum mlxsw_sp_counter_sub_pool_id { - MLXSW_SP_COUNTER_SUB_POOL_FLOW, MLXSW_SP_COUNTER_SUB_POOL_RIF, + MLXSW_SP_COUNTER_SUB_POOL_FLOW, }; int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 443a5d6eb57b..7c31a46195b2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -507,6 +507,11 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map) key_size += sizeof(struct nfp_flower_ipv6); } + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) { + map[FLOW_PAY_QINQ] = key_size; + key_size += sizeof(struct nfp_flower_vlan); + } + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) { map[FLOW_PAY_GRE] = key_size; if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) @@ -515,11 +520,6 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map) key_size += sizeof(struct nfp_flower_ipv4_gre_tun); } - if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) { - map[FLOW_PAY_QINQ] = key_size; - key_size += sizeof(struct nfp_flower_vlan); - } - if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) || (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) { map[FLOW_PAY_UDP_TUN] = key_size; @@ -758,6 +758,17 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) } } + if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) { + offset = key_map[FLOW_PAY_QINQ]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_vlan((struct nfp_flower_vlan *)key, + (struct nfp_flower_vlan *)msk, + rules[i]); + } + } + if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) { offset = key_map[FLOW_PAY_GRE]; key = kdata + offset; @@ -798,17 +809,6 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) } } - if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) { - offset = key_map[FLOW_PAY_QINQ]; - key = kdata + offset; - msk = mdata + offset; - for (i = 0; i < _CT_TYPE_MAX; i++) { - nfp_flower_compile_vlan((struct nfp_flower_vlan *)key, - (struct nfp_flower_vlan *)msk, - rules[i]); - } - } - if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN || key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { offset = key_map[FLOW_PAY_UDP_TUN]; diff --git 
a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 193a167a6762..e01430139b6d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -625,6 +625,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, msk += sizeof(struct nfp_flower_ipv6); } + if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) { + nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext, + (struct nfp_flower_vlan *)msk, + rule); + ext += sizeof(struct nfp_flower_vlan); + msk += sizeof(struct nfp_flower_vlan); + } + if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) { if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { struct nfp_flower_ipv6_gre_tun *gre_match; @@ -660,14 +668,6 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, } } - if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) { - nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext, - (struct nfp_flower_vlan *)msk, - rule); - ext += sizeof(struct nfp_flower_vlan); - msk += sizeof(struct nfp_flower_vlan); - } - if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN || key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index 54af30961351..6eeeb0fda91f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -15,7 +15,7 @@ #include "nfp_net_sriov.h" static int -nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg) +nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn) { u16 cap_vf; @@ -24,12 +24,14 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg) cap_vf = readw(app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_CAP); if ((cap_vf & cap) != cap) { - nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg); + if (warn) + nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg); return -EOPNOTSUPP; } if (vf < 0 || vf >= app->pf->num_vfs) { - nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); + if (warn) + nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); return -EINVAL; } @@ -65,7 +67,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) unsigned int vf_offset; int err; - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true); if (err) return err; @@ -101,7 +103,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, u32 vlan_tag; int err; - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan", true); if (err) return err; @@ -115,7 +117,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, } /* Check if fw supports or not */ - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", true); if (err) is_proto_sup = false; @@ -149,7 +151,7 @@ int nfp_app_set_vf_rate(struct net_device *netdev, int vf, u32 vf_offset, ratevalue; int err; - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", true); if (err) return err; @@ -181,7 +183,7 @@ int nfp_app_set_vf_spoofchk(struct 
net_device *netdev, int vf, bool enable) int err; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_SPOOF, - "spoofchk"); + "spoofchk", true); if (err) return err; @@ -205,7 +207,7 @@ int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable) int err; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_TRUST, - "trust"); + "trust", true); if (err) return err; @@ -230,7 +232,7 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, int err; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_LINK_STATE, - "link_state"); + "link_state", true); if (err) return err; @@ -265,7 +267,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, u8 flags; int err; - err = nfp_net_sriov_check(app, vf, 0, ""); + err = nfp_net_sriov_check(app, vf, 0, "", true); if (err) return err; @@ -285,13 +287,13 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag); ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag); - if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto")) + if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", false)) ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag)); ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags); ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags); ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags); - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", false); if (!err) { rate = readl(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_RATE); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index f9f80933e0c9..38fe77d1035e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -1072,13 +1072,11 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) { - goto err_dvr_probe; + goto err_alloc_irq; } return 0; -err_dvr_probe: - pci_free_irq_vectors(pdev); err_alloc_irq: clk_disable_unprepare(plat->stmmac_clk); clk_unregister_fixed_rate(plat->stmmac_clk); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 4225efbeda3d..f2e2261b4b7d 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -547,6 +547,57 @@ static inline void axienet_iow(struct axienet_local *lp, off_t offset, iowrite32(value, lp->regs + offset); } +/** + * axienet_dma_out32 - Memory mapped Axi DMA register write. + * @lp: Pointer to axienet local structure + * @reg: Address offset from the base address of the Axi DMA core + * @value: Value to be written into the Axi DMA register + * + * This function writes the desired value into the corresponding Axi DMA + * register. + */ + +static inline void axienet_dma_out32(struct axienet_local *lp, + off_t reg, u32 value) +{ + iowrite32(value, lp->dma_regs + reg); +} + +#if defined(CONFIG_64BIT) && defined(iowrite64) +/** + * axienet_dma_out64 - Memory mapped Axi DMA register write. + * @lp: Pointer to axienet local structure + * @reg: Address offset from the base address of the Axi DMA core + * @value: Value to be written into the Axi DMA register + * + * This function writes the desired value into the corresponding Axi DMA + * register. 
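+ *
+ * (Editorial aside, hedged: together with the axienet_dma_out_addr()
+ * helpers below, the address-write dispatch reduces to
+ *
+ *	if CONFIG_64BIT && iowrite64 exists && XAE_FEATURE_DMA_64BIT:
+ *		iowrite64(addr, lp->dma_regs + reg);
+ *	else:
+ *		iowrite32(lower_32_bits(addr), lp->dma_regs + reg);
+ *
+ * so on builds without a usable 64-bit MMIO write only the low 32
+ * address bits are programmed - which is why the probe() hunk below
+ * now rejects XAE_FEATURE_DMA_64BIT hardware on 32-bit kernels.)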
+ */
+static inline void axienet_dma_out64(struct axienet_local *lp,
+				     off_t reg, u64 value)
+{
+	iowrite64(value, lp->dma_regs + reg);
+}
+
+static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+					dma_addr_t addr)
+{
+	if (lp->features & XAE_FEATURE_DMA_64BIT)
+		axienet_dma_out64(lp, reg, addr);
+	else
+		axienet_dma_out32(lp, reg, lower_32_bits(addr));
+}
+
+#else /* CONFIG_64BIT */
+
+static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+					dma_addr_t addr)
+{
+	axienet_dma_out32(lp, reg, lower_32_bits(addr));
+}
+
+#endif /* CONFIG_64BIT */
+
 /* Function prototypes visible in xilinx_axienet_mdio.c for other files */
 int axienet_mdio_enable(struct axienet_local *lp);
 void axienet_mdio_disable(struct axienet_local *lp);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 93c9f305bba4..1760930ec0c4 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -133,30 +133,6 @@ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
 	return ioread32(lp->dma_regs + reg);
 }
 
-/**
- * axienet_dma_out32 - Memory mapped Axi DMA register write.
- * @lp: Pointer to axienet local structure
- * @reg: Address offset from the base address of the Axi DMA core
- * @value: Value to be written into the Axi DMA register
- *
- * This function writes the desired value into the corresponding Axi DMA
- * register.
- */
-static inline void axienet_dma_out32(struct axienet_local *lp,
-				     off_t reg, u32 value)
-{
-	iowrite32(value, lp->dma_regs + reg);
-}
-
-static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
-				 dma_addr_t addr)
-{
-	axienet_dma_out32(lp, reg, lower_32_bits(addr));
-
-	if (lp->features & XAE_FEATURE_DMA_64BIT)
-		axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
-}
-
 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
 			       struct axidma_bd *desc)
 {
@@ -2061,6 +2037,11 @@ static int axienet_probe(struct platform_device *pdev)
 			iowrite32(0x0, desc);
 		}
 	}
+	if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
+		dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
+		ret = -EINVAL;
+		goto cleanup_clk;
+	}
 
 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
 	if (ret) {
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 8561f2d4443b..13dafe7a29bd 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -137,6 +137,7 @@
 #define DP83867_DOWNSHIFT_2_COUNT	2
 #define DP83867_DOWNSHIFT_4_COUNT	4
 #define DP83867_DOWNSHIFT_8_COUNT	8
+#define DP83867_SGMII_AUTONEG_EN	BIT(7)
 
 /* CFG3 bits */
 #define DP83867_CFG3_INT_OE			BIT(7)
@@ -855,6 +856,32 @@ static int dp83867_phy_reset(struct phy_device *phydev)
 			  DP83867_PHYCR_FORCE_LINK_GOOD, 0);
 }
 
+static void dp83867_link_change_notify(struct phy_device *phydev)
+{
+	/* There is a limitation in DP83867 PHY device where SGMII AN is
+	 * only triggered once after the device is booted up. Even after the
+	 * PHY TPI is down and up again, SGMII AN is not triggered and
+	 * hence no new in-band message from PHY to MAC side SGMII.
+	 * This could cause an issue during power up, when PHY is up prior
+	 * to MAC. At this condition, once MAC side SGMII is up, MAC side
+	 * SGMII wouldn't receive new in-band message from TI PHY with
+	 * correct link status, speed and duplex info.
+ * Thus, implemented a SW solution here to retrigger SGMII Auto-Neg + * whenever there is a link change. + */ + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + int val = 0; + + val = phy_clear_bits(phydev, DP83867_CFG2, + DP83867_SGMII_AUTONEG_EN); + if (val < 0) + return; + + phy_set_bits(phydev, DP83867_CFG2, + DP83867_SGMII_AUTONEG_EN); + } +} + static struct phy_driver dp83867_driver[] = { { .phy_id = DP83867_PHY_ID, @@ -879,6 +906,8 @@ static struct phy_driver dp83867_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, + + .link_change_notify = dp83867_link_change_notify, }, }; module_phy_driver(dp83867_driver); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 58d602985877..8a2dbe849866 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -1046,7 +1046,6 @@ int __init mdio_bus_init(void) return ret; } -EXPORT_SYMBOL_GPL(mdio_bus_init); #if IS_ENABLED(CONFIG_PHYLIB) void mdio_bus_exit(void) diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 7a8c11a26eb5..4704ed6f00ef 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1750,7 +1750,7 @@ static const struct driver_info ax88179_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1763,7 +1763,7 @@ static const struct driver_info ax88178a_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1776,7 +1776,7 @@ static const struct driver_info cypress_GX3_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1789,7 +1789,7 @@ static const struct driver_info dlink_dub1312_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1802,7 +1802,7 @@ static const struct driver_info sitecom_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1815,7 +1815,7 @@ static const struct driver_info samsung_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1828,7 +1828,7 @@ static const struct driver_info lenovo_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1841,7 +1841,7 @@ static const struct driver_info belkin_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, 
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1854,7 +1854,7 @@ static const struct driver_info toshiba_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1867,7 +1867,7 @@ static const struct driver_info mct_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1880,7 +1880,7 @@ static const struct driver_info at_umc2000_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1893,7 +1893,7 @@ static const struct driver_info at_umc200_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1906,7 +1906,7 @@ static const struct driver_info at_umc2000sp_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index a99aedff795d..ea7309453096 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -388,13 +388,25 @@ static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data)
 	int err;
 
 	while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
+		usb_anchor_urb(urb, &drv_data->tx_anchor);
+
 		err = usb_submit_urb(urb, GFP_ATOMIC);
-		if (err)
+		if (err) {
+			kfree(urb->setup_packet);
+			usb_unanchor_urb(urb);
+			usb_free_urb(urb);
 			break;
+		}
 
 		drv_data->tx_in_flight++;
+		usb_free_urb(urb);
+	}
+
+	/* Clean up the remaining deferred urbs.
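+	 *
+	 * (Editorial aside, hedged: the per-urb lifecycle in the loop
+	 * above is
+	 *
+	 *	anchor -> submit
+	 *	  ok:   usb_free_urb() drops the extra reference taken
+	 *	        by usb_get_from_anchor()
+	 *	  fail: kfree(setup_packet), unanchor, free, stop
+	 *
+	 * and the drain loop below frees both the setup packet and the
+	 * urb for anything still deferred, replacing the removed
+	 * usb_scuttle_anchored_urbs() call, which frees the urbs but
+	 * not their setup packets.)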
*/ + while ((urb = usb_get_from_anchor(&drv_data->deferred))) { + kfree(urb->setup_packet); + usb_free_urb(urb); } - usb_scuttle_anchored_urbs(&drv_data->deferred); } static int nfcmrvl_resume(struct usb_interface *intf) diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index 7e213f8ddc98..df8d27cf2956 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -300,6 +300,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, int r = 0; struct device *dev = &hdev->ndev->dev; struct nfc_evt_transaction *transaction; + u32 aid_len; + u8 params_len; pr_debug("connectivity gate event: %x\n", event); @@ -308,43 +310,48 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, r = nfc_se_connectivity(hdev->ndev, host); break; case ST21NFCA_EVT_TRANSACTION: - /* - * According to specification etsi 102 622 + /* According to specification etsi 102 622 * 11.2.2.4 EVT_TRANSACTION Table 52 * Description Tag Length * AID 81 5 to 16 * PARAMETERS 82 0 to 255 + * + * The key differences are aid storage length is variably sized + * in the packet, but fixed in nfc_evt_transaction, and that the aid_len + * is u8 in the packet, but u32 in the structure, and the tags in + * the packet are not included in nfc_evt_transaction. + * + * size in bytes: 1 1 5-16 1 1 0-255 + * offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4 + * member name: aid_tag(M) aid_len aid params_tag(M) params_len params + * example: 0x81 5-16 X 0x82 0-255 X */ - if (skb->len < NFC_MIN_AID_LENGTH + 2 && - skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) + if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO; - transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL); - if (!transaction) - return -ENOMEM; - - transaction->aid_len = skb->data[1]; + aid_len = skb->data[1]; - /* Checking if the length of the AID is valid */ - if (transaction->aid_len > sizeof(transaction->aid)) - return -EINVAL; + if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid)) + return -EPROTO; - memcpy(transaction->aid, &skb->data[2], - transaction->aid_len); + params_len = skb->data[aid_len + 3]; - /* Check next byte is PARAMETERS tag (82) */ - if (skb->data[transaction->aid_len + 2] != - NFC_EVT_TRANSACTION_PARAMS_TAG) + /* Verify PARAMETERS tag is (82), and final check that there is enough + * space in the packet to read everything. 
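
The st21nfca checks above validate every offset from the Table 52 layout against skb->len before anything is allocated or copied. Here is a standalone sketch of the same bounds discipline; check_evt_transaction() is hypothetical, and max_aid stands in for sizeof(transaction->aid).

#include <stdint.h>
#include <stddef.h>

#define AID_TAG    0x81
#define PARAMS_TAG 0x82

/* Mirrors the ordering in the hunk above: every index is proven to be
 * inside len before it is dereferenced, and only then is memory sized. */
static int check_evt_transaction(const uint8_t *data, size_t len,
                                 size_t max_aid)
{
    size_t aid_len, params_len;

    if (len < 2 || data[0] != AID_TAG)
        return -1;
    aid_len = data[1];

    /* tag + len + aid + params tag + params len must all fit */
    if (len < aid_len + 4 || aid_len > max_aid)
        return -1;
    if (data[aid_len + 2] != PARAMS_TAG)
        return -1;
    params_len = data[aid_len + 3];
    if (len < aid_len + 4 + params_len)
        return -1;
    return 0;
}

int main(void)
{
    const uint8_t evt[] = { 0x81, 5, 1, 2, 3, 4, 5, 0x82, 2, 0xAA, 0xBB };

    return check_evt_transaction(evt, sizeof(evt), 16);
}

Against the 11-byte sample event this returns 0; shortening the buffer by one byte makes the final length check fail, which is exactly the overread the patch closes.
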
+ */ + if ((skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG) || + (skb->len < aid_len + 4 + params_len)) return -EPROTO; - transaction->params_len = skb->data[transaction->aid_len + 3]; + transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL); + if (!transaction) + return -ENOMEM; - /* Total size is allocated (skb->len - 2) minus fixed array members */ - if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction))) - return -EINVAL; + transaction->aid_len = aid_len; + transaction->params_len = params_len; - memcpy(transaction->params, skb->data + - transaction->aid_len + 4, transaction->params_len); + memcpy(transaction->aid, &skb->data[2], aid_len); + memcpy(transaction->params, &skb->data[aid_len + 4], params_len); r = nfc_se_transaction(hdev->ndev, host, transaction); break; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 24165daee3c8..3ab2cfd254a4 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3285,8 +3285,8 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, * we have no UUID set */ if (uuid_is_null(&ids->uuid)) { - printk_ratelimited(KERN_WARNING - "No UUID available providing old NGUID\n"); + dev_warn_ratelimited(dev, + "No UUID available providing old NGUID\n"); return sysfs_emit(buf, "%pU\n", ids->nguid); } return sysfs_emit(buf, "%pU\n", &ids->uuid); @@ -3863,6 +3863,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, if (ret) { dev_err(ctrl->device, "globally duplicate IDs for nsid %d\n", nsid); + nvme_print_device_info(ctrl); return ret; } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 9b72b6ecf33c..0da94b233fed 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -503,6 +503,7 @@ struct nvme_ctrl_ops { void (*submit_async_event)(struct nvme_ctrl *ctrl); void (*delete_ctrl)(struct nvme_ctrl *ctrl); int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size); + void (*print_device_info)(struct nvme_ctrl *ctrl); }; /* @@ -548,6 +549,33 @@ static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags, return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id)); } +/* + * Return the length of the string without the space padding + */ +static inline int nvme_strlen(char *s, int len) +{ + while (s[len - 1] == ' ') + len--; + return len; +} + +static inline void nvme_print_device_info(struct nvme_ctrl *ctrl) +{ + struct nvme_subsystem *subsys = ctrl->subsys; + + if (ctrl->ops->print_device_info) { + ctrl->ops->print_device_info(ctrl); + return; + } + + dev_err(ctrl->device, + "VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id, + nvme_strlen(subsys->model, sizeof(subsys->model)), + subsys->model, nvme_strlen(subsys->firmware_rev, + sizeof(subsys->firmware_rev)), + subsys->firmware_rev); +} + #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj, const char *dev_name); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 48f4f6eb877b..c7012e85d035 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1334,6 +1334,14 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) dev_warn(dev->ctrl.device, "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", csts, result); + + if (csts != ~0) + return; + + dev_warn(dev->ctrl.device, + "Does your device have a faulty power saving mode enabled?\n"); + dev_warn(dev->ctrl.device, + "Try \"nvme_core.default_ps_max_latency_us=0 
pcie_aspm=off\" and report a bug\n"); } static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) @@ -2976,6 +2984,21 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); } + +static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl) +{ + struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); + struct nvme_subsystem *subsys = ctrl->subsys; + + dev_err(ctrl->device, + "VID:DID %04x:%04x model:%.*s firmware:%.*s\n", + pdev->vendor, pdev->device, + nvme_strlen(subsys->model, sizeof(subsys->model)), + subsys->model, nvme_strlen(subsys->firmware_rev, + sizeof(subsys->firmware_rev)), + subsys->firmware_rev); +} + static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .name = "pcie", .module = THIS_MODULE, @@ -2987,6 +3010,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .free_ctrl = nvme_pci_free_ctrl, .submit_async_event = nvme_pci_submit_async_event, .get_address = nvme_pci_get_address, + .print_device_info = nvme_pci_print_device_info, }; static int nvme_dev_map(struct nvme_dev *dev) @@ -3421,7 +3445,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ - .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | + NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | NVME_QUIRK_NO_NS_DESC_LIST, }, @@ -3437,6 +3462,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | NVME_QUIRK_DISABLE_WRITE_ZEROES| NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ @@ -3449,10 +3476,20 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ @@ -3463,6 +3500,10 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI 
TiPro7000 NVMe SSD */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig index 72df4b8f4dd8..09c7829e95c4 100644 --- a/drivers/platform/mellanox/Kconfig +++ b/drivers/platform/mellanox/Kconfig @@ -85,7 +85,7 @@ config NVSW_SN2201 depends on I2C depends on REGMAP_I2C help - This driver provides support for the Nvidia SN2201 platfom. + This driver provides support for the Nvidia SN2201 platform. The SN2201 is a highly integrated for one rack unit system with L3 management switches. It has 48 x 1Gbps RJ45 + 4 x 100G QSFP28 ports in a compact 1RU form factor. The system also including a diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c index 0bcdc7c75007..2923daf63b75 100644 --- a/drivers/platform/mellanox/nvsw-sn2201.c +++ b/drivers/platform/mellanox/nvsw-sn2201.c @@ -326,7 +326,7 @@ static struct resource nvsw_sn2201_lpc_res[] = { }; /* SN2201 I2C platform data. */ -struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = { +static struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = { .irq = NVSW_SN2201_CPLD_SYSIRQ, }; diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig index d421e1482395..6b51ad01f791 100644 --- a/drivers/platform/mips/Kconfig +++ b/drivers/platform/mips/Kconfig @@ -17,7 +17,7 @@ menuconfig MIPS_PLATFORM_DEVICES if MIPS_PLATFORM_DEVICES config CPU_HWMON - tristate "Loongson-3 CPU HWMon Driver" + bool "Loongson-3 CPU HWMon Driver" depends on MACH_LOONGSON64 select HWMON default y diff --git a/drivers/platform/x86/barco-p50-gpio.c b/drivers/platform/x86/barco-p50-gpio.c index 05534287bc26..8dd672339485 100644 --- a/drivers/platform/x86/barco-p50-gpio.c +++ b/drivers/platform/x86/barco-p50-gpio.c @@ -405,11 +405,14 @@ MODULE_DEVICE_TABLE(dmi, dmi_ids); static int __init p50_module_init(void) { struct resource res = DEFINE_RES_IO(P50_GPIO_IO_PORT_BASE, P50_PORT_CMD + 1); + int ret; if (!dmi_first_match(dmi_ids)) return -ENODEV; - platform_driver_register(&p50_gpio_driver); + ret = platform_driver_register(&p50_gpio_driver); + if (ret) + return ret; gpio_pdev = platform_device_register_simple(DRIVER_NAME, PLATFORM_DEVID_NONE, &res, 1); if (IS_ERR(gpio_pdev)) { diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c index 1ef606e3ef80..497ad2f64a51 100644 --- a/drivers/platform/x86/gigabyte-wmi.c +++ b/drivers/platform/x86/gigabyte-wmi.c @@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev) }} static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = { + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"), @@ -156,6 +157,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = { DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"), + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z690M AORUS ELITE AX DDR4"), { } }; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 667f94bba905..0d8cb22e30df 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -38,6 
+38,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); #define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C" #define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4" #define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95 +#define zero_if_sup(tmp) (zero_insize_support?0:sizeof(tmp)) // use when zero insize is required /* DMI board names of devices that should use the omen specific path for * thermal profiles. @@ -220,6 +221,7 @@ static struct input_dev *hp_wmi_input_dev; static struct platform_device *hp_wmi_platform_dev; static struct platform_profile_handler platform_profile_handler; static bool platform_profile_support; +static bool zero_insize_support; static struct rfkill *wifi_rfkill; static struct rfkill *bluetooth_rfkill; @@ -290,14 +292,16 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command, struct bios_return *bios_return; union acpi_object *obj = NULL; struct bios_args *args = NULL; - int mid, actual_outsize, ret; + int mid, actual_insize, actual_outsize; size_t bios_args_size; + int ret; mid = encode_outsize_for_pvsz(outsize); if (WARN_ON(mid < 0)) return mid; - bios_args_size = struct_size(args, data, insize); + actual_insize = max(insize, 128); + bios_args_size = struct_size(args, data, actual_insize); args = kmalloc(bios_args_size, GFP_KERNEL); if (!args) return -ENOMEM; @@ -374,7 +378,7 @@ static int hp_wmi_read_int(int query) int val = 0, ret; ret = hp_wmi_perform_query(query, HPWMI_READ, &val, - 0, sizeof(val)); + zero_if_sup(val), sizeof(val)); if (ret) return ret < 0 ? ret : -EINVAL; @@ -410,7 +414,8 @@ static int hp_wmi_get_tablet_mode(void) return -ENODEV; ret = hp_wmi_perform_query(HPWMI_SYSTEM_DEVICE_MODE, HPWMI_READ, - system_device_mode, 0, sizeof(system_device_mode)); + system_device_mode, zero_if_sup(system_device_mode), + sizeof(system_device_mode)); if (ret < 0) return ret; @@ -497,7 +502,7 @@ static int hp_wmi_fan_speed_max_get(void) int val = 0, ret; ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_GET_QUERY, HPWMI_GM, - &val, 0, sizeof(val)); + &val, zero_if_sup(val), sizeof(val)); if (ret) return ret < 0 ? ret : -EINVAL; @@ -509,7 +514,7 @@ static int __init hp_wmi_bios_2008_later(void) { int state = 0; int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + zero_if_sup(state), sizeof(state)); if (!ret) return 1; @@ -520,7 +525,7 @@ static int __init hp_wmi_bios_2009_later(void) { u8 state[128]; int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + zero_if_sup(state), sizeof(state)); if (!ret) return 1; @@ -598,7 +603,7 @@ static int hp_wmi_rfkill2_refresh(void) int err, i; err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + zero_if_sup(state), sizeof(state)); if (err) return err; @@ -1007,7 +1012,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device) int err, i; err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + zero_if_sup(state), sizeof(state)); if (err) return err < 0 ? 
err : -EINVAL; @@ -1483,11 +1488,15 @@ static int __init hp_wmi_init(void) { int event_capable = wmi_has_guid(HPWMI_EVENT_GUID); int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID); - int err; + int err, tmp = 0; if (!bios_capable && !event_capable) return -ENODEV; + if (hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, HPWMI_READ, &tmp, + sizeof(tmp), sizeof(tmp)) == HPWMI_RET_INVALID_PARAMETERS) + zero_insize_support = true; + if (event_capable) { err = hp_wmi_input_setup(); if (err) diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 216d31e3403d..79cff1fc675c 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -122,6 +122,12 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible 15-df0xxx"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go"), + }, + }, { } }; diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index edaf22e5ae98..40183bda7894 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -1912,6 +1912,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &tgl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &tgl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &tgl_reg_map), {} }; diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c index 34daf9df168b..ace1239bc0a0 100644 --- a/drivers/platform/x86/intel/pmt/crashlog.c +++ b/drivers/platform/x86/intel/pmt/crashlog.c @@ -282,7 +282,7 @@ static int pmt_crashlog_probe(struct auxiliary_device *auxdev, auxiliary_set_drvdata(auxdev, priv); for (i = 0; i < intel_vsec_dev->num_resources; i++) { - struct intel_pmt_entry *entry = &priv->entry[i].entry; + struct intel_pmt_entry *entry = &priv->entry[priv->num_entries].entry; ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, intel_vsec_dev, i); if (ret < 0) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index d0eab5700dc5..00684e11976b 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -160,8 +160,8 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *); static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *); static void ibmvfc_tgt_move_login(struct ibmvfc_target *); -static void ibmvfc_release_sub_crqs(struct ibmvfc_host *); -static void ibmvfc_init_sub_crqs(struct ibmvfc_host *); +static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *); +static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *); static const char *unknown_error = "unknown error"; @@ -917,7 +917,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) struct vio_dev *vdev = to_vio_dev(vhost->dev); unsigned long flags; - ibmvfc_release_sub_crqs(vhost); + ibmvfc_dereg_sub_crqs(vhost); /* Re-enable the CRQ */ do { @@ -936,7 +936,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) spin_unlock(vhost->crq.q_lock); spin_unlock_irqrestore(vhost->host->host_lock, flags); - ibmvfc_init_sub_crqs(vhost); + ibmvfc_reg_sub_crqs(vhost); return rc; } @@ -955,7 +955,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) struct vio_dev *vdev = to_vio_dev(vhost->dev); struct ibmvfc_queue *crq = &vhost->crq; - ibmvfc_release_sub_crqs(vhost); + ibmvfc_dereg_sub_crqs(vhost); /* Close the CRQ */ do { @@ -988,7 
+988,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) spin_unlock(vhost->crq.q_lock); spin_unlock_irqrestore(vhost->host->host_lock, flags); - ibmvfc_init_sub_crqs(vhost); + ibmvfc_reg_sub_crqs(vhost); return rc; } @@ -5682,6 +5682,8 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost, queue->cur = 0; queue->fmt = fmt; queue->size = PAGE_SIZE / fmt_size; + + queue->vhost = vhost; return 0; } @@ -5757,9 +5759,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, ENTER; - if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) - return -ENOMEM; - rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE, &scrq->cookie, &scrq->hw_irq); @@ -5790,7 +5789,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, } scrq->hwq_id = index; - scrq->vhost = vhost; LEAVE; return 0; @@ -5800,7 +5798,6 @@ irq_failed: rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie); } while (rtas_busy_delay(rc)); reg_failed: - ibmvfc_free_queue(vhost, scrq); LEAVE; return rc; } @@ -5826,12 +5823,50 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index) if (rc) dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc); - ibmvfc_free_queue(vhost, scrq); + /* Clean out the queue */ + memset(scrq->msgs.crq, 0, PAGE_SIZE); + scrq->cur = 0; + + LEAVE; +} + +static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost) +{ + int i, j; + + ENTER; + if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs) + return; + + for (i = 0; i < nr_scsi_hw_queues; i++) { + if (ibmvfc_register_scsi_channel(vhost, i)) { + for (j = i; j > 0; j--) + ibmvfc_deregister_scsi_channel(vhost, j - 1); + vhost->do_enquiry = 0; + return; + } + } + + LEAVE; +} + +static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost) +{ + int i; + + ENTER; + if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs) + return; + + for (i = 0; i < nr_scsi_hw_queues; i++) + ibmvfc_deregister_scsi_channel(vhost, i); + LEAVE; } static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) { + struct ibmvfc_queue *scrq; int i, j; ENTER; @@ -5847,30 +5882,41 @@ static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) } for (i = 0; i < nr_scsi_hw_queues; i++) { - if (ibmvfc_register_scsi_channel(vhost, i)) { - for (j = i; j > 0; j--) - ibmvfc_deregister_scsi_channel(vhost, j - 1); + scrq = &vhost->scsi_scrqs.scrqs[i]; + if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) { + for (j = i; j > 0; j--) { + scrq = &vhost->scsi_scrqs.scrqs[j - 1]; + ibmvfc_free_queue(vhost, scrq); + } kfree(vhost->scsi_scrqs.scrqs); vhost->scsi_scrqs.scrqs = NULL; vhost->scsi_scrqs.active_queues = 0; vhost->do_enquiry = 0; - break; + vhost->mq_enabled = 0; + return; } } + ibmvfc_reg_sub_crqs(vhost); + LEAVE; } static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost) { + struct ibmvfc_queue *scrq; int i; ENTER; if (!vhost->scsi_scrqs.scrqs) return; - for (i = 0; i < nr_scsi_hw_queues; i++) - ibmvfc_deregister_scsi_channel(vhost, i); + ibmvfc_dereg_sub_crqs(vhost); + + for (i = 0; i < nr_scsi_hw_queues; i++) { + scrq = &vhost->scsi_scrqs.scrqs[i]; + ibmvfc_free_queue(vhost, scrq); + } kfree(vhost->scsi_scrqs.scrqs); vhost->scsi_scrqs.scrqs = NULL; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 3718406e0988..c39a245f43d0 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -789,6 +789,7 @@ struct ibmvfc_queue { spinlock_t _lock; spinlock_t *q_lock; + struct ibmvfc_host *vhost; struct ibmvfc_event_pool evt_pool; struct 
list_head sent; struct list_head free; @@ -797,7 +798,6 @@ struct ibmvfc_queue { union ibmvfc_iu cancel_rsp; /* Sub-CRQ fields */ - struct ibmvfc_host *vhost; unsigned long cookie; unsigned long vios_cookie; unsigned long hw_irq; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 256ec6d08c16..9d01a3e3c26a 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -9795,7 +9795,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) GFP_KERNEL); if (!ioa_cfg->hrrq[i].host_rrq) { - while (--i > 0) + while (--i >= 0) dma_free_coherent(&pdev->dev, sizeof(u32) * ioa_cfg->hrrq[i].size, ioa_cfg->hrrq[i].host_rrq, @@ -10068,7 +10068,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg, ioa_cfg->vectors_info[i].desc, &ioa_cfg->hrrq[i]); if (rc) { - while (--i >= 0) + while (--i > 0) free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); return rc; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index b1be0dd0337a..f5d74958b664 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -420,8 +420,6 @@ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t, uint32_t); void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); -void lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *h, struct lpfc_iocbq *i, - struct lpfc_wcqe_complete *w); void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *); @@ -630,7 +628,7 @@ void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp); void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, - struct lpfc_wcqe_complete *abts_cmpl); + struct lpfc_iocbq *rspiocb); void lpfc_create_multixri_pools(struct lpfc_hba *phba); void lpfc_create_destroy_pools(struct lpfc_hba *phba); void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 9d36b20fb878..13dfe285493d 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -197,7 +197,7 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp, memset(bpl, 0, sizeof(struct ulp_bde64)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys)); bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys)); - bpl->tus.f.bdeFlags = BUFF_TYPE_BLP_64; + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4); bpl->tus.w = le32_to_cpu(bpl->tus.w); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 07f9a6e61e10..3fababb7c181 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2998,10 +2998,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ndlp->nlp_DID, ulp_status, ulp_word4); - /* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */ if (lpfc_error_lost_link(ulp_status, ulp_word4)) { - lpfc_disc_state_machine(vport, ndlp, cmdiocb, - NLP_EVT_DEVICE_RM); skip_recovery = 1; goto out; } @@ -3021,18 +3018,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, spin_unlock_irq(&ndlp->lock); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); - lpfc_els_free_iocb(phba, cmdiocb); - lpfc_nlp_put(ndlp); - - /* Presume the node was released. */ - return; + goto out_rsrc_free; } out: - /* Driver is done with the IO. */ - lpfc_els_free_iocb(phba, cmdiocb); - lpfc_nlp_put(ndlp); - /* At this point, the LOGO processing is complete. NOTE: For a * pt2pt topology, we are assuming the NPortID will only change * on link up processing. 
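
The two ipr unwind fixes above move the loop bound in opposite directions: the allocation unwind must free index 0 too, while the IRQ unwind must stop before vector 0, presumably because that vector is requested elsewhere. A sketch of the first idiom, with a hypothetical alloc_all():

#include <stdlib.h>

static int alloc_all(void **slots, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        slots[i] = malloc(64);
        if (!slots[i]) {
            /* entries 0..i-1 were allocated, so the unwind must
             * reach index 0: --i > 0 here would leak slots[0] */
            while (--i >= 0)
                free(slots[i]);
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    void *slots[4];
    int i;

    if (alloc_all(slots, 4))
        return 1;
    for (i = 0; i < 4; i++)
        free(slots[i]);
    return 0;
}

The mirror-image bug, using >= 0 when slot 0 belongs to another owner, releases a resource this loop never took, which is what the second ipr hunk restores.
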
For a LOGO / PLOGI initiated by the @@ -3059,6 +3048,10 @@ out: ndlp->nlp_DID, ulp_status, ulp_word4, tmo, vport->num_disc_nodes); + + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); + lpfc_disc_start(vport); return; } @@ -3075,6 +3068,10 @@ out: lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); } +out_rsrc_free: + /* Driver is done with the I/O. */ + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); } /** diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 8511369d2cf8..f024415731ac 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -4487,6 +4487,9 @@ struct wqe_common { #define wqe_sup_SHIFT 6 #define wqe_sup_MASK 0x00000001 #define wqe_sup_WORD word11 +#define wqe_ffrq_SHIFT 6 +#define wqe_ffrq_MASK 0x00000001 +#define wqe_ffrq_WORD word11 #define wqe_wqec_SHIFT 7 #define wqe_wqec_MASK 0x00000001 #define wqe_wqec_WORD word11 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 93b94c64518d..750dd1e9f2cc 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -12188,7 +12188,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba) rc = pci_enable_msi(phba->pcidev); if (!rc) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0462 PCI enable MSI mode success.\n"); + "0012 PCI enable MSI mode success.\n"); else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0471 PCI enable MSI mode failed (%d)\n", rc); diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 639f86635127..b86ff9fcdf0c 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -834,7 +834,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_nvmet_invalidate_host(phba, ndlp); if (ndlp->nlp_DID == Fabric_DID) { - if (vport->port_state <= LPFC_FDISC) + if (vport->port_state <= LPFC_FDISC || + vport->fc_flag & FC_PT2PT) goto out; lpfc_linkdown_port(vport); spin_lock_irq(shost->host_lock); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 335e90633933..cd10ee6482fc 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1065,25 +1065,37 @@ lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, nCmd->rcv_rsplen = wcqe->parameter; nCmd->status = 0; + /* Get the NVME cmd details for this unique error. */ + cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr; + ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr; + /* Check if this is really an ERSP */ if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) { lpfc_ncmd->status = IOSTAT_SUCCESS; lpfc_ncmd->result = 0; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, - "6084 NVME Completion ERSP: " - "xri %x placed x%x\n", - lpfc_ncmd->cur_iocbq.sli4_xritag, - wcqe->total_data_placed); + "6084 NVME FCP_ERR ERSP: " + "xri %x placed x%x opcode x%x cmd_id " + "x%x cqe_status x%x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + wcqe->total_data_placed, + cp->sqe.common.opcode, + cp->sqe.common.command_id, + ep->cqe.status); break; } lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6081 NVME Completion Protocol Error: " "xri %x status x%x result x%x " - "placed x%x\n", + "placed x%x opcode x%x cmd_id x%x, " + "cqe_status x%x\n", lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_ncmd->status, lpfc_ncmd->result, - wcqe->total_data_placed); + wcqe->total_data_placed, + cp->sqe.common.opcode, + cp->sqe.common.command_id, + ep->cqe.status); break; case IOSTAT_LOCAL_REJECT: /* Let fall through to set command final state. 
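
The wqe_ffrq additions above follow lpfc's bitfield convention, where each field contributes _SHIFT, _MASK and _WORD macros consumed by bf_set()/bf_get() through token pasting. A compilable model of that convention; the struct here is pared down to word11 and the macros are modelled after, not copied from, lpfc_hw4.h.

#include <stdint.h>
#include <stdio.h>

/* Each field supplies _SHIFT, _MASK and _WORD macros; _WORD names the
 * backing struct member, so name##_WORD expands to it after pasting. */
#define bf_set(name, ptr, value) \
    ((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
     ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))
#define bf_get(name, ptr) \
    (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

struct wqe_common {
    uint32_t word11;
};

#define wqe_ffrq_SHIFT 6
#define wqe_ffrq_MASK  0x00000001
#define wqe_ffrq_WORD  word11

int main(void)
{
    struct wqe_common wc = { 0 };

    bf_set(wqe_ffrq, &wc, 1);
    /* prints word11=0x00000040 ffrq=1 */
    printf("word11=0x%08x ffrq=%u\n", (unsigned)wc.word11,
           (unsigned)bf_get(wqe_ffrq, &wc));
    return 0;
}
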
*/ @@ -1195,7 +1207,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, { struct lpfc_hba *phba = vport->phba; struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; - struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq); + struct nvme_common_command *sqe; + struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq; union lpfc_wqe128 *wqe = &pwqeq->wqe; uint32_t req_len; @@ -1252,8 +1265,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, cstat->control_requests++; } - if (pnode->nlp_nvme_info & NLP_NVME_NSLER) + if (pnode->nlp_nvme_info & NLP_NVME_NSLER) { bf_set(wqe_erp, &wqe->generic.wqe_com, 1); + sqe = &((struct nvme_fc_cmd_iu *) + nCmd->cmdaddr)->sqe.common; + if (sqe->opcode == nvme_admin_async_event) + bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1); + } + /* * Finish initializing those WQE fields that are independent * of the nvme_cmnd request_buffer @@ -1787,7 +1806,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request. * @phba: Pointer to HBA context object * @cmdiocb: Pointer to command iocb object. - * @abts_cmpl: Pointer to wcqe complete object. + * @rspiocb: Pointer to response iocb object. * * This is the callback function for any NVME FCP IO that was aborted. * @@ -1796,8 +1815,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, **/ void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, - struct lpfc_wcqe_complete *abts_cmpl) + struct lpfc_iocbq *rspiocb) { + struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl; + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, "6145 ABORT_XRI_CN completing on rpi x%x " "original iotag x%x, abort cmd iotag x%x " @@ -1840,6 +1861,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, struct lpfc_nvme_fcpreq_priv *freqpriv; unsigned long flags; int ret_val; + struct nvme_fc_cmd_iu *cp; /* Validate pointers. LLDD fault handling with transport does * have timing races. @@ -1963,10 +1985,16 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, return; } + /* + * Get Command Id from cmd to plug into response. This + * code is not needed in the next NVME Transport drop. 
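
The prep path above peeks at the SQE embedded in the FC command IU so Async Event Requests can be flagged, and the new log messages read the same opcode and command_id fields. A heavily hedged sketch follows: the header words are placeholders, not the real struct nvme_fc_cmd_iu layout from include/linux/nvme-fc.h, and only the 0x0c AER opcode is taken from the NVMe spec.

#include <stdint.h>
#include <stdio.h>

#define NVME_ADMIN_ASYNC_EVENT 0x0c  /* AER opcode per the NVMe spec */

/* Only the embedded SQE matters here; the hdr words are a stand-in. */
struct fake_cmd_iu {
    uint32_t hdr[8];                 /* hypothetical IU header */
    struct {
        uint8_t  opcode;
        uint8_t  flags;
        uint16_t command_id;
    } sqe;
};

int main(void)
{
    struct fake_cmd_iu iu = { .sqe = { .opcode = NVME_ADMIN_ASYNC_EVENT,
                                       .command_id = 7 } };

    /* same fields the new log messages and the wqe_ffrq test consult */
    printf("opcode x%x cmd_id x%x aer=%d\n", iu.sqe.opcode,
           iu.sqe.command_id, iu.sqe.opcode == NVME_ADMIN_ASYNC_EVENT);
    return 0;
}
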
+ */ + cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, "6138 Transport Abort NVME Request Issued for " - "ox_id x%x\n", - nvmereq_wqe->sli4_xritag); + "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n", + nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode, + cp->sqe.common.command_id); return; out_unlock: diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index d43968203248..ba5e4016262e 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -6062,6 +6062,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) int status; u32 logit = LOG_FCP; + if (!rport) + return FAILED; + rdata = rport->dd_data; if (!rdata || !rdata->pnode) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, @@ -6140,6 +6143,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) unsigned long flags; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); + if (!rport) + return FAILED; + rdata = rport->dd_data; if (!rdata || !rdata->pnode) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 6ed696c4602a..80ac3a051c19 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1930,7 +1930,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) sync_buf = __lpfc_sli_get_iocbq(phba); if (!sync_buf) { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, - "6213 No available WQEs for CMF_SYNC_WQE\n"); + "6244 No available WQEs for CMF_SYNC_WQE\n"); ret_val = ENOMEM; goto out_unlock; } @@ -3805,7 +3805,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, set_job_ulpword4(cmdiocbp, IOERR_ABORT_REQUESTED); /* - * For SLI4, irsiocb contains + * For SLI4, irspiocb contains * NO_XRI in sli_xritag, it * shall not affect releasing * sgl (xri) process. @@ -3823,7 +3823,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } } - (cmdiocbp->cmd_cmpl) (phba, cmdiocbp, saveq); + cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq); } else lpfc_sli_release_iocbq(phba, cmdiocbp); } else { @@ -4063,8 +4063,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; if (cmdiocbq->cmd_cmpl) { spin_unlock_irqrestore(&phba->hbalock, iflag); - (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, - &rspiocbq); + cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq); spin_lock_irqsave(&phba->hbalock, iflag); } break; @@ -10288,7 +10287,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, * @flag: Flag indicating if this command can be put into txq. * * __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to - * send an iocb command to an HBA with SLI-4 interface spec. + * send an iocb command to an HBA with SLI-3 interface spec. * * This function takes the hbalock before invoking the lockless version. 
* The function will return success after it successfully submit the wqe to @@ -12740,7 +12739,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl; cmdiocbq->wait_cmd_cmpl = NULL; if (cmdiocbq->cmd_cmpl) - (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, NULL); + cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL); else lpfc_sli_release_iocbq(phba, cmdiocbq); return; @@ -12754,9 +12753,9 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, /* Set the exchange busy flag for task management commands */ if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) && - !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) { + !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) { lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, - cur_iocbq); + cur_iocbq); if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY)) lpfc_cmd->flags |= LPFC_SBUF_XBUSY; else @@ -13896,7 +13895,7 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) * @irspiocbq: Pointer to work-queue completion queue entry. * * This routine handles an ELS work-queue completion event and construct - * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common + * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common * discovery engine to handle. * * Return: Pointer to the receive IOCBQ, NULL otherwise. @@ -13940,7 +13939,7 @@ lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba, if (bf_get(lpfc_wcqe_c_xb, wcqe)) { spin_lock_irqsave(&phba->hbalock, iflags); - cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; + irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; spin_unlock_irqrestore(&phba->hbalock, iflags); } @@ -14799,7 +14798,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, /* Pass the cmd_iocb and the wcqe to the upper layer */ memcpy(&cmdiocbq->wcqe_cmpl, wcqe, sizeof(struct lpfc_wcqe_complete)); - (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, cmdiocbq); + cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq); } else { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0375 FCP cmdiocb not callback function " @@ -18956,7 +18955,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, /* Free iocb created in lpfc_prep_seq */ list_for_each_entry_safe(curr_iocb, next_iocb, - &iocbq->list, list) { + &iocbq->list, list) { list_del_init(&curr_iocb->list); lpfc_sli_release_iocbq(phba, curr_iocb); } diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 4fab79ed58ed..2ab6f7db64d8 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. 
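
lpfc_sli_wake_iocb_wait() above recovers the enclosing lpfc_io_buf from its embedded cur_iocbq member with container_of(). A minimal model of that pointer arithmetic; the kernel macro in <linux/container_of.h> adds type checking that this sketch omits.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct iocbq {
    int tag;
};

struct io_buf {
    int flags;
    struct iocbq cur_iocbq;          /* embedded, like lpfc_io_buf */
};

int main(void)
{
    struct io_buf buf = { .flags = 7 };
    struct iocbq *q = &buf.cur_iocbq;
    /* walk back from the embedded member to its parent object */
    struct io_buf *b = container_of(q, struct io_buf, cur_iocbq);

    printf("%d\n", b->flags);        /* prints 7 */
    return 0;
}
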
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.2.0.3" +#define LPFC_DRIVER_VERSION "14.2.0.4" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 37d46ae5c61d..9a1ae52bb621 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -5369,6 +5369,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) Mpi2ConfigReply_t mpi_reply; Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1; + u16 depth; int sz; int rc = 0; @@ -5380,7 +5381,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) goto out; /* sas iounit page 1 */ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData); - sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL); if (!sas_iounit_pg1) { pr_err("%s: failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); @@ -5393,16 +5394,16 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) ioc->name, __FILE__, __LINE__, __func__); goto out; } - ioc->max_wideport_qd = - (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ? - le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) : - MPT3SAS_SAS_QUEUE_DEPTH; - ioc->max_narrowport_qd = - (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ? - le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) : - MPT3SAS_SAS_QUEUE_DEPTH; - ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ? - sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH; + + depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth); + ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); + + depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth); + ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); + + depth = sas_iounit_pg1->SATAMaxQDepth; + ioc->max_sata_qd = (depth ? 
depth : MPT3SAS_SATA_QUEUE_DEPTH); + /* pcie iounit page 1 */ rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply, &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t)); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index bfce60183a6e..836ddc476764 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -4031,7 +4031,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) return 0; out_unwind: - while (--i > 0) + while (--i >= 0) free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]); pci_free_irq_vectors(pdev); return rc; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 1f423f723d06..b8a76b89f85a 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -2826,6 +2826,24 @@ static void zbc_open_zone(struct sdebug_dev_info *devip, } } +static inline void zbc_set_zone_full(struct sdebug_dev_info *devip, + struct sdeb_zone_state *zsp) +{ + switch (zsp->z_cond) { + case ZC2_IMPLICIT_OPEN: + devip->nr_imp_open--; + break; + case ZC3_EXPLICIT_OPEN: + devip->nr_exp_open--; + break; + default: + WARN_ONCE(true, "Invalid zone %llu condition %x\n", + zsp->z_start, zsp->z_cond); + break; + } + zsp->z_cond = ZC5_FULL; +} + static void zbc_inc_wp(struct sdebug_dev_info *devip, unsigned long long lba, unsigned int num) { @@ -2838,7 +2856,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip, if (zsp->z_type == ZBC_ZTYPE_SWR) { zsp->z_wp += num; if (zsp->z_wp >= zend) - zsp->z_cond = ZC5_FULL; + zbc_set_zone_full(devip, zsp); return; } @@ -2857,7 +2875,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip, n = num; } if (zsp->z_wp >= zend) - zsp->z_cond = ZC5_FULL; + zbc_set_zone_full(devip, zsp); num -= n; lba += n; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 2c0dd64159b0..5d21f07456c6 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -212,7 +212,12 @@ iscsi_create_endpoint(int dd_size) return NULL; mutex_lock(&iscsi_ep_idr_mutex); - id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO); + + /* + * First endpoint id should be 1 to comply with user space + * applications (iscsid). + */ + id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO); if (id < 0) { mutex_unlock(&iscsi_ep_idr_mutex); printk(KERN_ERR "Could not allocate endpoint ID. 
Error %d.\n", diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 895b56c8f25e..a1a2ac09066f 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3072,7 +3072,7 @@ static void sd_read_cpr(struct scsi_disk *sdkp) goto out; /* We must have at least a 64B header and one 32B range descriptor */ - vpd_len = get_unaligned_be16(&buffer[2]) + 3; + vpd_len = get_unaligned_be16(&buffer[2]) + 4; if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) { sd_printk(KERN_ERR, sdkp, "Invalid Concurrent Positioning Ranges VPD page\n"); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index ca3530982e52..fe000da11332 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1844,7 +1844,7 @@ static struct scsi_host_template scsi_driver = { .cmd_per_lun = 2048, .this_id = -1, /* Ensure there are no gaps in presented sgls */ - .virt_boundary_mask = PAGE_SIZE-1, + .virt_boundary_mask = HV_HYP_PAGE_SIZE - 1, .no_write_same = 1, .track_queue_depth = 1, .change_queue_depth = storvsc_change_queue_depth, @@ -1895,6 +1895,7 @@ static int storvsc_probe(struct hv_device *device, int target = 0; struct storvsc_device *stor_device; int max_sub_channels = 0; + u32 max_xfer_bytes; /* * We support sub-channels for storage on SCSI and FC controllers. @@ -1968,12 +1969,28 @@ static int storvsc_probe(struct hv_device *device, } /* max cmd length */ host->max_cmd_len = STORVSC_MAX_CMD_LEN; - /* - * set the table size based on the info we got - * from the host. + * Any reasonable Hyper-V configuration should provide + * max_transfer_bytes value aligning to HV_HYP_PAGE_SIZE, + * protecting it from any weird value. + */ + max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE); + /* max_hw_sectors_kb */ + host->max_sectors = max_xfer_bytes >> 9; + /* + * There are 2 requirements for Hyper-V storvsc sgl segments, + * based on which the below calculation for max segments is + * done: + * + * 1. Except for the first and last sgl segment, all sgl segments + * should be align to HV_HYP_PAGE_SIZE, that also means the + * maximum number of segments in a sgl can be calculated by + * dividing the total max transfer length by HV_HYP_PAGE_SIZE. + * + * 2. Except for the first and last, each entry in the SGL must + * have an offset that is a multiple of HV_HYP_PAGE_SIZE. */ - host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); + host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1; /* * For non-IDE disks, the host supports multiple channels. * Set the number of HW queues we are supporting. 
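
The storvsc probe changes above round the host-reported limit down to a hypervisor page, then derive max_sectors (512-byte units) and an sg_tablesize with one extra slot for an unaligned first/last pair. Worked numbers, assuming the usual 4 KiB HV_HYP_PAGE_SIZE and an arbitrary, deliberately unaligned host limit:

#include <stdio.h>

#define HV_HYP_PAGE_SIZE  4096u      /* assumed 4 KiB hypervisor page */
#define HV_HYP_PAGE_SHIFT 12

int main(void)
{
    unsigned int max_transfer_bytes = 8 * 1024 * 1024 + 100;
    /* round_down() for a power-of-two size is a simple mask */
    unsigned int max_xfer = max_transfer_bytes & ~(HV_HYP_PAGE_SIZE - 1);

    /* 512-byte sectors, hence the shift by 9 */
    printf("max_sectors  = %u\n", max_xfer >> 9);
    /* interior sgl entries must be page aligned; the +1 leaves room
     * for an unaligned first/last pair */
    printf("sg_tablesize = %u\n", (max_xfer >> HV_HYP_PAGE_SHIFT) + 1);
    return 0;
}

For the 8 MiB example this prints max_sectors 16384 and sg_tablesize 2049.
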
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index 51a82f7803d3..9d16cf925483 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h @@ -331,8 +331,8 @@ struct PVSCSIRingReqDesc { u8 tag; u8 bus; u8 target; - u8 vcpuHint; - u8 unused[59]; + u16 vcpuHint; + u8 unused[58]; } __packed; /* diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig index d1a0dea09ef0..d0ba34cc32f7 100644 --- a/drivers/staging/olpc_dcon/Kconfig +++ b/drivers/staging/olpc_dcon/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config FB_OLPC_DCON tristate "One Laptop Per Child Display CONtroller support" - depends on OLPC && FB + depends on OLPC && FB && BROKEN depends on I2C depends on GPIO_CS5535 && ACPI select BACKLIGHT_CLASS_DEVICE diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c index 3d8e9dea7651..7135d89caac1 100644 --- a/drivers/staging/r8188eu/core/rtw_xmit.c +++ b/drivers/staging/r8188eu/core/rtw_xmit.c @@ -178,8 +178,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf; - res = rtw_alloc_hwxmits(padapter); - if (res) { + if (rtw_alloc_hwxmits(padapter)) { res = _FAIL; goto exit; } @@ -1483,19 +1482,10 @@ int rtw_alloc_hwxmits(struct adapter *padapter) hwxmits = pxmitpriv->hwxmits; - if (pxmitpriv->hwxmit_entry == 5) { - hwxmits[0] .sta_queue = &pxmitpriv->bm_pending; - hwxmits[1] .sta_queue = &pxmitpriv->vo_pending; - hwxmits[2] .sta_queue = &pxmitpriv->vi_pending; - hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; - hwxmits[4] .sta_queue = &pxmitpriv->be_pending; - } else if (pxmitpriv->hwxmit_entry == 4) { - hwxmits[0] .sta_queue = &pxmitpriv->vo_pending; - hwxmits[1] .sta_queue = &pxmitpriv->vi_pending; - hwxmits[2] .sta_queue = &pxmitpriv->be_pending; - hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; - } else { - } + hwxmits[0].sta_queue = &pxmitpriv->vo_pending; + hwxmits[1].sta_queue = &pxmitpriv->vi_pending; + hwxmits[2].sta_queue = &pxmitpriv->be_pending; + hwxmits[3].sta_queue = &pxmitpriv->bk_pending; return 0; } diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c index 1b09462ca908..8dd280e2739a 100644 --- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c @@ -403,7 +403,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 5 : 13; - wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial); + wep_total_len = wep_key_len + sizeof(*pwep); pwep = kzalloc(wep_total_len, GFP_KERNEL); if (!pwep) goto exit; diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c index ece97e37ac91..30374a820496 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c @@ -90,7 +90,8 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 5 : 13; wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material); - pwep = kzalloc(wep_total_len, GFP_KERNEL); + /* Allocate a full structure to avoid potentially running off the end. 
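
Both WEP fixes above stop sizing the allocation as key offset plus key length, which is smaller than the full structure whenever later code touches trailing members. A sketch of the size gap, using a simplified stand-in for struct ndis_802_11_wep rather than the real definition:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct wep {
    uint32_t length;
    uint32_t key_index;
    uint32_t key_length;
    uint8_t  key_material[16];
};

int main(void)
{
    size_t key_len = 13;             /* 104-bit WEP key */
    size_t offset_based = offsetof(struct wep, key_material) + key_len;

    /* 25 vs 28 here: any write to the struct's tail past the key
     * bytes runs off the end of the smaller allocation */
    printf("offset-based: %zu, full struct: %zu\n",
           offset_based, sizeof(struct wep));
    return 0;
}
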
*/ + pwep = kzalloc(sizeof(*pwep), GFP_KERNEL); if (!pwep) { ret = -ENOMEM; goto exit; @@ -582,7 +583,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 5 : 13; wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material); - pwep = kzalloc(wep_total_len, GFP_KERNEL); + /* Allocate a full structure to avoid potentially running off the end. */ + pwep = kzalloc(sizeof(*pwep), GFP_KERNEL); if (!pwep) goto exit; diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c index c7968aecd870..d02de3f0326f 100644 --- a/drivers/tty/goldfish.c +++ b/drivers/tty/goldfish.c @@ -426,7 +426,7 @@ static int goldfish_tty_remove(struct platform_device *pdev) tty_unregister_device(goldfish_tty_driver, qtty->console.index); iounmap(qtty->base); qtty->base = NULL; - free_irq(qtty->irq, pdev); + free_irq(qtty->irq, qtty); tty_port_destroy(&qtty->port); goldfish_tty_current_line_count--; if (goldfish_tty_current_line_count == 0) diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 137eebdcfda9..fd4d24f61c46 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -455,7 +455,7 @@ static void gsm_hex_dump_bytes(const char *fname, const u8 *data, return; } - prefix = kasprintf(GFP_KERNEL, "%s: ", fname); + prefix = kasprintf(GFP_ATOMIC, "%s: ", fname); if (!prefix) return; print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 78b6dedc43e6..8f32fe9e149e 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1517,6 +1517,8 @@ static inline void __stop_tx(struct uart_8250_port *p) unsigned char lsr = serial_in(p, UART_LSR); u64 stop_delay = 0; + p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + if (!(lsr & UART_LSR_THRE)) return; /* diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 4733a233bd0c..f8f950641ad9 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1306,6 +1306,7 @@ static const struct uart_ops qcom_geni_console_pops = { .stop_tx = qcom_geni_serial_stop_tx, .start_tx = qcom_geni_serial_start_tx, .stop_rx = qcom_geni_serial_stop_rx, + .start_rx = qcom_geni_serial_start_rx, .set_termios = qcom_geni_serial_set_termios, .startup = qcom_geni_serial_startup, .request_port = qcom_geni_serial_request_port, diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 9a85b41caa0a..338ebadfd44b 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2214,11 +2214,12 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) /* * Nothing to do if the console is not suspending * except stop_rx to prevent any asynchronous data - * over RX line. Re-start_rx, when required, is - * done by set_termios in resume sequence + * over RX line. However ensure that we will be + * able to Re-start_rx later. 
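
The serial_core suspend hunk above only silences RX when a matching start_rx operation exists, so the resume hunk can re-arm it instead of leaving a non-suspended console deaf. The pairing, sketched with a hypothetical ops table:

#include <stdio.h>

struct port_ops {
    void (*stop_rx)(void *port);
    void (*start_rx)(void *port);    /* optional, may be NULL */
};

/* Suspend: only silence RX when resume will be able to undo it. */
static void suspend_console_rx(const struct port_ops *ops, void *port)
{
    if (ops->start_rx)
        ops->stop_rx(port);
}

static void resume_console_rx(const struct port_ops *ops, void *port)
{
    if (ops->start_rx)
        ops->start_rx(port);
}

static void stop(void *p)  { (void)p; puts("rx stopped"); }
static void start(void *p) { (void)p; puts("rx started"); }

int main(void)
{
    struct port_ops with = { stop, start }, without = { stop, NULL };

    suspend_console_rx(&with, NULL);     /* stops */
    resume_console_rx(&with, NULL);      /* restarts */
    suspend_console_rx(&without, NULL);  /* leaves RX alone */
    return 0;
}
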
*/ if (!console_suspend_enabled && uart_console(uport)) { - uport->ops->stop_rx(uport); + if (uport->ops->start_rx) + uport->ops->stop_rx(uport); goto unlock; } @@ -2310,6 +2311,8 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) if (console_suspend_enabled) uart_change_pm(state, UART_PM_STATE_ON); uport->ops->set_termios(uport, &termios, NULL); + if (!console_suspend_enabled && uport->ops->start_rx) + uport->ops->start_rx(uport); if (console_suspend_enabled) console_start(uport->cons); } diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 01fb4bad86be..ce86d1b790c0 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -748,17 +748,28 @@ static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) } /** - * ufshcd_utrl_clear - Clear a bit in UTRLCLR register + * ufshcd_utrl_clear() - Clear requests from the controller request list. * @hba: per adapter instance - * @pos: position of the bit to be cleared + * @mask: mask with one bit set for each request to be cleared */ -static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) +static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask) { if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) - ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); - else - ufshcd_writel(hba, ~(1 << pos), - REG_UTP_TRANSFER_REQ_LIST_CLEAR); + mask = ~mask; + /* + * From the UFSHCI specification: "UTP Transfer Request List CLear + * Register (UTRLCLR): This field is bit significant. Each bit + * corresponds to a slot in the UTP Transfer Request List, where bit 0 + * corresponds to request slot 0. A bit in this field is set to ‘0’ + * by host software to indicate to the host controller that a transfer + * request slot is cleared. The host controller + * shall free up any resources associated to the request slot + * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The + * host software indicates no change to request slots by setting the + * associated bits in this field to ‘1’. Bits in this field shall only + * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’." + */ + ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR); } /** @@ -2863,27 +2874,26 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, return ufshcd_compose_devman_upiu(hba, lrbp); } -static int -ufshcd_clear_cmd(struct ufs_hba *hba, int tag) +/* + * Clear all the requests from the controller for which a bit has been set in + * @mask and wait until the controller confirms that these requests have been + * cleared. + */ +static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask) { - int err = 0; unsigned long flags; - u32 mask = 1 << tag; /* clear outstanding transaction before retry */ spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_utrl_clear(hba, tag); + ufshcd_utrl_clear(hba, mask); spin_unlock_irqrestore(hba->host->host_lock, flags); /* * wait for h/w to clear corresponding bit in door-bell. * max. wait is 1 sec. 
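
Per the UTRLCLR description above, host software writes 0 to the slots it wants cleared and 1 everywhere else, and controllers with UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR invert that polarity. A small sketch of the value computation; utrlclr_value() is hypothetical:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* value to write to UTRLCLR so the slots set in mask get cleared */
static uint32_t utrlclr_value(uint32_t mask, bool broken_clr_quirk)
{
    if (broken_clr_quirk)
        mask = ~mask;                /* quirky controllers set-to-clear */
    return ~mask;                    /* spec behaviour: 0 means clear */
}

int main(void)
{
    uint32_t mask = (1u << 3) | (1u << 0);   /* clear slots 0 and 3 */

    printf("spec: 0x%08x quirk: 0x%08x\n",
           (unsigned)utrlclr_value(mask, false),
           (unsigned)utrlclr_value(mask, true));
    return 0;
}

For mask 0x9 this prints 0xfffffff6 under the spec behaviour and 0x00000009 under the quirk, matching the conditional inversion in ufshcd_utrl_clear().
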
*/ - err = ufshcd_wait_for_register(hba, - REG_UTP_TRANSFER_REQ_DOOR_BELL, - mask, ~mask, 1000, 1000); - - return err; + return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL, + mask, ~mask, 1000, 1000); } static int @@ -2963,7 +2973,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, err = -ETIMEDOUT; dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", __func__, lrbp->task_tag); - if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) + if (!ufshcd_clear_cmds(hba, 1U << lrbp->task_tag)) /* successfully cleared the command, retry if needed */ err = -EAGAIN; /* @@ -6958,14 +6968,14 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, } /** - * ufshcd_eh_device_reset_handler - device reset handler registered to - * scsi layer. + * ufshcd_eh_device_reset_handler() - Reset a single logical unit. * @cmd: SCSI command pointer * * Returns SUCCESS/FAILED */ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) { + unsigned long flags, pending_reqs = 0, not_cleared = 0; struct Scsi_Host *host; struct ufs_hba *hba; u32 pos; @@ -6984,14 +6994,24 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) } /* clear the commands that were pending for corresponding LUN */ - for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { - if (hba->lrb[pos].lun == lun) { - err = ufshcd_clear_cmd(hba, pos); - if (err) - break; - __ufshcd_transfer_req_compl(hba, 1U << pos); - } + spin_lock_irqsave(&hba->outstanding_lock, flags); + for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) + if (hba->lrb[pos].lun == lun) + __set_bit(pos, &pending_reqs); + hba->outstanding_reqs &= ~pending_reqs; + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + if (ufshcd_clear_cmds(hba, pending_reqs) < 0) { + spin_lock_irqsave(&hba->outstanding_lock, flags); + not_cleared = pending_reqs & + ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + hba->outstanding_reqs |= not_cleared; + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + dev_err(hba->dev, "%s: failed to clear requests %#lx\n", + __func__, not_cleared); } + __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared); out: hba->req_abort_count = 0; @@ -7088,7 +7108,7 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) goto out; } - err = ufshcd_clear_cmd(hba, tag); + err = ufshcd_clear_cmds(hba, 1U << tag); if (err) dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", __func__, tag, err); diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index e45c3d6e1536..794e413800ae 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -1941,13 +1941,16 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) } if (enqd_len + trb_buff_len >= full_len) { - if (need_zero_pkt) - zero_len_trb = !zero_len_trb; - - field &= ~TRB_CHAIN; - field |= TRB_IOC; - more_trbs_coming = false; - preq->td.last_trb = ring->enqueue; + if (need_zero_pkt && !zero_len_trb) { + zero_len_trb = true; + } else { + zero_len_trb = false; + field &= ~TRB_CHAIN; + field |= TRB_IOC; + more_trbs_coming = false; + need_zero_pkt = false; + preq->td.last_trb = ring->enqueue; + } } /* Only set interrupt on short packet for OUT endpoints. 
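
The cdnsp fix above queues the zero-length TRB as its own iteration rather than merging its flags into the final data TRB, and the FLAG_SEND_ZLP additions earlier in this section exist for the same protocol rule: where short packets delimit transfers, a transfer whose length is an exact multiple of the max packet size needs an explicit zero-length packet to terminate. A one-line statement of that rule; needs_zlp() is hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* A short packet ends a bulk transfer early; a length that is an
 * exact multiple of wMaxPacketSize therefore needs an explicit
 * zero-length packet to mark its end. */
static bool needs_zlp(unsigned int len, unsigned int maxpacket)
{
    return len != 0 && len % maxpacket == 0;
}

int main(void)
{
    /* prints "1 0": 1024 is a multiple of 512, 1000 is not */
    printf("%d %d\n", needs_zlp(1024, 512), needs_zlp(1000, 512));
    return 0;
}
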
*/ @@ -1962,7 +1965,7 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); - cdnsp_queue_trb(pdev, ring, more_trbs_coming | zero_len_trb, + cdnsp_queue_trb(pdev, ring, more_trbs_coming, lower_32_bits(send_addr), upper_32_bits(send_addr), length_field, diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index f63a27d11fac..3f107a06817d 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -5190,7 +5190,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { retval = -EINVAL; - goto error1; + goto error2; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index e027c0420dc3..573421984948 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1644,13 +1644,8 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc) * This device property is for kernel internal use only and * is expected to be set by the glue code. */ - if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) { - edev = extcon_get_extcon_dev(name); - if (!edev) - return ERR_PTR(-EPROBE_DEFER); - - return edev; - } + if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) + return extcon_get_extcon_dev(name); /* * Try to get an extcon device from the USB PHY controller's "port" diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index ba51de7dd760..6b018048fe2e 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -127,6 +127,7 @@ static const struct property_entry dwc3_pci_intel_phy_charger_detect_properties[ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"), PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"), PROPERTY_ENTRY_BOOL("linux,phy_charger_detect"), + PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), {} }; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 00427d108ab9..8716bece1072 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2976,6 +2976,7 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) struct dwc3 *dwc = dep->dwc; u32 mdwidth; int size; + int maxpacket; mdwidth = dwc3_mdwidth(dwc); @@ -2988,21 +2989,24 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) else size = DWC31_GTXFIFOSIZ_TXFDEP(size); - /* FIFO Depth is in MDWDITH bytes. Multiply */ - size *= mdwidth; - /* - * To meet performance requirement, a minimum TxFIFO size of 3x - * MaxPacketSize is recommended for endpoints that support burst and a - * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't - * support burst. Use those numbers and we can calculate the max packet - * limit as below. 
+ * maxpacket size is determined as part of the following, after assuming + * a mult value of one maxpacket: + * DWC3 revision 280A and prior: + * fifo_size = mult * (max_packet / mdwidth) + 1; + * maxpacket = mdwidth * (fifo_size - 1); + * + * DWC3 revision 290A and onwards: + * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1 + * maxpacket = mdwidth * ((fifo_size - 1) - 1) - mdwidth; */ - if (dwc->maximum_speed >= USB_SPEED_SUPER) - size /= 3; + if (DWC3_VER_IS_PRIOR(DWC3, 290A)) + maxpacket = mdwidth * (size - 1); else - size /= 2; + maxpacket = mdwidth * ((size - 1) - 1) - mdwidth; + /* Functionally, space for one max packet is sufficient */ + size = min_t(int, maxpacket, 1024); usb_ep_set_maxpacket_limit(&dep->endpoint, size); dep->endpoint.max_streams = 16; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 4585ee3a444a..e0fa4b186ec6 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -122,8 +122,6 @@ struct ffs_ep { struct usb_endpoint_descriptor *descs[3]; u8 num; - - int status; /* P: epfile->mutex */ }; struct ffs_epfile { @@ -227,6 +225,9 @@ struct ffs_io_data { bool use_sg; struct ffs_data *ffs; + + int status; + struct completion done; }; struct ffs_desc_helper { @@ -707,12 +708,15 @@ static const struct file_operations ffs_ep0_operations = { static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req) { + struct ffs_io_data *io_data = req->context; + ENTER(); - if (req->context) { - struct ffs_ep *ep = _ep->driver_data; - ep->status = req->status ? req->status : req->actual; - complete(req->context); - } + if (req->status) + io_data->status = req->status; + else + io_data->status = req->actual; + + complete(&io_data->done); } static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter) @@ -1050,7 +1054,6 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) WARN(1, "%s: data_len == -EINVAL\n", __func__); ret = -EINVAL; } else if (!io_data->aio) { - DECLARE_COMPLETION_ONSTACK(done); bool interrupted = false; req = ep->req; @@ -1066,7 +1069,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) io_data->buf = data; - req->context = &done; + init_completion(&io_data->done); + req->context = io_data; req->complete = ffs_epfile_io_complete; ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); @@ -1075,7 +1079,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) spin_unlock_irq(&epfile->ffs->eps_lock); - if (wait_for_completion_interruptible(&done)) { + if (wait_for_completion_interruptible(&io_data->done)) { + spin_lock_irq(&epfile->ffs->eps_lock); + if (epfile->ep != ep) { + ret = -ESHUTDOWN; + goto error_lock; + } /* * To avoid race condition with ffs_epfile_io_complete, * dequeue the request first then check @@ -1083,17 +1092,18 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) * condition with req->complete callback. 
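Stepping back to the dwc3 TXFIFO hunk above: working the two fifo_size formulas backwards gives the max packet limit the new code applies. A small user-space check, with mdwidth and TXFDEP values chosen purely for illustration:

```c
#include <stdio.h>

/* Invert the FIFO-size formulas quoted in the comment: given the TXFDEP
 * value read from GTXFIFOSIZ (in mdwidth units), recover the largest
 * usable packet size, then cap it at 1024 bytes since space for one max
 * packet is functionally sufficient. */
static int dwc3_maxpacket_limit(int fifo_depth, int mdwidth, int prior_290a)
{
	int maxpacket;

	if (prior_290a)
		/* <= 280A: fifo_size = mult * (max_packet / mdwidth) + 1 */
		maxpacket = mdwidth * (fifo_depth - 1);
	else
		/* >= 290A: fifo_size = mult * ((max_packet + mdwidth) / mdwidth + 1) + 1 */
		maxpacket = mdwidth * ((fifo_depth - 1) - 1) - mdwidth;

	return maxpacket < 1024 ? maxpacket : 1024;
}

int main(void)
{
	printf("%d\n", dwc3_maxpacket_limit(200, 8, 0)); /* 1576 -> capped 1024 */
	printf("%d\n", dwc3_maxpacket_limit(66, 8, 1));  /* 520 */
	return 0;
}
```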
*/ usb_ep_dequeue(ep->ep, req); - wait_for_completion(&done); - interrupted = ep->status < 0; + spin_unlock_irq(&epfile->ffs->eps_lock); + wait_for_completion(&io_data->done); + interrupted = io_data->status < 0; } if (interrupted) ret = -EINTR; - else if (io_data->read && ep->status > 0) - ret = __ffs_epfile_read_data(epfile, data, ep->status, + else if (io_data->read && io_data->status > 0) + ret = __ffs_epfile_read_data(epfile, data, io_data->status, &io_data->data); else - ret = ep->status; + ret = io_data->status; goto error_mutex; } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) { ret = -ENOMEM; diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 6f5d45ef2e39..f51694f29de9 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -775,9 +775,13 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, dev->qmult = qmult; snprintf(net->name, sizeof(net->name), "%s%%d", netname); - if (get_ether_addr(dev_addr, addr)) + if (get_ether_addr(dev_addr, addr)) { + net->addr_assign_type = NET_ADDR_RANDOM; dev_warn(&g->dev, "using random %s ethernet address\n", "self"); + } else { + net->addr_assign_type = NET_ADDR_SET; + } eth_hw_addr_set(net, addr); if (get_ether_addr(host_addr, dev->host_mac)) dev_warn(&g->dev, @@ -844,6 +848,10 @@ struct net_device *gether_setup_name_default(const char *netname) eth_random_addr(dev->dev_mac); pr_warn("using random %s ethernet address\n", "self"); + + /* by default we always have a random MAC address */ + net->addr_assign_type = NET_ADDR_RANDOM; + eth_random_addr(dev->host_mac); pr_warn("using random %s ethernet address\n", "host"); @@ -871,7 +879,6 @@ int gether_register_netdev(struct net_device *net) dev = netdev_priv(net); g = dev->gadget; - net->addr_assign_type = NET_ADDR_RANDOM; eth_hw_addr_set(net, dev->dev_mac); status = register_netdev(net); @@ -912,6 +919,7 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr) if (get_ether_addr(dev_addr, new_addr)) return -EINVAL; memcpy(dev->dev_mac, new_addr, ETH_ALEN); + net->addr_assign_type = NET_ADDR_SET; return 0; } EXPORT_SYMBOL_GPL(gether_set_dev_addr); diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index 6117ae8e7242..cea10cdb83ae 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -3016,6 +3016,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev) } udc->isp1301_i2c_client = isp1301_get_client(isp1301_node); + of_node_put(isp1301_node); if (!udc->isp1301_i2c_client) { return -EPROBE_DEFER; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index f0ab63138016..9ac56e9ffc64 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1107,7 +1107,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) { u32 command, temp = 0; struct usb_hcd *hcd = xhci_to_hcd(xhci); - struct usb_hcd *secondary_hcd; int retval = 0; bool comp_timer_running = false; bool pending_portevent = false; @@ -1214,23 +1213,19 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) * first with the primary HCD, and then with the secondary HCD. * If we don't do the same, the host will never be started. 
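The f_fs change above moves the completion and the status out of the shared struct ffs_ep and into the per-I/O struct ffs_io_data, so a waiter that races with endpoint teardown only ever reads memory it owns. A single-threaded toy model of that ownership, with simplified stand-ins for the kernel types:

```c
#include <stdio.h>

struct io_data {
	int status; /* < 0: error; >= 0: bytes transferred */
	int done;   /* stands in for struct completion */
};

struct usb_request {
	void *context; /* now points at the owning io_data, not the endpoint */
	int status;
	int actual;
};

/* Mirrors ffs_epfile_io_complete(): record status in the per-I/O object
 * and signal the waiter; nothing endpoint-wide is touched. */
static void io_complete(struct usb_request *req)
{
	struct io_data *io = req->context;

	io->status = req->status ? req->status : req->actual;
	io->done = 1; /* complete(&io->done) in the kernel */
}

int main(void)
{
	struct io_data io = { 0 };
	struct usb_request req = { .context = &io, .status = 0, .actual = 512 };

	io_complete(&req);
	if (io.done)
		printf("transferred %d bytes\n", io.status);
	return 0;
}
```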
*/ - if (!usb_hcd_is_primary_hcd(hcd)) - secondary_hcd = hcd; - else - secondary_hcd = xhci->shared_hcd; - xhci_dbg(xhci, "Initialize the xhci_hcd\n"); - retval = xhci_init(hcd->primary_hcd); + retval = xhci_init(hcd); if (retval) return retval; comp_timer_running = true; xhci_dbg(xhci, "Start the primary HCD\n"); - retval = xhci_run(hcd->primary_hcd); - if (!retval && secondary_hcd) { + retval = xhci_run(hcd); + if (!retval && xhci->shared_hcd) { xhci_dbg(xhci, "Start the secondary HCD\n"); - retval = xhci_run(secondary_hcd); + retval = xhci_run(xhci->shared_hcd); } + hcd->state = HC_STATE_SUSPENDED; if (xhci->shared_hcd) xhci->shared_hcd->state = HC_STATE_SUSPENDED; diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index a7b3c15957ba..feba2a8d1233 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -166,6 +166,7 @@ static const struct usb_device_id edgeport_2port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) }, + { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) }, { } }; @@ -204,6 +205,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) }, + { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) }, { } }; diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h index 52cbc353051f..9a6f742ad3ab 100644 --- a/drivers/usb/serial/io_usbvend.h +++ b/drivers/usb/serial/io_usbvend.h @@ -212,6 +212,7 @@ // // Definitions for other product IDs #define ION_DEVICE_ID_MT4X56USB 0x1403 // OEM device +#define ION_DEVICE_ID_E5805A 0x1A01 // OEM device (rebranded Edgeport/4) #define GENERATION_ID_FROM_USB_PRODUCT_ID(ProductId) \ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index e60425bbf537..ed1e50d83cca 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -432,6 +432,8 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_CLS8 0x00b0 #define CINTERION_PRODUCT_MV31_MBIM 0x00b3 #define CINTERION_PRODUCT_MV31_RMNET 0x00b7 +#define CINTERION_PRODUCT_MV31_2_MBIM 0x00b8 +#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9 #define CINTERION_PRODUCT_MV32_WA 0x00f1 #define CINTERION_PRODUCT_MV32_WB 0x00f2 @@ -1979,6 +1981,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(3)}, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), .driver_info = RSVD(0)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_MBIM, 0xff), + .driver_info = RSVD(3)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_RMNET, 0xff), + .driver_info = RSVD(0)}, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff), .driver_info = RSVD(3)}, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff), diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index b7a955479156..1b6d46b86f81 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -107,7 +107,7 @@ struct mlx5_vdpa_virtqueue { /* Resources for implementing the notification channel from the device * to the driver. 
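The Edgeport and Cinterion hunks above only add table entries; the matching itself is unchanged. A toy model of that lookup, with the vendor ID value assumed for illustration and the product ID taken from the io_usbvend.h hunk:

```c
#include <stdio.h>

struct usb_id { unsigned short vendor, product; };

#define USB_VENDOR_ID_ION	0x1608	/* assumed value for this model */
#define ION_DEVICE_ID_E5805A	0x1A01	/* from io_usbvend.h above */

/* A device binds when both vendor and product match an entry; the empty
 * terminator mirrors the trailing { } entry in the driver tables. */
static const struct usb_id table[] = {
	{ USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A },
	{ 0, 0 }
};

static int id_matches(unsigned short v, unsigned short p)
{
	for (const struct usb_id *id = table; id->vendor; id++)
		if (id->vendor == v && id->product == p)
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", id_matches(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A));
	return 0;
}
```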
fwqp is the firmware end of an RC connection; the - * other end is vqqp used by the driver. cq is is where completions are + * other end is vqqp used by the driver. cq is where completions are * reported. */ struct mlx5_vdpa_cq cq; @@ -1814,12 +1814,13 @@ static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd) id = mlx5vdpa16_to_cpu(mvdev, vlan); mac_vlan_del(ndev, ndev->config.mac, id, true); + status = VIRTIO_NET_OK; break; default: - break; -} + break; + } -return status; + return status; } static void mlx5_cvq_kick_handler(struct work_struct *work) diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index d503848b3b6e..776ad7496f53 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -1345,9 +1345,9 @@ static int vduse_create_dev(struct vduse_dev_config *config, dev->minor = ret; dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT; - dev->dev = device_create(vduse_class, NULL, - MKDEV(MAJOR(vduse_major), dev->minor), - dev, "%s", config->name); + dev->dev = device_create_with_groups(vduse_class, NULL, + MKDEV(MAJOR(vduse_major), dev->minor), + dev, vduse_dev_groups, "%s", config->name); if (IS_ERR(dev->dev)) { ret = PTR_ERR(dev->dev); goto err_dev; @@ -1596,7 +1596,6 @@ static int vduse_init(void) return PTR_ERR(vduse_class); vduse_class->devnode = vduse_devnode; - vduse_class->dev_groups = vduse_dev_groups; ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse"); if (ret) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 935a1d0ddb97..5ad2596c6e8a 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -499,6 +499,8 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, ops->set_vq_ready(vdpa, idx, s.num); return 0; case VHOST_VDPA_GET_VRING_GROUP: + if (!ops->get_vq_group) + return -EOPNOTSUPP; s.index = idx; s.num = ops->get_vq_group(vdpa, idx); if (s.num >= vdpa->ngroups) diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 14e2043d7685..eab55accf381 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -292,7 +292,7 @@ __vringh_iov(struct vringh *vrh, u16 i, int (*copy)(const struct vringh *vrh, void *dst, const void *src, size_t len)) { - int err, count = 0, up_next, desc_max; + int err, count = 0, indirect_count = 0, up_next, desc_max; struct vring_desc desc, *descs; struct vringh_range range = { -1ULL, 0 }, slowrange; bool slow = false; @@ -349,7 +349,12 @@ __vringh_iov(struct vringh *vrh, u16 i, continue; } - if (count++ == vrh->vring.num) { + if (up_next == -1) + count++; + else + indirect_count++; + + if (count > vrh->vring.num || indirect_count > desc_max) { vringh_bad("Descriptor loop in %p", descs); err = -ELOOP; goto fail; @@ -411,6 +416,7 @@ __vringh_iov(struct vringh *vrh, u16 i, i = return_from_indirect(vrh, &up_next, &descs, &desc_max); slow = false; + indirect_count = 0; } else break; } diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index b5adf6abd241..a6dc8b5846fe 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -6,12 +6,6 @@ config VIRTIO bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_RPMSG or CONFIG_S390_GUEST. 
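A user-space model of the vringh fix above: direct descriptors and descriptors inside an indirect table now get separate counters (the indirect counter is reset when the table is left), so a long but legal indirect chain can no longer be pushed over the main ring's limit while a genuine loop in either space still trips -ELOOP. Sizes here are illustrative:

```c
#include <stdio.h>

#define ELOOP 40

/* Count one descriptor and check both limits, as __vringh_iov() now does. */
static int check_desc(int *count, int *indirect_count, int in_indirect,
		      int vring_num, int desc_max)
{
	if (!in_indirect)
		(*count)++;
	else
		(*indirect_count)++;

	if (*count > vring_num || *indirect_count > desc_max)
		return -ELOOP; /* descriptor loop detected */
	return 0;
}

int main(void)
{
	int count = 0, indirect = 0, err = 0;

	/* 8 direct descriptors in a 16-entry ring: fine. */
	for (int i = 0; !err && i < 8; i++)
		err = check_desc(&count, &indirect, 0, 16, 4);
	/* 5 descriptors inside a 4-entry indirect table: loop detected. */
	for (int i = 0; !err && i < 5; i++)
		err = check_desc(&count, &indirect, 1, 16, 4);
	printf("err=%d\n", err); /* -40 */
	return 0;
}
```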
-config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS - bool - help - This option is selected if the architecture may need to enforce - VIRTIO_F_ACCESS_PLATFORM - config VIRTIO_PCI_LIB tristate help diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index ef04a96942bf..6bace84ae37e 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -5,6 +5,7 @@ #include <linux/module.h> #include <linux/idr.h> #include <linux/of.h> +#include <linux/platform-feature.h> #include <uapi/linux/virtio_ids.h> /* Unique numbering for virtio devices. */ @@ -170,12 +171,10 @@ EXPORT_SYMBOL_GPL(virtio_add_status); static int virtio_features_ok(struct virtio_device *dev) { unsigned int status; - int ret; might_sleep(); - ret = arch_has_restricted_virtio_memory_access(); - if (ret) { + if (platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS)) { if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) { dev_warn(&dev->dev, "device must provide VIRTIO_F_VERSION_1\n"); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index f9a36bc7ac27..c9bec3813e94 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -255,7 +255,7 @@ static void vm_set_status(struct virtio_device *vdev, u8 status) /* * Per memory-barriers.txt, wmb() is not needed to guarantee - * that the the cache coherent memory writes have completed + * that the cache coherent memory writes have completed * before writing to the MMIO region. */ writel(status, vm_dev->base + VIRTIO_MMIO_STATUS); @@ -701,6 +701,7 @@ static int vm_cmdline_set(const char *device, if (!vm_cmdline_parent_registered) { err = device_register(&vm_cmdline_parent); if (err) { + put_device(&vm_cmdline_parent); pr_err("Failed to register parent device!\n"); return err; } diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index a0fa14f28a7f..b790f30b2b56 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -469,7 +469,7 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev, /* * Per memory-barriers.txt, wmb() is not needed to guarantee - * that the the cache coherent memory writes have completed + * that the cache coherent memory writes have completed * before writing to the MMIO region. */ vp_iowrite8(status, &cfg->device_status); diff --git a/drivers/watchdog/gxp-wdt.c b/drivers/watchdog/gxp-wdt.c index b0b2d7a6fdde..2fd85be88278 100644 --- a/drivers/watchdog/gxp-wdt.c +++ b/drivers/watchdog/gxp-wdt.c @@ -172,3 +172,4 @@ module_platform_driver(gxp_wdt_driver); MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>"); MODULE_AUTHOR("Jean-Marie Verdun <verdun@hpe.com>"); MODULE_DESCRIPTION("Driver for GXP watchdog timer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 120d32f164ac..bfd5f4f706bc 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -335,4 +335,24 @@ config XEN_UNPOPULATED_ALLOC having to balloon out RAM regions in order to obtain physical memory space to create such mappings. +config XEN_GRANT_DMA_IOMMU + bool + select IOMMU_API + +config XEN_GRANT_DMA_OPS + bool + select DMA_OPS + +config XEN_VIRTIO + bool "Xen virtio support" + depends on VIRTIO + select XEN_GRANT_DMA_OPS + select XEN_GRANT_DMA_IOMMU if OF + help + Enable virtio support for running as Xen guest. Depending on the + guest type this will require special support on the backend side + (qemu or kernel, depending on the virtio device types used). + + If in doubt, say n. 
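The virtio_mmio hunk above follows the usual driver-core rule that a failed device_register() still requires put_device() to drop the caller's initial reference. A toy refcount model, assuming names that mirror the kernel API while the bodies are stand-ins:

```c
#include <stdio.h>

struct toy_device { int refs; };

static void toy_get(struct toy_device *d) { d->refs++; }
static void toy_put(struct toy_device *d)
{
	if (--d->refs == 0)
		printf("release callback runs, memory freed\n");
}

/* device_register() takes and drops its own references internally even
 * when it fails; the reference from initialization is still the caller's
 * to drop. */
static int toy_register_fail(struct toy_device *d)
{
	toy_get(d);
	toy_put(d);
	return -22; /* -EINVAL */
}

int main(void)
{
	struct toy_device d = { .refs = 1 }; /* initialized device: one ref */

	if (toy_register_fail(&d))
		toy_put(&d); /* the added put_device(): refs hit 0, no leak */
	return 0;
}
```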
+ endmenu diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 5aae66e638a7..c0503f1c7d5b 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -39,3 +39,5 @@ xen-gntalloc-y := gntalloc.o xen-privcmd-y := privcmd.o privcmd-buf.o obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o obj-$(CONFIG_XEN_UNPOPULATED_ALLOC) += unpopulated-alloc.o +obj-$(CONFIG_XEN_GRANT_DMA_OPS) += grant-dma-ops.o +obj-$(CONFIG_XEN_GRANT_DMA_IOMMU) += grant-dma-iommu.o diff --git a/drivers/xen/grant-dma-iommu.c b/drivers/xen/grant-dma-iommu.c new file mode 100644 index 000000000000..16b8bc0c0b33 --- /dev/null +++ b/drivers/xen/grant-dma-iommu.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Stub IOMMU driver which does nothing. + * The main purpose of it being present is to reuse generic IOMMU device tree + * bindings by Xen grant DMA-mapping layer. + * + * Copyright (C) 2022 EPAM Systems Inc. + */ + +#include <linux/iommu.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +struct grant_dma_iommu_device { + struct device *dev; + struct iommu_device iommu; +}; + +/* Nothing is really needed here */ +static const struct iommu_ops grant_dma_iommu_ops; + +static const struct of_device_id grant_dma_iommu_of_match[] = { + { .compatible = "xen,grant-dma" }, + { }, +}; + +static int grant_dma_iommu_probe(struct platform_device *pdev) +{ + struct grant_dma_iommu_device *mmu; + int ret; + + mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); + if (!mmu) + return -ENOMEM; + + mmu->dev = &pdev->dev; + + ret = iommu_device_register(&mmu->iommu, &grant_dma_iommu_ops, &pdev->dev); + if (ret) + return ret; + + platform_set_drvdata(pdev, mmu); + + return 0; +} + +static int grant_dma_iommu_remove(struct platform_device *pdev) +{ + struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + iommu_device_unregister(&mmu->iommu); + + return 0; +} + +static struct platform_driver grant_dma_iommu_driver = { + .driver = { + .name = "grant-dma-iommu", + .of_match_table = grant_dma_iommu_of_match, + }, + .probe = grant_dma_iommu_probe, + .remove = grant_dma_iommu_remove, +}; + +static int __init grant_dma_iommu_init(void) +{ + struct device_node *iommu_np; + + iommu_np = of_find_matching_node(NULL, grant_dma_iommu_of_match); + if (!iommu_np) + return 0; + + of_node_put(iommu_np); + + return platform_driver_register(&grant_dma_iommu_driver); +} +subsys_initcall(grant_dma_iommu_init); diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c new file mode 100644 index 000000000000..fc0142484001 --- /dev/null +++ b/drivers/xen/grant-dma-ops.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Xen grant DMA-mapping layer - contains special DMA-mapping routines + * for providing grant references as DMA addresses to be used by frontends + * (e.g. virtio) in Xen guests + * + * Copyright (c) 2021, Juergen Gross <jgross@suse.com> + */ + +#include <linux/module.h> +#include <linux/dma-map-ops.h> +#include <linux/of.h> +#include <linux/pfn.h> +#include <linux/xarray.h> +#include <xen/xen.h> +#include <xen/xen-ops.h> +#include <xen/grant_table.h> + +struct xen_grant_dma_data { + /* The ID of backend domain */ + domid_t backend_domid; + /* Is device behaving sane? 
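grant-dma-ops.c encodes a grant reference into a DMA address by using it as the frame number and tagging bit 63, via the grant_to_dma()/dma_to_grant() helpers shown just below. A self-contained round-trip check of that scheme, assuming 4 KiB pages (PAGE_SHIFT of 12):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint32_t grant_ref_t;
typedef uint64_t dma_addr_t;

#define PAGE_SHIFT 12
#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63)

/* Bit 63 lets the backend tell a grant-based address apart from, e.g.,
 * an MMIO address; the low bits carry the in-page offset. */
static dma_addr_t grant_to_dma(grant_ref_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
}

static grant_ref_t dma_to_grant(dma_addr_t dma)
{
	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
}

int main(void)
{
	grant_ref_t grant = 0x1234;
	dma_addr_t dma = grant_to_dma(grant) + 0x80; /* map_page adds offset */

	printf("dma=%#llx grant back=%#x\n",
	       (unsigned long long)dma, (unsigned)dma_to_grant(dma));
	return 0;
}
```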
*/ + bool broken; +}; + +static DEFINE_XARRAY(xen_grant_dma_devices); + +#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63) + +static inline dma_addr_t grant_to_dma(grant_ref_t grant) +{ + return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT); +} + +static inline grant_ref_t dma_to_grant(dma_addr_t dma) +{ + return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT); +} + +static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev) +{ + struct xen_grant_dma_data *data; + + xa_lock(&xen_grant_dma_devices); + data = xa_load(&xen_grant_dma_devices, (unsigned long)dev); + xa_unlock(&xen_grant_dma_devices); + + return data; +} + +/* + * DMA ops for Xen frontends (e.g. virtio). + * + * Used to act as a kind of software IOMMU for Xen guests by using grants as + * DMA addresses. + * Such a DMA address is formed by using the grant reference as a frame + * number and setting the highest address bit (this bit is for the backend + * to be able to distinguish it from e.g. a mmio address). + */ +static void *xen_grant_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + unsigned long pfn; + grant_ref_t grant; + void *ret; + + data = find_xen_grant_dma_data(dev); + if (!data) + return NULL; + + if (unlikely(data->broken)) + return NULL; + + ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp); + if (!ret) + return NULL; + + pfn = virt_to_pfn(ret); + + if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) { + free_pages_exact(ret, n_pages * PAGE_SIZE); + return NULL; + } + + for (i = 0; i < n_pages; i++) { + gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, + pfn_to_gfn(pfn + i), 0); + } + + *dma_handle = grant_to_dma(grant); + + return ret; +} + +static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + + data = find_xen_grant_dma_data(dev); + if (!data) + return; + + if (unlikely(data->broken)) + return; + + grant = dma_to_grant(dma_handle); + + for (i = 0; i < n_pages; i++) { + if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) { + dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n"); + data->broken = true; + return; + } + } + + gnttab_free_grant_reference_seq(grant, n_pages); + + free_pages_exact(vaddr, n_pages * PAGE_SIZE); +} + +static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, + enum dma_data_direction dir, + gfp_t gfp) +{ + void *vaddr; + + vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0); + if (!vaddr) + return NULL; + + return virt_to_page(vaddr); +} + +static void xen_grant_dma_free_pages(struct device *dev, size_t size, + struct page *vaddr, dma_addr_t dma_handle, + enum dma_data_direction dir) +{ + xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0); +} + +static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + dma_addr_t dma_handle; + + if (WARN_ON(dir == DMA_NONE)) + return DMA_MAPPING_ERROR; + + data = find_xen_grant_dma_data(dev); + if (!data) + return DMA_MAPPING_ERROR; + + if (unlikely(data->broken)) + return 
DMA_MAPPING_ERROR; + + if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) + return DMA_MAPPING_ERROR; + + for (i = 0; i < n_pages; i++) { + gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, + xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE); + } + + dma_handle = grant_to_dma(grant) + offset; + + return dma_handle; +} + +static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + + if (WARN_ON(dir == DMA_NONE)) + return; + + data = find_xen_grant_dma_data(dev); + if (!data) + return; + + if (unlikely(data->broken)) + return; + + grant = dma_to_grant(dma_handle); + + for (i = 0; i < n_pages; i++) { + if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) { + dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n"); + data->broken = true; + return; + } + } + + gnttab_free_grant_reference_seq(grant, n_pages); +} + +static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct scatterlist *s; + unsigned int i; + + if (WARN_ON(dir == DMA_NONE)) + return; + + for_each_sg(sg, s, nents, i) + xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir, + attrs); +} + +static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct scatterlist *s; + unsigned int i; + + if (WARN_ON(dir == DMA_NONE)) + return -EINVAL; + + for_each_sg(sg, s, nents, i) { + s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset, + s->length, dir, attrs); + if (s->dma_address == DMA_MAPPING_ERROR) + goto out; + + sg_dma_len(s) = s->length; + } + + return nents; + +out: + xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + sg_dma_len(sg) = 0; + + return -EIO; +} + +static int xen_grant_dma_supported(struct device *dev, u64 mask) +{ + return mask == DMA_BIT_MASK(64); +} + +static const struct dma_map_ops xen_grant_dma_ops = { + .alloc = xen_grant_dma_alloc, + .free = xen_grant_dma_free, + .alloc_pages = xen_grant_dma_alloc_pages, + .free_pages = xen_grant_dma_free_pages, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, + .map_page = xen_grant_dma_map_page, + .unmap_page = xen_grant_dma_unmap_page, + .map_sg = xen_grant_dma_map_sg, + .unmap_sg = xen_grant_dma_unmap_sg, + .dma_supported = xen_grant_dma_supported, +}; + +bool xen_is_grant_dma_device(struct device *dev) +{ + struct device_node *iommu_np; + bool has_iommu; + + /* XXX Handle only DT devices for now */ + if (!dev->of_node) + return false; + + iommu_np = of_parse_phandle(dev->of_node, "iommus", 0); + has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma"); + of_node_put(iommu_np); + + return has_iommu; +} + +void xen_grant_setup_dma_ops(struct device *dev) +{ + struct xen_grant_dma_data *data; + struct of_phandle_args iommu_spec; + + data = find_xen_grant_dma_data(dev); + if (data) { + dev_err(dev, "Xen grant DMA data is already created\n"); + return; + } + + /* XXX ACPI device unsupported for now */ + if (!dev->of_node) + goto err; + + if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells", + 0, &iommu_spec)) { + dev_err(dev, "Cannot parse iommus property\n"); + goto err; + } + + if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") || + 
iommu_spec.args_count != 1) { + dev_err(dev, "Incompatible IOMMU node\n"); + of_node_put(iommu_spec.np); + goto err; + } + + of_node_put(iommu_spec.np); + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + goto err; + + /* + * The endpoint ID here means the ID of the domain where the corresponding + * backend is running + */ + data->backend_domid = iommu_spec.args[0]; + + if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data, + GFP_KERNEL))) { + dev_err(dev, "Cannot store Xen grant DMA data\n"); + goto err; + } + + dev->dma_ops = &xen_grant_dma_ops; + + return; + +err: + dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n"); +} + +MODULE_DESCRIPTION("Xen grant DMA-mapping layer"); +MODULE_AUTHOR("Juergen Gross <jgross@suse.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7a18292540bc..738029de3c67 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -33,6 +33,7 @@ #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt +#include <linux/bitmap.h> #include <linux/memblock.h> #include <linux/sched.h> #include <linux/mm.h> @@ -70,9 +71,32 @@ static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; + +/* + * Handling of free grants: + * + * Free grants are in a simple list anchored in gnttab_free_head. They are + * linked by grant ref, the last element contains GNTTAB_LIST_END. The number + * of free entries is stored in gnttab_free_count. + * Additionally there is a bitmap of free entries anchored in + * gnttab_free_bitmap. This is being used for simplifying allocation of + * multiple consecutive grants, which is needed e.g. for support of virtio. + * gnttab_last_free is used to add free entries of new frames at the end of + * the free list. + * gnttab_free_tail_ptr specifies the variable which references the start + * of consecutive free grants ending with gnttab_last_free. This pointer is + * updated in a rather defensive way, in order to avoid performance hits in + * hot paths. + * All those variables are protected by gnttab_list_lock. + */ static int gnttab_free_count; -static grant_ref_t gnttab_free_head; +static unsigned int gnttab_size; +static grant_ref_t gnttab_free_head = GNTTAB_LIST_END; +static grant_ref_t gnttab_last_free = GNTTAB_LIST_END; +static grant_ref_t *gnttab_free_tail_ptr; +static unsigned long *gnttab_free_bitmap; static DEFINE_SPINLOCK(gnttab_list_lock); + struct grant_frames xen_auto_xlat_grant_frames; static unsigned int xen_gnttab_version; module_param_named(version, xen_gnttab_version, uint, 0); @@ -168,16 +192,116 @@ static int get_free_entries(unsigned count) ref = head = gnttab_free_head; gnttab_free_count -= count; - while (count-- > 1) - head = gnttab_entry(head); + while (count--) { + bitmap_clear(gnttab_free_bitmap, head, 1); + if (gnttab_free_tail_ptr == __gnttab_entry(head)) + gnttab_free_tail_ptr = &gnttab_free_head; + if (count) + head = gnttab_entry(head); + } gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; + if (!gnttab_free_count) { + gnttab_last_free = GNTTAB_LIST_END; + gnttab_free_tail_ptr = NULL; + } + spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } +static int get_seq_entry_count(void) +{ + if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr || + *gnttab_free_tail_ptr == GNTTAB_LIST_END) + return 0; + + return gnttab_last_free - *gnttab_free_tail_ptr + 1; +} + +/* Rebuilds the free grant list and tries to find count consecutive entries. 
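A simplified user-space model of the scan performed by get_free_seq() below, using a byte array in place of the kernel bitmap helpers; the kernel version additionally rebuilds the linked free list as it walks, which this model omits:

```c
#include <stdio.h>

#define SIZE 32

/* Walk runs of set (free) bits and grab the first run long enough for
 * @count consecutive grants, clearing those bits as allocated. */
static int find_seq(unsigned char free[SIZE], int count)
{
	for (int from = 0; from < SIZE; ) {
		int to = from;

		while (to < SIZE && free[to])
			to++;
		if (free[from] && to - from >= count) {
			for (int i = 0; i < count; i++)
				free[from + i] = 0; /* bitmap_clear() */
			return from;
		}
		from = to + 1;
	}
	return -1; /* -ENOSPC */
}

int main(void)
{
	unsigned char free[SIZE] = { 0 };

	/* free runs: [3..4] and [10..15] */
	for (int i = 3; i <= 4; i++) free[i] = 1;
	for (int i = 10; i <= 15; i++) free[i] = 1;

	printf("%d\n", find_seq(free, 4)); /* 10: first run of >= 4 entries */
	return 0;
}
```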
*/ +static int get_free_seq(unsigned int count) +{ + int ret = -ENOSPC; + unsigned int from, to; + grant_ref_t *last; + + gnttab_free_tail_ptr = &gnttab_free_head; + last = &gnttab_free_head; + + for (from = find_first_bit(gnttab_free_bitmap, gnttab_size); + from < gnttab_size; + from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) { + to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size, + from + 1); + if (ret < 0 && to - from >= count) { + ret = from; + bitmap_clear(gnttab_free_bitmap, ret, count); + from += count; + gnttab_free_count -= count; + if (from == to) + continue; + } + + /* + * Recreate the free list in order to have it properly sorted. + * This is needed to make sure that the free tail has the maximum + * possible size. + */ + while (from < to) { + *last = from; + last = __gnttab_entry(from); + gnttab_last_free = from; + from++; + } + if (to < gnttab_size) + gnttab_free_tail_ptr = __gnttab_entry(to - 1); + } + + *last = GNTTAB_LIST_END; + if (gnttab_last_free != gnttab_size - 1) + gnttab_free_tail_ptr = NULL; + + return ret; +} + +static int get_free_entries_seq(unsigned int count) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&gnttab_list_lock, flags); + + if (gnttab_free_count < count) { + ret = gnttab_expand(count - gnttab_free_count); + if (ret < 0) + goto out; + } + + if (get_seq_entry_count() < count) { + ret = get_free_seq(count); + if (ret >= 0) + goto out; + ret = gnttab_expand(count - get_seq_entry_count()); + if (ret < 0) + goto out; + } + + ret = *gnttab_free_tail_ptr; + *gnttab_free_tail_ptr = gnttab_entry(ret + count - 1); + gnttab_free_count -= count; + if (!gnttab_free_count) + gnttab_free_tail_ptr = NULL; + bitmap_clear(gnttab_free_bitmap, ret, count); + + out: + spin_unlock_irqrestore(&gnttab_list_lock, flags); + + return ret; +} + static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; @@ -204,21 +328,51 @@ static inline void check_free_callbacks(void) do_free_callbacks(); } -static void put_free_entry(grant_ref_t ref) +static void put_free_entry_locked(grant_ref_t ref) { - unsigned long flags; - if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES)) return; - spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; + if (!gnttab_free_count) + gnttab_last_free = ref; + if (gnttab_free_tail_ptr == &gnttab_free_head) + gnttab_free_tail_ptr = __gnttab_entry(ref); gnttab_free_count++; + bitmap_set(gnttab_free_bitmap, ref, 1); +} + +static void put_free_entry(grant_ref_t ref) +{ + unsigned long flags; + + spin_lock_irqsave(&gnttab_list_lock, flags); + put_free_entry_locked(ref); check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } +static void gnttab_set_free(unsigned int start, unsigned int n) +{ + unsigned int i; + + for (i = start; i < start + n - 1; i++) + gnttab_entry(i) = i + 1; + + gnttab_entry(i) = GNTTAB_LIST_END; + if (!gnttab_free_count) { + gnttab_free_head = start; + gnttab_free_tail_ptr = &gnttab_free_head; + } else { + gnttab_entry(gnttab_last_free) = start; + } + gnttab_free_count += n; + gnttab_last_free = i; + + bitmap_set(gnttab_free_bitmap, start, n); +} + /* * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2. 
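gnttab_set_free() keeps the linked free list and the new bitmap in step: free entries are chained through the entry array itself, and new frames are appended at the tail. A compact standalone model of exactly that chaining:

```c
#include <stdio.h>

#define LIST_END 0xffffffffu
#define N 16

static unsigned int entry[N], free_head = LIST_END, last_free = LIST_END;
static unsigned char bitmap[N];
static int free_count;

/* Mirror of gnttab_set_free(): entry[i] holds the next free ref. */
static void set_free(unsigned int start, unsigned int n)
{
	unsigned int i;

	for (i = start; i < start + n - 1; i++)
		entry[i] = i + 1;          /* chain the new range */
	entry[i] = LIST_END;

	if (!free_count)
		free_head = start;         /* first range becomes the head */
	else
		entry[last_free] = start;  /* otherwise append after the tail */
	free_count += n;
	last_free = i;

	for (i = start; i < start + n; i++)
		bitmap[i] = 1;             /* bitmap_set() */
}

int main(void)
{
	set_free(2, 4);   /* entries 2..5 */
	set_free(8, 4);   /* entries 8..11, appended after 5 */

	for (unsigned int r = free_head; r != LIST_END; r = entry[r])
		printf("%u ", r);          /* 2 3 4 5 8 9 10 11 */
	printf("\n");
	return 0;
}
```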
* Introducing a valid entry into the grant table: @@ -450,23 +604,31 @@ void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; unsigned long flags; - int count = 1; - if (head == GNTTAB_LIST_END) - return; + spin_lock_irqsave(&gnttab_list_lock, flags); - ref = head; - while (gnttab_entry(ref) != GNTTAB_LIST_END) { - ref = gnttab_entry(ref); - count++; + while (head != GNTTAB_LIST_END) { + ref = gnttab_entry(head); + put_free_entry_locked(head); + head = ref; } - gnttab_entry(ref) = gnttab_free_head; - gnttab_free_head = head; - gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_references); +void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count) +{ + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&gnttab_list_lock, flags); + for (i = count; i > 0; i--) + put_free_entry_locked(head + i - 1); + check_free_callbacks(); + spin_unlock_irqrestore(&gnttab_list_lock, flags); +} +EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq); + int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) { int h = get_free_entries(count); @@ -480,6 +642,24 @@ int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); +int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first) +{ + int h; + + if (count == 1) + h = get_free_entries(1); + else + h = get_free_entries_seq(count); + + if (h < 0) + return -ENOSPC; + + *first = h; + + return 0; +} +EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq); + int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); @@ -572,16 +752,13 @@ static int grow_gnttab_list(unsigned int more_frames) goto grow_nomem; } + gnttab_set_free(gnttab_size, extra_entries); - for (i = grefs_per_frame * nr_grant_frames; - i < grefs_per_frame * new_nr_grant_frames - 1; i++) - gnttab_entry(i) = i + 1; - - gnttab_entry(i) = gnttab_free_head; - gnttab_free_head = grefs_per_frame * nr_grant_frames; - gnttab_free_count += extra_entries; + if (!gnttab_free_tail_ptr) + gnttab_free_tail_ptr = __gnttab_entry(gnttab_size); nr_grant_frames = new_nr_grant_frames; + gnttab_size += extra_entries; check_free_callbacks(); @@ -1424,20 +1601,20 @@ static int gnttab_expand(unsigned int req_entries) int gnttab_init(void) { int i; - unsigned long max_nr_grant_frames; + unsigned long max_nr_grant_frames, max_nr_grefs; unsigned int max_nr_glist_frames, nr_glist_frames; - unsigned int nr_init_grefs; int ret; gnttab_request_version(); max_nr_grant_frames = gnttab_max_grant_frames(); + max_nr_grefs = max_nr_grant_frames * + gnttab_interface->grefs_per_grant_frame; nr_grant_frames = 1; /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. 
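Taken together, the new interface pairs off as allocate-a-run / free-a-run. A kernel-context sketch of a caller, modeled on the grant-dma-ops users earlier in this patch; error handling is trimmed to the essentials and @domid/@pfn are assumed to come from the caller:

```c
#include <linux/errno.h>
#include <xen/grant_table.h>

/* Grant n_pages consecutive refs to @domid, starting at *first. */
static int grant_range(domid_t domid, unsigned long pfn,
		       unsigned int n_pages, grant_ref_t *first)
{
	unsigned int i;

	/* One call reserves n_pages consecutive refs. */
	if (gnttab_alloc_grant_reference_seq(n_pages, first))
		return -ENOSPC;

	for (i = 0; i < n_pages; i++)
		gnttab_grant_foreign_access_ref(*first + i, domid,
						pfn_to_gfn(pfn + i), 0);
	return 0;
}

static void ungrant_range(grant_ref_t first, unsigned int n_pages)
{
	unsigned int i;

	for (i = 0; i < n_pages; i++)
		if (!gnttab_end_foreign_access_ref(first + i))
			return; /* backend still using it; see broken flag above */

	gnttab_free_grant_reference_seq(first, n_pages);
}
```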
*/ - max_nr_glist_frames = (max_nr_grant_frames * - gnttab_interface->grefs_per_grant_frame / RPP); + max_nr_glist_frames = max_nr_grefs / RPP; gnttab_list = kmalloc_array(max_nr_glist_frames, sizeof(grant_ref_t *), @@ -1454,6 +1631,12 @@ int gnttab_init(void) } } + gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL); + if (!gnttab_free_bitmap) { + ret = -ENOMEM; + goto ini_nomem; + } + ret = arch_gnttab_init(max_nr_grant_frames, nr_status_frames(max_nr_grant_frames)); if (ret < 0) @@ -1464,15 +1647,10 @@ int gnttab_init(void) goto ini_nomem; } - nr_init_grefs = nr_grant_frames * - gnttab_interface->grefs_per_grant_frame; - - for (i = GNTTAB_NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) - gnttab_entry(i) = i + 1; + gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame; - gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; - gnttab_free_count = nr_init_grefs - GNTTAB_NR_RESERVED_ENTRIES; - gnttab_free_head = GNTTAB_NR_RESERVED_ENTRIES; + gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES, + gnttab_size - GNTTAB_NR_RESERVED_ENTRIES); printk("Grant table initialized\n"); return 0; @@ -1481,6 +1659,7 @@ int gnttab_init(void) for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); + bitmap_free(gnttab_free_bitmap); return ret; } EXPORT_SYMBOL_GPL(gnttab_init); diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c index 34742c6e189e..f17c4c03db30 100644 --- a/drivers/xen/xlate_mmu.c +++ b/drivers/xen/xlate_mmu.c @@ -261,7 +261,6 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt, return 0; } -EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages); struct remap_pfn { struct mm_struct *mm;