Diffstat (limited to 'drivers')
73 files changed, 777 insertions, 858 deletions
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c index a0e84538cec8..1fa58c059cbf 100644 --- a/drivers/bluetooth/bpa10x.c +++ b/drivers/bluetooth/bpa10x.c @@ -337,7 +337,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb) usb_free_urb(urb); - return 0; + return err; } static int bpa10x_set_diag(struct hci_dev *hdev, bool enable) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index ed455de598ea..a9c35ebb30f8 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -384,6 +384,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8822CE Bluetooth devices */ + { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK }, + /* Silicon Wave based devices */ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE }, @@ -1201,10 +1204,6 @@ static int btusb_open(struct hci_dev *hdev) } data->intf->needs_remote_wakeup = 1; - /* device specific wakeup source enabled and required for USB - * remote wakeup while host is suspended - */ - device_wakeup_enable(&data->udev->dev); /* Disable device remote wakeup when host is suspended * For Realtek chips, global suspend without @@ -1281,7 +1280,6 @@ static int btusb_close(struct hci_dev *hdev) if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags)) data->intf->needs_remote_wakeup = 1; - device_wakeup_disable(&data->udev->dev); usb_autopm_put_interface(data->intf); failed: diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index d33828fef89f..e3164c200eac 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -309,13 +309,14 @@ static void qca_wq_awake_device(struct work_struct *work) ws_awake_device); struct hci_uart *hu = qca->hu; unsigned long retrans_delay; + unsigned long flags; BT_DBG("hu %p wq awake device", hu); /* Vote for serial clock */ serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu); - spin_lock(&qca->hci_ibs_lock); + spin_lock_irqsave(&qca->hci_ibs_lock, flags); /* Send wake indication to device */ if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) @@ -327,7 +328,7 @@ static void qca_wq_awake_device(struct work_struct *work) retrans_delay = msecs_to_jiffies(qca->wake_retrans); mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); - spin_unlock(&qca->hci_ibs_lock); + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); /* Actually send the packets */ hci_uart_tx_wakeup(hu); @@ -338,12 +339,13 @@ static void qca_wq_awake_rx(struct work_struct *work) struct qca_data *qca = container_of(work, struct qca_data, ws_awake_rx); struct hci_uart *hu = qca->hu; + unsigned long flags; BT_DBG("hu %p wq awake rx", hu); serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu); - spin_lock(&qca->hci_ibs_lock); + spin_lock_irqsave(&qca->hci_ibs_lock, flags); qca->rx_ibs_state = HCI_IBS_RX_AWAKE; /* Always acknowledge device wake up, @@ -354,7 +356,7 @@ static void qca_wq_awake_rx(struct work_struct *work) qca->ibs_sent_wacks++; - spin_unlock(&qca->hci_ibs_lock); + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); /* Actually send the packets */ hci_uart_tx_wakeup(hu); diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 9c41a4e42575..1072c450c37a 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -192,6 +192,7 @@ struct rcar_dmac_chan { * @iomem: remapped I/O memory base * @n_channels: number of available channels * @channels: array of DMAC channels + 
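The hci_qca hunks above convert plain spin_lock()/spin_unlock() in the ws_awake_device and ws_awake_rx work handlers to the irqsave variants, presumably because hci_ibs_lock is shared with paths that run with interrupts disabled. A minimal sketch of that locking pattern; the struct and function names here are illustrative, not the driver's:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_data {
	spinlock_t lock;
	struct work_struct awake_work;
};

static void my_awake_work(struct work_struct *work)
{
	struct my_data *d = container_of(work, struct my_data, awake_work);
	unsigned long flags;

	/*
	 * irqsave/irqrestore is correct regardless of the caller's
	 * interrupt state, and mandatory once the same lock is also
	 * taken from timer or hard-IRQ context.
	 */
	spin_lock_irqsave(&d->lock, flags);
	/* ... update state shared with the IRQ/timer path ... */
	spin_unlock_irqrestore(&d->lock, flags);
}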
* @channels_mask: bitfield of which DMA channels are managed by this driver * @modules: bitmask of client modules in use */ struct rcar_dmac { @@ -202,6 +203,7 @@ struct rcar_dmac { unsigned int n_channels; struct rcar_dmac_chan *channels; + unsigned int channels_mask; DECLARE_BITMAP(modules, 256); }; @@ -438,7 +440,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac) u16 dmaor; /* Clear all channels and enable the DMAC globally. */ - rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0)); + rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask); rcar_dmac_write(dmac, RCAR_DMAOR, RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME); @@ -814,6 +816,9 @@ static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac) for (i = 0; i < dmac->n_channels; ++i) { struct rcar_dmac_chan *chan = &dmac->channels[i]; + if (!(dmac->channels_mask & BIT(i))) + continue; + /* Stop and reinitialize the channel. */ spin_lock_irq(&chan->lock); rcar_dmac_chan_halt(chan); @@ -1776,6 +1781,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, return 0; } +#define RCAR_DMAC_MAX_CHANNELS 32 + static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) { struct device_node *np = dev->of_node; @@ -1787,12 +1794,16 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) return ret; } - if (dmac->n_channels <= 0 || dmac->n_channels >= 100) { + /* The hardware and driver don't support more than 32 bits in CHCLR */ + if (dmac->n_channels <= 0 || + dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) { dev_err(dev, "invalid number of channels %u\n", dmac->n_channels); return -EINVAL; } + dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0); + return 0; } @@ -1802,7 +1813,6 @@ static int rcar_dmac_probe(struct platform_device *pdev) DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; - unsigned int channels_offset = 0; struct dma_device *engine; struct rcar_dmac *dmac; struct resource *mem; @@ -1831,10 +1841,8 @@ static int rcar_dmac_probe(struct platform_device *pdev) * level we can't disable it selectively, so ignore channel 0 for now if * the device is part of an IOMMU group. 
*/ - if (device_iommu_mapped(&pdev->dev)) { - dmac->n_channels--; - channels_offset = 1; - } + if (device_iommu_mapped(&pdev->dev)) + dmac->channels_mask &= ~BIT(0); dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, sizeof(*dmac->channels), GFP_KERNEL); @@ -1892,8 +1900,10 @@ static int rcar_dmac_probe(struct platform_device *pdev) INIT_LIST_HEAD(&engine->channels); for (i = 0; i < dmac->n_channels; ++i) { - ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], - i + channels_offset); + if (!(dmac->channels_mask & BIT(i))) + continue; + + ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i); if (ret < 0) goto error; } diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index baac476c8622..525dc7338fe3 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -908,6 +908,7 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); struct dma_slave_config *slave_cfg = &schan->slave_cfg; dma_addr_t src = 0, dst = 0; + dma_addr_t start_src = 0, start_dst = 0; struct sprd_dma_desc *sdesc; struct scatterlist *sg; u32 len = 0; @@ -954,6 +955,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, dst = sg_dma_address(sg); } + if (!i) { + start_src = src; + start_dst = dst; + } + /* * The link-list mode needs at least 2 link-list * configurations. If there is only one sg, it doesn't @@ -970,8 +976,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, } } - ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len, - dir, flags, slave_cfg); + ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src, + start_dst, len, dir, flags, slave_cfg); if (ret) { kfree(sdesc); return NULL; diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c index ad2f0a4cd6a4..f255056696ee 100644 --- a/drivers/dma/ti/dma-crossbar.c +++ b/drivers/dma/ti/dma-crossbar.c @@ -391,8 +391,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev) ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events, nelm * 2); - if (ret) + if (ret) { + kfree(rsv_events); return ret; + } for (i = 0; i < nelm; i++) { ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1], diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c index ba27802efcd0..d07c0d5de7a2 100644 --- a/drivers/dma/ti/omap-dma.c +++ b/drivers/dma/ti/omap-dma.c @@ -1540,8 +1540,10 @@ static int omap_dma_probe(struct platform_device *pdev) rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq, IRQF_SHARED, "omap-dma-engine", od); - if (rc) + if (rc) { + omap_dma_free(od); return rc; + } } if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123) diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index f1a9c0544e3f..213aedc97dc2 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -309,6 +309,7 @@ static const struct file_operations gpio_mockup_debugfs_ops = { .read = gpio_mockup_debugfs_read, .write = gpio_mockup_debugfs_write, .llseek = no_llseek, + .release = single_release, }; static void gpio_mockup_debugfs_setup(struct device *dev, diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 378b206d2dc9..48fea4c68e8d 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -604,10 +604,9 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d) u8 new_irqs; int level, i; u8 invert_irq_mask[MAX_BANK]; - int reg_direction[MAX_BANK]; + u8 reg_direction[MAX_BANK]; - regmap_bulk_read(chip->regmap, 
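The rcar-dmac change above replaces the implicit assumption that channels 0..n_channels-1 are all usable with an explicit channels_mask, so channel 0 can be dropped from the mask (instead of renumbering every channel) when the device sits behind an IOMMU, and CHCLR writes stay within the 32 bits the register supports. A sketch of the bitmask-iteration idiom, assuming a mask built with GENMASK() as in the patch:

#include <linux/bits.h>

#define MY_DMAC_MAX_CHANNELS	32	/* CHCLR is a 32-bit register */

static void my_init_channels(unsigned int n_channels,
			     unsigned int channels_mask)
{
	unsigned int i;

	/* e.g. channels_mask = GENMASK(n_channels - 1, 0) & ~BIT(0) */
	for (i = 0; i < n_channels; i++) {
		if (!(channels_mask & BIT(i)))
			continue;	/* channel exists but is not ours */
		/* ... probe channel i with its real hardware index ... */
	}
}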
chip->regs->direction, reg_direction, - NBANK(chip)); + pca953x_read_regs(chip, chip->regs->direction, reg_direction); if (chip->driver_data & PCA_PCAL) { /* Enable latch on interrupt-enabled inputs */ @@ -679,7 +678,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending) bool pending_seen = false; bool trigger_seen = false; u8 trigger[MAX_BANK]; - int reg_direction[MAX_BANK]; + u8 reg_direction[MAX_BANK]; int ret, i; if (chip->driver_data & PCA_PCAL) { @@ -710,8 +709,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending) return false; /* Remove output pins from the equation */ - regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction, - NBANK(chip)); + pca953x_read_regs(chip, chip->regs->direction, reg_direction); for (i = 0; i < NBANK(chip); i++) cur_stat[i] &= reg_direction[i]; @@ -768,7 +766,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, { struct i2c_client *client = chip->client; struct irq_chip *irq_chip = &chip->irq_chip; - int reg_direction[MAX_BANK]; + u8 reg_direction[MAX_BANK]; int ret, i; if (!client->irq) @@ -789,8 +787,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, * interrupt. We have to rely on the previous read for * this purpose. */ - regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction, - NBANK(chip)); + pca953x_read_regs(chip, chip->regs->direction, reg_direction); for (i = 0; i < NBANK(chip); i++) chip->irq_stat[i] &= reg_direction[i]; mutex_init(&chip->irq_lock); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 39f2f9035c11..bda28eb82c3f 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -7,6 +7,7 @@ * Mika Westerberg <mika.westerberg@linux.intel.com> */ +#include <linux/dmi.h> #include <linux/errno.h> #include <linux/gpio/consumer.h> #include <linux/gpio/driver.h> @@ -19,6 +20,11 @@ #include "gpiolib.h" +static int run_edge_events_on_boot = -1; +module_param(run_edge_events_on_boot, int, 0444); +MODULE_PARM_DESC(run_edge_events_on_boot, + "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto"); + /** * struct acpi_gpio_event - ACPI GPIO event handler data * @@ -170,10 +176,13 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio, event->irq_requested = true; /* Make sure we trigger the initial state of edge-triggered IRQs */ - value = gpiod_get_raw_value_cansleep(event->desc); - if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) || - ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0)) - event->handler(event->irq, event); + if (run_edge_events_on_boot && + (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) { + value = gpiod_get_raw_value_cansleep(event->desc); + if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) || + ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0)) + event->handler(event->irq, event); + } } static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio) @@ -1283,3 +1292,28 @@ static int acpi_gpio_handle_deferred_request_irqs(void) } /* We must use _sync so that this runs after the first deferred_probe run */ late_initcall_sync(acpi_gpio_handle_deferred_request_irqs); + +static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), + DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), + } + }, + {} /* Terminating entry */ +}; + +static int acpi_gpio_setup_params(void) +{ + if (run_edge_events_on_boot < 0) { + if (dmi_check_system(run_edge_events_on_boot_blacklist)) + 
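The pca953x fix above changes reg_direction from int[] to u8[] and routes the reads through pca953x_read_regs(). regmap_bulk_read() stores val_bytes-sized items back-to-back, one byte per register for this 8-bit chip, so an int array misreads the buffer layout. A hedged sketch of the rule, with a made-up regmap whose registers are 8 bits wide:

#include <linux/regmap.h>

#define MY_NBANK	5

static int my_read_direction(struct regmap *map, unsigned int reg)
{
	u8 buf[MY_NBANK];	/* element size == val_bits / 8 */

	/*
	 * With 8-bit registers, regmap_bulk_read() writes exactly one
	 * byte per register; an int buf[] would leave three stale
	 * bytes per element and misread every bank.
	 */
	return regmap_bulk_read(map, reg, buf, MY_NBANK);
}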
run_edge_events_on_boot = 0; + else + run_edge_events_on_boot = 1; + } + + return 0; +} + +/* Directly after dmi_setup() which runs as core_initcall() */ +postcore_initcall(acpi_gpio_setup_params); diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 9762dd6d99fa..9b44c49a9227 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -343,36 +343,27 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, &of_flags); - /* - * -EPROBE_DEFER in our case means that we found a - * valid GPIO property, but no controller has been - * registered so far. - * - * This means we don't need to look any further for - * alternate name conventions, and we should really - * preserve the return code for our user to be able to - * retry probing later. - */ - if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER) - return desc; - if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT)) + if (!IS_ERR(desc) || PTR_ERR(desc) != -ENOENT) break; } - /* Special handling for SPI GPIOs if used */ - if (IS_ERR(desc)) + if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) { + /* Special handling for SPI GPIOs if used */ desc = of_find_spi_gpio(dev, con_id, &of_flags); - if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) { + } + + if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) { /* This quirk looks up flags and all */ desc = of_find_spi_cs_gpio(dev, con_id, idx, flags); if (!IS_ERR(desc)) return desc; } - /* Special handling for regulator GPIOs if used */ - if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) + if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) { + /* Special handling for regulator GPIOs if used */ desc = of_find_regulator_gpio(dev, con_id, &of_flags); + } if (IS_ERR(desc)) return desc; diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index cca749010cd0..d9074191edef 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -536,6 +536,14 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) return -EINVAL; /* + * Do not allow both INPUT & OUTPUT flags to be set as they are + * contradictory. + */ + if ((lflags & GPIOHANDLE_REQUEST_INPUT) && + (lflags & GPIOHANDLE_REQUEST_OUTPUT)) + return -EINVAL; + + /* * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If * the hardware actually supports enabling both at the same time the * electrical result would be disastrous. 
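The gpiolib-acpi addition above introduces a three-state module parameter (-1 = auto) whose automatic value is resolved once at boot from a DMI quirk table. A minimal sketch of that pattern; the parameter name and table entry below are illustrative:

#include <linux/dmi.h>
#include <linux/module.h>

static int my_feature = -1;	/* -1 = auto, 0 = off, 1 = on */
module_param(my_feature, int, 0444);

static const struct dmi_system_id my_feature_blacklist[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Some Board"),
		},
	},
	{}	/* terminating entry */
};

static int __init my_feature_setup(void)
{
	if (my_feature < 0)
		my_feature = dmi_check_system(my_feature_blacklist) ? 0 : 1;
	return 0;
}
/* after dmi_setup(), which runs as core_initcall() */
postcore_initcall(my_feature_setup);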
@@ -926,7 +934,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) } /* This is just wrong: we don't look for events on output lines */ - if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { + if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) || + (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) || + (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) { ret = -EINVAL; goto out_free_label; } @@ -940,10 +950,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW) set_bit(FLAG_ACTIVE_LOW, &desc->flags); - if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) - set_bit(FLAG_OPEN_DRAIN, &desc->flags); - if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE) - set_bit(FLAG_OPEN_SOURCE, &desc->flags); ret = gpiod_direction_input(desc); if (ret) diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index b0369e690f36..c814bcef18a4 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1454,6 +1454,7 @@ static int drm_mode_parse_cmdline_refresh(const char *str, char **end_ptr, } static int drm_mode_parse_cmdline_extra(const char *str, int length, + bool freestanding, const struct drm_connector *connector, struct drm_cmdline_mode *mode) { @@ -1462,9 +1463,15 @@ static int drm_mode_parse_cmdline_extra(const char *str, int length, for (i = 0; i < length; i++) { switch (str[i]) { case 'i': + if (freestanding) + return -EINVAL; + mode->interlace = true; break; case 'm': + if (freestanding) + return -EINVAL; + mode->margins = true; break; case 'D': @@ -1542,6 +1549,7 @@ static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length, if (extras) { int ret = drm_mode_parse_cmdline_extra(end_ptr + i, 1, + false, connector, mode); if (ret) @@ -1669,6 +1677,22 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len, return 0; } +static const char * const drm_named_modes_whitelist[] = { + "NTSC", + "PAL", +}; + +static bool drm_named_mode_is_in_whitelist(const char *mode, unsigned int size) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++) + if (!strncmp(mode, drm_named_modes_whitelist[i], size)) + return true; + + return false; +} + /** * drm_mode_parse_command_line_for_connector - parse command line modeline for connector * @mode_option: optional per connector mode option @@ -1725,16 +1749,30 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, * bunch of things: * - We need to make sure that the first character (which * would be our resolution in X) is a digit. - * - However, if the X resolution is missing, then we end up - * with something like x<yres>, with our first character - * being an alpha-numerical character, which would be - * considered a named mode. + * - If not, then it's either a named mode or a force on/off. + * To distinguish between the two, we need to run the + * extra parsing function, and if not, then we consider it + * a named mode. * * If this isn't enough, we should add more heuristics here, * and matching unit-tests. */ - if (!isdigit(name[0]) && name[0] != 'x') + if (!isdigit(name[0]) && name[0] != 'x') { + unsigned int namelen = strlen(name); + + /* + * Only the force on/off options can be in that case, + * and they all take a single character. 
+ */ + if (namelen == 1) { + ret = drm_mode_parse_cmdline_extra(name, namelen, true, + connector, mode); + if (!ret) + return true; + } + named_mode = true; + } /* Try to locate the bpp and refresh specifiers, if any */ bpp_ptr = strchr(name, '-'); @@ -1772,6 +1810,10 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, if (named_mode) { if (mode_end + 1 > DRM_DISPLAY_MODE_LEN) return false; + + if (!drm_named_mode_is_in_whitelist(name, mode_end)) + return false; + strscpy(mode->name, name, mode_end + 1); } else { ret = drm_mode_parse_cmdline_res_mode(name, mode_end, @@ -1811,7 +1853,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, extra_ptr != options_ptr) { int len = strlen(name) - (extra_ptr - name); - ret = drm_mode_parse_cmdline_extra(extra_ptr, len, + ret = drm_mode_parse_cmdline_extra(extra_ptr, len, false, connector, mode); if (ret) return false; diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 18e4cba76720..8aa6a31e8ad0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -128,7 +128,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, limits.max_lane_count = intel_dp_max_lane_count(intel_dp); limits.min_bpp = intel_dp_min_bpp(pipe_config); - limits.max_bpp = pipe_config->pipe_bpp; + /* + * FIXME: If all the streams can't fit into the link with + * their current pipe_bpp we should reduce pipe_bpp across + * the board until things start to fit. Until then we + * limit to <= 8bpc since that's what was hardcoded for all + * MST streams previously. This hack should be removed once + * we have the proper retry logic in place. + */ + limits.max_bpp = min(pipe_config->pipe_bpp, 24); intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 2caa594322bc..528b61678334 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -664,15 +664,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, for_each_sgt_page(page, sgt_iter, pages) { if (obj->mm.dirty) - /* - * As this may not be anonymous memory (e.g. shmem) - * but exist on a real mapping, we have to lock - * the page in order to dirty it -- holding - * the page reference is not sufficient to - * prevent the inode from being truncated. - * Play safe and take the lock. 
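drm_named_mode_is_in_whitelist(), added above and used here to gate user-supplied named modes before they are copied into mode->name, is a size-bounded allow-list comparison. A sketch of the same check, assuming the candidate token is not guaranteed to be NUL-terminated at `size`:

#include <linux/kernel.h>
#include <linux/string.h>

static const char * const mode_whitelist[] = { "NTSC", "PAL" };

static bool mode_is_whitelisted(const char *name, unsigned int size)
{
	unsigned int i;

	/*
	 * 'size' is the length of the parsed token; bound the
	 * comparison to it rather than trusting a terminator.
	 */
	for (i = 0; i < ARRAY_SIZE(mode_whitelist); i++)
		if (!strncmp(name, mode_whitelist[i], size))
			return true;

	return false;
}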
- */ - set_page_dirty_lock(page); + set_page_dirty(page); mark_page_accessed(page); put_page(page); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 98dfb086320f..99e8242194c0 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -308,11 +308,6 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine, FLOW_CONTROL_ENABLE | PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); - /* Syncing dependencies between camera and graphics:skl,bxt,kbl */ - if (!IS_COFFEELAKE(i915)) - WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, - GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); - /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c index e9f9e9fb9b17..6381652a8829 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm.c @@ -656,10 +656,9 @@ static int ingenic_drm_probe(struct platform_device *pdev) return ret; } - if (panel) { + if (panel) bridge = devm_drm_panel_bridge_add(dev, panel, - DRM_MODE_CONNECTOR_Unknown); - } + DRM_MODE_CONNECTOR_DPI); priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc), &priv->dma_hwdesc_phys, diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 477c0f766663..b609dc030d6c 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -342,7 +342,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns) timeout = drm_timeout_abs_to_jiffies(timeout_ns); ret = drm_gem_reservation_object_wait(file, handle, write, timeout); - if (ret == 0) + if (ret == -ETIME) ret = timeout ? 
-ETIMEDOUT : -EBUSY; return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c index 84a2f243ed9b..4695f1c8e33f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c @@ -190,6 +190,9 @@ MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin"); MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin"); MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin"); MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin"); MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin"); MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin"); MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin"); @@ -210,6 +213,9 @@ MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin"); MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin"); MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin"); MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin"); MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin"); MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin"); MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin"); @@ -230,6 +236,9 @@ MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin"); MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin"); MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin"); MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin"); MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin"); MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin"); MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin"); @@ -250,3 +259,6 @@ MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin"); MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin"); MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin"); MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin"); diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h index b45824ec7c8f..6d61a0eb5d64 100644 --- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h +++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h @@ -9,6 +9,13 @@ #define cmdline_test(test) selftest(test, test) +cmdline_test(drm_cmdline_test_force_d_only) +cmdline_test(drm_cmdline_test_force_D_only_dvi) +cmdline_test(drm_cmdline_test_force_D_only_hdmi) +cmdline_test(drm_cmdline_test_force_D_only_not_digital) +cmdline_test(drm_cmdline_test_force_e_only) +cmdline_test(drm_cmdline_test_margin_only) +cmdline_test(drm_cmdline_test_interlace_only) cmdline_test(drm_cmdline_test_res) cmdline_test(drm_cmdline_test_res_missing_x) cmdline_test(drm_cmdline_test_res_missing_y) diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c index 14c96edb13df..013de9d27c35 100644 --- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c +++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c @@ -17,6 +17,136 @@ static const struct drm_connector no_connector = {}; +static int drm_cmdline_test_force_e_only(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(!drm_mode_parse_command_line_for_connector("e", + &no_connector, + &mode)); + 
FAIL_ON(mode.specified); + FAIL_ON(mode.refresh_specified); + FAIL_ON(mode.bpp_specified); + + FAIL_ON(mode.rb); + FAIL_ON(mode.cvt); + FAIL_ON(mode.interlace); + FAIL_ON(mode.margins); + FAIL_ON(mode.force != DRM_FORCE_ON); + + return 0; +} + +static int drm_cmdline_test_force_D_only_not_digital(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(!drm_mode_parse_command_line_for_connector("D", + &no_connector, + &mode)); + FAIL_ON(mode.specified); + FAIL_ON(mode.refresh_specified); + FAIL_ON(mode.bpp_specified); + + FAIL_ON(mode.rb); + FAIL_ON(mode.cvt); + FAIL_ON(mode.interlace); + FAIL_ON(mode.margins); + FAIL_ON(mode.force != DRM_FORCE_ON); + + return 0; +} + +static const struct drm_connector connector_hdmi = { + .connector_type = DRM_MODE_CONNECTOR_HDMIB, +}; + +static int drm_cmdline_test_force_D_only_hdmi(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(!drm_mode_parse_command_line_for_connector("D", + &connector_hdmi, + &mode)); + FAIL_ON(mode.specified); + FAIL_ON(mode.refresh_specified); + FAIL_ON(mode.bpp_specified); + + FAIL_ON(mode.rb); + FAIL_ON(mode.cvt); + FAIL_ON(mode.interlace); + FAIL_ON(mode.margins); + FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL); + + return 0; +} + +static const struct drm_connector connector_dvi = { + .connector_type = DRM_MODE_CONNECTOR_DVII, +}; + +static int drm_cmdline_test_force_D_only_dvi(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(!drm_mode_parse_command_line_for_connector("D", + &connector_dvi, + &mode)); + FAIL_ON(mode.specified); + FAIL_ON(mode.refresh_specified); + FAIL_ON(mode.bpp_specified); + + FAIL_ON(mode.rb); + FAIL_ON(mode.cvt); + FAIL_ON(mode.interlace); + FAIL_ON(mode.margins); + FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL); + + return 0; +} + +static int drm_cmdline_test_force_d_only(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(!drm_mode_parse_command_line_for_connector("d", + &no_connector, + &mode)); + FAIL_ON(mode.specified); + FAIL_ON(mode.refresh_specified); + FAIL_ON(mode.bpp_specified); + + FAIL_ON(mode.rb); + FAIL_ON(mode.cvt); + FAIL_ON(mode.interlace); + FAIL_ON(mode.margins); + FAIL_ON(mode.force != DRM_FORCE_OFF); + + return 0; +} + +static int drm_cmdline_test_margin_only(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(drm_mode_parse_command_line_for_connector("m", + &no_connector, + &mode)); + + return 0; +} + +static int drm_cmdline_test_interlace_only(void *ignored) +{ + struct drm_cmdline_mode mode = { }; + + FAIL_ON(drm_mode_parse_command_line_for_connector("i", + &no_connector, + &mode)); + + return 0; +} + static int drm_cmdline_test_res(void *ignored) { struct drm_cmdline_mode mode = { }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 59e9d05ab928..0af048d1a815 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -353,7 +353,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB)); if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) { kfree(reply); - + reply = NULL; if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry. */ continue; @@ -377,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { kfree(reply); - + reply = NULL; if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry. 
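The vmwgfx_msg change in the next hunks NULLs `reply` immediately after kfree() inside the retry loop, so the exit path can simply test `!reply` instead of re-deriving from the retry counter whether every attempt failed, and no freed pointer stays visible across iterations. A compact sketch of the pattern, with hypothetical my_try_recv()/my_reply_valid() helpers:

#include <linux/slab.h>
#include <linux/types.h>

#define MY_RETRIES	3

void *my_try_recv(void);		/* hypothetical receive attempt */
bool my_reply_valid(const void *reply);	/* hypothetical validity check */

static void *my_recv_retry(void)
{
	void *reply = NULL;
	int i;

	for (i = 0; i < MY_RETRIES; i++) {
		reply = my_try_recv();
		if (!reply)
			continue;
		if (my_reply_valid(reply))
			break;
		kfree(reply);
		reply = NULL;	/* never leave a freed pointer visible */
	}

	return reply;		/* NULL <=> every retry failed */
}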
*/ continue; @@ -389,10 +389,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, break; } - if (retries == RETRIES) { - kfree(reply); + if (!reply) return -EINVAL; - } *msg_len = reply_len; *msg = reply; diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index b607a92791d3..61de81965c44 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) iommu_completion_wait(iommu); } +static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) +{ + struct iommu_cmd cmd; + + build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, + dom_id, 1); + iommu_queue_command(iommu, &cmd); + + iommu_completion_wait(iommu); +} + static void amd_iommu_flush_all(struct amd_iommu *iommu) { struct iommu_cmd cmd; @@ -1424,18 +1435,21 @@ static void free_pagetable(struct protection_domain *domain) * another level increases the size of the address space by 9 bits to a size up * to 64 bits. */ -static bool increase_address_space(struct protection_domain *domain, +static void increase_address_space(struct protection_domain *domain, gfp_t gfp) { + unsigned long flags; u64 *pte; - if (domain->mode == PAGE_MODE_6_LEVEL) + spin_lock_irqsave(&domain->lock, flags); + + if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) /* address space already 64 bit large */ - return false; + goto out; pte = (void *)get_zeroed_page(gfp); if (!pte) - return false; + goto out; *pte = PM_LEVEL_PDE(domain->mode, iommu_virt_to_phys(domain->pt_root)); @@ -1443,7 +1457,10 @@ static bool increase_address_space(struct protection_domain *domain, domain->mode += 1; domain->updated = true; - return true; +out: + spin_unlock_irqrestore(&domain->lock, flags); + + return; } static u64 *alloc_pte(struct protection_domain *domain, @@ -1873,6 +1890,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, { u64 pte_root = 0; u64 flags = 0; + u32 old_domid; if (domain->mode != PAGE_MODE_NONE) pte_root = iommu_virt_to_phys(domain->pt_root); @@ -1922,8 +1940,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, flags &= ~DEV_DOMID_MASK; flags |= domain->id; + old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK; amd_iommu_dev_table[devid].data[1] = flags; amd_iommu_dev_table[devid].data[0] = pte_root; + + /* + * A kdump kernel might be replacing a domain ID that was copied from + * the previous kernel--if so, it needs to flush the translation cache + * entries for the old domain ID that is being overwritten + */ + if (old_domid) { + struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; + + amd_iommu_flush_tlb_domid(iommu, old_domid); + } } static void clear_dte_entry(u16 devid) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 12d094d08c0a..c4e0e4a9ee9e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -339,6 +339,8 @@ static void domain_exit(struct dmar_domain *domain); static void domain_remove_dev_info(struct dmar_domain *domain); static void dmar_remove_one_dev_info(struct device *dev); static void __dmar_remove_one_dev_info(struct device_domain_info *info); +static void domain_context_clear(struct intel_iommu *iommu, + struct device *dev); static int domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); static bool device_is_rmrr_locked(struct device *dev); @@ -2105,9 +2107,26 @@ out_unlock: return ret; } +struct domain_context_mapping_data { + struct dmar_domain *domain; + 
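The amd_iommu rework above makes increase_address_space() take domain->lock itself and re-check the PAGE_MODE_6_LEVEL limit under the lock (with WARN_ON_ONCE) rather than trusting a check made before the lock was held. The general shape, check-under-lock with a single unlock path, sketched with illustrative names:

#include <linux/bug.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define MY_MODE_MAX	6	/* illustrative: deepest table level */

struct my_domain {
	spinlock_t lock;
	int mode;
};

static void my_grow_table(struct my_domain *dom, gfp_t gfp)
{
	unsigned long flags;
	u64 *pte;

	spin_lock_irqsave(&dom->lock, flags);

	/* Re-check under the lock: another CPU may have grown it already. */
	if (WARN_ON_ONCE(dom->mode == MY_MODE_MAX))
		goto out;

	pte = (void *)get_zeroed_page(gfp);
	if (!pte)
		goto out;

	/* ... chain the new top level through *pte, then publish it ... */
	dom->mode += 1;
out:
	spin_unlock_irqrestore(&dom->lock, flags);
}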
struct intel_iommu *iommu; + struct pasid_table *table; +}; + +static int domain_context_mapping_cb(struct pci_dev *pdev, + u16 alias, void *opaque) +{ + struct domain_context_mapping_data *data = opaque; + + return domain_context_mapping_one(data->domain, data->iommu, + data->table, PCI_BUS_NUM(alias), + alias & 0xff); +} + static int domain_context_mapping(struct dmar_domain *domain, struct device *dev) { + struct domain_context_mapping_data data; struct pasid_table *table; struct intel_iommu *iommu; u8 bus, devfn; @@ -2117,7 +2136,17 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev) return -ENODEV; table = intel_pasid_get_table(dev); - return domain_context_mapping_one(domain, iommu, table, bus, devfn); + + if (!dev_is_pci(dev)) + return domain_context_mapping_one(domain, iommu, table, + bus, devfn); + + data.domain = domain; + data.iommu = iommu; + data.table = table; + + return pci_for_each_dma_alias(to_pci_dev(dev), + &domain_context_mapping_cb, &data); } static int domain_context_mapped_cb(struct pci_dev *pdev, @@ -4759,6 +4788,28 @@ out_free_dmar: return ret; } +static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque) +{ + struct intel_iommu *iommu = opaque; + + domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); + return 0; +} + +/* + * NB - intel-iommu lacks any sort of reference counting for the users of + * dependent devices. If multiple endpoints have intersecting dependent + * devices, unbinding the driver from any one of them will possibly leave + * the others unable to operate. + */ +static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) +{ + if (!iommu || !dev || !dev_is_pci(dev)) + return; + + pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); +} + static void __dmar_remove_one_dev_info(struct device_domain_info *info) { struct dmar_domain *domain; @@ -4779,7 +4830,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) PASID_RID2PASID); iommu_disable_dev_iotlb(info); - domain_context_clear_one(iommu, info->bus, info->devfn); + domain_context_clear(iommu, info->dev); intel_pasid_free_table(info->dev); } diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 780de0caafe8..9b159132405d 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu) } static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev, - unsigned long address, unsigned long pages, int ih, int gl) + unsigned long address, unsigned long pages, int ih) { struct qi_desc desc; - if (pages == -1) { - /* For global kernel pages we have to flush them in *all* PASIDs - * because that's the only option the hardware gives us. Despite - * the fact that they are actually only accessible through one. */ - if (gl) - desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | - QI_EIOTLB_DID(sdev->did) | - QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) | - QI_EIOTLB_TYPE; - else - desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | - QI_EIOTLB_DID(sdev->did) | - QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | - QI_EIOTLB_TYPE; + /* + * Do PASID granu IOTLB invalidation if page selective capability is + * not available. 
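The intel-iommu hunks here stop programming a context entry only for the requester's own bus/devfn and instead walk every DMA alias of a PCI device. pci_for_each_dma_alias() invokes a callback once per alias, and PCI_BUS_NUM() plus the low byte recover bus/devfn from the alias RID. A sketch of the callback wiring; the data struct and my_map_one() are illustrative:

#include <linux/pci.h>

int my_map_one(void *domain, u8 bus, u8 devfn);	/* hypothetical */

struct my_map_data {
	void *domain;		/* illustrative per-mapping state */
};

static int my_map_alias_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct my_map_data *data = opaque;

	/* One context entry per alias; non-zero return aborts the walk. */
	return my_map_one(data->domain, PCI_BUS_NUM(alias), alias & 0xff);
}

static int my_map_all(struct pci_dev *pdev, struct my_map_data *data)
{
	return pci_for_each_dma_alias(pdev, my_map_alias_cb, data);
}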
+ */ + if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) { + desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | + QI_EIOTLB_DID(sdev->did) | + QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | + QI_EIOTLB_TYPE; desc.qw1 = 0; } else { int mask = ilog2(__roundup_pow_of_two(pages)); @@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE; desc.qw1 = QI_EIOTLB_ADDR(address) | - QI_EIOTLB_GL(gl) | QI_EIOTLB_IH(ih) | QI_EIOTLB_AM(mask); } @@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d } static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address, - unsigned long pages, int ih, int gl) + unsigned long pages, int ih) { struct intel_svm_dev *sdev; rcu_read_lock(); list_for_each_entry_rcu(sdev, &svm->devs, list) - intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl); + intel_flush_svm_range_dev(svm, sdev, address, pages, ih); rcu_read_unlock(); } @@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn, struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); intel_flush_svm_range(svm, start, - (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0); + (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0); } static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) @@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) rcu_read_lock(); list_for_each_entry_rcu(sdev, &svm->devs, list) { intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid); - intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); + intel_flush_svm_range_dev(svm, sdev, 0, -1, 0); } rcu_read_unlock(); @@ -425,7 +419,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid) * large and has to be physically contiguous. So it's * hard to be as defensive as we might like. 
*/ intel_pasid_tear_down_entry(iommu, dev, svm->pasid); - intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); + intel_flush_svm_range_dev(svm, sdev, 0, -1, 0); kfree_rcu(sdev, rcu); if (list_empty(&svm->devs)) { diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 3c3ad42f22bf..c92b405b7646 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos if (!cdev->ap.applid) return -ENODEV; + if (count < CAPIMSG_BASELEN) + return -EINVAL; + skb = alloc_skb(count, GFP_USER); if (!skb) return -ENOMEM; @@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos } mlen = CAPIMSG_LEN(skb->data); if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) { - if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) { + if (count < CAPI_DATA_B3_REQ_LEN || + (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) { kfree_skb(skb); return -EINVAL; } @@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos CAPIMSG_SETAPPID(skb->data, cdev->ap.applid); if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) { + if (count < CAPI_DISCONNECT_B3_RESP_LEN) { + kfree_skb(skb); + return -EINVAL; + } mutex_lock(&cdev->lock); capincci_free(cdev, CAPIMSG_NCCI(skb->data)); mutex_unlock(&cdev->lock); diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 74e4364bc9fb..09113b9ad679 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -564,7 +564,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, if (index == EXT_CSD_SANITIZE_START) cmd.sanitize_busy = true; - err = mmc_wait_for_cmd(host, &cmd, 0); + err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); if (err) goto out; diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 7e0d3a49c06d..bb31e13648d6 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -597,7 +597,7 @@ static void bcm2835_finish_request(struct bcm2835_host *host) struct dma_chan *terminate_chan = NULL; struct mmc_request *mrq; - cancel_delayed_work_sync(&host->timeout_work); + cancel_delayed_work(&host->timeout_work); mrq = host->mrq; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 64d3b5fb7fe5..4a2872f49a60 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -774,8 +774,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, /* All SDHI have SDIO status bits which must be 1 */ mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS; - pm_runtime_enable(&pdev->dev); - ret = renesas_sdhi_clk_enable(host); if (ret) goto efree; @@ -856,8 +854,6 @@ edisclk: efree: tmio_mmc_host_free(host); - pm_runtime_disable(&pdev->dev); - return ret; } EXPORT_SYMBOL_GPL(renesas_sdhi_probe); @@ -869,8 +865,6 @@ int renesas_sdhi_remove(struct platform_device *pdev) tmio_mmc_host_remove(host); renesas_sdhi_clk_disable(host); - pm_runtime_disable(&pdev->dev); - return 0; } EXPORT_SYMBOL_GPL(renesas_sdhi_remove); diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index 9dc4548271b4..19944b0049db 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -432,7 +432,6 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) mmc_hostname(host->mmc)); host->flags &= ~SDHCI_SIGNALING_330; host->flags |= SDHCI_SIGNALING_180; - host->quirks2 |= 
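The capi_write() checks above reject user writes shorter than the fixed CAPI header before any message field is parsed, then re-validate per-command minimum lengths before command-specific fields are touched. The underlying rule, validate the length before the dereference, sketched for a hypothetical message format:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#define MY_HDR_LEN	8	/* illustrative fixed header size */

static ssize_t my_write(const char __user *buf, size_t count)
{
	u8 hdr[MY_HDR_LEN];

	if (count < MY_HDR_LEN)	/* reject before parsing any field */
		return -EINVAL;

	if (copy_from_user(hdr, buf, MY_HDR_LEN))
		return -EFAULT;

	/*
	 * Per-command checks come next: every command that implies
	 * extra fields must re-verify that 'count' covers them
	 * before those fields are read.
	 */
	return count;
}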
SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD; host->mmc->caps2 |= MMC_CAP2_NO_SD; host->mmc->caps2 |= MMC_CAP2_NO_SDIO; pci_write_config_dword(chip->pdev, @@ -682,6 +681,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = { const struct sdhci_pci_fixes sdhci_o2 = { .probe = sdhci_pci_o2_probe, .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD, .probe_slot = sdhci_pci_o2_probe_slot, #ifdef CONFIG_PM_SLEEP .resume = sdhci_pci_o2_resume, diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 8539e10784b4..93e83ad25976 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -172,8 +172,6 @@ static int tmio_mmc_probe(struct platform_device *pdev) host->mmc->f_max = pdata->hclk; host->mmc->f_min = pdata->hclk / 512; - pm_runtime_enable(&pdev->dev); - ret = tmio_mmc_host_probe(host); if (ret) goto host_free; @@ -193,7 +191,6 @@ host_remove: tmio_mmc_host_remove(host); host_free: tmio_mmc_host_free(host); - pm_runtime_disable(&pdev->dev); cell_disable: if (cell->disable) cell->disable(pdev); @@ -210,8 +207,6 @@ static int tmio_mmc_remove(struct platform_device *pdev) if (cell->disable) cell->disable(pdev); - pm_runtime_disable(&pdev->dev); - return 0; } diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index c5ba13fae399..2f0b092d6dcc 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -163,6 +163,7 @@ struct tmio_mmc_host { unsigned long last_req_ts; struct mutex ios_lock; /* protect set_ios() context */ bool native_hotplug; + bool runtime_synced; bool sdio_irq_enabled; /* Mandatory callback */ diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 2cb3f951c3e2..9b6e1001e77c 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1153,15 +1153,6 @@ void tmio_mmc_host_free(struct tmio_mmc_host *host) } EXPORT_SYMBOL_GPL(tmio_mmc_host_free); -/** - * tmio_mmc_host_probe() - Common probe for all implementations - * @_host: Host to probe - * - * Perform tasks common to all implementations probe functions. - * - * The caller should have called pm_runtime_enable() prior to calling - * the common probe function. 
- */ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) { struct platform_device *pdev = _host->pdev; @@ -1257,19 +1248,22 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) /* See if we also get DMA */ tmio_mmc_request_dma(_host, pdata); - pm_runtime_set_active(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, 50); pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); ret = mmc_add_host(mmc); if (ret) goto remove_host; dev_pm_qos_expose_latency_limit(&pdev->dev, 100); + pm_runtime_put(&pdev->dev); return 0; remove_host: + pm_runtime_put_noidle(&pdev->dev); tmio_mmc_host_remove(_host); return ret; } @@ -1280,12 +1274,11 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) struct platform_device *pdev = host->pdev; struct mmc_host *mmc = host->mmc; + pm_runtime_get_sync(&pdev->dev); + if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); - if (!host->native_hotplug) - pm_runtime_get_sync(&pdev->dev); - dev_pm_qos_hide_latency_limit(&pdev->dev); mmc_remove_host(mmc); @@ -1294,7 +1287,10 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) tmio_mmc_release_dma(host); pm_runtime_dont_use_autosuspend(&pdev->dev); + if (host->native_hotplug) + pm_runtime_put_noidle(&pdev->dev); pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); } EXPORT_SYMBOL_GPL(tmio_mmc_host_remove); @@ -1337,6 +1333,11 @@ int tmio_mmc_host_runtime_resume(struct device *dev) { struct tmio_mmc_host *host = dev_get_drvdata(dev); + if (!host->runtime_synced) { + host->runtime_synced = true; + return 0; + } + tmio_mmc_clk_enable(host); tmio_mmc_hw_reset(host->mmc); diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c index 49aad9a79c18..91a2be41edf6 100644 --- a/drivers/mmc/host/uniphier-sd.c +++ b/drivers/mmc/host/uniphier-sd.c @@ -631,7 +631,6 @@ static int uniphier_sd_probe(struct platform_device *pdev) host->clk_disable = uniphier_sd_clk_disable; host->set_clock = uniphier_sd_set_clock; - pm_runtime_enable(&pdev->dev); ret = uniphier_sd_clk_enable(host); if (ret) goto free_host; @@ -653,7 +652,6 @@ static int uniphier_sd_probe(struct platform_device *pdev) free_host: tmio_mmc_host_free(host); - pm_runtime_disable(&pdev->dev); return ret; } @@ -664,7 +662,6 @@ static int uniphier_sd_remove(struct platform_device *pdev) tmio_mmc_host_remove(host); uniphier_sd_clk_disable(host); - pm_runtime_disable(&pdev->dev); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 58c6231aaa00..87dece0e745d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -98,7 +98,7 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { .reset_level = HNAE3_GLOBAL_RESET }, { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow", + { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow", .reset_level = HNAE3_GLOBAL_RESET }, { .int_msk = BIT(3), .msg = "tx_buf_overflow", .reset_level = HNAE3_GLOBAL_RESET }, diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 4f83f97ffe8b..2e5172f61564 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1984,8 +1984,11 @@ static void __ibmvnic_reset(struct work_struct *work) rwi = get_next_rwi(adapter); while (rwi) { if (adapter->state 
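The tmio_mmc_core rework above pulls pm_runtime_enable() out of the individual glue drivers (renesas_sdhi, tmio_mmc, uniphier-sd) into the common probe, and brackets host registration with pm_runtime_get_sync()/pm_runtime_put() so the device is guaranteed powered while it registers. A sketch of that probe-side sequence, assuming the 50 ms autosuspend delay from the patch and a hypothetical my_register_host():

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

int my_register_host(struct platform_device *pdev);	/* hypothetical */

static int my_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);	/* powered for registration */

	ret = my_register_host(pdev);
	if (ret) {
		pm_runtime_put_noidle(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	pm_runtime_put(&pdev->dev);		/* re-allow autosuspend */
	return 0;
}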
== VNIC_REMOVING || - adapter->state == VNIC_REMOVED) - goto out; + adapter->state == VNIC_REMOVED) { + kfree(rwi); + rc = EBUSY; + break; + } if (adapter->force_reset_recovery) { adapter->force_reset_recovery = false; @@ -2011,7 +2014,7 @@ static void __ibmvnic_reset(struct work_struct *work) netdev_dbg(adapter->netdev, "Reset failed\n"); free_all_rwi(adapter); } -out: + adapter->resetting = false; if (we_lock_rtnl) rtnl_unlock(); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index dc034f4e8cf6..1ce2397306b9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -36,6 +36,7 @@ #include <net/vxlan.h> #include <net/mpls.h> #include <net/xdp_sock.h> +#include <net/xfrm.h> #include "ixgbe.h" #include "ixgbe_common.h" @@ -2623,7 +2624,7 @@ adjust_by_size: /* 16K ints/sec to 9.2K ints/sec */ avg_wire_size *= 15; avg_wire_size += 11452; - } else if (avg_wire_size <= 1980) { + } else if (avg_wire_size < 1968) { /* 9.2K ints/sec to 8K ints/sec */ avg_wire_size *= 5; avg_wire_size += 22420; @@ -2656,6 +2657,8 @@ adjust_by_size: case IXGBE_LINK_SPEED_2_5GB_FULL: case IXGBE_LINK_SPEED_1GB_FULL: case IXGBE_LINK_SPEED_10_FULL: + if (avg_wire_size > 8064) + avg_wire_size = 8064; itr += DIV_ROUND_UP(avg_wire_size, IXGBE_ITR_ADAPTIVE_MIN_INC * 64) * IXGBE_ITR_ADAPTIVE_MIN_INC; @@ -8698,7 +8701,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, #endif /* IXGBE_FCOE */ #ifdef CONFIG_IXGBE_IPSEC - if (secpath_exists(skb) && + if (xfrm_offload(skb) && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index ad802a8909e0..a37dcd140f63 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -642,19 +642,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring, bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring, int napi_budget) { + u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; unsigned int total_packets = 0, total_bytes = 0; - u32 i = tx_ring->next_to_clean, xsk_frames = 0; - unsigned int budget = q_vector->tx.work_limit; struct xdp_umem *umem = tx_ring->xsk_umem; union ixgbe_adv_tx_desc *tx_desc; struct ixgbe_tx_buffer *tx_bi; - bool xmit_done; + u32 xsk_frames = 0; - tx_bi = &tx_ring->tx_buffer_info[i]; - tx_desc = IXGBE_TX_DESC(tx_ring, i); - i -= tx_ring->count; + tx_bi = &tx_ring->tx_buffer_info[ntc]; + tx_desc = IXGBE_TX_DESC(tx_ring, ntc); - do { + while (ntc != ntu) { if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) break; @@ -670,22 +668,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, tx_bi++; tx_desc++; - i++; - if (unlikely(!i)) { - i -= tx_ring->count; + ntc++; + if (unlikely(ntc == tx_ring->count)) { + ntc = 0; tx_bi = tx_ring->tx_buffer_info; tx_desc = IXGBE_TX_DESC(tx_ring, 0); } /* issue prefetch for next Tx descriptor */ prefetch(tx_desc); + } - /* update budget accounting */ - budget--; - } while (likely(budget)); - - i += tx_ring->count; - tx_ring->next_to_clean = i; + tx_ring->next_to_clean = ntc; u64_stats_update_begin(&tx_ring->syncp); tx_ring->stats.bytes += total_bytes; @@ -704,9 +698,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); } - xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); - - return budget > 0 && xmit_done; 
+ return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); } int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 75e93ce2ed99..076f2da36f27 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -30,6 +30,7 @@ #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/atomic.h> +#include <net/xfrm.h> #include "ixgbevf.h" @@ -4167,7 +4168,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, first->protocol = vlan_get_protocol(skb); #ifdef CONFIG_IXGBEVF_IPSEC - if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) + if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index ef3f3d06ff1e..fce9b3a24347 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2240,7 +2240,7 @@ static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) for (i = 1; i <= dev->caps.num_ports; i++) { if (mlx4_dev_port(dev, i, &port_cap)) { mlx4_err(dev, - "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); + "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n"); } else if ((dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_DEFAULT) && (port_cap.dmfs_optimized_state == diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index d0a01e8f000a..b339125b2f09 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -232,9 +232,9 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE); if (!laddr) { - printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name); - dev_kfree_skb(skb); - return NETDEV_TX_BUSY; + pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; } sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index d5bbe3d6048b..05981b54eaab 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -260,9 +260,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) type = cmsg_hdr->type; switch (type) { - case NFP_FLOWER_CMSG_TYPE_PORT_REIFY: - nfp_flower_cmsg_portreify_rx(app, skb); - break; case NFP_FLOWER_CMSG_TYPE_PORT_MOD: nfp_flower_cmsg_portmod_rx(app, skb); break; @@ -328,8 +325,7 @@ nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type) struct nfp_flower_priv *priv = app->priv; struct sk_buff_head *skb_head; - if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY || - type == NFP_FLOWER_CMSG_TYPE_PORT_MOD) + if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD) skb_head = &priv->cmsg_skbs_high; else skb_head = &priv->cmsg_skbs_low; @@ -368,6 +364,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) { /* Acks from the NFP that the route is added - ignore. 
*/ dev_consume_skb_any(skb); + } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) { + /* Handle REIFY acks outside wq to prevent RTNL conflict. */ + nfp_flower_cmsg_portreify_rx(app, skb); + dev_consume_skb_any(skb); } else { nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type); } diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index ecca794c55e2..05d2b478c99b 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -713,6 +713,21 @@ struct nv_skb_map { struct nv_skb_map *next_tx_ctx; }; +struct nv_txrx_stats { + u64 stat_rx_packets; + u64 stat_rx_bytes; /* not always available in HW */ + u64 stat_rx_missed_errors; + u64 stat_rx_dropped; + u64 stat_tx_packets; /* not always available in HW */ + u64 stat_tx_bytes; + u64 stat_tx_dropped; +}; + +#define nv_txrx_stats_inc(member) \ + __this_cpu_inc(np->txrx_stats->member) +#define nv_txrx_stats_add(member, count) \ + __this_cpu_add(np->txrx_stats->member, (count)) + /* * SMP locking: * All hardware access under netdev_priv(dev)->lock, except the performance @@ -797,10 +812,7 @@ struct fe_priv { /* RX software stats */ struct u64_stats_sync swstats_rx_syncp; - u64 stat_rx_packets; - u64 stat_rx_bytes; /* not always available in HW */ - u64 stat_rx_missed_errors; - u64 stat_rx_dropped; + struct nv_txrx_stats __percpu *txrx_stats; /* media detection workaround. * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); @@ -826,9 +838,6 @@ struct fe_priv { /* TX software stats */ struct u64_stats_sync swstats_tx_syncp; - u64 stat_tx_packets; /* not always available in HW */ - u64 stat_tx_bytes; - u64 stat_tx_dropped; /* msi/msi-x fields */ u32 msi_flags; @@ -1721,6 +1730,39 @@ static void nv_update_stats(struct net_device *dev) } } +static void nv_get_stats(int cpu, struct fe_priv *np, + struct rtnl_link_stats64 *storage) +{ + struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu); + unsigned int syncp_start; + u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors; + u64 tx_packets, tx_bytes, tx_dropped; + + do { + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); + rx_packets = src->stat_rx_packets; + rx_bytes = src->stat_rx_bytes; + rx_dropped = src->stat_rx_dropped; + rx_missed_errors = src->stat_rx_missed_errors; + } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); + + storage->rx_packets += rx_packets; + storage->rx_bytes += rx_bytes; + storage->rx_dropped += rx_dropped; + storage->rx_missed_errors += rx_missed_errors; + + do { + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); + tx_packets = src->stat_tx_packets; + tx_bytes = src->stat_tx_bytes; + tx_dropped = src->stat_tx_dropped; + } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); + + storage->tx_packets += tx_packets; + storage->tx_bytes += tx_bytes; + storage->tx_dropped += tx_dropped; +} + /* * nv_get_stats64: dev->ndo_get_stats64 function * Get latest stats value from the nic. 
@@ -1733,7 +1775,7 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) __releases(&netdev_priv(dev)->hwstats_lock) { struct fe_priv *np = netdev_priv(dev); - unsigned int syncp_start; + int cpu; /* * Note: because HW stats are not always available and for @@ -1746,20 +1788,8 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) */ /* software stats */ - do { - syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); - storage->rx_packets = np->stat_rx_packets; - storage->rx_bytes = np->stat_rx_bytes; - storage->rx_dropped = np->stat_rx_dropped; - storage->rx_missed_errors = np->stat_rx_missed_errors; - } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); - - do { - syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); - storage->tx_packets = np->stat_tx_packets; - storage->tx_bytes = np->stat_tx_bytes; - storage->tx_dropped = np->stat_tx_dropped; - } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); + for_each_online_cpu(cpu) + nv_get_stats(cpu, np, storage); /* If the nic supports hw counters then retrieve latest values */ if (np->driver_data & DEV_HAS_STATISTICS_V123) { @@ -1827,7 +1857,7 @@ static int nv_alloc_rx(struct net_device *dev) } else { packet_dropped: u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_dropped++; + nv_txrx_stats_inc(stat_rx_dropped); u64_stats_update_end(&np->swstats_rx_syncp); return 1; } @@ -1869,7 +1899,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev) } else { packet_dropped: u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_dropped++; + nv_txrx_stats_inc(stat_rx_dropped); u64_stats_update_end(&np->swstats_rx_syncp); return 1; } @@ -2013,7 +2043,7 @@ static void nv_drain_tx(struct net_device *dev) } if (nv_release_txskb(np, &np->tx_skb[i])) { u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); } np->tx_skb[i].dma = 0; @@ -2227,7 +2257,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) /* on DMA mapping error - drop the packet */ dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); return NETDEV_TX_OK; } @@ -2273,7 +2303,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb_any(skb); np->put_tx_ctx = start_tx_ctx; u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); return NETDEV_TX_OK; } @@ -2384,7 +2414,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, /* on DMA mapping error - drop the packet */ dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); return NETDEV_TX_OK; } @@ -2431,7 +2461,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, dev_kfree_skb_any(skb); np->put_tx_ctx = start_tx_ctx; u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_dropped++; + nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); return NETDEV_TX_OK; } @@ -2560,9 +2590,12 @@ static int nv_tx_done(struct net_device *dev, int limit) && !(flags & NV_TX_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); } else { + unsigned int len; + 
u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_packets++; - np->stat_tx_bytes += np->get_tx_ctx->skb->len; + nv_txrx_stats_inc(stat_tx_packets); + len = np->get_tx_ctx->skb->len; + nv_txrx_stats_add(stat_tx_bytes, len); u64_stats_update_end(&np->swstats_tx_syncp); } bytes_compl += np->get_tx_ctx->skb->len; @@ -2577,9 +2610,12 @@ static int nv_tx_done(struct net_device *dev, int limit) && !(flags & NV_TX2_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); } else { + unsigned int len; + u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_packets++; - np->stat_tx_bytes += np->get_tx_ctx->skb->len; + nv_txrx_stats_inc(stat_tx_packets); + len = np->get_tx_ctx->skb->len; + nv_txrx_stats_add(stat_tx_bytes, len); u64_stats_update_end(&np->swstats_tx_syncp); } bytes_compl += np->get_tx_ctx->skb->len; @@ -2627,9 +2663,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) nv_legacybackoff_reseed(dev); } } else { + unsigned int len; + u64_stats_update_begin(&np->swstats_tx_syncp); - np->stat_tx_packets++; - np->stat_tx_bytes += np->get_tx_ctx->skb->len; + nv_txrx_stats_inc(stat_tx_packets); + len = np->get_tx_ctx->skb->len; + nv_txrx_stats_add(stat_tx_bytes, len); u64_stats_update_end(&np->swstats_tx_syncp); } @@ -2806,6 +2845,15 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen) } } +static void rx_missing_handler(u32 flags, struct fe_priv *np) +{ + if (flags & NV_RX_MISSEDFRAME) { + u64_stats_update_begin(&np->swstats_rx_syncp); + nv_txrx_stats_inc(stat_rx_missed_errors); + u64_stats_update_end(&np->swstats_rx_syncp); + } +} + static int nv_rx_process(struct net_device *dev, int limit) { struct fe_priv *np = netdev_priv(dev); @@ -2848,11 +2896,7 @@ static int nv_rx_process(struct net_device *dev, int limit) } /* the rest are hard errors */ else { - if (flags & NV_RX_MISSEDFRAME) { - u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_missed_errors++; - u64_stats_update_end(&np->swstats_rx_syncp); - } + rx_missing_handler(flags, np); dev_kfree_skb(skb); goto next_pkt; } @@ -2896,8 +2940,8 @@ static int nv_rx_process(struct net_device *dev, int limit) skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(&np->napi, skb); u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_packets++; - np->stat_rx_bytes += len; + nv_txrx_stats_inc(stat_rx_packets); + nv_txrx_stats_add(stat_rx_bytes, len); u64_stats_update_end(&np->swstats_rx_syncp); next_pkt: if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) @@ -2982,8 +3026,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) } napi_gro_receive(&np->napi, skb); u64_stats_update_begin(&np->swstats_rx_syncp); - np->stat_rx_packets++; - np->stat_rx_bytes += len; + nv_txrx_stats_inc(stat_rx_packets); + nv_txrx_stats_add(stat_rx_bytes, len); u64_stats_update_end(&np->swstats_rx_syncp); } else { dev_kfree_skb(skb); @@ -5651,6 +5695,12 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) SET_NETDEV_DEV(dev, &pci_dev->dev); u64_stats_init(&np->swstats_rx_syncp); u64_stats_init(&np->swstats_tx_syncp); + np->txrx_stats = alloc_percpu(struct nv_txrx_stats); + if (!np->txrx_stats) { + pr_err("np->txrx_stats, alloc memory error.\n"); + err = -ENOMEM; + goto out_alloc_percpu; + } timer_setup(&np->oom_kick, nv_do_rx_refill, 0); timer_setup(&np->nic_poll, nv_do_nic_poll, 0); @@ -6060,6 +6110,8 @@ out_relreg: out_disable: pci_disable_device(pci_dev); out_free: + free_percpu(np->txrx_stats); +out_alloc_percpu: free_netdev(dev); out: return err; @@ 
-6105,6 +6157,9 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev) static void nv_remove(struct pci_dev *pci_dev) { struct net_device *dev = pci_get_drvdata(pci_dev); + struct fe_priv *np = netdev_priv(dev); + + free_percpu(np->txrx_stats); unregister_netdev(dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 4083019c547a..f97a4096f8fc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -873,7 +873,12 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) int ret; u32 reg, val; - regmap_field_read(gmac->regmap_field, &val); + ret = regmap_field_read(gmac->regmap_field, &val); + if (ret) { + dev_err(priv->device, "Failed to read from regmap field.\n"); + return ret; + } + reg = gmac->variant->default_syscon_value; if (reg != val) dev_warn(priv->device, diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 331c16d30d5d..23281aeeb222 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -344,10 +344,10 @@ static void sp_bump(struct sixpack *sp, char cmd) sp->dev->stats.rx_bytes += count; - if ((skb = dev_alloc_skb(count)) == NULL) + if ((skb = dev_alloc_skb(count + 1)) == NULL) goto out_mem; - ptr = skb_put(skb, count); + ptr = skb_put(skb, count + 1); *ptr++ = cmd; /* KISS command */ memcpy(ptr, sp->cooked_buf + 1, count); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index a45c5de96ab1..a5a57ca94c1a 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -376,8 +376,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat * Local device Link partner * Pause AsymDir Pause AsymDir Result * 1 X 1 X TX+RX - * 0 1 1 1 RX - * 1 1 0 1 TX + * 0 1 1 1 TX + * 1 1 0 1 RX */ static void phylink_resolve_flow(struct phylink *pl, struct phylink_link_state *state) @@ -398,7 +398,7 @@ static void phylink_resolve_flow(struct phylink *pl, new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX; else if (pause & MLO_PAUSE_ASYM) new_pause = state->pause & MLO_PAUSE_SYM ? - MLO_PAUSE_RX : MLO_PAUSE_TX; + MLO_PAUSE_TX : MLO_PAUSE_RX; } else { new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK; } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index db16d7a13e00..aab0be40d443 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -787,7 +787,8 @@ static void tun_detach_all(struct net_device *dev) } static int tun_attach(struct tun_struct *tun, struct file *file, - bool skip_filter, bool napi, bool napi_frags) + bool skip_filter, bool napi, bool napi_frags, + bool publish_tun) { struct tun_file *tfile = file->private_data; struct net_device *dev = tun->dev; @@ -870,7 +871,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, * initialized tfile; otherwise we risk using half-initialized * object. */
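The tun change in the surrounding hunks defers rcu_assign_pointer(tfile->tun, tun) until register_netdevice() has succeeded, so a concurrent reader can never reach a half-initialized device. A minimal sketch of this publish-last idiom, with hypothetical demo_* names:

```c
/* Publish-after-init: fully initialize the object, then make it
 * reachable as the very last step. demo_* names are illustrative only.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
	int state;
};

static struct demo_obj __rcu *demo_ptr;

static int demo_create_and_publish(void)
{
	struct demo_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* Complete all initialization while the object is still private. */
	obj->state = 1;

	/* rcu_assign_pointer() carries the release barrier that orders the
	 * stores above before the pointer becomes visible to readers.
	 */
	rcu_assign_pointer(demo_ptr, obj);
	return 0;
}
```

That ordering guarantee is what lets the tun fix move publication after registration without taking an extra lock.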
- rcu_assign_pointer(tfile->tun, tun); + if (publish_tun) + rcu_assign_pointer(tfile->tun, tun); rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); tun->numqueues++; tun_set_real_num_queues(tun); @@ -2730,7 +2732,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, ifr->ifr_flags & IFF_NAPI, - ifr->ifr_flags & IFF_NAPI_FRAGS); + ifr->ifr_flags & IFF_NAPI_FRAGS, true); if (err < 0) return err; @@ -2829,13 +2831,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) INIT_LIST_HEAD(&tun->disabled); err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, - ifr->ifr_flags & IFF_NAPI_FRAGS); + ifr->ifr_flags & IFF_NAPI_FRAGS, false); if (err < 0) goto err_free_flow; err = register_netdevice(tun->dev); if (err < 0) goto err_detach; + /* free_netdev() won't check refcnt, to avoid a race + * with dev_put() we need to publish tun after registration. + */ + rcu_assign_pointer(tfile->tun, tun); } netif_carrier_on(tun->dev); @@ -2978,7 +2984,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) if (ret < 0) goto unlock; ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, - tun->flags & IFF_NAPI_FRAGS); + tun->flags & IFF_NAPI_FRAGS, true); } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { tun = rtnl_dereference(tfile->tun); if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 8458e88c18e9..32f53de5b1fe 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -206,7 +206,15 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) goto bad_desc; } skip: - if (rndis && header.usb_cdc_acm_descriptor && + /* Communication class functions with bmCapabilities are not + * RNDIS. But some Wireless class RNDIS functions use + * bmCapabilities for their own purpose. The failsafe is + * therefore applied only to Communication class RNDIS + * functions. The rndis test is redundant, but a cheap + * optimization.
+ */ + if (rndis && is_rndis(&intf->cur_altsetting->desc) && + header.usb_cdc_acm_descriptor && header.usb_cdc_acm_descriptor->bmCapabilities) { dev_dbg(&intf->dev, "ACM capabilities %02x, not really RNDIS?\n", diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4f3de0ac8b0b..ba98e0971b84 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1331,7 +1331,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, } } - if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { + if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { if (!try_fill_recv(vi, rq, GFP_ATOMIC)) schedule_delayed_work(&vi->refill, 0); } diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index d74349628db2..0e6a51525d91 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1115,7 +1115,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/ sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN); LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode); - lmc_trace(dev, "lmc_runnin_reset_out"); + lmc_trace(dev, "lmc_running_reset_out"); } diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c index 6642bcb27761..8efb493ceec2 100644 --- a/drivers/net/wimax/i2400m/op-rfkill.c +++ b/drivers/net/wimax/i2400m/op-rfkill.c @@ -127,6 +127,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev, "%d\n", result); result = 0; error_cmd: + kfree(cmd); kfree_skb(ack_skb); error_msg_to_dev: error_alloc: diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index e5ca1f2685b6..e29c47744ef5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1114,18 +1114,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* same thing for QuZ... 
*/ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { - if (cfg == &iwl_ax101_cfg_qu_hr) - cfg = &iwl_ax101_cfg_quz_hr; - else if (cfg == &iwl_ax201_cfg_qu_hr) - cfg = &iwl_ax201_cfg_quz_hr; - else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) - cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; - else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) - cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; - else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) - cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; - else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) - cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; + if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr) + iwl_trans->cfg = &iwl_ax101_cfg_quz_hr; + else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) + iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; + else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) + iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; + else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) + iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; + else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) + iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; + else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) + iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; } #endif diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c index 653d347a9a19..580387f9f12a 100644 --- a/drivers/net/wireless/marvell/mwifiex/ie.c +++ b/drivers/net/wireless/marvell/mwifiex/ie.c @@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len, } vs_ie = (struct ieee_types_header *)vendor_ie; + if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 > + IEEE_MAX_IE_SIZE) + return -EINVAL; memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length), vs_ie, vs_ie->len + 2); le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2); diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index 18f7d9bf30b2..0939a8c8f3ab 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg, rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len); if (rate_ie) { + if (rate_ie->len > MWIFIEX_SUPPORTED_RATES) + return; memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len); rate_len = rate_ie->len; } @@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg, rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, params->beacon.tail, params->beacon.tail_len); - if (rate_ie) + if (rate_ie) { + if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len) + return; memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len); + } return; } @@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv, params->beacon.tail_len); if (vendor_ie) { wmm_ie = vendor_ie; + if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info)) + return; memcpy(&bss_cfg->wmm_info, wmm_ie + sizeof(struct ieee_types_header), *(wmm_ie + 1)); priv->wmm_enabled = 1; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 40c0d536e20d..9d4426f6905f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -59,6 +59,11 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev) dev_dbg(dev->mt76.dev, "mask out 2GHz support\n"); } + if (is_mt7630(dev)) { + dev->mt76.cap.has_5ghz = false; + dev_dbg(dev->mt76.dev, "mask out 5GHz support\n"); + } + if 
(!mt76x02_field_valid(nic_conf1 & 0xff)) nic_conf1 &= 0xff00; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index f84a7df296ea..7705e55aa3d1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -51,6 +51,19 @@ static void mt76x0e_stop(struct ieee80211_hw *hw) mt76x0e_stop_hw(dev); } +static int +mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt76x02_dev *dev = hw->priv; + + if (is_mt7630(dev)) + return -EOPNOTSUPP; + + return mt76x02_set_key(hw, cmd, vif, sta, key); +} + static void mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) @@ -67,7 +80,7 @@ static const struct ieee80211_ops mt76x0e_ops = { .configure_filter = mt76x02_configure_filter, .bss_info_changed = mt76x02_bss_info_changed, .sta_state = mt76_sta_state, - .set_key = mt76x02_set_key, + .set_key = mt76x0e_set_key, .conf_tx = mt76x02_conf_tx, .sw_scan_start = mt76_sw_scan, .sw_scan_complete = mt76x02_sw_scan_complete, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index ecbe78b8027b..f1cdcd61c54a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1654,13 +1654,18 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev, offset = MAC_IVEIV_ENTRY(key->hw_key_idx); - rt2800_register_multiread(rt2x00dev, offset, - &iveiv_entry, sizeof(iveiv_entry)); - if ((crypto->cipher == CIPHER_TKIP) || - (crypto->cipher == CIPHER_TKIP_NO_MIC) || - (crypto->cipher == CIPHER_AES)) - iveiv_entry.iv[3] |= 0x20; - iveiv_entry.iv[3] |= key->keyidx << 6; + if (crypto->cmd == SET_KEY) { + rt2800_register_multiread(rt2x00dev, offset, + &iveiv_entry, sizeof(iveiv_entry)); + if ((crypto->cipher == CIPHER_TKIP) || + (crypto->cipher == CIPHER_TKIP_NO_MIC) || + (crypto->cipher == CIPHER_AES)) + iveiv_entry.iv[3] |= 0x20; + iveiv_entry.iv[3] |= key->keyidx << 6; + } else { + memset(&iveiv_entry, 0, sizeof(iveiv_entry)); + } + rt2800_register_multiwrite(rt2x00dev, offset, &iveiv_entry, sizeof(iveiv_entry)); } @@ -4237,24 +4242,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, switch (rt2x00dev->default_ant.rx_chain_num) { case 3: /* Turn on tertiary LNAs */ - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, - rf->channel > 14); - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, - rf->channel <= 14); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1); /* fall-through */ case 2: /* Turn on secondary LNAs */ - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, - rf->channel > 14); - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, - rf->channel <= 14); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1); /* fall-through */ case 1: /* Turn on primary LNAs */ - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, - rf->channel > 14); - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, - rf->channel <= 14); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1); break; } diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index f5048d4b8cb6..760eaffeebd6 100644 --- 
a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -645,7 +645,6 @@ fail_rx: kfree(rsi_dev->tx_buffer); fail_eps: - kfree(rsi_dev); return status; } diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index e42850095892..7eda62a9e0df 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c @@ -316,7 +316,7 @@ static int st95hf_echo_command(struct st95hf_context *st95context) &echo_response); if (result) { dev_err(&st95context->spicontext.spidev->dev, - "err: echo response receieve error = 0x%x\n", result); + "err: echo response receive error = 0x%x\n", result); return result; } diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 3e7b11cf1aae..cb98b8fe786e 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -655,6 +655,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) resource_size_t start, size; struct nd_region *nd_region; unsigned long npfns, align; + u32 end_trunc; struct nd_pfn_sb *pfn_sb; phys_addr_t offset; const char *sig; @@ -696,6 +697,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) size = resource_size(&nsio->res); npfns = PHYS_PFN(size - SZ_8K); align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT)); + end_trunc = start + size - ALIGN_DOWN(start + size, align); if (nd_pfn->mode == PFN_MODE_PMEM) { /* * The altmap should be padded out to the block size used @@ -714,7 +716,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) return -ENXIO; } - npfns = PHYS_PFN(size - offset); + npfns = PHYS_PFN(size - offset - end_trunc); pfn_sb->mode = cpu_to_le32(nd_pfn->mode); pfn_sb->dataoff = cpu_to_le64(offset); pfn_sb->npfns = cpu_to_le64(npfns); @@ -723,6 +725,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16); pfn_sb->version_major = cpu_to_le16(1); pfn_sb->version_minor = cpu_to_le16(3); + pfn_sb->end_trunc = cpu_to_le32(end_trunc); pfn_sb->align = cpu_to_le32(nd_pfn->align); checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb); pfn_sb->checksum = cpu_to_le64(checksum); diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c index ba6438ac4d72..ff84d1afd229 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c @@ -2552,7 +2552,7 @@ static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx, if (IS_ERR(map)) return map; } else - map = ERR_PTR(-ENODEV); + return ERR_PTR(-ENODEV); ctx->maps[ASPEED_IP_LPC] = map; dev_dbg(ctx->dev, "Acquired LPC regmap"); @@ -2562,6 +2562,33 @@ static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx, return ERR_PTR(-EINVAL); } +static int aspeed_g5_sig_expr_eval(struct aspeed_pinmux_data *ctx, + const struct aspeed_sig_expr *expr, + bool enabled) +{ + int ret; + int i; + + for (i = 0; i < expr->ndescs; i++) { + const struct aspeed_sig_desc *desc = &expr->descs[i]; + struct regmap *map; + + map = aspeed_g5_acquire_regmap(ctx, desc->ip); + if (IS_ERR(map)) { + dev_err(ctx->dev, + "Failed to acquire regmap for IP block %d\n", + desc->ip); + return PTR_ERR(map); + } + + ret = aspeed_sig_desc_eval(desc, enabled, ctx->maps[desc->ip]); + if (ret <= 0) + return ret; + } + + return 1; +} + /** * Configure a pin's signal by applying an expression's descriptor state for * all descriptors in the expression. 
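The aspeed pinmux hunks that follow route expression evaluation through an optional per-SoC eval op: the generic aspeed_sig_expr_eval() calls the hook when the variant provides one and otherwise walks the descriptors itself. A compact sketch of that optional-op dispatch, with hypothetical demo_* names:

```c
/* Optional-op dispatch: a generic entry point defers to a variant hook
 * when one is supplied. demo_* names are illustrative only.
 */
struct demo_ctx;

struct demo_ops {
	/* Optional variant-specific evaluation (the g5 version, for
	 * instance, must lazily acquire extra regmaps first).
	 */
	int (*eval)(struct demo_ctx *ctx, int expr, bool enabled);
};

struct demo_ctx {
	const struct demo_ops *ops;
};

/* Common fallback shared by variants that supply no hook. */
static int demo_generic_eval(struct demo_ctx *ctx, int expr, bool enabled)
{
	return 1;	/* the generic descriptor walk would live here */
}

int demo_expr_eval(struct demo_ctx *ctx, int expr, bool enabled)
{
	if (ctx->ops->eval)
		return ctx->ops->eval(ctx, expr, enabled);

	return demo_generic_eval(ctx, expr, enabled);
}
```

Keeping the hook optional preserves the old behaviour for existing SoC generations at the cost of a single pointer test per call.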
@@ -2647,6 +2674,7 @@ static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx, } static const struct aspeed_pinmux_ops aspeed_g5_ops = { + .eval = aspeed_g5_sig_expr_eval, .set = aspeed_g5_sig_expr_set, }; diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.c b/drivers/pinctrl/aspeed/pinmux-aspeed.c index 839c01b7953f..57305ca838a7 100644 --- a/drivers/pinctrl/aspeed/pinmux-aspeed.c +++ b/drivers/pinctrl/aspeed/pinmux-aspeed.c @@ -78,11 +78,14 @@ int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc, * neither the enabled nor disabled state. Thus we must explicitly test for * either condition as required. */ -int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx, +int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr *expr, bool enabled) { - int i; int ret; + int i; + + if (ctx->ops->eval) + return ctx->ops->eval(ctx, expr, enabled); for (i = 0; i < expr->ndescs; i++) { const struct aspeed_sig_desc *desc = &expr->descs[i]; diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h index 52d299b59ce2..db3457c86f48 100644 --- a/drivers/pinctrl/aspeed/pinmux-aspeed.h +++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h @@ -702,6 +702,8 @@ struct aspeed_pin_function { struct aspeed_pinmux_data; struct aspeed_pinmux_ops { + int (*eval)(struct aspeed_pinmux_data *ctx, + const struct aspeed_sig_expr *expr, bool enabled); int (*set)(struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr *expr, bool enabled); }; @@ -722,9 +724,8 @@ struct aspeed_pinmux_data { int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc, bool enabled, struct regmap *map); -int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx, - const struct aspeed_sig_expr *expr, - bool enabled); +int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx, + const struct aspeed_sig_expr *expr, bool enabled); static inline int aspeed_sig_expr_set(struct aspeed_pinmux_data *ctx, const struct aspeed_sig_expr *expr, diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c index 584284938ac9..d2f804dbc785 100644 --- a/drivers/regulator/act8945a-regulator.c +++ b/drivers/regulator/act8945a-regulator.c @@ -169,16 +169,16 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode) reg = ACT8945A_DCDC3_CTRL; break; case ACT8945A_ID_LDO1: - reg = ACT8945A_LDO1_SUS; + reg = ACT8945A_LDO1_CTRL; break; case ACT8945A_ID_LDO2: - reg = ACT8945A_LDO2_SUS; + reg = ACT8945A_LDO2_CTRL; break; case ACT8945A_ID_LDO3: - reg = ACT8945A_LDO3_SUS; + reg = ACT8945A_LDO3_CTRL; break; case ACT8945A_ID_LDO4: - reg = ACT8945A_LDO4_SUS; + reg = ACT8945A_LDO4_CTRL; break; default: return -EINVAL; diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c index 04b732991d69..4d859fef55e6 100644 --- a/drivers/regulator/slg51000-regulator.c +++ b/drivers/regulator/slg51000-regulator.c @@ -205,7 +205,7 @@ static int slg51000_of_parse_cb(struct device_node *np, ena_gpiod = devm_gpiod_get_from_of_node(chip->dev, np, "enable-gpios", 0, gflags, "gpio-en-ldo"); - if (ena_gpiod) { + if (!IS_ERR(ena_gpiod)) { config->ena_gpiod = ena_gpiod; devm_gpiod_unhinge(chip->dev, config->ena_gpiod); } @@ -459,7 +459,7 @@ static int slg51000_i2c_probe(struct i2c_client *client, GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE, "slg51000-cs"); - if (cs_gpiod) { + if (!IS_ERR(cs_gpiod)) { dev_info(dev, "Found chip selector property\n"); chip->cs_gpiod = cs_gpiod; } diff --git a/drivers/regulator/twl-regulator.c 
b/drivers/regulator/twl-regulator.c index 6fa15b2d6fb3..866b4dd01da9 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -359,6 +359,17 @@ static const u16 VINTANA2_VSEL_table[] = { 2500, 2750, }; +/* 600mV to 1450mV in 12.5 mV steps */ +static const struct regulator_linear_range VDD1_ranges[] = { + REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500) +}; + +/* 600mV to 1450mV in 12.5 mV steps, everything above = 1500mV */ +static const struct regulator_linear_range VDD2_ranges[] = { + REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500), + REGULATOR_LINEAR_RANGE(1500000, 69, 69, 12500) +}; + static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) { struct twlreg_info *info = rdev_get_drvdata(rdev); @@ -427,6 +438,8 @@ static int twl4030smps_get_voltage(struct regulator_dev *rdev) } static const struct regulator_ops twl4030smps_ops = { + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage = twl4030smps_set_voltage, .get_voltage = twl4030smps_get_voltage, }; @@ -466,7 +479,8 @@ static const struct twlreg_info TWL4030_INFO_##label = { \ }, \ } -#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \ +#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf, \ + n_volt) \ static const struct twlreg_info TWL4030_INFO_##label = { \ .base = offset, \ .id = num, \ @@ -479,6 +493,9 @@ static const struct twlreg_info TWL4030_INFO_##label = { \ .owner = THIS_MODULE, \ .enable_time = turnon_delay, \ .of_map_mode = twl4030reg_map_mode, \ + .n_voltages = n_volt, \ + .n_linear_ranges = ARRAY_SIZE(label ## _ranges), \ + .linear_ranges = label ## _ranges, \ }, \ } @@ -518,8 +535,8 @@ TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00); TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08); TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08); TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08); -TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08); -TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08); +TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08, 68); +TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08, 69); /* VUSBCP is managed *only* by the USB subchip */ TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08); TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08); diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 8d8c495b5b60..d65558619ab0 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -5715,7 +5715,7 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, * 0 = Set nr_hw_queues by the number of CPUs or HW queues. * 1,128 = Manually specify the maximum nr_hw_queue value to be set, * - * Value range is [0,128]. Default value is 8. + * Value range is [0,256]. Default value is 8. 
*/ LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF, LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX, diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 329f7aa7e169..a81ef0293696 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -46,7 +46,7 @@ /* FCP MQ queue count limiting */ #define LPFC_FCP_MQ_THRESHOLD_MIN 0 -#define LPFC_FCP_MQ_THRESHOLD_MAX 128 +#define LPFC_FCP_MQ_THRESHOLD_MAX 256 #define LPFC_FCP_MQ_THRESHOLD_DEF 8 /* Common buffer size to accommodate SCSI and NVME IO buffers */ diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index d5cf953b4337..7d622ea1274e 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -630,6 +630,9 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len, struct geni_wrapper *wrapper = se->wrapper; u32 val; + if (!wrapper) + return -EINVAL; + *iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE); if (dma_mapping_error(wrapper->dev, *iova)) return -EIO; @@ -663,6 +666,9 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len, struct geni_wrapper *wrapper = se->wrapper; u32 val; + if (!wrapper) + return -EINVAL; + *iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE); if (dma_mapping_error(wrapper->dev, *iova)) return -EIO; diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index 9e90e969af55..7804869c6a31 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c @@ -22,6 +22,12 @@ * Using this limit prevents one virtqueue from starving others. */ #define VHOST_TEST_WEIGHT 0x80000 +/* Max number of packets transferred before requeueing the job. + * Using this limit prevents one virtqueue from starving others with + * pkts. + */ +#define VHOST_TEST_PKT_WEIGHT 256 + enum { VHOST_TEST_VQ = 0, VHOST_TEST_VQ_MAX = 1, @@ -80,10 +86,8 @@ static void handle_vq(struct vhost_test *n) } vhost_add_used_and_signal(&n->dev, vq, head, 0); total_len += len; - if (unlikely(total_len >= VHOST_TEST_WEIGHT)) { - vhost_poll_queue(&vq->poll); + if (unlikely(vhost_exceeds_weight(vq, 0, total_len))) break; - } } mutex_unlock(&vq->mutex); @@ -115,7 +119,8 @@ static int vhost_test_open(struct inode *inode, struct file *f) dev = &n->dev; vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; - vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX); + vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV, + VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT); f->private_data = n; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 0536f8526359..36ca2cf419bf 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -203,7 +203,6 @@ EXPORT_SYMBOL_GPL(vhost_poll_init); int vhost_poll_start(struct vhost_poll *poll, struct file *file) { __poll_t mask; - int ret = 0; if (poll->wqh) return 0; @@ -213,10 +212,10 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file) vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask)); if (mask & EPOLLERR) { vhost_poll_stop(poll); - ret = -EINVAL; + return -EINVAL; } - return ret; + return 0; } EXPORT_SYMBOL_GPL(vhost_poll_start); @@ -298,160 +297,6 @@ static void vhost_vq_meta_reset(struct vhost_dev *d) __vhost_vq_meta_reset(d->vqs[i]); } -#if VHOST_ARCH_CAN_ACCEL_UACCESS -static void vhost_map_unprefetch(struct vhost_map *map) -{ - kfree(map->pages); - map->pages = NULL; - map->npages = 0; - map->addr = NULL; -} - -static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq) -{ - struct vhost_map
*map[VHOST_NUM_ADDRS]; - int i; - - spin_lock(&vq->mmu_lock); - for (i = 0; i < VHOST_NUM_ADDRS; i++) { - map[i] = rcu_dereference_protected(vq->maps[i], - lockdep_is_held(&vq->mmu_lock)); - if (map[i]) - rcu_assign_pointer(vq->maps[i], NULL); - } - spin_unlock(&vq->mmu_lock); - - synchronize_rcu(); - - for (i = 0; i < VHOST_NUM_ADDRS; i++) - if (map[i]) - vhost_map_unprefetch(map[i]); - -} - -static void vhost_reset_vq_maps(struct vhost_virtqueue *vq) -{ - int i; - - vhost_uninit_vq_maps(vq); - for (i = 0; i < VHOST_NUM_ADDRS; i++) - vq->uaddrs[i].size = 0; -} - -static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr, - unsigned long start, - unsigned long end) -{ - if (unlikely(!uaddr->size)) - return false; - - return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); -} - -static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq, - int index, - unsigned long start, - unsigned long end) -{ - struct vhost_uaddr *uaddr = &vq->uaddrs[index]; - struct vhost_map *map; - int i; - - if (!vhost_map_range_overlap(uaddr, start, end)) - return; - - spin_lock(&vq->mmu_lock); - ++vq->invalidate_count; - - map = rcu_dereference_protected(vq->maps[index], - lockdep_is_held(&vq->mmu_lock)); - if (map) { - if (uaddr->write) { - for (i = 0; i < map->npages; i++) - set_page_dirty(map->pages[i]); - } - rcu_assign_pointer(vq->maps[index], NULL); - } - spin_unlock(&vq->mmu_lock); - - if (map) { - synchronize_rcu(); - vhost_map_unprefetch(map); - } -} - -static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq, - int index, - unsigned long start, - unsigned long end) -{ - if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end)) - return; - - spin_lock(&vq->mmu_lock); - --vq->invalidate_count; - spin_unlock(&vq->mmu_lock); -} - -static int vhost_invalidate_range_start(struct mmu_notifier *mn, - const struct mmu_notifier_range *range) -{ - struct vhost_dev *dev = container_of(mn, struct vhost_dev, - mmu_notifier); - int i, j; - - if (!mmu_notifier_range_blockable(range)) - return -EAGAIN; - - for (i = 0; i < dev->nvqs; i++) { - struct vhost_virtqueue *vq = dev->vqs[i]; - - for (j = 0; j < VHOST_NUM_ADDRS; j++) - vhost_invalidate_vq_start(vq, j, - range->start, - range->end); - } - - return 0; -} - -static void vhost_invalidate_range_end(struct mmu_notifier *mn, - const struct mmu_notifier_range *range) -{ - struct vhost_dev *dev = container_of(mn, struct vhost_dev, - mmu_notifier); - int i, j; - - for (i = 0; i < dev->nvqs; i++) { - struct vhost_virtqueue *vq = dev->vqs[i]; - - for (j = 0; j < VHOST_NUM_ADDRS; j++) - vhost_invalidate_vq_end(vq, j, - range->start, - range->end); - } -} - -static const struct mmu_notifier_ops vhost_mmu_notifier_ops = { - .invalidate_range_start = vhost_invalidate_range_start, - .invalidate_range_end = vhost_invalidate_range_end, -}; - -static void vhost_init_maps(struct vhost_dev *dev) -{ - struct vhost_virtqueue *vq; - int i, j; - - dev->mmu_notifier.ops = &vhost_mmu_notifier_ops; - - for (i = 0; i < dev->nvqs; ++i) { - vq = dev->vqs[i]; - for (j = 0; j < VHOST_NUM_ADDRS; j++) - RCU_INIT_POINTER(vq->maps[j], NULL); - } -} -#endif - static void vhost_vq_reset(struct vhost_dev *dev, struct vhost_virtqueue *vq) { @@ -480,11 +325,7 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->busyloop_timeout = 0; vq->umem = NULL; vq->iotlb = NULL; - vq->invalidate_count = 0; __vhost_vq_meta_reset(vq); -#if VHOST_ARCH_CAN_ACCEL_UACCESS - vhost_reset_vq_maps(vq); -#endif } static int vhost_worker(void *data) @@ -634,9 +475,7 @@ void 
vhost_dev_init(struct vhost_dev *dev, INIT_LIST_HEAD(&dev->read_list); INIT_LIST_HEAD(&dev->pending_list); spin_lock_init(&dev->iotlb_lock); -#if VHOST_ARCH_CAN_ACCEL_UACCESS - vhost_init_maps(dev); -#endif + for (i = 0; i < dev->nvqs; ++i) { vq = dev->vqs[i]; @@ -645,7 +484,6 @@ void vhost_dev_init(struct vhost_dev *dev, vq->heads = NULL; vq->dev = dev; mutex_init(&vq->mutex); - spin_lock_init(&vq->mmu_lock); vhost_vq_reset(dev, vq); if (vq->handle_kick) vhost_poll_init(&vq->poll, vq->handle_kick, @@ -725,18 +563,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev) if (err) goto err_cgroup; -#if VHOST_ARCH_CAN_ACCEL_UACCESS - err = mmu_notifier_register(&dev->mmu_notifier, dev->mm); - if (err) - goto err_mmu_notifier; -#endif - return 0; - -#if VHOST_ARCH_CAN_ACCEL_UACCESS -err_mmu_notifier: - vhost_dev_free_iovecs(dev); -#endif err_cgroup: kthread_stop(worker); dev->worker = NULL; @@ -827,107 +654,6 @@ static void vhost_clear_msg(struct vhost_dev *dev) spin_unlock(&dev->iotlb_lock); } -#if VHOST_ARCH_CAN_ACCEL_UACCESS -static void vhost_setup_uaddr(struct vhost_virtqueue *vq, - int index, unsigned long uaddr, - size_t size, bool write) -{ - struct vhost_uaddr *addr = &vq->uaddrs[index]; - - addr->uaddr = uaddr; - addr->size = size; - addr->write = write; -} - -static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq) -{ - vhost_setup_uaddr(vq, VHOST_ADDR_DESC, - (unsigned long)vq->desc, - vhost_get_desc_size(vq, vq->num), - false); - vhost_setup_uaddr(vq, VHOST_ADDR_AVAIL, - (unsigned long)vq->avail, - vhost_get_avail_size(vq, vq->num), - false); - vhost_setup_uaddr(vq, VHOST_ADDR_USED, - (unsigned long)vq->used, - vhost_get_used_size(vq, vq->num), - true); -} - -static int vhost_map_prefetch(struct vhost_virtqueue *vq, - int index) -{ - struct vhost_map *map; - struct vhost_uaddr *uaddr = &vq->uaddrs[index]; - struct page **pages; - int npages = DIV_ROUND_UP(uaddr->size, PAGE_SIZE); - int npinned; - void *vaddr, *v; - int err; - int i; - - spin_lock(&vq->mmu_lock); - - err = -EFAULT; - if (vq->invalidate_count) - goto err; - - err = -ENOMEM; - map = kmalloc(sizeof(*map), GFP_ATOMIC); - if (!map) - goto err; - - pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC); - if (!pages) - goto err_pages; - - err = EFAULT; - npinned = __get_user_pages_fast(uaddr->uaddr, npages, - uaddr->write, pages); - if (npinned > 0) - release_pages(pages, npinned); - if (npinned != npages) - goto err_gup; - - for (i = 0; i < npinned; i++) - if (PageHighMem(pages[i])) - goto err_gup; - - vaddr = v = page_address(pages[0]); - - /* For simplicity, fallback to userspace address if VA is not - * contigious. - */ - for (i = 1; i < npinned; i++) { - v += PAGE_SIZE; - if (v != page_address(pages[i])) - goto err_gup; - } - - map->addr = vaddr + (uaddr->uaddr & (PAGE_SIZE - 1)); - map->npages = npages; - map->pages = pages; - - rcu_assign_pointer(vq->maps[index], map); - /* No need for a synchronize_rcu(). This function should be - * called by dev->worker so we are serialized with all - * readers. 
- */ - spin_unlock(&vq->mmu_lock); - - return 0; - -err_gup: - kfree(pages); -err_pages: - kfree(map); -err: - spin_unlock(&vq->mmu_lock); - return err; -} -#endif - void vhost_dev_cleanup(struct vhost_dev *dev) { int i; @@ -957,16 +683,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev) kthread_stop(dev->worker); dev->worker = NULL; } - if (dev->mm) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - mmu_notifier_unregister(&dev->mmu_notifier, dev->mm); -#endif + if (dev->mm) mmput(dev->mm); - } -#if VHOST_ARCH_CAN_ACCEL_UACCESS - for (i = 0; i < dev->nvqs; i++) - vhost_uninit_vq_maps(dev->vqs[i]); -#endif dev->mm = NULL; } EXPORT_SYMBOL_GPL(vhost_dev_cleanup); @@ -1195,26 +913,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, static inline int vhost_put_avail_event(struct vhost_virtqueue *vq) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_used *used; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); - if (likely(map)) { - used = map->addr; - *((__virtio16 *)&used->ring[vq->num]) = - cpu_to_vhost16(vq, vq->avail_idx); - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq)); } @@ -1223,27 +921,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq, struct vring_used_elem *head, int idx, int count) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_used *used; - size_t size; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); - if (likely(map)) { - used = map->addr; - size = count * sizeof(*head); - memcpy(used->ring + idx, head, size); - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_copy_to_user(vq, vq->used->ring + idx, head, count * sizeof(*head)); } @@ -1251,25 +928,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq, static inline int vhost_put_used_flags(struct vhost_virtqueue *vq) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_used *used; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); - if (likely(map)) { - used = map->addr; - used->flags = cpu_to_vhost16(vq, vq->used_flags); - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags); } @@ -1277,25 +935,6 @@ static inline int vhost_put_used_flags(struct vhost_virtqueue *vq) static inline int vhost_put_used_idx(struct vhost_virtqueue *vq) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_used *used; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); - if (likely(map)) { - used = map->addr; - used->idx = cpu_to_vhost16(vq, vq->last_used_idx); - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx); } @@ -1341,50 +980,12 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d) static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq, __virtio16 *idx) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_avail *avail; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]); - if (likely(map)) { - avail = map->addr; - *idx = avail->idx; - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return 
vhost_get_avail(vq, *idx, &vq->avail->idx); } static inline int vhost_get_avail_head(struct vhost_virtqueue *vq, __virtio16 *head, int idx) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_avail *avail; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]); - if (likely(map)) { - avail = map->addr; - *head = avail->ring[idx & (vq->num - 1)]; - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_get_avail(vq, *head, &vq->avail->ring[idx & (vq->num - 1)]); } @@ -1392,98 +993,24 @@ static inline int vhost_get_avail_head(struct vhost_virtqueue *vq, static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq, __virtio16 *flags) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_avail *avail; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]); - if (likely(map)) { - avail = map->addr; - *flags = avail->flags; - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_get_avail(vq, *flags, &vq->avail->flags); } static inline int vhost_get_used_event(struct vhost_virtqueue *vq, __virtio16 *event) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_avail *avail; - - if (!vq->iotlb) { - rcu_read_lock(); - map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]); - if (likely(map)) { - avail = map->addr; - *event = (__virtio16)avail->ring[vq->num]; - rcu_read_unlock(); - return 0; - } - rcu_read_unlock(); - } -#endif - return vhost_get_avail(vq, *event, vhost_used_event(vq)); } static inline int vhost_get_used_idx(struct vhost_virtqueue *vq, __virtio16 *idx) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_used *used; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); - if (likely(map)) { - used = map->addr; - *idx = used->idx; - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_get_used(vq, *idx, &vq->used->idx); } static inline int vhost_get_desc(struct vhost_virtqueue *vq, struct vring_desc *desc, int idx) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - struct vhost_map *map; - struct vring_desc *d; - - if (!vq->iotlb) { - rcu_read_lock(); - - map = rcu_dereference(vq->maps[VHOST_ADDR_DESC]); - if (likely(map)) { - d = map->addr; - *desc = *(d + idx); - rcu_read_unlock(); - return 0; - } - - rcu_read_unlock(); - } -#endif - return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); } @@ -1824,32 +1351,12 @@ static bool iotlb_access_ok(struct vhost_virtqueue *vq, return true; } -#if VHOST_ARCH_CAN_ACCEL_UACCESS -static void vhost_vq_map_prefetch(struct vhost_virtqueue *vq) -{ - struct vhost_map __rcu *map; - int i; - - for (i = 0; i < VHOST_NUM_ADDRS; i++) { - rcu_read_lock(); - map = rcu_dereference(vq->maps[i]); - rcu_read_unlock(); - if (unlikely(!map)) - vhost_map_prefetch(vq, i); - } -} -#endif - int vq_meta_prefetch(struct vhost_virtqueue *vq) { unsigned int num = vq->num; - if (!vq->iotlb) { -#if VHOST_ARCH_CAN_ACCEL_UACCESS - vhost_vq_map_prefetch(vq); -#endif + if (!vq->iotlb) return 1; - } return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc, vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) && @@ -2060,16 +1567,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d, mutex_lock(&vq->mutex); -#if VHOST_ARCH_CAN_ACCEL_UACCESS - /* Unregister MMU notifer to allow invalidation callback - * can access vq->uaddrs[] without holding a lock. 
- */ - if (d->mm) - mmu_notifier_unregister(&d->mmu_notifier, d->mm); - - vhost_uninit_vq_maps(vq); -#endif - switch (ioctl) { case VHOST_SET_VRING_NUM: r = vhost_vring_set_num(d, vq, argp); @@ -2081,13 +1578,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d, BUG(); } -#if VHOST_ARCH_CAN_ACCEL_UACCESS - vhost_setup_vq_uaddr(vq); - - if (d->mm) - mmu_notifier_register(&d->mmu_notifier, d->mm); -#endif - mutex_unlock(&vq->mutex); return r; @@ -2688,7 +2178,7 @@ static int get_indirect(struct vhost_virtqueue *vq, /* If this is an input descriptor, increment that count. */ if (access == VHOST_ACCESS_WO) { *in_num += ret; - if (unlikely(log)) { + if (unlikely(log && ret)) { log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); log[*log_num].len = vhost32_to_cpu(vq, desc.len); ++*log_num; @@ -2829,7 +2319,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, /* If this is an input descriptor, * increment that count. */ *in_num += ret; - if (unlikely(log)) { + if (unlikely(log && ret)) { log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); log[*log_num].len = vhost32_to_cpu(vq, desc.len); ++*log_num; diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 42a8c2a13ab1..e9ed2722b633 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -12,9 +12,6 @@ #include <linux/virtio_config.h> #include <linux/virtio_ring.h> #include <linux/atomic.h> -#include <linux/pagemap.h> -#include <linux/mmu_notifier.h> -#include <asm/cacheflush.h> struct vhost_work; typedef void (*vhost_work_fn_t)(struct vhost_work *work); @@ -83,24 +80,6 @@ enum vhost_uaddr_type { VHOST_NUM_ADDRS = 3, }; -struct vhost_map { - int npages; - void *addr; - struct page **pages; -}; - -struct vhost_uaddr { - unsigned long uaddr; - size_t size; - bool write; -}; - -#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0 -#define VHOST_ARCH_CAN_ACCEL_UACCESS 0 -#else -#define VHOST_ARCH_CAN_ACCEL_UACCESS 0 -#endif - /* The virtqueue structure describes a queue attached to a device. */ struct vhost_virtqueue { struct vhost_dev *dev; @@ -111,22 +90,7 @@ struct vhost_virtqueue { struct vring_desc __user *desc; struct vring_avail __user *avail; struct vring_used __user *used; - -#if VHOST_ARCH_CAN_ACCEL_UACCESS - /* Read by memory accessors, modified by meta data - * prefetching, MMU notifier and vring ioctl(). - * Synchonrized through mmu_lock (writers) and RCU (writers - * and readers). - */ - struct vhost_map __rcu *maps[VHOST_NUM_ADDRS]; - /* Read by MMU notifier, modified by vring ioctl(), - * synchronized through MMU notifier - * registering/unregistering. 
- */ - struct vhost_uaddr uaddrs[VHOST_NUM_ADDRS]; -#endif const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS]; - struct file *kick; struct eventfd_ctx *call_ctx; struct eventfd_ctx *error_ctx; @@ -181,8 +145,6 @@ struct vhost_virtqueue { bool user_be; #endif u32 busyloop_timeout; - spinlock_t mmu_lock; - int invalidate_count; }; struct vhost_msg_node { @@ -196,9 +158,6 @@ struct vhost_msg_node { struct vhost_dev { struct mm_struct *mm; -#ifdef CONFIG_MMU_NOTIFIER - struct mmu_notifier mmu_notifier; -#endif struct mutex mutex; struct vhost_virtqueue **vqs; int nvqs; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index c8be1c4f5b55..bdc08244a648 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -566,13 +566,17 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, unmap_release: err_idx = i; - i = head; + + if (indirect) + i = 0; + else + i = head; for (n = 0; n < total_sg; n++) { if (i == err_idx) break; vring_unmap_one_split(vq, &desc[i]); - i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next); + i = virtio16_to_cpu(_vq->vdev, desc[i].next); } if (indirect) |