Diffstat (limited to 'drivers')
644 files changed, 22743 insertions, 8342 deletions
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 82b0b5710979..b0399e8f6d27 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
 
 	ACPI_FUNCTION_TRACE(tb_install_and_load_table);
 
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
 	/* Install the table and load it into the namespace */
 
 	status = acpi_tb_install_standard_table(address, flags, TRUE,
 						override, &i);
 	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
+		goto exit;
 	}
 
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	status = acpi_tb_load_table(i, acpi_gbl_root_node);
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
-unlock_and_exit:
+exit:
 	*table_index = i;
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	return_ACPI_STATUS(status);
 }
 
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 5fdf251a9f97..01e1b3d63fc0 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 		goto release_and_exit;
 	}
 
+	/* Acquire the table lock */
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
 	if (reload) {
 		/*
 		 * Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 					 new_table_desc.signature.integer));
 
 			status = AE_BAD_SIGNATURE;
-			goto release_and_exit;
+			goto unlock_and_exit;
 		}
 
 		/* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 				/* Table is still loaded, this is an error */
 
 				status = AE_ALREADY_EXISTS;
-				goto release_and_exit;
+				goto unlock_and_exit;
 			} else {
 				/*
 				 * Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 				 * indicate the re-installation.
 				 */
 				acpi_tb_uninstall_table(&new_table_desc);
+				(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 				*table_index = i;
 				return_ACPI_STATUS(AE_OK);
 			}
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 
 	/* Invoke table handler if present */
 
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	if (acpi_gbl_table_handler) {
 		(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
 					     new_table_desc.pointer,
 					     acpi_gbl_table_handler_context);
 	}
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+unlock_and_exit:
+
+	/* Release the table lock */
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 
 release_and_exit:
 
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 9b6cebe227a0..54abb26b7366 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void)
 		if (acpi_sleep_state_supported(i))
 			sleep_states[i] = 1;
 
-	/*
-	 * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
-	 * the default suspend mode was not selected from the command line.
-	 */
-	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
-	    mem_sleep_default > PM_SUSPEND_MEM)
-		mem_sleep_default = PM_SUSPEND_FREEZE;
-
 	suspend_set_ops(old_suspend_ordering ?
 			&acpi_suspend_ops_old : &acpi_suspend_ops);
 	freeze_set_ops(&acpi_freeze_ops);
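Editor's note on the two ACPICA hunks above: ACPI_MTX_TABLES is now taken inside acpi_tb_install_standard_table() and dropped around the ACPI_TABLE_EVENT_INSTALL handler, so a handler that re-enters table code cannot deadlock on a lock it already holds. A minimal sketch of that general pattern, using hypothetical pthread-based names (not the ACPICA API):

#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void (*event_handler)(int event);   /* user-installed callback */

/* Drop the lock around the user callback so the callback may re-enter
 * table operations without self-deadlocking on table_lock. */
static int install_table(int event)
{
    pthread_mutex_lock(&table_lock);
    /* ... validate and register the table under the lock ... */

    pthread_mutex_unlock(&table_lock);     /* callback runs unlocked */
    if (event_handler)
        event_handler(event);
    pthread_mutex_lock(&table_lock);

    /* ... finish bookkeeping under the lock ... */
    pthread_mutex_unlock(&table_lock);
    return 0;
}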
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 02ded25c82e4..7f48156cbc0c 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
 		},
 	},
-	{
-	/* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
-	/* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
-	.callback = video_detect_force_native,
-	.ident = "HP Pavilion dv6",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-		DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
-		},
-	},
-
 	{ },
 };
 
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 4ef4c5caed4f..8a8e403644d6 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -132,9 +132,9 @@ config HT16K33
 	tristate "Holtek Ht16K33 LED controller with keyscan"
 	depends on FB && OF && I2C && INPUT
 	select FB_SYS_FOPS
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
 	select INPUT_MATRIXKMAP
 	select FB_BACKLIGHT
 	help
diff --git a/drivers/base/base.h b/drivers/base/base.h
index ada9dce34e6d..e19b1008e5fb 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -141,8 +141,6 @@ extern void device_unblock_probing(void);
 extern struct kset *devices_kset;
 extern void devices_kset_move_last(struct device *dev);
 
-extern struct device_attribute dev_attr_deferred_probe;
-
 #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
 extern void module_add_driver(struct module *mod, struct device_driver *drv);
 extern void module_remove_driver(struct device_driver *drv);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 020ea7f05520..8c25e68e67d7 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1060,14 +1060,8 @@ static int device_add_attrs(struct device *dev)
 			goto err_remove_dev_groups;
 	}
 
-	error = device_create_file(dev, &dev_attr_deferred_probe);
-	if (error)
-		goto err_remove_online;
-
 	return 0;
 
- err_remove_online:
-	device_remove_file(dev, &dev_attr_online);
 err_remove_dev_groups:
 	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
@@ -1085,7 +1079,6 @@ static void device_remove_attrs(struct device *dev)
 	struct class *class = dev->class;
 	const struct device_type *type = dev->type;
 
-	device_remove_file(dev, &dev_attr_deferred_probe);
 	device_remove_file(dev, &dev_attr_online);
 	device_remove_groups(dev, dev->groups);
 
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a8b258e5407b..a1fbf55c4d3a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -53,19 +53,6 @@ static LIST_HEAD(deferred_probe_pending_list);
 static LIST_HEAD(deferred_probe_active_list);
 static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
 
-static ssize_t deferred_probe_show(struct device *dev,
-				   struct device_attribute *attr, char *buf)
-{
-	bool value;
-
-	mutex_lock(&deferred_probe_mutex);
-	value = !list_empty(&dev->p->deferred_probe);
-	mutex_unlock(&deferred_probe_mutex);
-
-	return sprintf(buf, "%d\n", value);
-}
-DEVICE_ATTR_RO(deferred_probe);
-
 /*
  * In some cases, like suspend to RAM or hibernation, It might be reasonable
  * to prohibit probing of devices as it could be unsafe.
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8ab8ea1253e6..dacb6a8418aa 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -408,14 +408,14 @@ static ssize_t show_valid_zones(struct device *dev,
 	sprintf(buf, "%s", zone->name);
 
 	/* MMOP_ONLINE_KERNEL */
-	zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+	zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
 	}
 
 	/* MMOP_ONLINE_MOVABLE */
-	zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+	zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 38c576f76d36..9fd06eeb1a17 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,7 +271,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index,
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
-	int result, flags;
+	int result;
 	struct nbd_request request;
 	unsigned long size = blk_rq_bytes(req);
 	struct bio *bio;
@@ -310,7 +310,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	if (type != NBD_CMD_WRITE)
 		return 0;
 
-	flags = 0;
 	bio = req->bio;
 	while (bio) {
 		struct bio *next = bio->bi_next;
@@ -319,9 +318,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 
 		bio_for_each_segment(bvec, bio, iter) {
 			bool is_last = !next && bio_iter_last(bvec, iter);
+			int flags = is_last ? 0 : MSG_MORE;
 
-			if (is_last)
-				flags = MSG_MORE;
 			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
 				cmd, bvec.bv_len);
 			result = sock_send_bvec(nbd, index, &bvec, flags);
@@ -1042,6 +1040,7 @@ static int __init nbd_init(void)
 		return -ENOMEM;
 
 	for (i = 0; i < nbds_max; i++) {
+		struct request_queue *q;
 		struct gendisk *disk = alloc_disk(1 << part_shift);
 		if (!disk)
 			goto out;
@@ -1067,12 +1066,13 @@ static int __init nbd_init(void)
 		 * every gendisk to have its very own request_queue struct.
 		 * These structs are big so we dynamically allocate them.
 		 */
-		disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
-		if (!disk->queue) {
+		q = blk_mq_init_queue(&nbd_dev[i].tag_set);
+		if (IS_ERR(q)) {
 			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
 			put_disk(disk);
 			goto out;
 		}
+		disk->queue = q;
 
 		/*
 		 * Tell the block layer that we are not a rotational device
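The nbd change above computes the socket flags per segment, so MSG_MORE is set on every chunk except the last instead of (incorrectly) only on the last one. A userspace sketch of the same corking pattern (short-write handling elided for brevity):

#include <sys/socket.h>
#include <sys/uio.h>

/* Send an array of buffers, hinting the kernel that more data follows
 * every chunk except the final one (mirrors the nbd flags fix above). */
static ssize_t send_chunks(int fd, const struct iovec *iov, int n)
{
    ssize_t total = 0;

    for (int i = 0; i < n; i++) {
        int flags = (i == n - 1) ? 0 : MSG_MORE;
        ssize_t r = send(fd, iov[i].iov_base, iov[i].iov_len, flags);

        if (r < 0)
            return -1;
        total += r;
    }
    return total;
}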
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5545a679abd8..10332c24f961 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -56,6 +56,7 @@ struct virtblk_req {
 	struct virtio_blk_outhdr out_hdr;
 	struct virtio_scsi_inhdr in_hdr;
 	u8 status;
+	u8 sense[SCSI_SENSE_BUFFERSIZE];
 	struct scatterlist sg[];
 };
 
@@ -102,7 +103,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
 	}
 
 	if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
-		sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+		memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+		sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
 		sgs[num_out + num_in++] = &sense;
 		sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
 		sgs[num_out + num_in++] = &inhdr;
@@ -628,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev)
 	if (err)
 		goto out_put_disk;
 
-	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
+	q = blk_mq_init_queue(&vblk->tag_set);
 	if (IS_ERR(q)) {
 		err = -ENOMEM;
 		goto out_free_tags;
 	}
+	vblk->disk->queue = q;
 
 	q->queuedata = vblk;
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b2bdfa81f929..265f1a7072e9 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -197,13 +197,13 @@ struct blkfront_info
 	/* Number of pages per ring buffer. */
 	unsigned int nr_ring_pages;
 	struct request_queue *rq;
-	unsigned int feature_flush;
-	unsigned int feature_fua;
+	unsigned int feature_flush:1;
+	unsigned int feature_fua:1;
 	unsigned int feature_discard:1;
 	unsigned int feature_secdiscard:1;
+	unsigned int feature_persistent:1;
 	unsigned int discard_granularity;
 	unsigned int discard_alignment;
-	unsigned int feature_persistent:1;
 	/* Number of 4KB segments handled */
 	unsigned int max_indirect_segments;
 	int is_ready;
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 	} else
 		grants = info->max_indirect_segments;
-	psegs = grants / GRANTS_PER_PSEG;
+	psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
 
 	err = fill_grant_buffer(rinfo,
 				(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 		blkfront_setup_discard(info);
 
 	info->feature_persistent =
-		xenbus_read_unsigned(info->xbdev->otherend,
-				     "feature-persistent", 0);
+		!!xenbus_read_unsigned(info->xbdev->otherend,
+				       "feature-persistent", 0);
 
 	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
 					"feature-max-indirect-segments", 0);
-	info->max_indirect_segments = min(indirect_segments,
-					  xen_blkif_max_segments);
+	if (indirect_segments > xen_blkif_max_segments)
+		indirect_segments = xen_blkif_max_segments;
+	if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		indirect_segments = 0;
+	info->max_indirect_segments = indirect_segments;
 }
 
 /*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
+	if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
 	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
 		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
 			xen_blkif_max_ring_order,
			XENBUS_MAX_RING_GRANT_ORDER);
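Both blk_mq_init_queue() fixes above (nbd and virtio_blk) exist because that function reports failure with ERR_PTR(), not NULL, so a plain `!q` test silently accepts an error pointer. For illustration, a freestanding re-creation of the idiom (the real macros live in include/linux/err.h):

/* Minimal re-implementation of the kernel's ERR_PTR idiom, for
 * illustration only. Error pointers are the top MAX_ERRNO addresses. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Callers must test with IS_ERR(), not !ptr: an allocator that fails
 * with -ENOMEM returns (void *)-12, which is non-NULL and would pass
 * the broken NULL check the hunks above remove. */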
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 15f58ab44d0b..e5ab7d9e8c45 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -25,6 +25,7 @@
 #include <linux/genhd.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/backing-dev.h>
 #include <linux/string.h>
 #include <linux/vmalloc.h>
 #include <linux/err.h>
@@ -112,6 +113,14 @@ static inline bool is_partial_io(struct bio_vec *bvec)
 	return bvec->bv_len != PAGE_SIZE;
 }
 
+static void zram_revalidate_disk(struct zram *zram)
+{
+	revalidate_disk(zram->disk);
+	/* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */
+	zram->disk->queue->backing_dev_info.capabilities |=
+		BDI_CAP_STABLE_WRITES;
+}
+
 /*
  * Check if request is within bounds and aligned on zram logical blocks.
  */
@@ -1095,15 +1104,9 @@ static ssize_t disksize_store(struct device *dev,
 	zram->comp = comp;
 	zram->disksize = disksize;
 	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+	zram_revalidate_disk(zram);
 	up_write(&zram->init_lock);
 
-	/*
-	 * Revalidate disk out of the init_lock to avoid lockdep splat.
-	 * It's okay because disk's capacity is protected by init_lock
-	 * so that revalidate_disk always sees up-to-date capacity.
-	 */
-	revalidate_disk(zram->disk);
-
 	return len;
 
 out_destroy_comp:
@@ -1149,7 +1152,7 @@ static ssize_t reset_store(struct device *dev,
 	/* Make sure all the pending I/O are finished */
 	fsync_bdev(bdev);
 	zram_reset_device(zram);
-	revalidate_disk(zram->disk);
+	zram_revalidate_disk(zram);
 	bdput(bdev);
 
 	mutex_lock(&bdev->bd_mutex);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 5bb1985ec484..6d9cc2d39d22 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -381,9 +381,6 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
 	int err = 0;
 
-	if (!pfn_valid(PFN_DOWN(p)))
-		return -EIO;
-
 	read = 0;
 	if (p < (unsigned long) high_memory) {
 		low_count = count;
@@ -412,6 +409,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 			 * by the kernel or data corruption may occur
 			 */
 			kbuf = xlate_dev_kmem_ptr((void *)p);
+			if (!virt_addr_valid(kbuf))
+				return -ENXIO;
 
 			if (copy_to_user(buf, kbuf, sz))
 				return -EFAULT;
@@ -482,6 +481,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
 		 * corruption may occur.
 		 */
 		ptr = xlate_dev_kmem_ptr((void *)p);
+		if (!virt_addr_valid(ptr))
+			return -ENXIO;
 
 		copied = copy_from_user(ptr, buf, sz);
 		if (copied) {
@@ -512,9 +513,6 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
 	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
 	int err = 0;
 
-	if (!pfn_valid(PFN_DOWN(p)))
-		return -EIO;
-
 	if (p < (unsigned long) high_memory) {
 		unsigned long to_write = min_t(unsigned long, count,
 					       (unsigned long)high_memory - p);
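In the /dev/kmem hunks above, the up-front pfn_valid() test is replaced by validating the pointer that xlate_dev_kmem_ptr() actually returns, since the requested offset may translate to a vmalloc address for which PFN arithmetic is meaningless. A sketch of the resulting check order, with hypothetical helpers standing in for the arch-specific ones:

#include <errno.h>

extern char *translate_offset(unsigned long p);   /* cf. xlate_dev_kmem_ptr() */
extern int address_is_mapped(const char *kaddr);  /* cf. virt_addr_valid() */

/* Translate first, then validate the resulting kernel virtual address
 * before dereferencing it; reject with -ENXIO otherwise. */
long read_kernel_byte(unsigned long p, char *out)
{
    char *kbuf = translate_offset(p);

    if (!address_is_mapped(kbuf))
        return -ENXIO;
    *out = *kbuf;
    return 1;
}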
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 02819e0703c8..87885d146dbb 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -290,6 +290,7 @@ static int register_device(int minor, struct pp_struct *pp)
 	struct pardevice *pdev = NULL;
 	char *name;
 	struct pardev_cb ppdev_cb;
+	int rc = 0;
 
 	name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
 	if (name == NULL)
@@ -298,8 +299,8 @@ static int register_device(int minor, struct pp_struct *pp)
 	port = parport_find_number(minor);
 	if (!port) {
 		pr_warn("%s: no associated port!\n", name);
-		kfree(name);
-		return -ENXIO;
+		rc = -ENXIO;
+		goto err;
 	}
 
 	memset(&ppdev_cb, 0, sizeof(ppdev_cb));
@@ -308,16 +309,18 @@ static int register_device(int minor, struct pp_struct *pp)
 	ppdev_cb.private = pp;
 	pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
 	parport_put_port(port);
-	kfree(name);
 
 	if (!pdev) {
 		pr_warn("%s: failed to register device!\n", name);
-		return -ENXIO;
+		rc = -ENXIO;
+		goto err;
 	}
 
 	pp->pdev = pdev;
 	dev_dbg(&pdev->dev, "registered pardevice\n");
-	return 0;
+err:
+	kfree(name);
+	return rc;
 }
 
 static enum ieee1284_phase init_phase(int mode)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8b00e79c2683..17857beb4892 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1862,7 +1862,7 @@ static void config_work_handler(struct work_struct *work)
 {
 	struct ports_device *portdev;
 
-	portdev = container_of(work, struct ports_device, control_work);
+	portdev = container_of(work, struct ports_device, config_work);
 	if (!use_multiport(portdev)) {
 		struct virtio_device *vdev;
 		struct port *port;
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 8c8b495cbf0d..cdc092a1d9ef 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
 	GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
 			GATE_BUS_TOP, 24, 0, 0),
 	GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
-			GATE_BUS_TOP, 27, 0, 0),
+			GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
 };
 
 static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
 	GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
 
 	GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
-			GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
+			GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
 	GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
 			GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
 
 	GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
 			GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
 	GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
-			GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
+			GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
 	GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
 			GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
 	GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
 			GATE_BUS_TOP, 5, 0, 0),
 	GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
-			GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
+			GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
 	GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
 			GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
 	GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
@@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
 	GATE(0, "aclk166", "mout_user_aclk166",
 			GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
 	GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
-			GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
+			GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
 	GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
 			GATE_BUS_TOP, 16, 0, 0),
 	GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
 			GATE_BUS_TOP, 17, 0, 0),
 	GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
-			GATE_BUS_TOP, 18, 0, 0),
+			GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
 	GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
 			GATE_BUS_TOP, 28, 0, 0),
 	GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
 			GATE_BUS_TOP, 29, 0, 0),
 
 	GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
-			SRC_MASK_TOP2, 24, 0, 0),
+			SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
 
 	GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
 			SRC_MASK_TOP7, 20, 0, 0),
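The virtio_console fix above is a classic container_of() pitfall: config_work_handler() rewound the work pointer by the offset of control_work, a different member of struct ports_device, which compiles cleanly but yields a skewed pointer. A standalone illustration (demo struct, not the driver's; offsets assume an LP64 target):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo {              /* stand-in for struct ports_device */
    long config_work;      /* offset 0 */
    long control_work;     /* offset 8 */
};

int main(void)
{
    struct demo d;
    long *w = &d.config_work;   /* what the work core hands back */

    /* Correct member recovers &d; the wrong member lands 8 bytes low. */
    printf("good=%p bad=%p real=%p\n",
           (void *)container_of(w, struct demo, config_work),
           (void *)container_of(w, struct demo, control_work),
           (void *)&d);
    return 0;
}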
"aclk300_gscl", "mout_user_aclk300_gscl", - GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0), + GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0), GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl", GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp", @@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(0, "aclk166", "mout_user_aclk166", GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333", - GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0), + GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0), GATE(0, "aclk400_isp", "mout_user_aclk400_isp", GATE_BUS_TOP, 16, 0, 0), GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl", GATE_BUS_TOP, 17, 0, 0), GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1", - GATE_BUS_TOP, 18, 0, 0), + GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0), GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24", GATE_BUS_TOP, 28, 0, 0), GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m", GATE_BUS_TOP, 29, 0, 0), GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1", - SRC_MASK_TOP2, 24, 0, 0), + SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0), GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", SRC_MASK_TOP7, 20, 0, 0), diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 4da1dc2278bd..670ff0f25b67 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu) if (mct_int_type == MCT_INT_SPI) { if (evt->irq != -1) disable_irq_nosync(evt->irq); + exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); } else { disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index f91c25718d16..a54d65aa776d 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2005,7 +2005,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits = &performance_limits; perf_limits = limits; } - if (policy->max >= policy->cpuinfo.max_freq) { + if (policy->max >= policy->cpuinfo.max_freq && + !limits->no_turbo) { pr_debug("set performance\n"); intel_pstate_set_performance_limits(perf_limits); goto out; @@ -2047,6 +2048,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy) policy->policy != CPUFREQ_POLICY_PERFORMANCE) return -EINVAL; + /* When per-CPU limits are used, sysfs limits are not used */ + if (!per_cpu_limits) { + unsigned int max_freq, min_freq; + + max_freq = policy->cpuinfo.max_freq * + limits->max_sysfs_pct / 100; + min_freq = policy->cpuinfo.max_freq * + limits->min_sysfs_pct / 100; + cpufreq_verify_within_limits(policy, min_freq, max_freq); + } + return 0; } diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index e00c9b022964..5a37b9fcf40d 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig @@ -24,5 +24,5 @@ config DW_DMAC_PCI select DW_DMAC_CORE help Support the Synopsys DesignWare AHB DMA controller on the - platfroms that enumerate it as a PCI device. For example, + platforms that enumerate it as a PCI device. For example, Intel Medfield has integrated this GPDMA controller. 
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 8e67895bcca3..abcc51b343ce 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -64,6 +64,8 @@
 #define PCI_DEVICE_ID_INTEL_IOAT_BDX8	0x6f2e
 #define PCI_DEVICE_ID_INTEL_IOAT_BDX9	0x6f2f
 
+#define PCI_DEVICE_ID_INTEL_IOAT_SKX	0x2021
+
 #define IOAT_VER_1_2	0x12	/* Version 1.2 */
 #define IOAT_VER_2_0	0x20	/* Version 2.0 */
 #define IOAT_VER_3_0	0x30	/* Version 3.0 */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 90eddd9f07e4..cc5259b881d4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -106,6 +106,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
 
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },
+
 	/* I/OAT v3.3 platforms */
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
@@ -243,10 +245,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev)
 	}
 }
 
+static inline bool is_skx_ioat(struct pci_dev *pdev)
+{
+	return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
+}
+
 static bool is_xeon_cb32(struct pci_dev *pdev)
 {
 	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
-		is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+		is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
 }
 
 bool is_bwd_ioat(struct pci_dev *pdev)
@@ -693,7 +700,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
 	ioat_chan->completion =
 		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
-				GFP_KERNEL, &ioat_chan->completion_dma);
+				GFP_NOWAIT, &ioat_chan->completion_dma);
 	if (!ioat_chan->completion)
 		return -ENOMEM;
 
@@ -703,7 +710,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
 	order = IOAT_MAX_ORDER;
-	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+	ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
 	if (!ring)
 		return -ENOMEM;
 
@@ -1357,6 +1364,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
 	if (device->version >= IOAT_VER_3_0) {
+		if (is_skx_ioat(pdev))
+			device->version = IOAT_VER_3_2;
 		err = ioat3_dma_probe(device, ioat_dca_enabled);
 
 		if (device->version >= IOAT_VER_3_3)
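The ioat GFP changes above follow the standard rule that a dmaengine channel's resource allocation can be reached from contexts that must not sleep, so GFP_KERNEL (which may block on memory reclaim) is swapped for GFP_NOWAIT. A simplified kernel-style sketch of the rule:

#include <linux/slab.h>

/* Pick the allocation class from the calling context: GFP_KERNEL may
 * sleep waiting for reclaim, GFP_NOWAIT fails fast instead. (Sketch
 * only; in real code may_sleep is implied by the call path, not a
 * parameter.) */
static void *alloc_ring(size_t size, bool may_sleep)
{
    return kmalloc(size, may_sleep ? GFP_KERNEL : GFP_NOWAIT);
}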
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index ac68666cd3f4..daf479cce691 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -938,21 +938,14 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 		d->ccr |= CCR_DST_AMODE_POSTINC;
 		if (port_window) {
 			d->ccr |= CCR_SRC_AMODE_DBLIDX;
-			d->ei = 1;
-			/*
-			 * One frame covers the port_window and by configure
-			 * the source frame index to be -1 * (port_window - 1)
-			 * we instruct the sDMA that after a frame is processed
-			 * it should move back to the start of the window.
-			 */
-			d->fi = -(port_window_bytes - 1);
 
 			if (port_window_bytes >= 64)
-				d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+				d->csdp |= CSDP_SRC_BURST_64;
 			else if (port_window_bytes >= 32)
-				d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED;
+				d->csdp |= CSDP_SRC_BURST_32;
 			else if (port_window_bytes >= 16)
-				d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED;
+				d->csdp |= CSDP_SRC_BURST_16;
+
 		} else {
 			d->ccr |= CCR_SRC_AMODE_CONSTANT;
 		}
@@ -962,13 +955,21 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 		d->ccr |= CCR_SRC_AMODE_POSTINC;
 		if (port_window) {
 			d->ccr |= CCR_DST_AMODE_DBLIDX;
+			d->ei = 1;
+			/*
+			 * One frame covers the port_window and by configure
+			 * the source frame index to be -1 * (port_window - 1)
+			 * we instruct the sDMA that after a frame is processed
+			 * it should move back to the start of the window.
+			 */
+			d->fi = -(port_window_bytes - 1);
 
 			if (port_window_bytes >= 64)
-				d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+				d->csdp |= CSDP_DST_BURST_64;
 			else if (port_window_bytes >= 32)
-				d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED;
+				d->csdp |= CSDP_DST_BURST_32;
 			else if (port_window_bytes >= 16)
-				d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED;
+				d->csdp |= CSDP_DST_BURST_16;
 		} else {
 			d->ccr |= CCR_DST_AMODE_CONSTANT;
 		}
@@ -1017,7 +1018,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 		osg->addr = sg_dma_address(sgent);
 		osg->en = en;
 		osg->fn = sg_dma_len(sgent) / frame_bytes;
-		if (port_window && dir == DMA_MEM_TO_DEV) {
+		if (port_window && dir == DMA_DEV_TO_MEM) {
 			osg->ei = 1;
 			/*
 			 * One frame covers the port_window and by configure
@@ -1452,6 +1453,7 @@ static int omap_dma_probe(struct platform_device *pdev)
 	struct omap_dmadev *od;
 	struct resource *res;
 	int rc, i, irq;
+	u32 lch_count;
 
 	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
 	if (!od)
@@ -1494,20 +1496,31 @@ static int omap_dma_probe(struct platform_device *pdev)
 	spin_lock_init(&od->lock);
 	spin_lock_init(&od->irq_lock);
 
-	if (!pdev->dev.of_node) {
-		od->dma_requests = od->plat->dma_attr->lch_count;
-		if (unlikely(!od->dma_requests))
-			od->dma_requests = OMAP_SDMA_REQUESTS;
-	} else if (of_property_read_u32(pdev->dev.of_node, "dma-requests",
-					&od->dma_requests)) {
+	/* Number of DMA requests */
+	od->dma_requests = OMAP_SDMA_REQUESTS;
+	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
+						      "dma-requests",
+						      &od->dma_requests)) {
 		dev_info(&pdev->dev,
 			 "Missing dma-requests property, using %u.\n",
 			 OMAP_SDMA_REQUESTS);
-		od->dma_requests = OMAP_SDMA_REQUESTS;
 	}
 
-	od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests,
-				   sizeof(*od->lch_map), GFP_KERNEL);
+	/* Number of available logical channels */
+	if (!pdev->dev.of_node) {
+		lch_count = od->plat->dma_attr->lch_count;
+		if (unlikely(!lch_count))
+			lch_count = OMAP_SDMA_CHANNELS;
+	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
+					&lch_count)) {
+		dev_info(&pdev->dev,
+			 "Missing dma-channels property, using %u.\n",
+			 OMAP_SDMA_CHANNELS);
+		lch_count = OMAP_SDMA_CHANNELS;
+	}
+
+	od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
+				   GFP_KERNEL);
 	if (!od->lch_map)
 		return -ENOMEM;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 87fd01539fcb..740bbb942594 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -448,6 +448,9 @@ struct dma_pl330_chan {
 
 	/* for cyclic capability */
 	bool cyclic;
+
+	/* for runtime pm tracking */
+	bool active;
 };
 
 struct pl330_dmac {
@@ -2033,6 +2036,7 @@ static void pl330_tasklet(unsigned long data)
 			_stop(pch->thread);
 			spin_unlock(&pch->thread->dmac->lock);
 			power_down = true;
+			pch->active = false;
 		} else {
 			/* Make sure the PL330 Channel thread is active */
 			spin_lock(&pch->thread->dmac->lock);
@@ -2052,6 +2056,7 @@ static void pl330_tasklet(unsigned long data)
 			desc->status = PREP;
 			list_move_tail(&desc->node, &pch->work_list);
 			if (power_down) {
+				pch->active = true;
 				spin_lock(&pch->thread->dmac->lock);
 				_start(pch->thread);
 				spin_unlock(&pch->thread->dmac->lock);
@@ -2166,6 +2171,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
 	unsigned long flags;
 	struct pl330_dmac *pl330 = pch->dmac;
 	LIST_HEAD(list);
+	bool power_down = false;
 
 	pm_runtime_get_sync(pl330->ddma.dev);
 	spin_lock_irqsave(&pch->lock, flags);
@@ -2176,6 +2182,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
 	pch->thread->req[0].desc = NULL;
 	pch->thread->req[1].desc = NULL;
 	pch->thread->req_running = -1;
+	power_down = pch->active;
+	pch->active = false;
 
 	/* Mark all desc done */
 	list_for_each_entry(desc, &pch->submitted_list, node) {
@@ -2193,6 +2201,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
 	list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
 	spin_unlock_irqrestore(&pch->lock, flags);
 	pm_runtime_mark_last_busy(pl330->ddma.dev);
+	if (power_down)
+		pm_runtime_put_autosuspend(pl330->ddma.dev);
 	pm_runtime_put_autosuspend(pl330->ddma.dev);
 
 	return 0;
@@ -2357,6 +2367,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
 		 * updated on work_list emptiness status.
 		 */
 		WARN_ON(list_empty(&pch->submitted_list));
+		pch->active = true;
 		pm_runtime_get_sync(pch->dmac->ddma.dev);
 	}
 	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
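The pl330 hunks above pair every runtime-PM reference taken in issue_pending() with exactly one put: the new `active` flag records whether the channel still holds that reference, so terminate_all() can release it in addition to its own temporary one. Reduced to its core, with hypothetical pm_get()/pm_put() standing in for the pm_runtime_* calls (locking elided):

#include <stdbool.h>

extern void pm_get(void);
extern void pm_put(void);

struct chan {
    bool active;   /* true while the channel owns one PM reference */
};

void issue_pending(struct chan *c)
{
    if (!c->active) {
        c->active = true;
        pm_get();              /* released when idle or terminated */
    }
}

void terminate_all(struct chan *c)
{
    bool drop;

    pm_get();                  /* terminate's own temporary reference */
    drop = c->active;
    c->active = false;
    /* ... flush descriptors ... */
    if (drop)
        pm_put();              /* the reference issue_pending() took */
    pm_put();                  /* balance the temporary reference */
}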
	/* Remove slave mapping if present. */
+	if (map->slave.xfer_size) {
+		dma_unmap_resource(chan->device->dev, map->addr,
+				   map->slave.xfer_size, map->dir, 0);
+		map->slave.xfer_size = 0;
+	}
+
 	pm_runtime_put(chan->device->dev);
 }
 
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 3688d0873a3e..3056ce7f8c69 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -880,7 +880,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
 	struct virt_dma_desc *vdesc;
 	enum dma_status status;
 	unsigned long flags;
-	u32 residue;
+	u32 residue = 0;
 
 	status = dma_cookie_status(c, cookie, state);
 	if ((status == DMA_COMPLETE) || (!state))
@@ -888,16 +888,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
 
 	spin_lock_irqsave(&chan->vchan.lock, flags);
 	vdesc = vchan_find_desc(&chan->vchan, cookie);
-	if (cookie == chan->desc->vdesc.tx.cookie) {
+	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
 		residue = stm32_dma_desc_residue(chan, chan->desc,
 						 chan->next_sg);
-	} else if (vdesc) {
+	else if (vdesc)
 		residue = stm32_dma_desc_residue(chan,
 						 to_stm32_dma_desc(vdesc), 0);
-	} else {
-		residue = 0;
-	}
-
 	dma_set_residue(state, residue);
 
 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
@@ -972,21 +968,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
 	struct stm32_dma_chan *chan;
 	struct dma_chan *c;
 
-	if (dma_spec->args_count < 3)
+	if (dma_spec->args_count < 4)
 		return NULL;
 
 	cfg.channel_id = dma_spec->args[0];
 	cfg.request_line = dma_spec->args[1];
 	cfg.stream_config = dma_spec->args[2];
-	cfg.threshold = 0;
+	cfg.threshold = dma_spec->args[3];
 
 	if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) ||
 	    (cfg.request_line >= STM32_DMA_MAX_REQUEST_ID))
 		return NULL;
 
-	if (dma_spec->args_count > 3)
-		cfg.threshold = dma_spec->args[3];
-
 	chan = &dmadev->chan[cfg.channel_id];
 
 	c = dma_get_slave_channel(&chan->vchan.chan);
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 3f24aeb48c0e..2403475a37cf 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -149,6 +149,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
 	match = of_match_node(ti_am335x_master_match, dma_node);
 	if (!match) {
 		dev_err(&pdev->dev, "DMA master is not supported\n");
+		of_node_put(dma_node);
 		return -EINVAL;
 	}
 
@@ -339,6 +340,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
 	match = of_match_node(ti_dra7_master_match, dma_node);
 	if (!match) {
 		dev_err(&pdev->dev, "DMA master is not supported\n");
+		of_node_put(dma_node);
 		return -EINVAL;
 	}
 
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 78298460d168..7c1e3a7b14e0 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -453,7 +453,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
 		dev_err(&edev->dev, "out of memory in extcon_set_state\n");
 		kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE);
 
-		return 0;
+		return -ENOMEM;
 	}
 
 	length = name_show(&edev->dev, NULL, prop_buf);
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index 520a40e5e0e4..6c7d60c239b5 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -71,8 +71,7 @@ void __init efi_fake_memmap(void)
 	}
 
 	/* allocate memory for new EFI memmap */
-	new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
-					PAGE_SIZE);
+	new_memmap_phy = efi_memmap_alloc(new_nr_map);
 	if (!new_memmap_phy)
 		return;
 
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index b98824e3800a..0e2a96b12cb3 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -39,14 +39,6 @@ efi_status_t efi_file_close(void *handle);
 
 unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
 
-efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
-			unsigned long orig_fdt_size,
-			void *fdt, int new_fdt_size, char *cmdline_ptr,
-			u64 initrd_addr, u64 initrd_size,
-			efi_memory_desc_t *memory_map,
-			unsigned long map_size, unsigned long desc_size,
-			u32 desc_ver);
-
 efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 					    void *handle,
 					    unsigned long *new_fdt_addr,
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index a6a93116a8f0..921dfa047202 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -16,13 +16,10 @@
 
 #include "efistub.h"
 
-efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
-			unsigned long orig_fdt_size,
-			void *fdt, int new_fdt_size, char *cmdline_ptr,
-			u64 initrd_addr, u64 initrd_size,
-			efi_memory_desc_t *memory_map,
-			unsigned long map_size, unsigned long desc_size,
-			u32 desc_ver)
+static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
+			       unsigned long orig_fdt_size,
+			       void *fdt, int new_fdt_size, char *cmdline_ptr,
+			       u64 initrd_addr, u64 initrd_size)
 {
 	int node, num_rsv;
 	int status;
@@ -101,25 +98,23 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
 	if (status)
 		goto fdt_set_fail;
 
-	fdt_val64 = cpu_to_fdt64((u64)(unsigned long)memory_map);
+	fdt_val64 = U64_MAX; /* placeholder */
 	status = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
 			     &fdt_val64, sizeof(fdt_val64));
 	if (status)
 		goto fdt_set_fail;
 
-	fdt_val32 = cpu_to_fdt32(map_size);
+	fdt_val32 = U32_MAX; /* placeholder */
 	status = fdt_setprop(fdt, node, "linux,uefi-mmap-size",
 			     &fdt_val32, sizeof(fdt_val32));
 	if (status)
 		goto fdt_set_fail;
 
-	fdt_val32 = cpu_to_fdt32(desc_size);
 	status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size",
 			     &fdt_val32, sizeof(fdt_val32));
 	if (status)
 		goto fdt_set_fail;
 
-	fdt_val32 = cpu_to_fdt32(desc_ver);
 	status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver",
 			     &fdt_val32, sizeof(fdt_val32));
 	if (status)
@@ -148,6 +143,43 @@ fdt_set_fail:
 	return EFI_LOAD_ERROR;
 }
 
+static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
+{
+	int node = fdt_path_offset(fdt, "/chosen");
+	u64 fdt_val64;
+	u32 fdt_val32;
+	int err;
+
+	if (node < 0)
+		return EFI_LOAD_ERROR;
+
+	fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
+	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
+				  &fdt_val64, sizeof(fdt_val64));
+	if (err)
+		return EFI_LOAD_ERROR;
+
+	fdt_val32 = cpu_to_fdt32(*map->map_size);
+	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size",
+				  &fdt_val32, sizeof(fdt_val32));
+	if (err)
+		return EFI_LOAD_ERROR;
+
+	fdt_val32 = cpu_to_fdt32(*map->desc_size);
+	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size",
+				  &fdt_val32, sizeof(fdt_val32));
+	if (err)
+		return EFI_LOAD_ERROR;
+
+	fdt_val32 = cpu_to_fdt32(*map->desc_ver);
+	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver",
+				  &fdt_val32, sizeof(fdt_val32));
+	if (err)
+		return EFI_LOAD_ERROR;
+
+	return EFI_SUCCESS;
+}
+
 #ifndef EFI_FDT_ALIGN
 #define EFI_FDT_ALIGN EFI_PAGE_SIZE
 #endif
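Both ti-dma-crossbar probe paths above leaked a device-node reference on the error return: of_match_node() failing does not release the node acquired earlier, so of_node_put() must run on that exit too. A sketch of the rule, assuming the node came from of_parse_phandle(), which returns it with its refcount raised:

#include <linux/of.h>

/* Every exit path after acquiring a device_node reference must drop
 * it with of_node_put(). (Sketch; property name is illustrative.) */
static int probe_master(struct device_node *np)
{
	struct device_node *master;
	int ret = 0;

	master = of_parse_phandle(np, "dma-masters", 0);  /* takes a ref */
	if (!master)
		return -ENODEV;

	if (!of_device_is_available(master))  /* stand-in for the match check */
		ret = -EINVAL;

	of_node_put(master);  /* balanced on success and failure alike */
	return ret;
}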
@@ -243,20 +275,10 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 			goto fail;
 		}
 
-		/*
-		 * Now that we have done our final memory allocation (and free)
-		 * we can get the memory map key needed for
-		 * exit_boot_services().
-		 */
-		status = efi_get_memory_map(sys_table, &map);
-		if (status != EFI_SUCCESS)
-			goto fail_free_new_fdt;
-
 		status = update_fdt(sys_table,
 				    (void *)fdt_addr, fdt_size,
 				    (void *)*new_fdt_addr, new_fdt_size,
-				    cmdline_ptr, initrd_addr, initrd_size,
-				    memory_map, map_size, desc_size, desc_ver);
+				    cmdline_ptr, initrd_addr, initrd_size);
 
 		/* Succeeding the first time is the expected case. */
 		if (status == EFI_SUCCESS)
@@ -266,20 +288,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 			/*
 			 * We need to allocate more space for the new
 			 * device tree, so free existing buffer that is
-			 * too small.  Also free memory map, as we will need
-			 * to get new one that reflects the free/alloc we do
-			 * on the device tree buffer.
+			 * too small.
 			 */
 			efi_free(sys_table, new_fdt_size, *new_fdt_addr);
-			sys_table->boottime->free_pool(memory_map);
 			new_fdt_size += EFI_PAGE_SIZE;
 		} else {
 			pr_efi_err(sys_table, "Unable to construct new device tree.\n");
-			goto fail_free_mmap;
+			goto fail_free_new_fdt;
 		}
 	}
 
-	sys_table->boottime->free_pool(memory_map);
-
 	priv.runtime_map = runtime_map;
 	priv.runtime_entry_count = &runtime_entry_count;
 	status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -288,6 +306,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 	if (status == EFI_SUCCESS) {
 		efi_set_virtual_address_map_t *svam;
 
+		status = update_fdt_memmap((void *)*new_fdt_addr, &map);
+		if (status != EFI_SUCCESS) {
+			/*
+			 * The kernel won't get far without the memory map, but
+			 * may still be able to print something meaningful so
+			 * return success here.
+			 */
+			return EFI_SUCCESS;
+		}
+
 		/* Install the new virtual address map */
 		svam = sys_table->runtime->set_virtual_address_map;
 		status = svam(runtime_entry_count * desc_size, desc_size,
@@ -319,9 +347,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 
 	pr_efi_err(sys_table, "Exit boot services failed.\n");
 
-fail_free_mmap:
-	sys_table->boottime->free_pool(memory_map);
-
 fail_free_new_fdt:
 	efi_free(sys_table, new_fdt_size, *new_fdt_addr);
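The fdt.c restructuring above is a two-phase trick: update_fdt() writes fixed-size placeholder values for the memory-map properties before ExitBootServices(), and update_fdt_memmap() patches the real values in afterwards with fdt_setprop_inplace(), which may change a property's value but never its length — hence the placeholders must already have the final width. A minimal libfdt sketch of that pattern (property name reused from the hunks; buffer handling elided):

#include <stdint.h>
#include <libfdt.h>

/* Phase 1: reserve the property with a dummy of the final size.
 * Phase 2: patch it in place once the real value is known. */
static int reserve_and_patch(void *fdt, int node, uint64_t final_value)
{
	uint64_t dummy = ~0ULL;   /* placeholder, already the right width */
	int err = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
			      &dummy, sizeof(dummy));
	if (err)
		return err;

	/* ... later, after the value is final ... */
	uint64_t be = cpu_to_fdt64(final_value);
	return fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
				   &be, sizeof(be));
}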
+ */
+phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
+{
+	unsigned long size = num_entries * efi.memmap.desc_size;
+
+	if (slab_is_available())
+		return __efi_memmap_alloc_late(size);
+
+	return __efi_memmap_alloc_early(size);
+}
 
 /**
  * __efi_memmap_init - Common code for mapping the EFI memory map
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 1e8fde8cb803..2292742eac8f 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -205,7 +205,7 @@ static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
 	return 0;
 }
 
-static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
+static int mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
 {
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
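The one-line gpio-mxs change above matters because of deferred probing: a probe routine can run long after boot, so helpers reachable from it must not live in the __init section, which the kernel frees once boot completes. A sketch of the rule:

#include <linux/init.h>

static int setup_irq_chip(void)        /* no __init: probe may run late */
{
	return 0;
}

static int my_probe(void)              /* deferred probe can call this anytime */
{
	return setup_irq_chip();
}

static int __init my_driver_init(void) /* true one-shot boot code only */
{
	return my_probe();
}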
 */
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
-			  struct irq_chip *irqchip,
-			  unsigned int first_irq,
-			  irq_flow_handler_t handler,
-			  unsigned int type,
-			  bool nested,
-			  struct lock_class_key *lock_key)
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+			     struct irq_chip *irqchip,
+			     unsigned int first_irq,
+			     irq_flow_handler_t handler,
+			     unsigned int type,
+			     bool nested,
+			     struct lock_class_key *lock_key)
 {
 	struct device_node *of_node;
 	bool irq_base_set = false;
@@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
 
 #else /* CONFIG_GPIOLIB_IRQCHIP */
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 9ada56c16a58..4c851fde1e82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -840,6 +840,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		else if (type == CGS_UCODE_ID_SMU_SK)
 			strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
 		break;
+	case CHIP_POLARIS12:
+		strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+		break;
 	default:
 		DRM_ERROR("SMC firmware not supported\n");
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 29d6d84d1c28..41e41f90265d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		}
 		break;
 	}
+
+	if (!(*out_ring && (*out_ring)->adev)) {
+		DRM_ERROR("Ring %d is not initialized on IP %d\n",
+			  ring, ip_type);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 60bd4afe45c8..fe3bb94fe58d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -73,6 +73,7 @@ static const char *amdgpu_asic_name[] = {
 	"STONEY",
 	"POLARIS10",
 	"POLARIS11",
+	"POLARIS12",
 	"LAST",
 };
 
@@ -1277,6 +1278,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 	case CHIP_FIJI:
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS12:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8cb937b2bfcc..2534adaebe30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -418,6 +418,13 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
 	{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
 	{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	/* Polaris12 */
+	{0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+	{0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+	{0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+	{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+	{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+	{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 
 	{0, 0, 0}
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index fc592c2b0e16..95a568df8551 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -98,6 +98,7 @@ static int amdgpu_pp_early_init(void *handle)
 	switch (adev->asic_type) {
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS12:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
 	case CHIP_TOPAZ:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index a81dfaeeb8c0..1d564beb0fde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -65,6 +65,7 @@
 #define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
 #define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
 #define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
+#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
 
 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -98,6 +99,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
 MODULE_FIRMWARE(FIRMWARE_STONEY);
 MODULE_FIRMWARE(FIRMWARE_POLARIS10);
 MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+MODULE_FIRMWARE(FIRMWARE_POLARIS12);
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
 
@@ -149,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	case CHIP_POLARIS11:
 		fw_name = FIRMWARE_POLARIS11;
 		break;
+	case CHIP_POLARIS12:
+		fw_name = FIRMWARE_POLARIS12;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 69b66b9e7f57..8fec802d3908 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -52,6 +52,7 @@
 #define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
 #define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
 #define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
+#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
 
 #ifdef CONFIG_DRM_AMDGPU_CIK
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -66,6 +67,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
 MODULE_FIRMWARE(FIRMWARE_STONEY);
 MODULE_FIRMWARE(FIRMWARE_POLARIS10);
 MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+MODULE_FIRMWARE(FIRMWARE_POLARIS12);
 
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
 
@@ -121,6 +123,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 	case CHIP_POLARIS11:
 		fw_name = FIRMWARE_POLARIS11;
 		break;
+	case CHIP_POLARIS12:
+		fw_name = FIRMWARE_POLARIS12;
+		break;
 
 	default:
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9999dc71b998..ccb5e02e7b20 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
 
 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
 	return 0;
 }
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 				      int32_t hot_y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *aobj;
 	int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	dce_v10_0_lock_cursor(crtc, true);
 
-	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	if (width != amdgpu_crtc->cursor_width ||
+	    height != amdgpu_crtc->cursor_height ||
+	    hot_x != amdgpu_crtc->cursor_hot_x ||
 	    hot_y != amdgpu_crtc->cursor_hot_y) {
 		int x, y;
 
@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v10_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_hot_x = hot_x;
-		amdgpu_crtc->cursor_hot_y = hot_y;
-	}
-
-	if (width != amdgpu_crtc->cursor_width ||
-	    height != amdgpu_crtc->cursor_height) {
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (width - 1) << 16 | (height - 1));
 		amdgpu_crtc->cursor_width = width;
 		amdgpu_crtc->cursor_height = height;
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
 
 	dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 
 	if (amdgpu_crtc->cursor_bo) {
 		dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
 		dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
 					     amdgpu_crtc->cursor_y);
 
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (amdgpu_crtc->cursor_width - 1) << 16 |
-		       (amdgpu_crtc->cursor_height - 1));
-
 		dce_v10_0_show_cursor(crtc);
 
 		dce_v10_0_lock_cursor(crtc, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index b3d62b909f43..a7af5b33a5e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -167,6 +167,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
 						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
 		break;
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		amdgpu_program_register_sequence(adev,
 						 polaris11_golden_settings_a11,
 						 (const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
@@ -608,6 +609,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
 		num_crtc = 6;
 		break;
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		num_crtc = 5;
 		break;
 	default:
@@ -1589,6 +1591,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
 		adev->mode_info.audio.num_pins = 8;
 		break;
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		adev->mode_info.audio.num_pins = 6;
 		break;
 	default:
@@ -2388,7 +2391,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
 	int pll;
 
 	if ((adev->asic_type == CHIP_POLARIS10) ||
-	    (adev->asic_type == CHIP_POLARIS11)) {
+	    (adev->asic_type == CHIP_POLARIS11) ||
+	    (adev->asic_type == CHIP_POLARIS12)) {
 		struct amdgpu_encoder *amdgpu_encoder =
 			to_amdgpu_encoder(amdgpu_crtc->encoder);
 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -2528,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
 
 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
 	return 0;
 }
@@ -2553,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 				      int32_t hot_y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *aobj;
 	int ret;
@@ -2594,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	dce_v11_0_lock_cursor(crtc, true);
 
-	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	if (width != amdgpu_crtc->cursor_width ||
+	    height != amdgpu_crtc->cursor_height ||
+	    hot_x != amdgpu_crtc->cursor_hot_x ||
 	    hot_y != amdgpu_crtc->cursor_hot_y) {
 		int x, y;
 
@@ -2603,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v11_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_hot_x = hot_x;
-		amdgpu_crtc->cursor_hot_y = hot_y;
-	}
-
-	if (width != amdgpu_crtc->cursor_width ||
-	    height != amdgpu_crtc->cursor_height) {
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (width - 1) << 16 | (height - 1));
 		amdgpu_crtc->cursor_width = width;
 		amdgpu_crtc->cursor_height = height;
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
 
 	dce_v11_0_show_cursor(crtc);
@@ -2636,7 +2637,6 @@ unpin:
 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 
 	if (amdgpu_crtc->cursor_bo) {
 		dce_v11_0_lock_cursor(crtc, true);
@@ -2644,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
 		dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
 					     amdgpu_crtc->cursor_y);
 
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (amdgpu_crtc->cursor_width - 1) << 16 |
-		       (amdgpu_crtc->cursor_height - 1));
-
 		dce_v11_0_show_cursor(crtc);
 
 		dce_v11_0_lock_cursor(crtc, false);
@@ -2822,7 +2818,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
 		return -EINVAL;
 
 	if ((adev->asic_type == CHIP_POLARIS10) ||
-	    (adev->asic_type == CHIP_POLARIS11)) {
+	    (adev->asic_type == CHIP_POLARIS11) ||
+	    (adev->asic_type == CHIP_POLARIS12)) {
 		struct amdgpu_encoder *amdgpu_encoder =
 			to_amdgpu_encoder(amdgpu_crtc->encoder);
 		int encoder_mode =
@@ -2992,6 +2989,7 @@ static int dce_v11_0_early_init(void *handle)
 		adev->mode_info.num_dig = 6;
 		break;
 	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
 		adev->mode_info.num_hpd = 5;
 		adev->mode_info.num_dig = 5;
 		break;
@@ -3101,7 +3099,8 @@ static int dce_v11_0_hw_init(void *handle)
 	amdgpu_atombios_crtc_powergate_init(adev);
 	amdgpu_atombios_encoder_init_dig(adev);
 	if ((adev->asic_type == CHIP_POLARIS10) ||
-	    (adev->asic_type == CHIP_POLARIS11)) {
+	    (adev->asic_type == CHIP_POLARIS11) ||
+	    (adev->asic_type == CHIP_POLARIS12)) {
 		amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
 						   DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
 		amdgpu_atombios_crtc_set_dce_clock(adev, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b4e4ec630e8c..39df6a50637f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
 	struct amdgpu_device *adev = crtc->dev->dev_private;
 	int xorigin = 0, yorigin = 0;
 
+	int w = amdgpu_crtc->cursor_width;
+
 	amdgpu_crtc->cursor_x = x;
 	amdgpu_crtc->cursor_y = y;
 
@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
 
 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
 	return 0;
 }
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 				     int32_t hot_y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *aobj;
 	int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	dce_v6_0_lock_cursor(crtc, true);
 
-	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	if (width != amdgpu_crtc->cursor_width ||
+	    height != amdgpu_crtc->cursor_height ||
+	    hot_x != amdgpu_crtc->cursor_hot_x ||
 	    hot_y != amdgpu_crtc->cursor_hot_y) {
 		int x, y;
 
@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v6_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_hot_x = hot_x;
-		amdgpu_crtc->cursor_hot_y = hot_y;
-	}
-
-	if (width != amdgpu_crtc->cursor_width ||
-	    height != amdgpu_crtc->cursor_height) {
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (width - 1) << 16 | (height - 1));
 		amdgpu_crtc->cursor_width = width;
 		amdgpu_crtc->cursor_height = height;
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
 
 	dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 
 	if (amdgpu_crtc->cursor_bo) {
 		dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
 		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
 					    amdgpu_crtc->cursor_y);
 
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (amdgpu_crtc->cursor_width - 1) << 16 |
-		       (amdgpu_crtc->cursor_height - 1));
-
 		dce_v6_0_show_cursor(crtc);
 		dce_v6_0_lock_cursor(crtc, false);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 584abe834a3c..28102bb1704d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
 
 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
 	return 0;
 }
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 				     int32_t hot_y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *aobj;
 	int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	dce_v8_0_lock_cursor(crtc, true);
 
-	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	if (width != amdgpu_crtc->cursor_width ||
+	    height != amdgpu_crtc->cursor_height ||
+	    hot_x != amdgpu_crtc->cursor_hot_x ||
 	    hot_y != amdgpu_crtc->cursor_hot_y) {
 		int x, y;
 
@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v8_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_hot_x = hot_x;
-		amdgpu_crtc->cursor_hot_y = hot_y;
-	}
-
-	if (width != amdgpu_crtc->cursor_width ||
-	    height != amdgpu_crtc->cursor_height) {
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (width - 1) << 16 | (height - 1));
 		amdgpu_crtc->cursor_width = width;
 		amdgpu_crtc->cursor_height = height;
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
 
 	dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 
 	if (amdgpu_crtc->cursor_bo) {
 		dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
 		dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
 					    amdgpu_crtc->cursor_y);
 
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (amdgpu_crtc->cursor_width - 1) << 16 |
-		       (amdgpu_crtc->cursor_height - 1));
-
 		dce_v8_0_show_cursor(crtc);
 
 		dce_v8_0_lock_cursor(crtc, false);
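Common to all four DCE variants above: CUR_SIZE is now reprogrammed on every cursor move, with both extents packed minus-one into a single 32-bit register — width-1 in bits 31:16, height-1 in bits 15:0. For reference, the encoding in isolation:

#include <stdint.h>

/* Packing used by the CUR_SIZE writes above: (w-1) in the high
 * half-word, (h-1) in the low half-word. */
static uint32_t cur_size(unsigned int w, unsigned int h)
{
	return ((w - 1) << 16) | (h - 1);
}

/* e.g. a 64x64 cursor encodes as cur_size(64, 64) == 0x003f003f */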
a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) { - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - - kfree(amdgpu_encoder->enc_priv); drm_encoder_cleanup(encoder); - kfree(amdgpu_encoder); + kfree(encoder); } static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index d0ec00986f38..373374164bd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -139,6 +139,13 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec.bin"); MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin"); MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_ce.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_me.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_mec.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin"); + static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = { {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0}, @@ -689,6 +696,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) (const u32)ARRAY_SIZE(tonga_golden_common_all)); break; case CHIP_POLARIS11: + case CHIP_POLARIS12: amdgpu_program_register_sequence(adev, golden_settings_polaris11_a11, (const u32)ARRAY_SIZE(golden_settings_polaris11_a11)); @@ -903,6 +911,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) case CHIP_POLARIS10: chip_name = "polaris10"; break; + case CHIP_POLARIS12: + chip_name = "polaris12"; + break; case CHIP_STONEY: chip_name = "stoney"; break; @@ -1768,6 +1779,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_POLARIS11: + case CHIP_POLARIS12: ret = amdgpu_atombios_get_gfx_info(adev); if (ret) return ret; @@ -2682,6 +2694,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) break; case CHIP_POLARIS11: + case CHIP_POLARIS12: modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | @@ -3503,6 +3516,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1) *rconf1 |= 0x0; break; case CHIP_POLARIS11: + case CHIP_POLARIS12: *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) | SE_XSEL(1) | SE_YSEL(1); *rconf1 |= 0x0; @@ -4021,7 +4035,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev) cz_enable_cp_power_gating(adev, true); else cz_enable_cp_power_gating(adev, false); - } else if (adev->asic_type == CHIP_POLARIS11) { + } else if ((adev->asic_type == CHIP_POLARIS11) || + (adev->asic_type == CHIP_POLARIS12)) { gfx_v8_0_init_csb(adev); gfx_v8_0_init_save_restore_list(adev); gfx_v8_0_enable_save_restore_machine(adev); @@ -4095,7 +4110,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); if (adev->asic_type == CHIP_POLARIS11 || - adev->asic_type == CHIP_POLARIS10) { + adev->asic_type == CHIP_POLARIS10 || + adev->asic_type == CHIP_POLARIS12) { tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D); tmp &= ~0x3; WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp); @@ -4283,6 +4299,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) 
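Most of the gfx_v8_0 churn above and below is one mechanical pattern: CHIP_POLARIS12 is added as a case label that falls through into the existing CHIP_POLARIS11 handling, since Polaris12 reuses the Polaris11 golden settings, power-gating and ring setup while loading its own firmware files. Schematically:

        switch (adev->asic_type) {
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:            /* new: shares the Polaris11 path */
                amdgpu_program_register_sequence(adev,
                        golden_settings_polaris11_a11,
                        (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
                break;
        /* ... */
        }
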
amdgpu_ring_write(ring, 0x0000002A); break; case CHIP_POLARIS11: + case CHIP_POLARIS12: amdgpu_ring_write(ring, 0x16000012); amdgpu_ring_write(ring, 0x00000000); break; @@ -4664,7 +4681,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) (adev->asic_type == CHIP_FIJI) || (adev->asic_type == CHIP_STONEY) || (adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == CHIP_POLARIS10)) { + (adev->asic_type == CHIP_POLARIS10) || + (adev->asic_type == CHIP_POLARIS12)) { WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2); WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, @@ -4700,7 +4718,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) mqd->cp_hqd_persistent_state = tmp; if (adev->asic_type == CHIP_STONEY || adev->asic_type == CHIP_POLARIS11 || - adev->asic_type == CHIP_POLARIS10) { + adev->asic_type == CHIP_POLARIS10 || + adev->asic_type == CHIP_POLARIS12) { tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1); WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp); @@ -5279,7 +5298,8 @@ static int gfx_v8_0_late_init(void *handle) static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, bool enable) { - if (adev->asic_type == CHIP_POLARIS11) + if ((adev->asic_type == CHIP_POLARIS11) || + (adev->asic_type == CHIP_POLARIS12)) /* Send msg to SMU via Powerplay */ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_SMC, @@ -5353,6 +5373,7 @@ static int gfx_v8_0_set_powergating_state(void *handle, gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false); break; case CHIP_POLARIS11: + case CHIP_POLARIS12: if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); else diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 45a573e63d4a..e2b0b1646f99 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin"); MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); MODULE_FIRMWARE("radeon/verde_mc.bin"); MODULE_FIRMWARE("radeon/oland_mc.bin"); +MODULE_FIRMWARE("radeon/si58_mc.bin"); #define MC_SEQ_MISC0__MT__MASK 0xf0000000 #define MC_SEQ_MISC0__MT__GDDR1 0x10000000 @@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) const char *chip_name; char fw_name[30]; int err; + bool is_58_fw = false; DRM_DEBUG("\n"); @@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + /* this memory configuration requires special firmware */ + if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) + is_58_fw = true; + + if (is_58_fw) + snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); err = request_firmware(&adev->mc.fw, fw_name, adev->dev); if (err) goto out; @@ -463,19 +472,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) WREG32(mmVM_CONTEXT1_CNTL, VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK | (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) | - ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) | - VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - 
VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK); + ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT)); + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) + gmc_v6_0_set_fault_enable_default(adev, false); + else + gmc_v6_0_set_fault_enable_default(adev, true); gmc_v6_0_gart_flush_gpu_tlb(adev, 0); dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", @@ -754,7 +755,10 @@ static int gmc_v6_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); + if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); + else + return 0; } static int gmc_v6_0_sw_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 0daac3a5be79..476bc9f1954b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -46,6 +46,7 @@ static int gmc_v8_0_wait_for_idle(void *handle); MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_mc.bin"); static const u32 golden_settings_tonga_a11[] = { @@ -130,6 +131,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); break; case CHIP_POLARIS11: + case CHIP_POLARIS12: amdgpu_program_register_sequence(adev, golden_settings_polaris11_a11, (const u32)ARRAY_SIZE(golden_settings_polaris11_a11)); @@ -225,6 +227,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) case CHIP_POLARIS10: chip_name = "polaris10"; break; + case CHIP_POLARIS12: + chip_name = "polaris12"; + break; case CHIP_FIJI: case CHIP_CARRIZO: case CHIP_STONEY: diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 1170a64a3184..034ace79ed49 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -60,6 +60,8 @@ MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin"); MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin"); MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin"); MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin"); static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = @@ -206,6 +208,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); break; case CHIP_POLARIS11: + case CHIP_POLARIS12: amdgpu_program_register_sequence(adev, golden_settings_polaris11_a11, (const u32)ARRAY_SIZE(golden_settings_polaris11_a11)); @@ -278,6 +281,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) case CHIP_POLARIS10: chip_name = "polaris10"; break; + case CHIP_POLARIS12: + chip_name = "polaris12"; + break; case CHIP_CARRIZO: chip_name = "carrizo"; break; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 
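The gmc_v6_0 microcode hunk above keys the MC firmware file off the memory configuration rather than the chip name alone: when bits 31:24 of MC_SEQ_MISC0 read back 0x58, the board needs the dedicated si58_mc.bin image. A sketch of that selection pulled out into a hypothetical helper (si_pick_mc_fw is not in the patch):

        /* Pick the MC firmware by memory configuration, falling back
         * to the per-chip image. */
        static void si_pick_mc_fw(struct amdgpu_device *adev, char *fw_name,
                                  size_t len, const char *chip_name)
        {
                u32 misc0 = RREG32(mmMC_SEQ_MISC0);

                if (((misc0 & 0xff000000) >> 24) == 0x58)
                        snprintf(fw_name, len, "radeon/si58_mc.bin");
                else
                        snprintf(fw_name, len, "radeon/%s_mc.bin", chip_name);
        }
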
6c65a1a2de79..6e150db8f380 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -56,7 +56,6 @@ #define BIOS_SCRATCH_4 0x5cd MODULE_FIRMWARE("radeon/tahiti_smc.bin"); -MODULE_FIRMWARE("radeon/tahiti_k_smc.bin"); MODULE_FIRMWARE("radeon/pitcairn_smc.bin"); MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin"); MODULE_FIRMWARE("radeon/verde_smc.bin"); @@ -65,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin"); MODULE_FIRMWARE("radeon/oland_k_smc.bin"); MODULE_FIRMWARE("radeon/hainan_smc.bin"); MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); +MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); union power_info { struct _ATOM_POWERPLAY_INFO info; @@ -3488,30 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, (adev->pdev->device == 0x6817) || (adev->pdev->device == 0x6806)) max_mclk = 120000; - } else if (adev->asic_type == CHIP_VERDE) { - if ((adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0x87) || - (adev->pdev->device == 0x6820) || - (adev->pdev->device == 0x6821) || - (adev->pdev->device == 0x6822) || - (adev->pdev->device == 0x6823) || - (adev->pdev->device == 0x682A) || - (adev->pdev->device == 0x682B)) { - max_sclk = 75000; - max_mclk = 80000; - } - } else if (adev->asic_type == CHIP_OLAND) { - if ((adev->pdev->revision == 0xC7) || - (adev->pdev->revision == 0x80) || - (adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0x87) || - (adev->pdev->device == 0x6604) || - (adev->pdev->device == 0x6605)) { - max_sclk = 75000; - max_mclk = 80000; - } } else if (adev->asic_type == CHIP_HAINAN) { if ((adev->pdev->revision == 0x81) || (adev->pdev->revision == 0x83) || @@ -3520,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, (adev->pdev->device == 0x6665) || (adev->pdev->device == 0x6667)) { max_sclk = 75000; - max_mclk = 80000; } } /* Apply dpm quirks */ @@ -7687,50 +7662,51 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev) chip_name = "tahiti"; break; case CHIP_PITCAIRN: - if ((adev->pdev->revision == 0x81) || - (adev->pdev->device == 0x6810) || - (adev->pdev->device == 0x6811) || - (adev->pdev->device == 0x6816) || - (adev->pdev->device == 0x6817) || - (adev->pdev->device == 0x6806)) + if ((adev->pdev->revision == 0x81) && + ((adev->pdev->device == 0x6810) || + (adev->pdev->device == 0x6811))) chip_name = "pitcairn_k"; else chip_name = "pitcairn"; break; case CHIP_VERDE: - if ((adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0x87) || - (adev->pdev->device == 0x6820) || - (adev->pdev->device == 0x6821) || - (adev->pdev->device == 0x6822) || - (adev->pdev->device == 0x6823) || - (adev->pdev->device == 0x682A) || - (adev->pdev->device == 0x682B)) + if (((adev->pdev->device == 0x6820) && + ((adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0x83))) || + ((adev->pdev->device == 0x6821) && + ((adev->pdev->revision == 0x83) || + (adev->pdev->revision == 0x87))) || + ((adev->pdev->revision == 0x87) && + ((adev->pdev->device == 0x6823) || + (adev->pdev->device == 0x682b)))) chip_name = "verde_k"; else chip_name = "verde"; break; case CHIP_OLAND: - if ((adev->pdev->revision == 0xC7) || - (adev->pdev->revision == 0x80) || - (adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0x87) || - (adev->pdev->device == 0x6604) || - (adev->pdev->device == 0x6605)) + if (((adev->pdev->revision == 0x81) && + ((adev->pdev->device == 
0x6600) || + (adev->pdev->device == 0x6604) || + (adev->pdev->device == 0x6605) || + (adev->pdev->device == 0x6610))) || + ((adev->pdev->revision == 0x83) && + (adev->pdev->device == 0x6610))) chip_name = "oland_k"; else chip_name = "oland"; break; case CHIP_HAINAN: - if ((adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0xC3) || - (adev->pdev->device == 0x6664) || - (adev->pdev->device == 0x6665) || - (adev->pdev->device == 0x6667)) + if (((adev->pdev->revision == 0x81) && + (adev->pdev->device == 0x6660)) || + ((adev->pdev->revision == 0x83) && + ((adev->pdev->device == 0x6660) || + (adev->pdev->device == 0x6663) || + (adev->pdev->device == 0x6665) || + (adev->pdev->device == 0x6667)))) chip_name = "hainan_k"; + else if ((adev->pdev->revision == 0xc3) && + (adev->pdev->device == 0x6665)) + chip_name = "banks_k_2"; else chip_name = "hainan"; break; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 96444e4d862a..7fb9137dd89b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -40,13 +40,14 @@ #include "smu/smu_7_0_1_sh_mask.h" static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); -static void uvd_v4_2_init_cg(struct amdgpu_device *adev); static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev); static int uvd_v4_2_start(struct amdgpu_device *adev); static void uvd_v4_2_stop(struct amdgpu_device *adev); static int uvd_v4_2_set_clockgating_state(void *handle, enum amd_clockgating_state state); +static void uvd_v4_2_set_dcm(struct amdgpu_device *adev, + bool sw_mode); /** * uvd_v4_2_ring_get_rptr - get read pointer * @@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle) return r; } - +static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, + bool enable); /** * uvd_v4_2_hw_init - start and test UVD block * @@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle) uint32_t tmp; int r; - uvd_v4_2_init_cg(adev); - uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE); + uvd_v4_2_enable_mgcg(adev, true); amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); r = uvd_v4_2_start(adev); if (r) @@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) struct amdgpu_ring *ring = &adev->uvd.ring; uint32_t rb_bufsz; int i, j, r; - /* disable byte swapping */ u32 lmi_swap_cntl = 0; u32 mp_swap_cntl = 0; + WREG32(mmUVD_CGC_GATE, 0); + uvd_v4_2_set_dcm(adev, true); + uvd_v4_2_mc_resume(adev); /* disable interupt */ @@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev) /* Unstall UMC and register bus */ WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + + uvd_v4_2_set_dcm(adev, false); } /** @@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev, WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2); } -static void uvd_v4_2_init_cg(struct amdgpu_device *adev) -{ - bool hw_mode = true; - - if (hw_mode) { - uvd_v4_2_set_dcm(adev, false); - } else { - u32 tmp = RREG32(mmUVD_CGC_CTRL); - tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; - WREG32(mmUVD_CGC_CTRL, tmp); - } -} - static bool uvd_v4_2_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, static int uvd_v4_2_set_clockgating_state(void *handle, enum amd_clockgating_state state) { - bool gate = false; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if 
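The si_dpm_init_microcode() rework above tightens the firmware quirks from OR-lists (any matching device ID or any matching revision) to explicit (device, revision) pairs, so boards that never shipped a "_k" SMC image stop requesting one. If the lists kept growing, the same logic could be written table-driven; a hypothetical sketch, with entries transcribed from the CHIP_OLAND branch of the hunk:

        struct si_smc_quirk {
                u16 device;
                u8 revision;
                const char *fw;
        };

        static const struct si_smc_quirk oland_quirks[] = {
                { 0x6600, 0x81, "oland_k" },
                { 0x6604, 0x81, "oland_k" },
                { 0x6605, 0x81, "oland_k" },
                { 0x6610, 0x81, "oland_k" },
                { 0x6610, 0x83, "oland_k" },
        };

        static const char *si_lookup_smc_fw(u16 device, u8 revision,
                                            const struct si_smc_quirk *q,
                                            int n, const char *fallback)
        {
                int i;

                for (i = 0; i < n; i++)
                        if (q[i].device == device && q[i].revision == revision)
                                return q[i].fw;
                return fallback;
        }
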
(!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) - return 0; - - if (state == AMD_CG_STATE_GATE) - gate = true; - - uvd_v4_2_enable_mgcg(adev, gate); - return 0; } @@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) - return 0; - if (state == AMD_PG_STATE_GATE) { uvd_v4_2_stop(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index a79e283590fb..6de6becce745 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -791,15 +791,10 @@ static int uvd_v5_0_set_clockgating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE) ? true : false; - static int curstate = -1; if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) return 0; - if (curstate == state) - return 0; - - curstate = state; if (enable) { /* wait for STATUS to clear */ if (uvd_v5_0_wait_for_idle(handle)) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 6b3293a1c7b8..37ca685e5a9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -43,9 +43,13 @@ #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 +#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07 + #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 +#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000 + #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 #define VCE_V3_0_FW_SIZE (384 * 1024) @@ -54,6 +58,9 @@ #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) +#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \ + | GRBM_GFX_INDEX__VCE_ALL_PIPE) + static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); @@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, WREG32(mmVCE_UENC_CLOCK_GATING_2, data); data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); - data &= ~0xffc00000; + data &= ~0x3ff; WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); @@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); vce_v3_0_mc_resume(adev, idx); WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); @@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) } } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); @@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) vce_v3_0_set_vce_sw_clock_gating(adev, false); } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -320,11 +327,12 @@ static unsigned 
vce_v3_0_get_harvest_config(struct amdgpu_device *adev) { u32 tmp; - /* Fiji, Stoney, Polaris10, Polaris11 are single pipe */ + /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */ if ((adev->asic_type == CHIP_FIJI) || (adev->asic_type == CHIP_STONEY) || (adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_POLARIS11)) + (adev->asic_type == CHIP_POLARIS11) || + (adev->asic_type == CHIP_POLARIS12)) return AMDGPU_VCE_HARVEST_VCE1; /* Tonga and CZ are dual or single pipe */ @@ -585,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle) * VCE team suggest use bit 3--bit 6 for busy status check */ mutex_lock(&adev->grbm_idx_mutex); - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); mutex_unlock(&adev->grbm_idx_mutex); if (srbm_soft_reset) { @@ -733,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, if (adev->vce.harvest_config & (1 << i)) continue; - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i)); if (enable) { /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ @@ -752,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, vce_v3_0_set_vce_sw_clock_gating(adev, enable); } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index bf088d6d9bf1..c2ac54f11341 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -88,6 +88,7 @@ MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); /* * Indirect registers accessor @@ -312,6 +313,7 @@ static void vi_init_golden_registers(struct amdgpu_device *adev) break; case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS12: default: break; } @@ -671,6 +673,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, case CHIP_TONGA: case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS12: case CHIP_CARRIZO: case CHIP_STONEY: asic_register_table = cz_allowed_read_registers; @@ -994,6 +997,11 @@ static int vi_common_early_init(void *handle) adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x50; break; + case CHIP_POLARIS12: + adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG; + adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 0x64; + break; case CHIP_CARRIZO: adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | AMD_CG_SUPPORT_GFX_MGCG | @@ -1346,6 +1354,7 @@ static int vi_common_set_clockgating_state(void *handle, case CHIP_TONGA: case CHIP_POLARIS10: case CHIP_POLARIS11: + case CHIP_POLARIS12: 
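The VCE hunks above replace WREG32_FIELD() writes of VCE_INSTANCE with whole-register writes so that the VCE_ALL_PIPE bits are always set together with the instance select, and broadcast mode is restored explicitly afterwards. Expanding the macro with the #define values from the hunk makes the encoding concrete:

        /* GET_VCE_INSTANCE(i) = (i << 4) | 0x07, so:
         *   GET_VCE_INSTANCE(0) -> 0x07  (instance 0, all pipes)
         *   GET_VCE_INSTANCE(1) -> 0x17  (instance 1, all pipes)
         * and mmGRBM_GFX_INDEX_DEFAULT (0xE0000000) restores broadcast. */
        WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
        /* ... program the selected VCE instance ... */
        WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
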
vi_common_set_clockgating_state_by_smu(adev, state); default: break; @@ -1429,6 +1438,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) break; case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS12: amdgpu_ip_block_add(adev, &vi_common_ip_block); amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block); amdgpu_ip_block_add(adev, &tonga_ih_ip_block); diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index c02469ada9f1..85f358764bbc 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -23,7 +23,7 @@ #ifndef __AMD_SHARED_H__ #define __AMD_SHARED_H__ -#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */ +#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */ /* * Supported ASIC types @@ -46,6 +46,7 @@ enum amd_asic_type { CHIP_STONEY, CHIP_POLARIS10, CHIP_POLARIS11, + CHIP_POLARIS12, CHIP_LAST, }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index b0c63c5f54c9..6bb79c94cb9f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); + AMD_CG_STATE_GATE); cgs_set_powergating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, @@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); + AMD_PG_STATE_UNGATE); cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, true); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 4b14f259a147..0fb4e8c8f5e1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) cz_hwmgr->vce_dpm.hard_min_clk, PPSMC_MSG_SetEclkHardMin)); } else { - /*EPR# 419220 -HW limitation to to */ - cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetEclkHardMin, - cz_get_eclk_level(hwmgr, - cz_hwmgr->vce_dpm.hard_min_clk, - PPSMC_MSG_SetEclkHardMin)); - + /*Program HardMin based on the vce_arbiter.ecclk */ + if (hwmgr->vce_arbiter.ecclk == 0) { + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkHardMin, 0); + /* disable ECLK DPM 0. 
Otherwise VCE could hang if + * switching SCLK from DPM 0 to 6/7 */ + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkSoftMin, 1); + } else { + cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkHardMin, + cz_get_eclk_level(hwmgr, + cz_hwmgr->vce_dpm.hard_min_clk, + PPSMC_MSG_SetEclkHardMin)); + } } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index dc6700aee18f..b03606405a53 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -95,6 +95,7 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) break; case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS12: polaris_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK); break; @@ -745,7 +746,7 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TablelessHardwareInterface); - if (hwmgr->chip_id == CHIP_POLARIS11) + if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12)) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SPLLShutdownSupport); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 26477f0f09dc..6cd1287a7a8f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -521,7 +521,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10); PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); - } else if (hwmgr->chip_id == CHIP_POLARIS11) { + } else if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12)) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index e5812aa456f3..6e618aa20719 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -65,6 +65,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) break; case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS12: polaris10_smum_init(smumgr); break; default: diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 908011d2c8f5..7abda94fc2cf 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -113,6 +113,7 @@ struct ast_private { struct ttm_bo_kmap_obj cache_kmap; int next_cursor; bool support_wide_screen; + bool DisableP2A; enum ast_tx_chip tx_chip_type; u8 dp501_maxclk; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index f75c6421db62..533e762d036d 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } else *need_post = false; + /* Check P2A Access */ + ast->DisableP2A = true; + data = ast_read32(ast, 0xf004); + if (data != 0xFFFFFFFF) + ast->DisableP2A = false; + /* Check if we support wide screen 
*/ switch (ast->chip) { case AST1180: @@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->support_wide_screen = true; else { ast->support_wide_screen = false; - /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x1207c); - data &= 0x300; - if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ - ast->support_wide_screen = true; - if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ - ast->support_wide_screen = true; + if (ast->DisableP2A == false) { + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + data = ast_read32(ast, 0x1207c); + data &= 0x300; + if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ + ast->support_wide_screen = true; + if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ + ast->support_wide_screen = true; + } } break; } @@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev) uint32_t data, data2; uint32_t denum, num, div, ref_pll; - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - - - ast_write32(ast, 0x10000, 0xfc600309); - - do { - if (pci_channel_offline(dev->pdev)) - return -EIO; - } while (ast_read32(ast, 0x10000) != 0x01); - data = ast_read32(ast, 0x10004); - - if (data & 0x40) + if (ast->DisableP2A) + { ast->dram_bus_width = 16; + ast->dram_type = AST_DRAM_1Gx16; + ast->mclk = 396; + } else - ast->dram_bus_width = 32; + { + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + data = ast_read32(ast, 0x10004); + + if (data & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; + + if (ast->chip == AST2300 || ast->chip == AST2400) { + switch (data & 0x03) { + case 0: + ast->dram_type = AST_DRAM_512Mx16; + break; + default: + case 1: + ast->dram_type = AST_DRAM_1Gx16; + break; + case 2: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_4Gx16; + break; + } + } else { + switch (data & 0x0c) { + case 0: + case 4: + ast->dram_type = AST_DRAM_512Mx16; + break; + case 8: + if (data & 0x40) + ast->dram_type = AST_DRAM_1Gx16; + else + ast->dram_type = AST_DRAM_512Mx32; + break; + case 0xc: + ast->dram_type = AST_DRAM_1Gx32; + break; + } + } - if (ast->chip == AST2300 || ast->chip == AST2400) { - switch (data & 0x03) { - case 0: - ast->dram_type = AST_DRAM_512Mx16; - break; - default: - case 1: - ast->dram_type = AST_DRAM_1Gx16; - break; - case 2: - ast->dram_type = AST_DRAM_2Gx16; - break; + data = ast_read32(ast, 0x10120); + data2 = ast_read32(ast, 0x10170); + if (data2 & 0x2000) + ref_pll = 14318; + else + ref_pll = 12000; + + denum = data & 0x1f; + num = (data & 0x3fe0) >> 5; + data = (data & 0xc000) >> 14; + switch (data) { case 3: - ast->dram_type = AST_DRAM_4Gx16; - break; - } - } else { - switch (data & 0x0c) { - case 0: - case 4: - ast->dram_type = AST_DRAM_512Mx16; + div = 0x4; break; - case 8: - if (data & 0x40) - ast->dram_type = AST_DRAM_1Gx16; - else - ast->dram_type = AST_DRAM_512Mx32; + case 2: + case 1: + div = 0x2; break; - case 0xc: - ast->dram_type = AST_DRAM_1Gx32; + default: + div = 0x1; break; } + ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); } - - data = ast_read32(ast, 0x10120); - data2 = ast_read32(ast, 0x10170); - if (data2 & 0x2000) - ref_pll = 14318; - else - ref_pll = 12000; - - denum = data & 0x1f; - num = (data & 0x3fe0) >> 5; - data = (data & 0xc000) >> 14; - switch (data) { - case 3: - div = 0x4; - break; 
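The ast_main.c hunks above gate every P2A (PCI-to-AHB) access behind a probe: if reading 0xf004 returns 0xFFFFFFFF the bridge is disabled, and the driver substitutes conservative defaults instead of hanging on SCU/DRAM register reads. The guard, condensed from the hunks (the boolean assignment is an equivalent reformulation, not the patch's literal if/else):

        ast->DisableP2A = (ast_read32(ast, 0xf004) == 0xFFFFFFFF);

        if (ast->DisableP2A) {
                /* no P2A window: assume safe defaults */
                ast->dram_bus_width = 16;
                ast->dram_type = AST_DRAM_1Gx16;
                ast->mclk = 396;
        } else {
                /* unlock the SCU through P2A and read the real config */
                ast_write32(ast, 0xf004, 0x1e6e0000);
                ast_write32(ast, 0xf000, 0x1);
                /* ... decode 0x10004 / 0x10120 / 0x10170 as before ... */
        }
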
- case 2: - case 1: - div = 0x2; break; - default: - div = 0x1; break; - } - ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); return 0; } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 810c51d92b99..5331ee1df086 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev) ast_open_key(ast); ast_set_def_ext_reg(dev); - if (ast->chip == AST2300 || ast->chip == AST2400) - ast_init_dram_2300(dev); - else - ast_init_dram_reg(dev); + if (ast->DisableP2A == false) + { + if (ast->chip == AST2300 || ast->chip == AST2400) + ast_init_dram_2300(dev); + else + ast_init_dram_reg(dev); - ast_init_3rdtx(dev); + ast_init_3rdtx(dev); + } + else + { + if (ast->tx_chip_type != AST_TX_NONE) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ + } } /* AST 2300 DRAM settings */ diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index eb9bf8786c24..18eefdcbf1ba 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, pm_runtime_enable(dev); + pm_runtime_get_sync(dev); phy_power_on(dp->phy); analogix_dp_init_dp(dp); @@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, goto err_disable_pm_runtime; } + phy_power_off(dp->phy); + pm_runtime_put(dev); + return 0; err_disable_pm_runtime: + + phy_power_off(dp->phy); + pm_runtime_put(dev); pm_runtime_disable(dev); return ret; diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig index 04b3c161dfae..7f4cc6e172ab 100644 --- a/drivers/gpu/drm/cirrus/Kconfig +++ b/drivers/gpu/drm/cirrus/Kconfig @@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU This is a KMS driver for emulated cirrus device in qemu. It is *NOT* intended for real cirrus devices. This requires the modesetting userspace X.org driver. + + Cirrus is obsolete, the hardware was designed in the '90s + and can't keep up with today's needs. 
More background: + https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/ + + Better alternatives are: + - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+) + - qxl (DRM_QXL, qemu -vga qxl, works best with spice) + - virtio (DRM_VIRTIO_GPU), qemu -vga virtio) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 60697482b94c..50f5cf7b69d1 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, EXPORT_SYMBOL(drm_atomic_get_crtc_state); static void set_out_fence_for_crtc(struct drm_atomic_state *state, - struct drm_crtc *crtc, s64 __user *fence_ptr) + struct drm_crtc *crtc, s32 __user *fence_ptr) { state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; } -static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, +static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, struct drm_crtc *crtc) { - s64 __user *fence_ptr; + s32 __user *fence_ptr; fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; @@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, state->color_mgmt_changed |= replaced; return ret; } else if (property == config->prop_out_fence_ptr) { - s64 __user *fence_ptr = u64_to_user_ptr(val); + s32 __user *fence_ptr = u64_to_user_ptr(val); if (!fence_ptr) return 0; @@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb); */ struct drm_out_fence_state { - s64 __user *out_fence_ptr; + s32 __user *out_fence_ptr; struct sync_file *sync_file; int fd; }; @@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev, return 0; for_each_crtc_in_state(state, crtc, crtc_state, i) { - u64 __user *fence_ptr; + s32 __user *fence_ptr; fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 583f47f27b36..34f757bcabae 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1259,8 +1259,10 @@ int drm_atomic_helper_commit(struct drm_device *dev, if (!nonblock) { ret = drm_atomic_helper_wait_for_fences(dev, state, true); - if (ret) + if (ret) { + drm_atomic_helper_cleanup_planes(dev, state); return ret; + } } /* diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index ac6a35212501..e6b19bc9021a 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev, return NULL; mode->type |= DRM_MODE_TYPE_USERDEF; + /* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */ + if (cmd->xres == 1366 && mode->hdisplay == 1368) { + mode->hdisplay = 1366; + mode->hsync_start--; + mode->hsync_end--; + drm_mode_set_name(mode); + } drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); return mode; } diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index ac953f037be7..cf8f0128c161 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev) } if (dev->mode_config.delayed_event) { + /* + * FIXME: + * + * Use short (1s) delay to handle the initial delayed event. 
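The drm_modes.c hunk above handles a long-standing cmdline quirk: GTF/CVT timings are computed in 8-pixel cells, so a requested width of 1366 (not divisible by 8) comes back as 1368. A worked example of what the fixup does to the generated mode:

        /* "video=1366x768": CVT yields hdisplay = 1368 (1366 % 8 != 0).
         * The fixup restores the requested width and nudges the sync:
         *   mode->hdisplay     1368 -> 1366
         *   mode->hsync_start  s    -> s - 1
         *   mode->hsync_end    e    -> e - 1
         * then drm_mode_set_name() regenerates the "1366x768" name. */
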
+ * This delay should not be needed, but Optimus/nouveau will + * fail in a mysterious way if the delayed event is handled as + * soon as possible like it is done in + * drm_helper_probe_single_connector_modes() in case the poll + * was enabled before. + */ poll = true; - delay = 0; + delay = HZ; } if (poll) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 169ac96e8f08..fe0e85b41310 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, struct list_head list; bool found; + /* + * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick + * drm_mm into giving out a low IOVA after address space + * rollover. This needs a proper fix. + */ ret = drm_mm_insert_node_in_range(&mmu->mm, node, size, 0, mmu->last_iova, ~0UL, - DRM_MM_SEARCH_DEFAULT); + mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW); if (ret != -ENOSPC) break; diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 6ca1f3117fe8..75eeb831ed6a 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -46,7 +46,8 @@ enum decon_flag_bits { BIT_CLKS_ENABLED, BIT_IRQS_ENABLED, BIT_WIN_UPDATED, - BIT_SUSPENDED + BIT_SUSPENDED, + BIT_REQUEST_UPDATE }; struct decon_context { @@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc) m->crtc_vsync_end = m->crtc_vsync_start + 1; } - decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0); - - /* enable clock gate */ - val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F; - writel(val, ctx->addr + DECON_CMU); - if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) decon_setup_trigger(ctx); @@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, /* window enable */ decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); } static void decon_disable_plane(struct exynos_drm_crtc *crtc, @@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, return; decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); } static void decon_atomic_flush(struct exynos_drm_crtc *crtc) @@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) for (i = ctx->first_win; i < WINDOWS_NR; i++) decon_shadow_protect_win(ctx, i, false); - /* standalone update */ - decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); + if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags)) + decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); if (ctx->out_type & IFTYPE_I80) set_bit(BIT_WIN_UPDATED, &ctx->flags); diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 0d41ebc4aea6..f7bce8603958 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -37,13 +37,6 @@ #include "i915_drv.h" #include "gvt.h" -#define MB_TO_BYTES(mb) ((mb) << 20ULL) -#define BYTES_TO_MB(b) ((b) >> 20ULL) - -#define HOST_LOW_GM_SIZE MB_TO_BYTES(128) -#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) -#define HOST_FENCE 4 - static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) { struct intel_gvt *gvt = vgpu->gvt; @@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, POSTING_READ(fence_reg_lo); } +static void _clear_vgpu_fence(struct intel_vgpu *vgpu) +{ + int i; + + for (i = 0; i < 
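In the exynos5433 decon hunks above, the STANDALONE_UPDATE_F kick becomes demand-driven: decon_update_plane() and decon_disable_plane() set BIT_REQUEST_UPDATE, and decon_atomic_flush() only writes DECON_UPDATE when the bit was set, so flushes without a plane change no longer trigger spurious updates. The request/consume pairing, reduced to hypothetical helpers (names not in the patch):

        static void decon_mark_dirty(struct decon_context *ctx)
        {
                set_bit(BIT_REQUEST_UPDATE, &ctx->flags); /* plane changed */
        }

        static void decon_kick_update(struct decon_context *ctx)
        {
                /* at most one standalone update per batch of changes */
                if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
                        decon_set_bits(ctx, DECON_UPDATE,
                                       STANDALONE_UPDATE_F, ~0);
        }
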
vgpu_fence_sz(vgpu); i++) + intel_vgpu_write_fence(vgpu, i, 0); +} + static void free_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; @@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->drm.struct_mutex); + _clear_vgpu_fence(vgpu); for (i = 0; i < vgpu_fence_sz(vgpu); i++) { reg = vgpu->fence.regs[i]; - intel_vgpu_write_fence(vgpu, i, 0); list_add_tail(®->link, &dev_priv->mm.fence_list); } @@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) continue; list_del(pos); vgpu->fence.regs[i] = reg; - intel_vgpu_write_fence(vgpu, i, 0); if (++i == vgpu_fence_sz(vgpu)) break; } if (i != vgpu_fence_sz(vgpu)) goto out_free_fence; + _clear_vgpu_fence(vgpu); + mutex_unlock(&dev_priv->drm.struct_mutex); intel_runtime_pm_put(dev_priv); return 0; @@ -314,6 +316,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu) } /** + * intel_vgpu_reset_resource - reset resource state owned by a vGPU + * @vgpu: a vGPU + * + * This function is used to reset resource state owned by a vGPU. + * + */ +void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + + intel_runtime_pm_get(dev_priv); + _clear_vgpu_fence(vgpu); + intel_runtime_pm_put(dev_priv); +} + +/** * intel_alloc_vgpu_resource - allocate HW resource for a vGPU * @vgpu: vGPU * @param: vGPU creation params diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 711c31c8d8b4..4a6a2ed65732 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, } return 0; } + +/** + * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU + * + * @vgpu: a vGPU + * @primary: is the vGPU presented as primary + * + */ +void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, + bool primary) +{ + struct intel_gvt *gvt = vgpu->gvt; + const struct intel_gvt_device_info *info = &gvt->device_info; + u16 *gmch_ctl; + int i; + + memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, + info->cfg_space_size); + + if (!primary) { + vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = + INTEL_GVT_PCI_CLASS_VGA_OTHER; + vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = + INTEL_GVT_PCI_CLASS_VGA_OTHER; + } + + /* Show guest that there isn't any stolen memory.*/ + gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); + *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); + + intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, + gvt_aperture_pa_base(gvt), true); + + vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO + | PCI_COMMAND_MEMORY + | PCI_COMMAND_MASTER); + /* + * Clear the bar upper 32bit and let guest to assign the new value + */ + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); + memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); + + for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { + vgpu->cfg_space.bar[i].size = pci_resource_len( + gvt->dev_priv->drm.pdev, i * 2); + vgpu->cfg_space.bar[i].tracked = false; + } +} + +/** + * intel_vgpu_reset_cfg_space - reset vGPU configuration space + * + * @vgpu: a vGPU + * + */ +void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu) +{ + u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND]; + bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] != + INTEL_GVT_PCI_CLASS_VGA_OTHER; + + if (cmd & 
PCI_COMMAND_MEMORY) { + trap_gttmmio(vgpu, false); + map_aperture(vgpu, false); + } + + /** + * Currently we only do such reset when vGPU is not + * owned by any VM, so we simply restore entire cfg + * space to default value. + */ + intel_vgpu_init_cfg_space(vgpu, primary); +} diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index d26a092c70e8..e4563984cb1e 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -481,7 +481,6 @@ struct parser_exec_state { (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2) static unsigned long bypass_scan_mask = 0; -static bool bypass_batch_buffer_scan = true; /* ring ALL, type = 0 */ static struct sub_op_bits sub_op_mi[] = { @@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s) { struct intel_gvt *gvt = s->vgpu->gvt; - if (bypass_batch_buffer_scan) - return 0; - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { /* BDW decides privilege based on address space */ if (cmd_val(s, 0) & (1 << 8)) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index f32bb6f6495c..34083731669d 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload) #define get_desc_from_elsp_dwords(ed, i) \ ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) - -#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2)) -#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U)) -static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj, - unsigned long add, int gmadr_bytes) -{ - if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) - return -1; - - *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add & - BATCH_BUFFER_ADDR_MASK; - if (gmadr_bytes == 8) { - *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) = - add & BATCH_BUFFER_ADDR_HIGH_MASK; - } - - return 0; -} - static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) { - int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; + const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; + struct intel_shadow_bb_entry *entry_obj; /* pin the gem object to ggtt */ - if (!list_empty(&workload->shadow_bb)) { - struct intel_shadow_bb_entry *entry_obj = - list_first_entry(&workload->shadow_bb, - struct intel_shadow_bb_entry, - list); - struct intel_shadow_bb_entry *temp; + list_for_each_entry(entry_obj, &workload->shadow_bb, list) { + struct i915_vma *vma; - list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb, - list) { - struct i915_vma *vma; - - vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, - 4, 0); - if (IS_ERR(vma)) { - gvt_err("Cannot pin\n"); - return; - } - - /* FIXME: we are not tracking our pinned VMA leaving it - * up to the core to fix up the stray pin_count upon - * free. - */ - - /* update the relocate gma with shadow batch buffer*/ - set_gma_to_bb_cmd(entry_obj, - i915_ggtt_offset(vma), - gmadr_bytes); + vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); + if (IS_ERR(vma)) { + gvt_err("Cannot pin\n"); + return; } + + /* FIXME: we are not tracking our pinned VMA leaving it + * up to the core to fix up the stray pin_count upon + * free. 
+ */ + + /* update the relocate gma with shadow batch buffer*/ + entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma); + if (gmadr_bytes == 8) + entry_obj->bb_start_cmd_va[2] = 0; } } @@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) INIT_LIST_HEAD(&vgpu->workload_q_head[i]); } - vgpu->workloads = kmem_cache_create("gvt-g vgpu workload", + vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload", sizeof(struct intel_vgpu_workload), 0, SLAB_HWCACHE_ALIGN, NULL); diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 6c5fdf5b2ce2..47dec4acf7ff 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -240,15 +240,8 @@ static inline int get_pse_type(int type) static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) { void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; - u64 pte; -#ifdef readq - pte = readq(addr); -#else - pte = ioread32(addr); - pte |= (u64)ioread32(addr + 4) << 32; -#endif - return pte; + return readq(addr); } static void write_pte64(struct drm_i915_private *dev_priv, @@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv, { void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; -#ifdef writeq writeq(pte, addr); -#else - iowrite32((u32)pte, addr); - iowrite32(pte >> 32, addr + 4); -#endif + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); POSTING_READ(GFX_FLSH_CNTL_GEN6); } @@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm) info->gtt_entry_size; mem = kzalloc(mm->has_shadow_page_table ? mm->page_table_entry_size * 2 - : mm->page_table_entry_size, - GFP_ATOMIC); + : mm->page_table_entry_size, GFP_KERNEL); if (!mem) return -ENOMEM; mm->virtual_page_table = mem; @@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm; int ret; - mm = kzalloc(sizeof(*mm), GFP_ATOMIC); + mm = kzalloc(sizeof(*mm), GFP_KERNEL); if (!mm) { ret = -ENOMEM; goto fail; @@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; int page_entry_num = GTT_PAGE_SIZE >> vgpu->gvt->device_info.gtt_entry_size_shift; - struct page *scratch_pt; + void *scratch_pt; unsigned long mfn; int i; - void *p; if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) return -EINVAL; - scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); + scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); if (!scratch_pt) { gvt_err("fail to allocate scratch page\n"); return -ENOMEM; } - p = kmap_atomic(scratch_pt); - mfn = intel_gvt_hypervisor_virt_to_mfn(p); + mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt); if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_err("fail to translate vaddr:0x%llx\n", (u64)p); - kunmap_atomic(p); - __free_page(scratch_pt); + gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt); + free_page((unsigned long)scratch_pt); return -EFAULT; } gtt->scratch_pt[type].page_mfn = mfn; - gtt->scratch_pt[type].page = scratch_pt; + gtt->scratch_pt[type].page = virt_to_page(scratch_pt); gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", vgpu->id, type, mfn); @@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, * scratch_pt[type] indicate the scratch pt/scratch page used by the * 'type' pt. * e.g. 
scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by - * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self + * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. */ if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { @@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, se.val64 |= PPAT_CACHED_INDEX; for (i = 0; i < page_entry_num; i++) - ops->set_entry(p, &se, i, false, 0, vgpu); + ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); } - kunmap_atomic(p); - return 0; } @@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, int intel_gvt_init_gtt(struct intel_gvt *gvt) { int ret; - void *page_addr; + void *page; gvt_dbg_core("init gtt\n"); @@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) return -ENODEV; } - gvt->gtt.scratch_ggtt_page = - alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); - if (!gvt->gtt.scratch_ggtt_page) { + page = (void *)get_zeroed_page(GFP_KERNEL); + if (!page) { gvt_err("fail to allocate scratch ggtt page\n"); return -ENOMEM; } + gvt->gtt.scratch_ggtt_page = virt_to_page(page); - page_addr = page_address(gvt->gtt.scratch_ggtt_page); - - gvt->gtt.scratch_ggtt_mfn = - intel_gvt_hypervisor_virt_to_mfn(page_addr); + gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page); if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { gvt_err("fail to translate scratch ggtt page\n"); __free_page(gvt->gtt.scratch_ggtt_page); @@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) for (offset = 0; offset < num_entries; offset++) ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); } + +/** + * intel_vgpu_reset_gtt - reset the all GTT related status + * @vgpu: a vGPU + * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset + * + * This function is called from vfio core to reset reset all + * GTT related status, including GGTT, PPGTT, scratch page. 
+ * + */ +void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr) +{ + int i; + + ppgtt_free_all_shadow_page(vgpu); + if (!dmlr) + return; + + intel_vgpu_reset_ggtt(vgpu); + + /* clear scratch page for security */ + for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { + if (vgpu->gtt.scratch_pt[i].page != NULL) + memset(page_address(vgpu->gtt.scratch_pt[i].page), + 0, PAGE_SIZE); + } +} diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index b315ab3593ec..f88eb5e89bea 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); extern int intel_gvt_init_gtt(struct intel_gvt *gvt); +extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr); extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 398877c3d2fd..e6bf5c533fbe 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); intel_gvt_clean_vgpu_types(gvt); + idr_destroy(&gvt->vgpu_idr); + kfree(dev_priv->gvt); dev_priv->gvt = NULL; } @@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) gvt_dbg_core("init gvt device\n"); + idr_init(&gvt->vgpu_idr); + mutex_init(&gvt->lock); gvt->dev_priv = dev_priv; @@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) ret = intel_gvt_setup_mmio_info(gvt); if (ret) - return ret; + goto out_clean_idr; ret = intel_gvt_load_firmware(gvt); if (ret) @@ -313,6 +317,8 @@ out_free_firmware: intel_gvt_free_firmware(gvt); out_clean_mmio_info: intel_gvt_clean_mmio_info(gvt); +out_clean_idr: + idr_destroy(&gvt->vgpu_idr); kfree(gvt); return ret; } diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 0af17016f33f..e227caf5859e 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -323,6 +323,7 @@ struct intel_vgpu_creation_params { int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, struct intel_vgpu_creation_params *param); +void intel_vgpu_reset_resource(struct intel_vgpu *vgpu); void intel_vgpu_free_resource(struct intel_vgpu *vgpu); void intel_vgpu_write_fence(struct intel_vgpu *vgpu, u32 fence, u64 value); @@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt); struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu_type *type); void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); +void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, + unsigned int engine_mask); void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); @@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, unsigned long *g_index); +void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, + bool primary); +void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu); + int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); @@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu); int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); int 
intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); -int setup_vgpu_mmio(struct intel_vgpu *vgpu); void populate_pvinfo_page(struct intel_vgpu *vgpu); struct intel_gvt_ops { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 522809710312..ab2ea157da4c 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset, static int new_mmio_info(struct intel_gvt *gvt, u32 offset, u32 flags, u32 size, u32 addr_mask, u32 ro_mask, u32 device, - void *read, void *write) + int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int), + int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int)) { struct intel_gvt_mmio_info *info, *p; u32 start, end, i; @@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, default: /*should not hit here*/ gvt_err("invalid forcewake offset 0x%x\n", offset); - return 1; + return -EINVAL; } } else { ack_reg_offset = FORCEWAKE_ACK_HSW_REG; @@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, return 0; } -static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset, - void *p_data, unsigned int bytes, unsigned long bitmap) -{ - struct intel_gvt_workload_scheduler *scheduler = - &vgpu->gvt->scheduler; - - vgpu->resetting = true; - - intel_vgpu_stop_schedule(vgpu); - /* - * The current_vgpu will set to NULL after stopping the - * scheduler when the reset is triggered by current vgpu. - */ - if (scheduler->current_vgpu == NULL) { - mutex_unlock(&vgpu->gvt->lock); - intel_gvt_wait_vgpu_idle(vgpu); - mutex_lock(&vgpu->gvt->lock); - } - - intel_vgpu_reset_execlist(vgpu, bitmap); - - /* full GPU reset */ - if (bitmap == 0xff) { - mutex_unlock(&vgpu->gvt->lock); - intel_vgpu_clean_gtt(vgpu); - mutex_lock(&vgpu->gvt->lock); - setup_vgpu_mmio(vgpu); - populate_pvinfo_page(vgpu); - intel_vgpu_init_gtt(vgpu); - } - - vgpu->resetting = false; - - return 0; -} - static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, - void *p_data, unsigned int bytes) + void *p_data, unsigned int bytes) { + unsigned int engine_mask = 0; u32 data; - u64 bitmap = 0; write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); if (data & GEN6_GRDOM_FULL) { gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); - bitmap = 0xff; - } - if (data & GEN6_GRDOM_RENDER) { - gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); - bitmap |= (1 << RCS); - } - if (data & GEN6_GRDOM_MEDIA) { - gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); - bitmap |= (1 << VCS); - } - if (data & GEN6_GRDOM_BLT) { - gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); - bitmap |= (1 << BCS); - } - if (data & GEN6_GRDOM_VECS) { - gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); - bitmap |= (1 << VECS); - } - if (data & GEN8_GRDOM_MEDIA2) { - gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); - if (HAS_BSD2(vgpu->gvt->dev_priv)) - bitmap |= (1 << VCS2); + engine_mask = ALL_ENGINES; + } else { + if (data & GEN6_GRDOM_RENDER) { + gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); + engine_mask |= (1 << RCS); + } + if (data & GEN6_GRDOM_MEDIA) { + gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); + engine_mask |= (1 << VCS); + } + if (data & GEN6_GRDOM_BLT) { + gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); + engine_mask |= (1 << BCS); + } + if (data & GEN6_GRDOM_VECS) { + gvt_dbg_mmio("vgpu%d: request VECS 
Reset\n", vgpu->id); + engine_mask |= (1 << VECS); + } + if (data & GEN8_GRDOM_MEDIA2) { + gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); + if (HAS_BSD2(vgpu->gvt->dev_priv)) + engine_mask |= (1 << VCS2); + } } - return handle_device_reset(vgpu, offset, p_data, bytes, bitmap); + + intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); + + return 0; } static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, @@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, +static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; @@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - int rc = 0; unsigned int id = 0; write_vreg(vgpu, offset, p_data, bytes); @@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, id = VECS; break; default: - rc = -EINVAL; - break; + return -EINVAL; } set_bit(id, (void *)vgpu->tlb_handle_pending); - return rc; + return 0; } static int ring_reset_ctl_write(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index faaae07ae487..3f656e3a6e5a 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, return NULL; } -static ssize_t available_instance_show(struct kobject *kobj, struct device *dev, - char *buf) +static ssize_t available_instances_show(struct kobject *kobj, + struct device *dev, char *buf) { struct intel_vgpu_type *type; unsigned int num = 0; @@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev, type->fence); } -static MDEV_TYPE_ATTR_RO(available_instance); +static MDEV_TYPE_ATTR_RO(available_instances); static MDEV_TYPE_ATTR_RO(device_api); static MDEV_TYPE_ATTR_RO(description); static struct attribute *type_attrs[] = { - &mdev_type_attr_available_instance.attr, + &mdev_type_attr_available_instances.attr, &mdev_type_attr_device_api.attr, &mdev_type_attr_description.attr, NULL, @@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) struct intel_vgpu_type *type; struct device *pdev; void *gvt; + int ret; pdev = mdev_parent_dev(mdev); gvt = kdev_to_i915(pdev)->gvt; @@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) if (!type) { gvt_err("failed to find type %s to create\n", kobject_name(kobj)); - return -EINVAL; + ret = -EINVAL; + goto out; } vgpu = intel_gvt_ops->vgpu_create(gvt, type); if (IS_ERR_OR_NULL(vgpu)) { - gvt_err("create intel vgpu failed\n"); - return -EINVAL; + ret = vgpu == NULL ? 
-EFAULT : PTR_ERR(vgpu); + gvt_err("failed to create intel vgpu: %d\n", ret); + goto out; } INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); @@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", dev_name(mdev_dev(mdev))); - return 0; + ret = 0; + +out: + return ret; } static int intel_vgpu_remove(struct mdev_device *mdev) diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 09c9450a1946..4df078bc5d04 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) goto err; - mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); - if (!mmio && !vgpu->mmio.disable_warn_untrack) { - gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n", - vgpu->id, offset, bytes, *(u32 *)p_data); - - if (offset == 0x206c) { - gvt_err("------------------------------------------\n"); - gvt_err("vgpu%d: likely triggers a gfx reset\n", - vgpu->id); - gvt_err("------------------------------------------\n"); - vgpu->mmio.disable_warn_untrack = true; - } - } - if (!intel_gvt_mmio_is_unalign(gvt, offset)) { if (WARN_ON(!IS_ALIGNED(offset, bytes))) goto err; } + mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); if (mmio) { if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) @@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, goto err; } ret = mmio->read(vgpu, offset, p_data, bytes); - } else + } else { ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); + if (!vgpu->mmio.disable_warn_untrack) { + gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", + vgpu->id, offset, bytes, *(u32 *)p_data); + + if (offset == 0x206c) { + gvt_err("------------------------------------------\n"); + gvt_err("vgpu%d: likely triggers a gfx reset\n", + vgpu->id); + gvt_err("------------------------------------------\n"); + vgpu->mmio.disable_warn_untrack = true; + } + } + } + if (ret) goto err; @@ -302,3 +303,56 @@ err: mutex_unlock(&gvt->lock); return ret; } + + +/** + * intel_vgpu_reset_mmio - reset virtual MMIO space + * @vgpu: a vGPU + * + */ +void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu) +{ + struct intel_gvt *gvt = vgpu->gvt; + const struct intel_gvt_device_info *info = &gvt->device_info; + + memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); + memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); + + vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; + + /* set the bit 0:2(Core C-State ) to C0 */ + vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; +} + +/** + * intel_vgpu_init_mmio - init MMIO space + * @vgpu: a vGPU + * + * Returns: + * Zero on success, negative error code if failed + */ +int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + + vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); + if (!vgpu->mmio.vreg) + return -ENOMEM; + + vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; + + intel_vgpu_reset_mmio(vgpu); + + return 0; +} + +/** + * intel_vgpu_clean_mmio - clean MMIO space + * @vgpu: a vGPU + * + */ +void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) +{ + vfree(vgpu->mmio.vreg); + vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; +} diff --git a/drivers/gpu/drm/i915/gvt/mmio.h 
b/drivers/gpu/drm/i915/gvt/mmio.h index 87d5b5e366a3..3bc620f56f35 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, *offset; \ }) +int intel_vgpu_init_mmio(struct intel_vgpu *vgpu); +void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu); +void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu); + int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 81cd921770c6..d9fb41ab7119 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) vgpu->id)) return -EINVAL; - vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC | - GFP_DMA32 | __GFP_ZERO, - INTEL_GVT_OPREGION_PORDER); + vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | + __GFP_ZERO, + get_order(INTEL_GVT_OPREGION_SIZE)); if (!vgpu_opregion(vgpu)->va) return -ENOMEM; @@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { map_vgpu_opregion(vgpu, false); free_pages((unsigned long)vgpu_opregion(vgpu)->va, - INTEL_GVT_OPREGION_PORDER); + get_order(INTEL_GVT_OPREGION_SIZE)); vgpu_opregion(vgpu)->va = NULL; } diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 0dfe789d8f02..fbd023a16f18 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -50,8 +50,7 @@ #define INTEL_GVT_OPREGION_PARM 0x204 #define INTEL_GVT_OPREGION_PAGES 2 -#define INTEL_GVT_OPREGION_PORDER 1 -#define INTEL_GVT_OPREGION_SIZE (2 * 4096) +#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE) #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 4db242250235..e91885dffeff 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload; + struct intel_vgpu *vgpu; int event; mutex_lock(&gvt->lock); workload = scheduler->current_workload[ring_id]; + vgpu = workload->vgpu; - if (!workload->status && !workload->vgpu->resetting) { + if (!workload->status && !vgpu->resetting) { wait_event(workload->shadow_ctx_status_wq, !atomic_read(&workload->shadow_ctx_active)); @@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) for_each_set_bit(event, workload->pending_events, INTEL_GVT_EVENT_MAX) - intel_vgpu_trigger_virtual_event(workload->vgpu, - event); + intel_vgpu_trigger_virtual_event(vgpu, event); } gvt_dbg_sched("ring id %d complete workload %p status %d\n", @@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) scheduler->current_workload[ring_id] = NULL; - atomic_dec(&workload->vgpu->running_workload_num); - list_del_init(&workload->list); workload->complete(workload); + atomic_dec(&vgpu->running_workload_num); wake_up(&scheduler->workload_complete_wq); mutex_unlock(&gvt->lock); } @@ -459,11 +459,11 @@ complete: gvt_dbg_sched("will complete workload %p\n, status: %d\n", workload, 
workload->status); - complete_current_workload(gvt, ring_id); - if (workload->req) i915_gem_request_put(fetch_and_zero(&workload->req)); + complete_current_workload(gvt, ring_id); + if (need_force_wake) intel_uncore_forcewake_put(gvt->dev_priv, FORCEWAKE_ALL); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 3b30c28bff51..2833dfa8c9ae 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -113,7 +113,7 @@ struct intel_shadow_bb_entry { struct drm_i915_gem_object *obj; void *va; unsigned long len; - void *bb_start_cmd_va; + u32 *bb_start_cmd_va; }; #define workload_q_head(vgpu, ring_id) \ diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 536d2b9d5777..7295bc8e12fb 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -35,79 +35,6 @@ #include "gvt.h" #include "i915_pvinfo.h" -static void clean_vgpu_mmio(struct intel_vgpu *vgpu) -{ - vfree(vgpu->mmio.vreg); - vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; -} - -int setup_vgpu_mmio(struct intel_vgpu *vgpu) -{ - struct intel_gvt *gvt = vgpu->gvt; - const struct intel_gvt_device_info *info = &gvt->device_info; - - if (vgpu->mmio.vreg) - memset(vgpu->mmio.vreg, 0, info->mmio_size * 2); - else { - vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); - if (!vgpu->mmio.vreg) - return -ENOMEM; - } - - vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; - - memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); - memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); - - vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; - - /* set the bit 0:2(Core C-State ) to C0 */ - vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; - return 0; -} - -static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu, - struct intel_vgpu_creation_params *param) -{ - struct intel_gvt *gvt = vgpu->gvt; - const struct intel_gvt_device_info *info = &gvt->device_info; - u16 *gmch_ctl; - int i; - - memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, - info->cfg_space_size); - - if (!param->primary) { - vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = - INTEL_GVT_PCI_CLASS_VGA_OTHER; - vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = - INTEL_GVT_PCI_CLASS_VGA_OTHER; - } - - /* Show guest that there isn't any stolen memory.*/ - gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); - *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); - - intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, - gvt_aperture_pa_base(gvt), true); - - vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO - | PCI_COMMAND_MEMORY - | PCI_COMMAND_MASTER); - /* - * Clear the bar upper 32bit and let guest to assign the new value - */ - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); - memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); - - for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { - vgpu->cfg_space.bar[i].size = pci_resource_len( - gvt->dev_priv->drm.pdev, i * 2); - vgpu->cfg_space.bar[i].tracked = false; - } -} - void populate_pvinfo_page(struct intel_vgpu *vgpu) { /* setup the ballooning information */ @@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) if (low_avail / min_low == 0) break; gvt->types[i].low_gm_size = min_low; - gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size; + gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); gvt->types[i].fence = 4; gvt->types[i].max_instance = low_avail / min_low; gvt->types[i].avail_instance = 
gvt->types[i].max_instance; @@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) */ low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - gvt->gm.vgpu_allocated_low_gm_size; - high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE - + high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE - gvt->gm.vgpu_allocated_high_gm_size; fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - gvt->fence.vgpu_allocated_fence_num; @@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_clean_gtt(vgpu); intel_gvt_hypervisor_detach_vgpu(vgpu); intel_vgpu_free_resource(vgpu); - clean_vgpu_mmio(vgpu); + intel_vgpu_clean_mmio(vgpu); vfree(vgpu); intel_gvt_update_vgpu_types(gvt); @@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, vgpu->gvt = gvt; bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); - setup_vgpu_cfg_space(vgpu, param); + intel_vgpu_init_cfg_space(vgpu, param->primary); - ret = setup_vgpu_mmio(vgpu); + ret = intel_vgpu_init_mmio(vgpu); if (ret) - goto out_free_vgpu; + goto out_clean_idr; ret = intel_vgpu_alloc_resource(vgpu, param); if (ret) @@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu: out_clean_vgpu_resource: intel_vgpu_free_resource(vgpu); out_clean_vgpu_mmio: - clean_vgpu_mmio(vgpu); + intel_vgpu_clean_mmio(vgpu); +out_clean_idr: + idr_remove(&gvt->vgpu_idr, vgpu->id); out_free_vgpu: vfree(vgpu); mutex_unlock(&gvt->lock); @@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, } /** - * intel_gvt_reset_vgpu - reset a virtual GPU + * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset + * @vgpu: virtual GPU + * @dmlr: vGPU Device Model Level Reset or GT Reset + * @engine_mask: engines to reset for GT reset + * + * This function is called when user wants to reset a virtual GPU through + * device model reset or GT reset. The caller should hold the gvt lock. + * + * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset + * the whole vGPU to default state as when it is created. This vGPU function + * is required both for functionality and security concerns. The ultimate goal + * of vGPU FLR is to allow a vGPU instance to be reused by virtual machines. + * When we assign a vGPU to a virtual machine we must issue such a reset first. + * + * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines + * (Render, Blitter, Video, Video Enhancement). They are defined by the GPU Spec. + * Unlike the FLR, GT reset only resets particular resources of a vGPU per + * the reset request. Guest driver can issue a GT reset by programming the + * virtual GDRST register to reset a specific virtual GPU engine or all + * engines. + * + * The parameter dmlr is to identify if we will do DMLR or GT reset. + * The parameter engine_mask is to specify the engines that need to be + * reset. If the value ALL_ENGINES is given for engine_mask, it means + * the caller requests a full GT reset in which we will reset all virtual + * GPU engines. For FLR, engine_mask is ignored.
+ */ +void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, + unsigned int engine_mask) +{ + struct intel_gvt *gvt = vgpu->gvt; + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; + + gvt_dbg_core("------------------------------------------\n"); + gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n", + vgpu->id, dmlr, engine_mask); + vgpu->resetting = true; + + intel_vgpu_stop_schedule(vgpu); + /* + * The current_vgpu will be set to NULL after stopping the + * scheduler when the reset is triggered by the current vgpu. + */ + if (scheduler->current_vgpu == NULL) { + mutex_unlock(&gvt->lock); + intel_gvt_wait_vgpu_idle(vgpu); + mutex_lock(&gvt->lock); + } + + intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask); + + /* full GPU reset or device model level reset */ + if (engine_mask == ALL_ENGINES || dmlr) { + intel_vgpu_reset_gtt(vgpu, dmlr); + intel_vgpu_reset_resource(vgpu); + intel_vgpu_reset_mmio(vgpu); + populate_pvinfo_page(vgpu); + + if (dmlr) + intel_vgpu_reset_cfg_space(vgpu); + } + + vgpu->resetting = false; + gvt_dbg_core("reset vgpu%d done\n", vgpu->id); + gvt_dbg_core("------------------------------------------\n"); +} + +/** + * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level) * @vgpu: virtual GPU * * This function is called when user wants to reset a virtual GPU. @@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, */ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) { + mutex_lock(&vgpu->gvt->lock); + intel_gvt_reset_vgpu_locked(vgpu, true, 0); + mutex_unlock(&vgpu->gvt->lock); } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 445fec9c2841..b2c4a0b8a627 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev) assert_forcewakes_inactive(dev_priv); - if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_poll_init(dev_priv); DRM_DEBUG_KMS("Device suspended\n"); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 243224aeabf8..69bc3b0c4390 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1977,6 +1977,11 @@ struct drm_i915_private { struct i915_frontbuffer_tracking fb_tracking; + struct intel_atomic_helper { + struct llist_head free_list; + struct work_struct free_work; + } atomic_helper; + u16 orig_clock; bool mchbar_need_disable; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3dd7fc662859..4b23a7814713 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file) { - struct drm_device *dev = obj->base.dev; void *vaddr = obj->phys_handle->vaddr + args->offset; char __user *user_data = u64_to_user_ptr(args->data_ptr); - int ret; /* We manually control the domain here and pretend that it * remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/ - lockdep_assert_held(&obj->base.dev->struct_mutex); - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | - I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); - if (ret) - return ret; - intel_fb_obj_invalidate(obj, ORIGIN_CPU); - if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { - unsigned long unwritten; - - /* The physical object once assigned is fixed for the lifetime - * of the obj, so we can safely drop the lock and continue - * to access vaddr. - */ - mutex_unlock(&dev->struct_mutex); - unwritten = copy_from_user(vaddr, user_data, args->size); - mutex_lock(&dev->struct_mutex); - if (unwritten) { - ret = -EFAULT; - goto out; - } - } + if (copy_from_user(vaddr, user_data, args->size)) + return -EFAULT; drm_clflush_virt_range(vaddr, args->size); - i915_gem_chipset_flush(to_i915(dev)); + i915_gem_chipset_flush(to_i915(obj->base.dev)); -out: intel_fb_obj_flush(obj, false, ORIGIN_CPU); - return ret; + return 0; } void *i915_gem_object_alloc(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index bd08814b015c..d534a316a16e 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -199,6 +199,7 @@ found: } /* Unbinding will emit any required flushes */ + ret = 0; while (!list_empty(&eviction_list)) { vma = list_first_entry(&eviction_list, struct i915_vma, diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index a792dcb902b5..e924a9516079 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, return ret; } + trace_i915_vma_bind(vma, bind_flags); ret = vma->vm->bind_vma(vma, cache_level, bind_flags); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 86ecec5601d4..588470eb8d39 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); struct edid *edid; struct i2c_adapter *i2c; + bool ret = false; BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); @@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) */ if (!is_digital) { DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); - return true; + ret = true; + } else { + DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } - - DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } else { DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); } kfree(edid); - return false; + return ret; } static enum drm_connector_status diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3dc8724df400..77f7b1d849a4 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2585,8 +2585,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, * We only keep the x/y offsets, so push all of the * gtt offset into the x/y offsets. 
*/ - _intel_adjust_tile_offset(&x, &y, tile_size, - tile_width, tile_height, pitch_tiles, + _intel_adjust_tile_offset(&x, &y, + tile_width, tile_height, + tile_size, pitch_tiles, gtt_offset_rotated * tile_size, 0); gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; @@ -2967,6 +2968,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) unsigned int rotation = plane_state->base.rotation; int ret; + if (!plane_state->base.visible) + return 0; + /* Rotate src coordinates to match rotated GTT view */ if (drm_rotation_90_or_270(rotation)) drm_rect_rotate(&plane_state->base.src, @@ -6846,6 +6850,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) } state = drm_atomic_state_alloc(crtc->dev); + if (!state) { + DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", + crtc->base.id, crtc->name); + return; + } + state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; /* Everything's already locked, -EDEADLK can't happen. */ @@ -11243,6 +11253,7 @@ found: } old->restore_state = restore_state; + drm_atomic_state_put(state); /* let the connector get through one full cycle before testing */ intel_wait_for_vblank(dev_priv, intel_crtc->pipe); @@ -14512,8 +14523,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence, break; case FENCE_FREE: - drm_atomic_state_put(&state->base); - break; + { + struct intel_atomic_helper *helper = + &to_i915(state->base.dev)->atomic_helper; + + if (llist_add(&state->freed, &helper->free_list)) + schedule_work(&helper->free_work); + break; + } } return NOTIFY_DONE; @@ -16392,6 +16409,18 @@ fail: drm_modeset_acquire_fini(&ctx); } +static void intel_atomic_helper_free_state(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), atomic_helper.free_work); + struct intel_atomic_state *state, *next; + struct llist_node *freed; + + freed = llist_del_all(&dev_priv->atomic_helper.free_list); + llist_for_each_entry_safe(state, next, freed, freed) + drm_atomic_state_put(&state->base); +} + int intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -16411,6 +16440,9 @@ int intel_modeset_init(struct drm_device *dev) dev->mode_config.funcs = &intel_mode_funcs; + INIT_WORK(&dev_priv->atomic_helper.free_work, + intel_atomic_helper_free_state); + intel_init_quirks(dev); intel_init_pm(dev_priv); @@ -17024,7 +17056,8 @@ void intel_display_resume(struct drm_device *dev) if (ret) DRM_ERROR("Restoring old state failed with %i\n", ret); - drm_atomic_state_put(state); + if (state) + drm_atomic_state_put(state); } void intel_modeset_gem_init(struct drm_device *dev) @@ -17094,6 +17127,9 @@ void intel_modeset_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); + flush_work(&dev_priv->atomic_helper.free_work); + WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); + intel_disable_gt_powersave(dev_priv); /* diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cd132c216a67..cd72ae171eeb 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -370,6 +370,8 @@ struct intel_atomic_state { struct skl_wm_values wm_results; struct i915_sw_fence commit_ready; + + struct llist_node freed; }; struct intel_plane_state { diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index beb08982dc0b..8cf2d80f2254 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -742,6 +742,9 @@ void 
intel_fbdev_initial_config_async(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + if (!ifbdev) + return; + ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index d4961fa20c73..beabc17e7c8a 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, uint32_t *batch, uint32_t index) { - struct drm_i915_private *dev_priv = engine->i915; uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); - /* - * WaDisableLSQCROPERFforOCL:kbl - * This WA is implemented in skl_init_clock_gating() but since - * this batch updates GEN8_L3SQCREG4 with default value we need to - * set this bit here to retain the WA during flush. - */ - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) - l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; - wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT)); wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index aeb637dc1fdf..91cb4c422ad5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FENCE_DEST_SLM_DISABLE); - /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes - * involving this register should also be added to WA batch as required. - */ - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) - /* WaDisableLSQCROPERFforOCL:kbl */ - I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | - GEN8_LQSC_RO_PERF_DIS); - /* WaToEnableHwFixForPushConstHWBug:kbl */ if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index 4942ca090b46..7890e30eb584 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -51,6 +51,9 @@ static int meson_plane_atomic_check(struct drm_plane *plane, struct drm_crtc_state *crtc_state; struct drm_rect clip = { 0, }; + if (!state->crtc) + return 0; + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c index d836b2274531..f7c870172220 100644 --- a/drivers/gpu/drm/meson/meson_venc.c +++ b/drivers/gpu/drm/meson/meson_venc.c @@ -38,6 +38,11 @@ * - TV Panel encoding via ENCT */ +/* HHI Registers */ +#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */ +#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ +#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */ + struct meson_cvbs_enci_mode meson_cvbs_enci_pal = { .mode_tag = MESON_VENC_MODE_CVBS_PAL, .hso_begin = 3, @@ -242,6 +247,20 @@ void meson_venc_disable_vsync(struct meson_drm *priv) void meson_venc_init(struct meson_drm *priv) { + /* Disable CVBS VDAC */ + regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0); + regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8); + + /* Power Down Dacs */ + writel_relaxed(0xff, priv->io_base + _REG(VENC_VDAC_SETTING)); + + /* Disable HDMI PHY */ + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0); + + /* Disable HDMI */ + writel_bits_relaxed(0x3, 0, + priv->io_base + _REG(VPU_HDMI_SETTING)); + /* Disable 
all encoders */ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN)); writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN)); diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c index c809c085fd78..a2bcc70a03ef 100644 --- a/drivers/gpu/drm/meson/meson_venc_cvbs.c +++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c @@ -167,7 +167,7 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder) /* Disable CVBS VDAC */ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0); - regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0); + regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8); } static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index a18126150e11..686a580c711a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -213,7 +213,14 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, void adreno_flush(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - uint32_t wptr = get_wptr(gpu->rb); + uint32_t wptr; + + /* + * Mask wptr value that we calculate to fit in the HW range. This is + * to account for the possibility that the last command fit exactly into + * the ringbuffer and rb->next hasn't wrapped to zero yet + */ + wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1); /* ensure writes to ringbuffer have hit system memory: */ mb(); @@ -338,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, { struct adreno_platform_config *config = pdev->dev.platform_data; struct msm_gpu *gpu = &adreno_gpu->base; - struct msm_mmu *mmu; int ret; adreno_gpu->funcs = funcs; @@ -378,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return ret; } - mmu = gpu->aspace->mmu; - if (mmu) { + if (gpu->aspace && gpu->aspace->mmu) { + struct msm_mmu *mmu = gpu->aspace->mmu; ret = mmu->funcs->attach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); if (ret) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 5f6cd8745dbc..c396d459a9d0 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) { - int i; struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct drm_plane *plane; - struct drm_plane_state *plane_state; - - for_each_plane_in_state(state, plane, plane_state, i) - mdp5_plane_complete_commit(plane, plane_state); if (mdp5_kms->smp) mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 17b0cc101171..cdfc63d90c7b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -104,8 +104,6 @@ struct mdp5_plane_state { /* assigned by crtc blender */ enum mdp_mixer_stage_id stage; - - bool pending : 1; }; #define to_mdp5_plane_state(x) \ container_of(x, struct mdp5_plane_state, base) @@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); uint32_t mdp5_plane_get_flush(struct drm_plane *plane); -void mdp5_plane_complete_commit(struct drm_plane *plane, - struct drm_plane_state *state); enum mdp5_pipe mdp5_plane_pipe(struct 
drm_plane *plane); struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index c099da7bc212..25d9d0a97156 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p, drm_printf(p, "\tzpos=%u\n", pstate->zpos); drm_printf(p, "\talpha=%u\n", pstate->alpha); drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); - drm_printf(p, "\tpending=%u\n", pstate->pending); } static void mdp5_plane_reset(struct drm_plane *plane) @@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane) if (mdp5_state && mdp5_state->base.fb) drm_framebuffer_reference(mdp5_state->base.fb); - mdp5_state->pending = false; - return &mdp5_state->base; } @@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, DBG("%s: check (%d -> %d)", plane->name, plane_enabled(old_state), plane_enabled(state)); - /* We don't allow faster-than-vblank updates.. if we did add this - * some day, we would need to disallow in cases where hwpipe - * changes - */ - if (WARN_ON(to_mdp5_plane_state(old_state)->pending)) - return -EBUSY; - max_width = config->hw->lm.max_width << 16; max_height = config->hw->lm.max_height << 16; @@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { struct drm_plane_state *state = plane->state; - struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); DBG("%s: update", plane->name); - mdp5_state->pending = true; - if (plane_enabled(state)) { int ret; @@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane) return pstate->hwpipe->flush_mask; } -/* called after vsync in thread context */ -void mdp5_plane_complete_commit(struct drm_plane *plane, - struct drm_plane_state *state) -{ - struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); - - pstate->pending = false; -} - /* initialize plane */ struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) { diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index d8bc59c7e261..8098677a3916 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj) WARN_ON(!mutex_is_locked(&dev->struct_mutex)); for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { + if (!priv->aspace[id]) + continue; msm_gem_unmap_vma(priv->aspace[id], &msm_obj->domain[id], msm_obj->sgt); } diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 166e84e4f0d4..489676568a10 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -106,7 +106,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, pagefault_disable(); } - if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) { + if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) || + !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) { DRM_ERROR("invalid flags: %x\n", submit_bo.flags); ret = -EINVAL; goto out_unlock; @@ -290,7 +291,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob { uint32_t i, last_offset = 0; uint32_t *ptr; - int ret; + int ret = 0; if (offset % 4) { DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset); @@ -318,12 +319,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob ret = copy_from_user(&submit_reloc, userptr, 
sizeof(submit_reloc)); if (ret) - return -EFAULT; + goto out; if (submit_reloc.submit_offset % 4) { DRM_ERROR("non-aligned reloc offset: %u\n", submit_reloc.submit_offset); - return -EINVAL; + ret = -EINVAL; + goto out; } /* offset in dwords: */ @@ -332,12 +334,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob if ((off >= (obj->base.size / 4)) || (off < last_offset)) { DRM_ERROR("invalid offset %u at reloc %u\n", off, i); - return -EINVAL; + ret = -EINVAL; + goto out; } ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid); if (ret) - return ret; + goto out; if (valid) continue; @@ -354,9 +357,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob last_offset = off; } +out: msm_gem_put_vaddr_locked(&obj->base); - return 0; + return ret; } static void submit_cleanup(struct msm_gem_submit *submit) diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index f326cf6a32e6..67b34e069abf 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) struct msm_ringbuffer *ring; int ret; - size = ALIGN(size, 4); /* size should be dword aligned */ + if (WARN_ON(!is_power_of_2(size))) + return ERR_PTR(-EINVAL); ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) { diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index cef08da1da4e..6a157763dfc3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev) return ret; /* enable polling for external displays */ - drm_kms_helper_poll_enable(dev); + if (!dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); /* enable hotplug interrupts */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 59348fc41c77..bc85a45f91cd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev) pci_set_master(pdev); ret = nouveau_do_resume(drm_dev, true); - drm_kms_helper_poll_enable(drm_dev); + + if (!drm_dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(drm_dev); + /* do magic */ nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 8d5ed5bfdacb..42c1fa53d431 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -165,6 +165,8 @@ struct nouveau_drm { struct backlight_device *backlight; struct list_head bl_connectors; struct work_struct hpd_work; + struct work_struct fbcon_work; + int fbcon_new_state; #ifdef CONFIG_ACPI struct notifier_block acpi_nb; #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 2f2a3dcd4ad7..fa2d0a978ccc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { .fb_probe = nouveau_fbcon_create, }; +static void +nouveau_fbcon_set_suspend_work(struct work_struct *work) +{ + struct nouveau_drm *drm = container_of(work, typeof(*drm), 
fbcon_work); + int state = READ_ONCE(drm->fbcon_new_state); + + if (state == FBINFO_STATE_RUNNING) + pm_runtime_get_sync(drm->dev->dev); + + console_lock(); + if (state == FBINFO_STATE_RUNNING) + nouveau_fbcon_accel_restore(drm->dev); + drm_fb_helper_set_suspend(&drm->fbcon->helper, state); + if (state != FBINFO_STATE_RUNNING) + nouveau_fbcon_accel_save_disable(drm->dev); + console_unlock(); + + if (state == FBINFO_STATE_RUNNING) { + pm_runtime_mark_last_busy(drm->dev->dev); + pm_runtime_put_sync(drm->dev->dev); + } +} + void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) { struct nouveau_drm *drm = nouveau_drm(dev); - if (drm->fbcon) { - console_lock(); - if (state == FBINFO_STATE_RUNNING) - nouveau_fbcon_accel_restore(dev); - drm_fb_helper_set_suspend(&drm->fbcon->helper, state); - if (state != FBINFO_STATE_RUNNING) - nouveau_fbcon_accel_save_disable(dev); - console_unlock(); - } + + if (!drm->fbcon) + return; + + drm->fbcon_new_state = state; + /* Since runtime resume can happen as a result of a sysfs operation, + * it's possible we already have the console locked. So handle fbcon + * init/deinit from a separate work thread + */ + schedule_work(&drm->fbcon_work); } int @@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev) return -ENOMEM; drm->fbcon = fbcon; + INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 00ea0002b539..e0c143b865f3 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -366,11 +366,10 @@ static void radeon_pci_shutdown(struct pci_dev *pdev) { /* if we are running in a VM, make sure the device - * torn down properly on reboot/shutdown. - * unfortunately we can't detect certain - * hypervisors so just do this all the time.
+ * torn down properly on reboot/shutdown */ - radeon_pci_remove(pdev); + if (radeon_device_is_virtual()) + radeon_pci_remove(pdev); } static int radeon_pmops_suspend(struct device *dev) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ad4d7b8b8322..414776811e71 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -50,7 +50,6 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin"); MODULE_FIRMWARE("radeon/tahiti_mc.bin"); MODULE_FIRMWARE("radeon/tahiti_rlc.bin"); MODULE_FIRMWARE("radeon/tahiti_smc.bin"); -MODULE_FIRMWARE("radeon/tahiti_k_smc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); @@ -115,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin"); MODULE_FIRMWARE("radeon/hainan_rlc.bin"); MODULE_FIRMWARE("radeon/hainan_smc.bin"); MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); +MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); + +MODULE_FIRMWARE("radeon/si58_mc.bin"); static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); static void si_pcie_gen3_enable(struct radeon_device *rdev); @@ -1651,15 +1653,14 @@ static int si_init_microcode(struct radeon_device *rdev) int err; int new_fw = 0; bool new_smc = false; + bool si58_fw = false; + bool banks2_fw = false; DRM_DEBUG("\n"); switch (rdev->family) { case CHIP_TAHITI: chip_name = "TAHITI"; - /* XXX: figure out which Tahitis need the new ucode */ - if (0) - new_smc = true; new_chip_name = "tahiti"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1671,12 +1672,9 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_PITCAIRN: chip_name = "PITCAIRN"; - if ((rdev->pdev->revision == 0x81) || - (rdev->pdev->device == 0x6810) || - (rdev->pdev->device == 0x6811) || - (rdev->pdev->device == 0x6816) || - (rdev->pdev->device == 0x6817) || - (rdev->pdev->device == 0x6806)) + if ((rdev->pdev->revision == 0x81) && + ((rdev->pdev->device == 0x6810) || + (rdev->pdev->device == 0x6811))) new_smc = true; new_chip_name = "pitcairn"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; @@ -1689,15 +1687,15 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_VERDE: chip_name = "VERDE"; - if ((rdev->pdev->revision == 0x81) || - (rdev->pdev->revision == 0x83) || - (rdev->pdev->revision == 0x87) || - (rdev->pdev->device == 0x6820) || - (rdev->pdev->device == 0x6821) || - (rdev->pdev->device == 0x6822) || - (rdev->pdev->device == 0x6823) || - (rdev->pdev->device == 0x682A) || - (rdev->pdev->device == 0x682B)) + if (((rdev->pdev->device == 0x6820) && + ((rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83))) || + ((rdev->pdev->device == 0x6821) && + ((rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0x87))) || + ((rdev->pdev->revision == 0x87) && + ((rdev->pdev->device == 0x6823) || + (rdev->pdev->device == 0x682b)))) new_smc = true; new_chip_name = "verde"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; @@ -1710,13 +1708,13 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_OLAND: chip_name = "OLAND"; - if ((rdev->pdev->revision == 0xC7) || - (rdev->pdev->revision == 0x80) || - (rdev->pdev->revision == 0x81) || - (rdev->pdev->revision == 0x83) || - (rdev->pdev->revision == 0x87) || - (rdev->pdev->device == 0x6604) || - (rdev->pdev->device == 0x6605)) + if (((rdev->pdev->revision == 0x81) && + ((rdev->pdev->device == 0x6600) || + (rdev->pdev->device == 0x6604) || + (rdev->pdev->device == 0x6605) || + (rdev->pdev->device == 0x6610))) || + ((rdev->pdev->revision == 
0x83) && + (rdev->pdev->device == 0x6610))) new_smc = true; new_chip_name = "oland"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; @@ -1728,13 +1726,17 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_HAINAN: chip_name = "HAINAN"; - if ((rdev->pdev->revision == 0x81) || - (rdev->pdev->revision == 0x83) || - (rdev->pdev->revision == 0xC3) || - (rdev->pdev->device == 0x6664) || - (rdev->pdev->device == 0x6665) || - (rdev->pdev->device == 0x6667)) + if (((rdev->pdev->revision == 0x81) && + (rdev->pdev->device == 0x6660)) || + ((rdev->pdev->revision == 0x83) && + ((rdev->pdev->device == 0x6660) || + (rdev->pdev->device == 0x6663) || + (rdev->pdev->device == 0x6665) || + (rdev->pdev->device == 0x6667)))) new_smc = true; + else if ((rdev->pdev->revision == 0xc3) && + (rdev->pdev->device == 0x6665)) + banks2_fw = true; new_chip_name = "hainan"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1746,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev) default: BUG(); } + /* this memory configuration requires special firmware */ + if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) + si58_fw = true; + DRM_INFO("Loading %s Microcode\n", new_chip_name); snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); @@ -1849,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev) } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); + if (si58_fw) + snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); if (err) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); @@ -1880,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev) } } - if (new_smc) + if (banks2_fw) + snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin"); + else if (new_smc) snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); else snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 8b5e697f2549..2944916f7102 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -3008,30 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, (rdev->pdev->device == 0x6817) || (rdev->pdev->device == 0x6806)) max_mclk = 120000; - } else if (rdev->family == CHIP_VERDE) { - if ((rdev->pdev->revision == 0x81) || - (rdev->pdev->revision == 0x83) || - (rdev->pdev->revision == 0x87) || - (rdev->pdev->device == 0x6820) || - (rdev->pdev->device == 0x6821) || - (rdev->pdev->device == 0x6822) || - (rdev->pdev->device == 0x6823) || - (rdev->pdev->device == 0x682A) || - (rdev->pdev->device == 0x682B)) { - max_sclk = 75000; - max_mclk = 80000; - } - } else if (rdev->family == CHIP_OLAND) { - if ((rdev->pdev->revision == 0xC7) || - (rdev->pdev->revision == 0x80) || - (rdev->pdev->revision == 0x81) || - (rdev->pdev->revision == 0x83) || - (rdev->pdev->revision == 0x87) || - (rdev->pdev->device == 0x6604) || - (rdev->pdev->device == 0x6605)) { - max_sclk = 75000; - max_mclk = 80000; - } } else if (rdev->family == CHIP_HAINAN) { if ((rdev->pdev->revision == 0x81) || (rdev->pdev->revision == 0x83) || @@ -3040,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, (rdev->pdev->device == 0x6665) || (rdev->pdev->device == 0x6667)) { max_sclk = 75000; - max_mclk = 80000; } } /* 
Apply dpm quirks */ diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 725dffad5640..6dfdb145f3bb 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -856,7 +856,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc) struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); struct drm_device *dev = crtc->dev; struct tilcdc_drm_private *priv = dev->dev_private; - uint32_t stat; + uint32_t stat, reg; stat = tilcdc_read_irqstatus(dev); tilcdc_clear_irqstatus(dev, stat); @@ -921,17 +921,26 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc) dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost", __func__, stat); tilcdc_crtc->frame_intact = false; - if (tilcdc_crtc->sync_lost_count++ > - SYNC_LOST_COUNT_LIMIT) { - dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, recovering", __func__, stat); - queue_work(system_wq, &tilcdc_crtc->recover_work); - if (priv->rev == 1) + if (priv->rev == 1) { + reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG); + if (reg & LCDC_RASTER_ENABLE) { tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, - LCDC_V1_SYNC_LOST_INT_ENA); - else + LCDC_RASTER_ENABLE); + tilcdc_set(dev, LCDC_RASTER_CTRL_REG, + LCDC_RASTER_ENABLE); + } + } else { + if (tilcdc_crtc->sync_lost_count++ > + SYNC_LOST_COUNT_LIMIT) { + dev_err(dev->dev, + "%s(0x%08x): Sync lost flood detected, recovering", + __func__, stat); + queue_work(system_wq, + &tilcdc_crtc->recover_work); tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_SYNC_LOST); - tilcdc_crtc->sync_lost_count = 0; + tilcdc_crtc->sync_lost_count = 0; + } } } diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index a0fd3e66bc4b..7aadce1f7e7a 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc, } - __drm_atomic_helper_crtc_destroy_state(state); + drm_atomic_helper_crtc_destroy_state(crtc, state); } static const struct drm_crtc_funcs vc4_crtc_funcs = { diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index db920771bfb5..ab3016982466 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) args->shader_rec_count); struct vc4_bo *bo; - if (uniforms_offset < shader_rec_offset || + if (shader_rec_offset < args->bin_cl_size || + uniforms_offset < shader_rec_offset || exec_size < uniforms_offset || args->shader_rec_count >= (UINT_MAX / sizeof(struct vc4_shader_state)) || temp_size < exec_size) { DRM_ERROR("overflow in exec arguments\n"); + ret = -EINVAL; goto fail; } diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c index 08886a309757..5cdd003605f5 100644 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c @@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, } ret = vc4_full_res_bounds_check(exec, *obj, surf); - if (!ret) + if (ret) return ret; return 0; diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c index dd21f950e129..cde9f3758106 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fb.c +++ b/drivers/gpu/drm/virtio/virtgpu_fb.c @@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper, info->fbops = &virtio_gpufb_ops; info->pixmap.flags = FB_PIXMAP_SYSTEM; - info->screen_base = obj->vmap; + info->screen_buffer = obj->vmap; info->screen_size = obj->gem_base.size; 
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(info, &vfbdev->helper, diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index cff060b56da9..ea36b557d5ee 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2496,6 +2496,7 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) }, { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) }, { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) }, { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, #if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB) diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c index 717704e9ae07..c0303f61c26a 100644 --- a/drivers/hid/hid-corsair.c +++ b/drivers/hid/hid-corsair.c @@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev) struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); int brightness; - char data[8]; + char *data; + + data = kmalloc(8, GFP_KERNEL); + if (!data) + return -ENOMEM; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 8, USB_CTRL_SET_TIMEOUT); - if (ret < 0) { + if (ret < 5) { dev_warn(dev, "Failed to get K90 initial state (error %d).\n", ret); - return -EIO; + ret = -EIO; + goto out; } brightness = data[4]; if (brightness < 0 || brightness > 3) { dev_warn(dev, "Read invalid backlight brightness: %02hhx.\n", data[4]); - return -EIO; + ret = -EIO; + goto out; } - return brightness; + ret = brightness; +out: + kfree(data); + + return ret; } static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev) @@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev, struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); const char *macro_mode; - char data[8]; + char *data; + + data = kmalloc(2, GFP_KERNEL); + if (!data) + return -ENOMEM; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_GET_MODE, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 2, USB_CTRL_SET_TIMEOUT); - if (ret < 0) { + if (ret < 1) { dev_warn(dev, "Failed to get K90 initial mode (error %d).\n", ret); - return -EIO; + ret = -EIO; + goto out; } switch (data[0]) { @@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev, default: dev_warn(dev, "K90 in unknown mode: %02hhx.\n", data[0]); - return -EIO; + ret = -EIO; + goto out; } - return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode); + ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode); +out: + kfree(data); + + return ret; } static ssize_t k90_store_macro_mode(struct device *dev, @@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev, struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); int current_profile; - char data[8]; + char *data; + + data = kmalloc(8, GFP_KERNEL); + if (!data) + return -ENOMEM; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 8, USB_CTRL_SET_TIMEOUT); - if (ret < 0) { + if (ret < 8) { dev_warn(dev, "Failed to get K90 initial state (error %d).\n", ret); - return -EIO; + ret = -EIO; 
+ goto out; } current_profile = data[7]; if (current_profile < 1 || current_profile > 3) { dev_warn(dev, "Read invalid current profile: %02hhx.\n", data[7]); - return -EIO; + ret = -EIO; + goto out; } - return snprintf(buf, PAGE_SIZE, "%d\n", current_profile); + ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile); +out: + kfree(data); + + return ret; } static ssize_t k90_store_current_profile(struct device *dev, diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c index 1b764d1745f3..1689568b597d 100644 --- a/drivers/hid/hid-cypress.c +++ b/drivers/hid/hid-cypress.c @@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX)) return rdesc; + if (*rsize < 4) + return rdesc; + for (i = 0; i < *rsize - 4; i++) if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) { rdesc[i] = 0x19; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 54bd22dc1411..f46f2c5117fa 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -816,6 +816,9 @@ #define USB_VENDOR_ID_PETALYNX 0x18b1 #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037 +#define USB_VENDOR_ID_PETZL 0x2122 +#define USB_DEVICE_ID_PETZL_HEADLAMP 0x1234 + #define USB_VENDOR_ID_PHILIPS 0x0471 #define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617 diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 78fb32a7b103..ea3c3546cef7 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -426,6 +426,15 @@ static int i2c_hid_hwreset(struct i2c_client *client) if (ret) goto out_unlock; + /* + * The HID over I2C specification states that if a DEVICE needs time + * after the PWR_ON request, it should utilise CLOCK stretching. + * However, it has been observed that the Windows driver provides a + * 1ms sleep between the PWR_ON and RESET requests and that some devices + * rely on this. + */ + usleep_range(1000, 5000); + i2c_hid_dbg(ihid, "resetting...\n"); ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index b9779bcbd140..8aeca038cc73 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -740,6 +740,11 @@ static int wacom_add_shared_data(struct hid_device *hdev) return retval; } + if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) + wacom_wac->shared->touch = hdev; + else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN) + wacom_wac->shared->pen = hdev; + out: mutex_unlock(&wacom_udev_list_lock); return retval; @@ -2036,10 +2041,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) if (error) goto fail; - error = wacom_add_shared_data(hdev); - if (error) - goto fail; - /* * Bamboo Pad has a generic hid handling for the Pen, and we switch it * into debug mode for the touch part. */ @@ -2080,10 +2081,9 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) wacom_update_name(wacom, wireless ? 
" (WL)" : ""); - if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) - wacom_wac->shared->touch = hdev; - else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN) - wacom_wac->shared->pen = hdev; + error = wacom_add_shared_data(hdev); + if (error) + goto fail; if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) && (features->quirks & WACOM_QUIRK_BATTERY)) { diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index b1a9a3ca6d56..0884dc9554fd 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2187,6 +2187,16 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report) wacom_report_events(hdev, report); + /* + * Non-input reports may be sent prior to the device being + * completely initialized. Since only their events need + * to be processed, exit after 'wacom_report_events' has + * been called to prevent potential crashes in the report- + * processing functions. + */ + if (report->type != HID_INPUT_REPORT) + return; + if (WACOM_PAD_FIELD(field)) { wacom_wac_pad_battery_report(hdev, report); if (wacom->wacom_wac.pad_input) diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index c2268cdf38e8..e34d82e79b98 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -585,10 +585,29 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, u8 command, int size, union i2c_smbus_data *data) { struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap); + unsigned short piix4_smba = adapdata->smba; + int retries = MAX_TIMEOUT; + int smbslvcnt; u8 smba_en_lo; u8 port; int retval; + /* Request the SMBUS semaphore, avoid conflicts with the IMC */ + smbslvcnt = inb_p(SMBSLVCNT); + do { + outb_p(smbslvcnt | 0x10, SMBSLVCNT); + + /* Check the semaphore status */ + smbslvcnt = inb_p(SMBSLVCNT); + if (smbslvcnt & 0x10) + break; + + usleep_range(1000, 2000); + } while (--retries); + /* SMBus is still owned by the IMC, we give up */ + if (!retries) + return -EBUSY; + mutex_lock(&piix4_mutex_sb800); outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); @@ -606,6 +625,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, mutex_unlock(&piix4_mutex_sb800); + /* Release the semaphore */ + outb_p(smbslvcnt | 0x20, SMBSLVCNT); + return retval; } diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index cf9e396d7702..583e95042a21 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -931,7 +931,10 @@ static int i2c_device_probe(struct device *dev) if (!client->irq) { int irq = -ENOENT; - if (dev->of_node) { + if (client->flags & I2C_CLIENT_HOST_NOTIFY) { + dev_dbg(dev, "Using Host Notify IRQ\n"); + irq = i2c_smbus_host_notify_to_irq(client); + } else if (dev->of_node) { irq = of_irq_get_byname(dev->of_node, "irq"); if (irq == -EINVAL || irq == -ENODATA) irq = of_irq_get(dev->of_node, 0); @@ -940,14 +943,7 @@ static int i2c_device_probe(struct device *dev) } if (irq == -EPROBE_DEFER) return irq; - /* - * ACPI and OF did not find any useful IRQ, try to see - * if Host Notify can be used. 
- */ - if (irq < 0) { - dev_dbg(dev, "Using Host Notify IRQ\n"); - irq = i2c_smbus_host_notify_to_irq(client); - } + if (irq < 0) irq = 0; @@ -1708,7 +1704,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, if (i2c_check_addr_validity(addr, info.flags)) { dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n", - info.addr, node->full_name); + addr, node->full_name); return ERR_PTR(-EINVAL); } @@ -1716,6 +1712,9 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, info.of_node = of_node_get(node); info.archdata = &dev_ad; + if (of_property_read_bool(node, "host-notify")) + info.flags |= I2C_CLIENT_HOST_NOTIFY; + if (of_get_property(node, "wakeup-source", NULL)) info.flags |= I2C_CLIENT_WAKE; @@ -3633,7 +3632,7 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) int ret; if (!client || !slave_cb) { - WARN(1, "insufficent data\n"); + WARN(1, "insufficient data\n"); return -EINVAL; } diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 66f323fd3982..6f638bbc922d 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -331,7 +331,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client, unsigned long arg) { struct i2c_smbus_ioctl_data data_arg; - union i2c_smbus_data temp; + union i2c_smbus_data temp = {}; int datasize, res; if (copy_from_user(&data_arg, diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index e7dcfac877ca..3e70a9c5d79d 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2811,7 +2811,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, if (!src_addr || !src_addr->sa_family) { src_addr = (struct sockaddr *) &id->route.addr.src_addr; src_addr->sa_family = dst_addr->sa_family; - if (dst_addr->sa_family == AF_INET6) { + if (IS_ENABLED(CONFIG_IPV6) && + dst_addr->sa_family == AF_INET6) { struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 1e62a5f0cb28..4609b921f899 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); if (access & IB_ACCESS_ON_DEMAND) { + put_pid(umem->pid); ret = ib_umem_odp_get(context, umem); if (ret) { kfree(umem); @@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, page_list = (struct page **) __get_free_page(GFP_KERNEL); if (!page_list) { + put_pid(umem->pid); kfree(umem); return ERR_PTR(-ENOMEM); } diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 9d5fe1853da4..6262dc035f3c 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev, memset(props, 0, sizeof(struct ib_port_attr)); props->max_mtu = IB_MTU_4096; - if (netdev->mtu >= 4096) - props->active_mtu = IB_MTU_4096; - else if (netdev->mtu >= 2048) - props->active_mtu = IB_MTU_2048; - else if (netdev->mtu >= 1024) - props->active_mtu = IB_MTU_1024; - else if (netdev->mtu >= 512) - props->active_mtu = IB_MTU_512; - else - props->active_mtu = IB_MTU_256; + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); if 
(!netif_carrier_ok(netdev)) props->state = IB_PORT_DOWN; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index f1510cc76d2d..9398143d7c5e 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) skb_trim(skb, dlen); mutex_lock(&ep->com.mutex); - /* update RX credits */ - update_rx_credits(ep, dlen); - switch (ep->com.state) { case MPA_REQ_SENT: + update_rx_credits(ep, dlen); ep->rcv_seq += dlen; disconnect = process_mpa_reply(ep, skb); break; case MPA_REQ_WAIT: + update_rx_credits(ep, dlen); ep->rcv_seq += dlen; disconnect = process_mpa_request(ep, skb); break; case FPDU_MODE: { struct c4iw_qp_attributes attrs; + + update_rx_credits(ep, dlen); BUG_ON(!ep->com.qp); if (status) pr_err("%s Unexpected streaming data." \ diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 19c6477af19f..bec82a600d77 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -505,6 +505,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, } /* + * Special cqe for drain WR completions... + */ + if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) { + *cookie = CQE_DRAIN_COOKIE(hw_cqe); + *cqe = *hw_cqe; + goto skip_cqe; + } + + /* * Gotta tweak READ completions: * 1) the cqe doesn't contain the sq_wptr from the wr. * 2) opcode not reflected from the wr. @@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) c4iw_invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe)); break; + case C4IW_DRAIN_OPCODE: + wc->opcode = IB_WC_SEND; + break; default: printk(KERN_ERR MOD "Unexpected opcode %d " "in the CQE received for QPID=0x%0x\n", @@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) } } out: - if (wq) { - if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) { - if (t4_sq_empty(wq)) - complete(&qhp->sq_drained); - if (t4_rq_empty(wq)) - complete(&qhp->rq_drained); - } + if (wq) spin_unlock(&qhp->lock); - } return ret; } diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 516b0ae6dc3f..40c0e7b9fc6e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) } } + rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); + if (!rdev->free_workq) { + err = -ENOMEM; + goto err_free_status_page; + } + rdev->status_page->db_off = 0; return 0; +err_free_status_page: + free_page((unsigned long)rdev->status_page); destroy_ocqp_pool: c4iw_ocqp_pool_destroy(rdev); destroy_rqtpool: @@ -862,6 +870,7 @@ destroy_resource: static void c4iw_rdev_close(struct c4iw_rdev *rdev) { + destroy_workqueue(rdev->free_workq); kfree(rdev->wr_log); free_page((unsigned long)rdev->status_page); c4iw_pblpool_destroy(rdev); diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 4788e1a46fde..8cd4d054a87e 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -45,6 +45,7 @@ #include <linux/kref.h> #include <linux/timer.h> #include <linux/io.h> +#include <linux/workqueue.h> #include <asm/byteorder.h> @@ -107,6 +108,7 @@ struct c4iw_dev_ucontext { struct list_head qpids; struct list_head cqids; struct mutex lock; + struct kref kref; }; enum c4iw_rdev_flags { @@ -183,6 +185,7 @@ struct c4iw_rdev { atomic_t wr_log_idx; struct 
wr_log_entry *wr_log; int wr_log_size; + struct workqueue_struct *free_workq; }; static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) @@ -480,8 +483,8 @@ struct c4iw_qp { wait_queue_head_t wait; struct timer_list timer; int sq_sig_all; - struct completion rq_drained; - struct completion sq_drained; + struct work_struct free_work; + struct c4iw_ucontext *ucontext; }; static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) @@ -495,6 +498,7 @@ struct c4iw_ucontext { u32 key; spinlock_t mmap_lock; struct list_head mmaps; + struct kref kref; }; static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) @@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) return container_of(c, struct c4iw_ucontext, ibucontext); } +void _c4iw_free_ucontext(struct kref *kref); + +static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext) +{ + kref_put(&ucontext->kref, _c4iw_free_ucontext); +} + +static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext) +{ + kref_get(&ucontext->kref); +} + struct c4iw_mm_entry { struct list_head entry; u64 addr; @@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state) return IB_QPS_ERR; } +#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN + static inline u32 c4iw_ib_to_tpt_access(int a) { return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) | @@ -997,8 +1015,6 @@ extern int c4iw_wr_log; extern int db_fc_threshold; extern int db_coalescing_threshold; extern int use_dsgl; -void c4iw_drain_rq(struct ib_qp *qp); -void c4iw_drain_sq(struct ib_qp *qp); void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey); #endif diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 49b51b7e0fd7..3345e1c312f7 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags, return -ENOSYS; } -static int c4iw_dealloc_ucontext(struct ib_ucontext *context) +void _c4iw_free_ucontext(struct kref *kref) { - struct c4iw_dev *rhp = to_c4iw_dev(context->device); - struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); + struct c4iw_ucontext *ucontext; + struct c4iw_dev *rhp; struct c4iw_mm_entry *mm, *tmp; - PDBG("%s context %p\n", __func__, context); + ucontext = container_of(kref, struct c4iw_ucontext, kref); + rhp = to_c4iw_dev(ucontext->ibucontext.device); + + PDBG("%s ucontext %p\n", __func__, ucontext); list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) kfree(mm); c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); kfree(ucontext); +} + +static int c4iw_dealloc_ucontext(struct ib_ucontext *context) +{ + struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); + + PDBG("%s context %p\n", __func__, context); + c4iw_put_ucontext(ucontext); return 0; } @@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev, c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); INIT_LIST_HEAD(&context->mmaps); spin_lock_init(&context->mmap_lock); + kref_init(&context->kref); if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) { if (!warned++) @@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port, memset(props, 0, sizeof(struct ib_port_attr)); props->max_mtu = IB_MTU_4096; - if (netdev->mtu >= 4096) - props->active_mtu = IB_MTU_4096; - else if (netdev->mtu >= 2048) - props->active_mtu = IB_MTU_2048; - else if (netdev->mtu >= 1024) - props->active_mtu = 
IB_MTU_1024; - else if (netdev->mtu >= 512) - props->active_mtu = IB_MTU_512; - else - props->active_mtu = IB_MTU_256; + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); if (!netif_carrier_ok(netdev)) props->state = IB_PORT_DOWN; @@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev) dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; dev->ibdev.get_port_immutable = c4iw_port_immutable; dev->ibdev.get_dev_fw_str = get_dev_fw_str; - dev->ibdev.drain_sq = c4iw_drain_sq; - dev->ibdev.drain_rq = c4iw_drain_rq; dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); if (!dev->ibdev.iwcm) diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index cda5542e13a2..04c1c382dedb 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) return 0; } -static void _free_qp(struct kref *kref) +static void free_qp_work(struct work_struct *work) +{ + struct c4iw_ucontext *ucontext; + struct c4iw_qp *qhp; + struct c4iw_dev *rhp; + + qhp = container_of(work, struct c4iw_qp, free_work); + ucontext = qhp->ucontext; + rhp = qhp->rhp; + + PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext); + destroy_qp(&rhp->rdev, &qhp->wq, + ucontext ? &ucontext->uctx : &rhp->rdev.uctx); + + if (ucontext) + c4iw_put_ucontext(ucontext); + kfree(qhp); +} + +static void queue_qp_free(struct kref *kref) { struct c4iw_qp *qhp; qhp = container_of(kref, struct c4iw_qp, kref); PDBG("%s qhp %p\n", __func__, qhp); - kfree(qhp); + queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work); } void c4iw_qp_add_ref(struct ib_qp *qp) @@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp) void c4iw_qp_rem_ref(struct ib_qp *qp) { PDBG("%s ib_qp %p\n", __func__, qp); - kref_put(&to_c4iw_qp(qp)->kref, _free_qp); + kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free); } static void add_to_fc_list(struct list_head *head, struct list_head *entry) @@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) return 0; } +static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) +{ + struct t4_cqe cqe = {}; + struct c4iw_cq *schp; + unsigned long flag; + struct t4_cq *cq; + + schp = to_c4iw_cq(qhp->ibqp.send_cq); + cq = &schp->cq; + + cqe.u.drain_cookie = wr->wr_id; + cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | + CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | + CQE_TYPE_V(1) | + CQE_SWCQE_V(1) | + CQE_QPID_V(qhp->wq.sq.qid)); + + spin_lock_irqsave(&schp->lock, flag); + cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); + cq->sw_queue[cq->sw_pidx] = cqe; + t4_swcq_produce(cq); + spin_unlock_irqrestore(&schp->lock, flag); + + spin_lock_irqsave(&schp->comp_handler_lock, flag); + (*schp->ibcq.comp_handler)(&schp->ibcq, + schp->ibcq.cq_context); + spin_unlock_irqrestore(&schp->comp_handler_lock, flag); +} + +static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) +{ + struct t4_cqe cqe = {}; + struct c4iw_cq *rchp; + unsigned long flag; + struct t4_cq *cq; + + rchp = to_c4iw_cq(qhp->ibqp.recv_cq); + cq = &rchp->cq; + + cqe.u.drain_cookie = wr->wr_id; + cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | + CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | + CQE_TYPE_V(0) | + CQE_SWCQE_V(1) | + CQE_QPID_V(qhp->wq.sq.qid)); + + spin_lock_irqsave(&rchp->lock, flag); + cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); + cq->sw_queue[cq->sw_pidx] = cqe; + t4_swcq_produce(cq); + spin_unlock_irqrestore(&rchp->lock, flag); + 
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag); + (*rchp->ibcq.comp_handler)(&rchp->ibcq, + rchp->ibcq.cq_context); + spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); +} + int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { @@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, spin_lock_irqsave(&qhp->lock, flag); if (t4_wq_in_error(&qhp->wq)) { spin_unlock_irqrestore(&qhp->lock, flag); - *bad_wr = wr; - return -EINVAL; + complete_sq_drain_wr(qhp, wr); + return err; } num_wrs = t4_sq_avail(&qhp->wq); if (num_wrs == 0) { @@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, spin_lock_irqsave(&qhp->lock, flag); if (t4_wq_in_error(&qhp->wq)) { spin_unlock_irqrestore(&qhp->lock, flag); - *bad_wr = wr; - return -EINVAL; + complete_rq_drain_wr(qhp, wr); + return err; } num_wrs = t4_rq_avail(&qhp->wq); if (num_wrs == 0) { @@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, } break; case C4IW_QP_STATE_CLOSING: - if (!internal) { + + /* + * Allow kernel users to move to ERROR for qp draining. + */ + if (!internal && (qhp->ibqp.uobject || attrs->next_state != + C4IW_QP_STATE_ERROR)) { ret = -EINVAL; goto out; } @@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) struct c4iw_dev *rhp; struct c4iw_qp *qhp; struct c4iw_qp_attributes attrs; - struct c4iw_ucontext *ucontext; qhp = to_c4iw_qp(ib_qp); rhp = qhp->rhp; @@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) spin_unlock_irq(&rhp->lock); free_ird(rhp, qhp->attr.max_ird); - ucontext = ib_qp->uobject ? - to_c4iw_ucontext(ib_qp->uobject->context) : NULL; - destroy_qp(&rhp->rdev, &qhp->wq, - ucontext ? &ucontext->uctx : &rhp->rdev.uctx); - c4iw_qp_rem_ref(ib_qp); PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); @@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, qhp->attr.max_ird = 0; qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; spin_lock_init(&qhp->lock); - init_completion(&qhp->sq_drained); - init_completion(&qhp->rq_drained); mutex_init(&qhp->mutex); init_waitqueue_head(&qhp->wait); kref_init(&qhp->kref); + INIT_WORK(&qhp->free_work, free_qp_work); ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); if (ret) @@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, ma_sync_key_mm->len = PAGE_SIZE; insert_mmap(ucontext, ma_sync_key_mm); } + + c4iw_get_ucontext(ucontext); + qhp->ucontext = ucontext; } qhp->ibqp.qp_num = qhp->wq.sq.qid; init_timer(&(qhp->timer)); @@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, init_attr->sq_sig_type = qhp->sq_sig_all ? 
IB_SIGNAL_ALL_WR : 0; return 0; } - -static void move_qp_to_err(struct c4iw_qp *qp) -{ - struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR }; - - (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); -} - -void c4iw_drain_sq(struct ib_qp *ibqp) -{ - struct c4iw_qp *qp = to_c4iw_qp(ibqp); - unsigned long flag; - bool need_to_wait; - - move_qp_to_err(qp); - spin_lock_irqsave(&qp->lock, flag); - need_to_wait = !t4_sq_empty(&qp->wq); - spin_unlock_irqrestore(&qp->lock, flag); - - if (need_to_wait) - wait_for_completion(&qp->sq_drained); -} - -void c4iw_drain_rq(struct ib_qp *ibqp) -{ - struct c4iw_qp *qp = to_c4iw_qp(ibqp); - unsigned long flag; - bool need_to_wait; - - move_qp_to_err(qp); - spin_lock_irqsave(&qp->lock, flag); - need_to_wait = !t4_rq_empty(&qp->wq); - spin_unlock_irqrestore(&qp->lock, flag); - - if (need_to_wait) - wait_for_completion(&qp->rq_drained); -} diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index 862381aa83c8..640d22148a3e 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -179,6 +179,7 @@ struct t4_cqe { __be32 wrid_hi; __be32 wrid_low; } gen; + u64 drain_cookie; } u; __be64 reserved; __be64 bits_type_ts; @@ -238,6 +239,7 @@ struct t4_cqe { /* generic accessor macros */ #define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi)) #define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low)) +#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie) /* macros for flit 3 of the cqe */ #define CQE_GENBIT_S 63 diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 29e97df9e1a7..4c000d60d5c6 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev, memset(props, 0, sizeof(*props)); props->max_mtu = IB_MTU_4096; - if (netdev->mtu >= 4096) - props->active_mtu = IB_MTU_4096; - else if (netdev->mtu >= 2048) - props->active_mtu = IB_MTU_2048; - else if (netdev->mtu >= 1024) - props->active_mtu = IB_MTU_1024; - else if (netdev->mtu >= 512) - props->active_mtu = IB_MTU_512; - else - props->active_mtu = IB_MTU_256; + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); props->lid = 1; if (netif_carrier_ok(iwdev->netdev)) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index a191b9327b0c..9d8535385bb8 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -53,6 +53,7 @@ #include <linux/in.h> #include <linux/etherdevice.h> #include <linux/mlx5/fs.h> +#include <linux/mlx5/vport.h> #include "mlx5_ib.h" #define DRIVER_NAME "mlx5_ib" @@ -1202,6 +1203,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, resp.response_length += sizeof(resp.cmds_supp_uhw); } + if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) { + if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) { + mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline); + resp.eth_min_inline++; + } + resp.response_length += sizeof(resp.eth_min_inline); + } + /* * We don't want to expose information from the PCI bar that is located * after 4096 bytes, so if the arch only supports larger pages, let's @@ -1704,9 +1713,9 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v, if (ib_spec->eth.mask.vlan_tag) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, - vlan_tag, 1); + cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - vlan_tag, 1); + cvlan_tag, 1); 
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index aff9fb14768b..5a31f3c6a421 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr memset(props, 0, sizeof(*props)); props->max_mtu = IB_MTU_4096; - - if (netdev->mtu >= 4096) - props->active_mtu = IB_MTU_4096; - else if (netdev->mtu >= 2048) - props->active_mtu = IB_MTU_2048; - else if (netdev->mtu >= 1024) - props->active_mtu = IB_MTU_1024; - else if (netdev->mtu >= 512) - props->active_mtu = IB_MTU_512; - else - props->active_mtu = IB_MTU_256; + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); props->lid = 1; props->lmc = 0; diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 7b74d09a8217..3ac8aa5ef37d 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev) return 0; } -void qedr_unaffiliated_event(void *context, - u8 event_code) +void qedr_unaffiliated_event(void *context, u8 event_code) { pr_err("unaffiliated event not implemented yet\n"); } @@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) goto sysfs_err; + if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); return dev; @@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev) ib_dealloc_device(&dev->ibdev); } -static int qedr_close(struct qedr_dev *dev) +static void qedr_close(struct qedr_dev *dev) { - qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); - - return 0; + if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR); } static void qedr_shutdown(struct qedr_dev *dev) @@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev) qedr_remove(dev); } +static void qedr_open(struct qedr_dev *dev) +{ + if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); +} + static void qedr_mac_address_change(struct qedr_dev *dev) { union ib_gid *sgid = &dev->sgid_tbl[0]; @@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev) ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); - qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE); + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE); if (rc) DP_ERR(dev, "Error updating mac filter\n"); @@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event) { switch (event) { case QEDE_UP: - qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); + qedr_open(dev); break; case QEDE_DOWN: qedr_close(dev); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 620badd7d4fb..bb32e4792ec9 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -113,6 +113,8 @@ struct qedr_device_attr { struct qed_rdma_events events; }; +#define QEDR_ENET_STATE_BIT (0) + struct qedr_dev { struct ib_device ibdev; struct qed_dev *cdev; @@ -153,6 +155,8 @@ struct qedr_dev { struct qedr_cq *gsi_sqcq; struct qedr_cq *gsi_rqcq; struct 
qedr_qp *gsi_qp; + + unsigned long enet_state; }; #define QEDR_MAX_SQ_PBL (0x8000) @@ -188,6 +192,7 @@ struct qedr_dev { #define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) #define QEDR_MAX_PORT (1) +#define QEDR_PORT (1) #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) @@ -251,9 +256,6 @@ struct qedr_cq { u16 icid; - /* Lock to protect completion handler */ - spinlock_t comp_handler_lock; - /* Lock to protect multiplem CQ's */ spinlock_t cq_lock; u8 arm_flags; diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index 63890ebb72bd..a9a8d8745d2e 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c @@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt) qedr_inc_sw_gsi_cons(&qp->sq); spin_unlock_irqrestore(&qp->q_lock, flags); - if (cq->ibcq.comp_handler) { - spin_lock_irqsave(&cq->comp_handler_lock, flags); + if (cq->ibcq.comp_handler) (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); - spin_unlock_irqrestore(&cq->comp_handler_lock, flags); - } } void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, @@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, spin_unlock_irqrestore(&qp->q_lock, flags); - if (cq->ibcq.comp_handler) { - spin_lock_irqsave(&cq->comp_handler_lock, flags); + if (cq->ibcq.comp_handler) (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); - spin_unlock_irqrestore(&cq->comp_handler_lock, flags); - } } static void qedr_destroy_gsi_cq(struct qedr_dev *dev, @@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev, } if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h)) - packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW; - else packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB; + else + packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW; packet->roce_mode = roce_mode; memcpy(packet->header.vaddr, ud_header_buffer, header_size); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 57c8de208077..c7d6c9a783bd 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) { struct qedr_dev *dev = get_qedr_dev(ibdev); - struct qedr_ucontext *uctx = NULL; - struct qedr_alloc_pd_uresp uresp; struct qedr_pd *pd; u16 pd_id; int rc; @@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, if (!pd) return ERR_PTR(-ENOMEM); - dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); + rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); + if (rc) + goto err; - uresp.pd_id = pd_id; pd->pd_id = pd_id; if (udata && context) { + struct qedr_alloc_pd_uresp uresp; + + uresp.pd_id = pd_id; + rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); - if (rc) + if (rc) { DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); - uctx = get_qedr_ucontext(context); - uctx->pd = pd; - pd->uctx = uctx; + dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id); + goto err; + } + + pd->uctx = get_qedr_ucontext(context); + pd->uctx->pd = pd; } return &pd->ibpd; + +err: + kfree(pd); + return ERR_PTR(rc); } int qedr_dealloc_pd(struct ib_pd *ibpd) @@ -1600,7 +1610,7 @@ err0: return ERR_PTR(-EFAULT); } -enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) +static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) { switch (qp_state) { case QED_ROCE_QP_STATE_RESET: @@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum 
qed_roce_qp_state qp_state) return IB_QPS_ERR; } -enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state) +static enum qed_roce_qp_state qedr_get_state_from_ibqp( + enum ib_qp_state qp_state) { switch (qp_state) { case IB_QPS_RESET: @@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev, int status = 0; if (new_state == qp->state) - return 1; + return 0; switch (qp->state) { case QED_ROCE_QP_STATE_RESET: @@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev, /* ERR->XXX */ switch (new_state) { case QED_ROCE_QP_STATE_RESET: + if ((qp->rq.prod != qp->rq.cons) || + (qp->sq.prod != qp->sq.cons)) { + DP_NOTICE(dev, + "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n", + qp->rq.prod, qp->rq.cons, qp->sq.prod, + qp->sq.cons); + status = -EINVAL; + } break; default: status = -EINVAL; @@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]); DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n", qp_params.remote_mac_addr); -; qp_params.mtu = qp->mtu; qp_params.lb_indication = false; @@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp, qp_attr->qp_state = qedr_get_ibqp_state(params.state); qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state); - qp_attr->path_mtu = iboe_get_mtu(params.mtu); + qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu); qp_attr->path_mig_state = IB_MIG_MIGRATED; qp_attr->rq_psn = params.rq_psn; qp_attr->sq_psn = params.sq_psn; @@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp, qp_attr->cap.max_recv_wr = qp->rq.max_wr; qp_attr->cap.max_send_sge = qp->sq.max_sges; qp_attr->cap.max_recv_sge = qp->rq.max_sges; - qp_attr->cap.max_inline_data = qp->max_inline_data; + qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE; qp_init_attr->cap = qp_attr->cap; memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], ¶ms.dgid.bytes[0], @@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr) return rc; } -struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len) +static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, + int max_page_list_len) { struct qedr_pd *pd = get_qedr_pd(ibpd); struct qedr_dev *dev = get_qedr_dev(ibpd->device); @@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp, return 0; } -enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) +static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) { switch (opcode) { case IB_WR_RDMA_WRITE: @@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) } } -inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) +static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) { int wq_is_full, err_wr, pbl_is_full; struct qedr_dev *dev = qp->dev; @@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) return true; } -int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, +static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { struct qedr_dev *dev = get_qedr_dev(ibqp->device); @@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev, IB_WC_SUCCESS, 0); break; case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR: - DP_ERR(dev, - "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. 
CQ icid=0x%x, QP icid=0x%x\n", - cq->icid, qp->icid); + if (qp->state != QED_ROCE_QP_STATE_ERR) + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, IB_WC_WR_FLUSH_ERR, 1); break; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 231a1ce1f4be..bd8fbd3d2032 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "failed to allocate interrupts\n"); ret = -ENOMEM; - goto err_netdevice; + goto err_free_cq_ring; } /* Allocate UAR table. */ @@ -1092,8 +1092,6 @@ err_free_uar_table: err_free_intrs: pvrdma_free_irq(dev); pvrdma_disable_msi_all(dev); -err_netdevice: - unregister_netdevice_notifier(&dev->nb_netdev); err_free_cq_ring: pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); err_free_async_ring: diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index 54891370d18a..c2aa52638dcb 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c @@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev, union pvrdma_cmd_resp rsp; struct pvrdma_cmd_create_uc *cmd = &req.create_uc; struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp; - struct pvrdma_alloc_ucontext_resp uresp; + struct pvrdma_alloc_ucontext_resp uresp = {0}; int ret; void *ptr; diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 342e78163613..4abdeb359fb4 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev) } spin_lock_bh(&dev_list_lock); - list_add_tail(&rxe_dev_list, &rxe->list); + list_add_tail(&rxe->list, &rxe_dev_list); spin_unlock_bh(&dev_list_lock); return rxe; } diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 486d576e55bc..44b2108253bd 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp) del_timer_sync(&qp->rnr_nak_timer); rxe_cleanup_task(&qp->req.task); - if (qp_type(qp) == IB_QPT_RC) - rxe_cleanup_task(&qp->comp.task); + rxe_cleanup_task(&qp->comp.task); /* flush out any receive wr's or pending requests */ __rxe_do_task(&qp->req.task); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 9104e6b8cac9..e71af717e71b 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, SHOST_DIX_GUARD_CRC); } - /* - * Limit the sg_tablesize and max_sectors based on the device - * max fastreg page list length. 
- */ - shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize, - ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len); - if (iscsi_host_add(shost, ib_conn->device->ib_device->dma_device)) { mutex_unlock(&iser_conn->state_mutex); @@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9; shost->max_sectors = min(iser_max_sectors, max_fr_sectors); + iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n", + iser_conn, shost->sg_tablesize, + shost->max_sectors); + if (cmds_max > max_cmds) { iser_info("cmds_max changed from %u to %u\n", cmds_max, max_cmds); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 0be6a7c5ddb5..9d0b22ad58c1 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -496,7 +496,6 @@ struct ib_conn { * @rx_descs: rx buffers array (cyclic buffer) * @num_rx_descs: number of rx descriptors * @scsi_sg_tablesize: scsi host sg_tablesize - * @scsi_max_sectors: scsi host max sectors */ struct iser_conn { struct ib_conn ib_conn; @@ -519,7 +518,6 @@ struct iser_conn { struct iser_rx_desc *rx_descs; u32 num_rx_descs; unsigned short scsi_sg_tablesize; - unsigned int scsi_max_sectors; bool snd_w_inv; }; diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 8ae7a3beddb7..6a9d1cb548ee 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn, sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, device->ib_device->attrs.max_fast_reg_page_list_len); - if (sg_tablesize > sup_sg_tablesize) { - sg_tablesize = sup_sg_tablesize; - iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512; - } else { - iser_conn->scsi_max_sectors = max_sectors; - } - - iser_conn->scsi_sg_tablesize = sg_tablesize; - - iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n", - iser_conn, iser_conn->scsi_sg_tablesize, - iser_conn->scsi_max_sectors); + iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize); } /** diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 8ddc07123193..79bf48477ddb 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device, struct srp_fr_desc *d; struct ib_mr *mr; int i, ret = -EINVAL; + enum ib_mr_type mr_type; if (pool_size <= 0) goto err; @@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device, spin_lock_init(&pool->lock); INIT_LIST_HEAD(&pool->free_list); + if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) + mr_type = IB_MR_TYPE_SG_GAPS; + else + mr_type = IB_MR_TYPE_MEM_REG; + for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { - mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, - max_page_list_len); + mr = ib_alloc_mr(pd, mr_type, max_page_list_len); if (IS_ERR(mr)) { ret = PTR_ERR(mr); if (ret == -ENOMEM) @@ -3694,6 +3699,12 @@ static int __init srp_init_module(void) indirect_sg_entries = cmd_sg_entries; } + if (indirect_sg_entries > SG_MAX_SEGMENTS) { + pr_warn("Clamping indirect_sg_entries to %u\n", + SG_MAX_SEGMENTS); + indirect_sg_entries = SG_MAX_SEGMENTS; + } + srp_remove_wq = create_workqueue("srp_remove"); if (!srp_remove_wq) { ret = -ENOMEM; diff --git a/drivers/input/joydev.c 
b/drivers/input/joydev.c index f3135ae22df4..abd18f31b24f 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -22,7 +22,6 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/mm.h> -#include <linux/miscdevice.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/init.h> diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 6d9499658671..c7d5b2b643d1 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -1377,6 +1377,12 @@ static int xpad_init_input(struct usb_xpad *xpad) input_dev->name = xpad->name; input_dev->phys = xpad->phys; usb_to_input_id(xpad->udev, &input_dev->id); + + if (xpad->xtype == XTYPE_XBOX360W) { + /* x360w controllers and the receiver have different ids */ + input_dev->id.product = 0x02a1; + } + input_dev->dev.parent = &xpad->intf->dev; input_set_drvdata(input_dev, xpad); diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c index a8b0a2eec344..7fed92fb8cc1 100644 --- a/drivers/input/misc/adxl34x-i2c.c +++ b/drivers/input/misc/adxl34x-i2c.c @@ -136,7 +136,6 @@ static const struct i2c_device_id adxl34x_id[] = { MODULE_DEVICE_TABLE(i2c, adxl34x_id); -#ifdef CONFIG_OF static const struct of_device_id adxl34x_of_id[] = { /* * The ADXL346 is backward-compatible with the ADXL345. Differences are @@ -153,13 +152,12 @@ static const struct of_device_id adxl34x_of_id[] = { }; MODULE_DEVICE_TABLE(of, adxl34x_of_id); -#endif static struct i2c_driver adxl34x_driver = { .driver = { .name = "adxl34x", .pm = &adxl34x_i2c_pm, - .of_match_table = of_match_ptr(adxl34x_of_id), + .of_match_table = adxl34x_of_id, }, .probe = adxl34x_i2c_probe, .remove = adxl34x_i2c_remove, diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index cde6f4bd8ea2..6d279aa27cb9 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -114,7 +114,7 @@ enum SS4_PACKET_ID { (_b[1] & 0x7F) \ ) -#define SS4_TS_Y_V2(_b) (s8)( \ +#define SS4_TS_Y_V2(_b) -(s8)( \ ((_b[3] & 0x01) << 7) | \ (_b[2] & 0x7F) \ ) diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c index aa7c5da60800..cb2bf203f4ca 100644 --- a/drivers/input/mouse/synaptics_i2c.c +++ b/drivers/input/mouse/synaptics_i2c.c @@ -29,7 +29,7 @@ * after soft reset, we should wait for 1 ms * before the device becomes operational */ -#define SOFT_RESET_DELAY_MS 3 +#define SOFT_RESET_DELAY_US 3000 /* and after hard reset, we should wait for max 500ms */ #define HARD_RESET_DELAY_MS 500 @@ -311,7 +311,7 @@ static int synaptics_i2c_reset_config(struct i2c_client *client) if (ret) { dev_err(&client->dev, "Unable to reset device\n"); } else { - msleep(SOFT_RESET_DELAY_MS); + usleep_range(SOFT_RESET_DELAY_US, SOFT_RESET_DELAY_US + 100); ret = synaptics_i2c_config(client); if (ret) dev_err(&client->dev, "Unable to config device\n"); diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig index 30cc627a4f45..8993983e3fe4 100644 --- a/drivers/input/rmi4/Kconfig +++ b/drivers/input/rmi4/Kconfig @@ -41,7 +41,8 @@ config RMI4_SMB config RMI4_F03 bool "RMI4 Function 03 (PS2 Guest)" - depends on RMI4_CORE && SERIO + depends on RMI4_CORE + depends on SERIO=y || RMI4_CORE=SERIO help Say Y here if you want to add support for RMI4 function 03. 
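The synaptics_i2c hunk above (SOFT_RESET_DELAY_MS replaced by SOFT_RESET_DELAY_US) reflects a general kernel timing guideline: msleep() rounds short delays up to whole jiffies, so a nominal 3 ms sleep can take 20 ms or more on a HZ=100 kernel, whereas usleep_range() is hrtimer-based and keeps the delay inside an explicit window that the scheduler can coalesce with other wakeups. A minimal sketch of the idiom follows; the example_* helpers and the delay value are hypothetical stand-ins mirroring the patch, not code from any change above:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/i2c.h>

#define EXAMPLE_SOFT_RESET_DELAY_US	3000	/* hypothetical value, mirrors the patch */

/* Hypothetical stand-ins for a driver's reset and config commands. */
static int example_send_soft_reset(struct i2c_client *client) { return 0; }
static int example_config(struct i2c_client *client) { return 0; }

static int example_reset_and_config(struct i2c_client *client)
{
	int ret;

	ret = example_send_soft_reset(client);
	if (ret) {
		dev_err(&client->dev, "Unable to reset device\n");
		return ret;
	}

	/*
	 * msleep(3) may sleep for a jiffy or more (10-20 ms at HZ=100);
	 * usleep_range() bounds the delay and still lets the scheduler
	 * batch this wakeup with others falling in the same window.
	 */
	usleep_range(EXAMPLE_SOFT_RESET_DELAY_US,
		     EXAMPLE_SOFT_RESET_DELAY_US + 100);

	return example_config(client);
}

The i2c-hid hunk earlier in this series applies the same idiom, using usleep_range(1000, 5000) to guarantee at least 1 ms between the PWR_ON and RESET requests.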
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 77551f522202..a7618776705a 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), + DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), + }, + }, { } }; diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 02aec284deca..3e6003d32e56 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -914,9 +914,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev) case QUEUE_HEADER_NORMAL: report_count = ts->buf[FW_HDR_COUNT]; - if (report_count > 3) { + if (report_count == 0 || report_count > 3) { dev_err(&client->dev, - "too large report count: %*ph\n", + "bad report count: %*ph\n", HEADER_SIZE, ts->buf); break; } diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index 1a1d99704fe6..3b11422b1cce 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c @@ -147,7 +147,7 @@ static word plci_remove_check(PLCI *); static void listen_check(DIVA_CAPI_ADAPTER *); static byte AddInfo(byte **, byte **, byte *, byte *); static byte getChannel(API_PARSE *); -static void IndParse(PLCI *, word *, byte **, byte); +static void IndParse(PLCI *, const word *, byte **, byte); static byte ie_compare(byte *, byte *); static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *); static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word); @@ -4858,7 +4858,7 @@ static void sig_ind(PLCI *plci) /* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */ /* SMSG is situated at the end because its 0 (for compatibility reasons */ /* (see Info_Mask Bit 4, first IE. 
then the message type) */ - word parms_id[] = + static const word parms_id[] = {MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA, UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW, RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR, @@ -4866,12 +4866,12 @@ static void sig_ind(PLCI *plci) /* 14 FTY repl by ESC_CHI */ /* 18 PI repl by ESC_LAW */ /* removed OAD changed to 0xff for future use, OAD is multiIE now */ - word multi_fac_id[] = {1, FTY}; - word multi_pi_id[] = {1, PI}; - word multi_CiPN_id[] = {1, OAD}; - word multi_ssext_id[] = {1, ESC_SSEXT}; + static const word multi_fac_id[] = {1, FTY}; + static const word multi_pi_id[] = {1, PI}; + static const word multi_CiPN_id[] = {1, OAD}; + static const word multi_ssext_id[] = {1, ESC_SSEXT}; - word multi_vswitch_id[] = {1, ESC_VSWITCH}; + static const word multi_vswitch_id[] = {1, ESC_VSWITCH}; byte *cau; word ncci; @@ -8924,7 +8924,7 @@ static void listen_check(DIVA_CAPI_ADAPTER *a) /* functions for all parameters sent in INDs */ /*------------------------------------------------------------------*/ -static void IndParse(PLCI *plci, word *parms_id, byte **parms, byte multiIEsize) +static void IndParse(PLCI *plci, const word *parms_id, byte **parms, byte multiIEsize) { word ploc; /* points to current location within packet */ byte w; @@ -11297,7 +11297,8 @@ static void mixer_notify_update(PLCI *plci, byte others) ((CAPI_MSG *) msg)->header.ncci = 0; ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT; ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3; - PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE); + ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff; + ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8; ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0; w = api_put(notify_plci->appl, (CAPI_MSG *) msg); if (w != _QUEUE_FULL) diff --git a/drivers/md/md.h b/drivers/md/md.h index e38936d05df1..2a514036a83d 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -212,6 +212,7 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, int is_new); struct md_cluster_info; +/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */ enum mddev_flags { MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */ MD_CLOSING, /* If set, we are closing the array, do not open @@ -702,4 +703,11 @@ static inline int mddev_is_clustered(struct mddev *mddev) { return mddev->cluster_info && mddev->bitmap_info.nodes > 1; } + +/* clear unsupported mddev_flags */ +static inline void mddev_clear_unsupported_flags(struct mddev *mddev, + unsigned long unsupported_flags) +{ + mddev->flags &= ~unsupported_flags; +} #endif /* _MD_MD_H */ diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index a162fedeb51a..848365d474f3 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -26,6 +26,11 @@ #include "raid0.h" #include "raid5.h" +#define UNSUPPORTED_MDDEV_FLAGS \ + ((1L << MD_HAS_JOURNAL) | \ + (1L << MD_JOURNAL_CLEAN) | \ + (1L << MD_FAILFAST_SUPPORTED)) + static int raid0_congested(struct mddev *mddev, int bits) { struct r0conf *conf = mddev->private; @@ -539,8 +544,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev) mddev->delta_disks = -1; /* make sure it will be not marked as dirty */ mddev->recovery_cp = MaxSector; - clear_bit(MD_HAS_JOURNAL, &mddev->flags); - clear_bit(MD_JOURNAL_CLEAN, &mddev->flags); + mddev_clear_unsupported_flags(mddev, 
UNSUPPORTED_MDDEV_FLAGS); create_strip_zones(mddev, &priv_conf); @@ -583,7 +587,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev) mddev->degraded = 0; /* make sure it will be not marked as dirty */ mddev->recovery_cp = MaxSector; - clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); + mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); create_strip_zones(mddev, &priv_conf); return priv_conf; @@ -626,7 +630,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev) mddev->raid_disks = 1; /* make sure it will be not marked as dirty */ mddev->recovery_cp = MaxSector; - clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); + mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); create_strip_zones(mddev, &priv_conf); return priv_conf; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a1f3fbed9100..7b0f647bcccb 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -42,6 +42,10 @@ #include "raid1.h" #include "bitmap.h" +#define UNSUPPORTED_MDDEV_FLAGS \ + ((1L << MD_HAS_JOURNAL) | \ + (1L << MD_JOURNAL_CLEAN)) + /* * Number of guaranteed r1bios in case of extreme VM load: */ @@ -1066,17 +1070,107 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) kfree(plug); } -static void raid1_make_request(struct mddev *mddev, struct bio * bio) +static void raid1_read_request(struct mddev *mddev, struct bio *bio, + struct r1bio *r1_bio) { struct r1conf *conf = mddev->private; struct raid1_info *mirror; - struct r1bio *r1_bio; struct bio *read_bio; + struct bitmap *bitmap = mddev->bitmap; + const int op = bio_op(bio); + const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); + int sectors_handled; + int max_sectors; + int rdisk; + + wait_barrier(conf, bio); + +read_again: + rdisk = read_balance(conf, r1_bio, &max_sectors); + + if (rdisk < 0) { + /* couldn't find anywhere to read from */ + raid_end_bio_io(r1_bio); + return; + } + mirror = conf->mirrors + rdisk; + + if (test_bit(WriteMostly, &mirror->rdev->flags) && + bitmap) { + /* + * Reading from a write-mostly device must take care not to + * over-take any writes that are 'behind' + */ + raid1_log(mddev, "wait behind writes"); + wait_event(bitmap->behind_wait, + atomic_read(&bitmap->behind_writes) == 0); + } + r1_bio->read_disk = rdisk; + r1_bio->start_next_window = 0; + + read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); + bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, + max_sectors); + + r1_bio->bios[rdisk] = read_bio; + + read_bio->bi_iter.bi_sector = r1_bio->sector + + mirror->rdev->data_offset; + read_bio->bi_bdev = mirror->rdev->bdev; + read_bio->bi_end_io = raid1_end_read_request; + bio_set_op_attrs(read_bio, op, do_sync); + if (test_bit(FailFast, &mirror->rdev->flags) && + test_bit(R1BIO_FailFast, &r1_bio->state)) + read_bio->bi_opf |= MD_FAILFAST; + read_bio->bi_private = r1_bio; + + if (mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), + read_bio, disk_devt(mddev->gendisk), + r1_bio->sector); + + if (max_sectors < r1_bio->sectors) { + /* + * could not read all from this device, so we will need another + * r1_bio. + */ + sectors_handled = (r1_bio->sector + max_sectors + - bio->bi_iter.bi_sector); + r1_bio->sectors = max_sectors; + spin_lock_irq(&conf->device_lock); + if (bio->bi_phys_segments == 0) + bio->bi_phys_segments = 2; + else + bio->bi_phys_segments++; + spin_unlock_irq(&conf->device_lock); + + /* + * Cannot call generic_make_request directly as that will be + * queued in __make_request and subsequent mempool_alloc might + * block waiting for it. 
So hand bio over to raid1d. + */ + reschedule_retry(r1_bio); + + r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); + + r1_bio->master_bio = bio; + r1_bio->sectors = bio_sectors(bio) - sectors_handled; + r1_bio->state = 0; + r1_bio->mddev = mddev; + r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled; + goto read_again; + } else + generic_make_request(read_bio); +} + +static void raid1_write_request(struct mddev *mddev, struct bio *bio, + struct r1bio *r1_bio) +{ + struct r1conf *conf = mddev->private; int i, disks; - struct bitmap *bitmap; + struct bitmap *bitmap = mddev->bitmap; unsigned long flags; const int op = bio_op(bio); - const int rw = bio_data_dir(bio); const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); const unsigned long do_flush_fua = (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)); @@ -1096,15 +1190,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio) md_write_start(mddev, bio); /* wait on superblock update early */ - if (bio_data_dir(bio) == WRITE && - ((bio_end_sector(bio) > mddev->suspend_lo && + if ((bio_end_sector(bio) > mddev->suspend_lo && bio->bi_iter.bi_sector < mddev->suspend_hi) || (mddev_is_clustered(mddev) && md_cluster_ops->area_resyncing(mddev, WRITE, - bio->bi_iter.bi_sector, bio_end_sector(bio))))) { - /* As the suspend_* range is controlled by - * userspace, we want an interruptible - * wait. + bio->bi_iter.bi_sector, bio_end_sector(bio)))) { + + /* + * As the suspend_* range is controlled by userspace, we want + * an interruptible wait. */ DEFINE_WAIT(w); for (;;) { @@ -1115,128 +1209,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio) bio->bi_iter.bi_sector >= mddev->suspend_hi || (mddev_is_clustered(mddev) && !md_cluster_ops->area_resyncing(mddev, WRITE, - bio->bi_iter.bi_sector, bio_end_sector(bio)))) + bio->bi_iter.bi_sector, + bio_end_sector(bio)))) break; schedule(); } finish_wait(&conf->wait_barrier, &w); } - start_next_window = wait_barrier(conf, bio); - bitmap = mddev->bitmap; - - /* - * make_request() can abort the operation when read-ahead is being - * used and no empty request is available. - * - */ - r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); - - r1_bio->master_bio = bio; - r1_bio->sectors = bio_sectors(bio); - r1_bio->state = 0; - r1_bio->mddev = mddev; - r1_bio->sector = bio->bi_iter.bi_sector; - - /* We might need to issue multiple reads to different - * devices if there are bad blocks around, so we keep - * track of the number of reads in bio->bi_phys_segments. - * If this is 0, there is only one r1_bio and no locking - * will be needed when requests complete. If it is - * non-zero, then it is the number of not-completed requests. 
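For reference, this bi_phys_segments count is consumed on the completion path. A minimal sketch of that side, modelled on raid1's call_bio_endio() from this kernel generation; the exact field and lock names are assumptions for illustration, not part of this patch:

static void call_bio_endio_sketch(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	struct r1conf *conf = r1_bio->mddev->private;
	int done;

	if (bio->bi_phys_segments) {
		unsigned long flags;

		/* Several r1_bios share the master bio: drop one reference. */
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else {
		/* Only one r1_bio, so no locking is needed. */
		done = 1;
	}

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_error = -EIO;
	if (done)
		bio_endio(bio);
}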
- */ - bio->bi_phys_segments = 0; - bio_clear_flag(bio, BIO_SEG_VALID); - - if (rw == READ) { - /* - * read balancing logic: - */ - int rdisk; - -read_again: - rdisk = read_balance(conf, r1_bio, &max_sectors); - - if (rdisk < 0) { - /* couldn't find anywhere to read from */ - raid_end_bio_io(r1_bio); - return; - } - mirror = conf->mirrors + rdisk; - - if (test_bit(WriteMostly, &mirror->rdev->flags) && - bitmap) { - /* Reading from a write-mostly device must - * take care not to over-take any writes - * that are 'behind' - */ - raid1_log(mddev, "wait behind writes"); - wait_event(bitmap->behind_wait, - atomic_read(&bitmap->behind_writes) == 0); - } - r1_bio->read_disk = rdisk; - r1_bio->start_next_window = 0; - - read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, - max_sectors); - - r1_bio->bios[rdisk] = read_bio; - - read_bio->bi_iter.bi_sector = r1_bio->sector + - mirror->rdev->data_offset; - read_bio->bi_bdev = mirror->rdev->bdev; - read_bio->bi_end_io = raid1_end_read_request; - bio_set_op_attrs(read_bio, op, do_sync); - if (test_bit(FailFast, &mirror->rdev->flags) && - test_bit(R1BIO_FailFast, &r1_bio->state)) - read_bio->bi_opf |= MD_FAILFAST; - read_bio->bi_private = r1_bio; - - if (mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), - read_bio, disk_devt(mddev->gendisk), - r1_bio->sector); - - if (max_sectors < r1_bio->sectors) { - /* could not read all from this device, so we will - * need another r1_bio. - */ - - sectors_handled = (r1_bio->sector + max_sectors - - bio->bi_iter.bi_sector); - r1_bio->sectors = max_sectors; - spin_lock_irq(&conf->device_lock); - if (bio->bi_phys_segments == 0) - bio->bi_phys_segments = 2; - else - bio->bi_phys_segments++; - spin_unlock_irq(&conf->device_lock); - /* Cannot call generic_make_request directly - * as that will be queued in __make_request - * and subsequent mempool_alloc might block waiting - * for it. So hand bio over to raid1d. - */ - reschedule_retry(r1_bio); - - r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); - - r1_bio->master_bio = bio; - r1_bio->sectors = bio_sectors(bio) - sectors_handled; - r1_bio->state = 0; - r1_bio->mddev = mddev; - r1_bio->sector = bio->bi_iter.bi_sector + - sectors_handled; - goto read_again; - } else - generic_make_request(read_bio); - return; - } - - /* - * WRITE: - */ if (conf->pending_count >= max_queued_requests) { md_wakeup_thread(mddev->thread); raid1_log(mddev, "wait queued"); @@ -1280,8 +1261,7 @@ read_again: int bad_sectors; int is_bad; - is_bad = is_badblock(rdev, r1_bio->sector, - max_sectors, + is_bad = is_badblock(rdev, r1_bio->sector, max_sectors, &first_bad, &bad_sectors); if (is_bad < 0) { /* mustn't write here until the bad block is @@ -1370,7 +1350,8 @@ read_again: continue; mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); + bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, + max_sectors); if (first_clone) { /* do behind I/O ? @@ -1464,6 +1445,40 @@ read_again: wake_up(&conf->wait_barrier); } +static void raid1_make_request(struct mddev *mddev, struct bio *bio) +{ + struct r1conf *conf = mddev->private; + struct r1bio *r1_bio; + + /* + * make_request() can abort the operation when read-ahead is being + * used and no empty request is available. 
+ * + */ + r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); + + r1_bio->master_bio = bio; + r1_bio->sectors = bio_sectors(bio); + r1_bio->state = 0; + r1_bio->mddev = mddev; + r1_bio->sector = bio->bi_iter.bi_sector; + + /* + * We might need to issue multiple reads to different devices if there + * are bad blocks around, so we keep track of the number of reads in + * bio->bi_phys_segments. If this is 0, there is only one r1_bio and + * no locking will be needed when requests complete. If it is + * non-zero, then it is the number of not-completed requests. + */ + bio->bi_phys_segments = 0; + bio_clear_flag(bio, BIO_SEG_VALID); + + if (bio_data_dir(bio) == READ) + raid1_read_request(mddev, bio, r1_bio); + else + raid1_write_request(mddev, bio, r1_bio); +} + static void raid1_status(struct seq_file *seq, struct mddev *mddev) { struct r1conf *conf = mddev->private; @@ -3246,8 +3261,8 @@ static void *raid1_takeover(struct mddev *mddev) if (!IS_ERR(conf)) { /* Array must appear to be quiesced */ conf->array_frozen = 1; - clear_bit(MD_HAS_JOURNAL, &mddev->flags); - clear_bit(MD_JOURNAL_CLEAN, &mddev->flags); + mddev_clear_unsupported_flags(mddev, + UNSUPPORTED_MDDEV_FLAGS); } return conf; } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index ab5e86209322..1920756828df 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1087,23 +1087,122 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) kfree(plug); } -static void __make_request(struct mddev *mddev, struct bio *bio) +static void raid10_read_request(struct mddev *mddev, struct bio *bio, + struct r10bio *r10_bio) { struct r10conf *conf = mddev->private; - struct r10bio *r10_bio; struct bio *read_bio; + const int op = bio_op(bio); + const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); + int sectors_handled; + int max_sectors; + sector_t sectors; + struct md_rdev *rdev; + int slot; + + /* + * Register the new request and wait if the reconstruction + * thread has put up a bar for new requests. + * Continue immediately if no resync is active currently. + */ + wait_barrier(conf); + + sectors = bio_sectors(bio); + while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && + bio->bi_iter.bi_sector < conf->reshape_progress && + bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { + /* + * IO spans the reshape position. 
Need to wait for reshape to + * pass + */ + raid10_log(conf->mddev, "wait reshape"); + allow_barrier(conf); + wait_event(conf->wait_barrier, + conf->reshape_progress <= bio->bi_iter.bi_sector || + conf->reshape_progress >= bio->bi_iter.bi_sector + + sectors); + wait_barrier(conf); + } + +read_again: + rdev = read_balance(conf, r10_bio, &max_sectors); + if (!rdev) { + raid_end_bio_io(r10_bio); + return; + } + slot = r10_bio->read_slot; + + read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); + bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, + max_sectors); + + r10_bio->devs[slot].bio = read_bio; + r10_bio->devs[slot].rdev = rdev; + + read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + + choose_data_offset(r10_bio, rdev); + read_bio->bi_bdev = rdev->bdev; + read_bio->bi_end_io = raid10_end_read_request; + bio_set_op_attrs(read_bio, op, do_sync); + if (test_bit(FailFast, &rdev->flags) && + test_bit(R10BIO_FailFast, &r10_bio->state)) + read_bio->bi_opf |= MD_FAILFAST; + read_bio->bi_private = r10_bio; + + if (mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), + read_bio, disk_devt(mddev->gendisk), + r10_bio->sector); + if (max_sectors < r10_bio->sectors) { + /* + * Could not read all from this device, so we will need another + * r10_bio. + */ + sectors_handled = (r10_bio->sector + max_sectors + - bio->bi_iter.bi_sector); + r10_bio->sectors = max_sectors; + spin_lock_irq(&conf->device_lock); + if (bio->bi_phys_segments == 0) + bio->bi_phys_segments = 2; + else + bio->bi_phys_segments++; + spin_unlock_irq(&conf->device_lock); + /* + * Cannot call generic_make_request directly as that will be + * queued in __generic_make_request and subsequent + * mempool_alloc might block waiting for it. so hand bio over + * to raid10d. + */ + reschedule_retry(r10_bio); + + r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); + + r10_bio->master_bio = bio; + r10_bio->sectors = bio_sectors(bio) - sectors_handled; + r10_bio->state = 0; + r10_bio->mddev = mddev; + r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; + goto read_again; + } else + generic_make_request(read_bio); + return; +} + +static void raid10_write_request(struct mddev *mddev, struct bio *bio, + struct r10bio *r10_bio) +{ + struct r10conf *conf = mddev->private; int i; const int op = bio_op(bio); - const int rw = bio_data_dir(bio); const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); const unsigned long do_fua = (bio->bi_opf & REQ_FUA); unsigned long flags; struct md_rdev *blocked_rdev; struct blk_plug_cb *cb; struct raid10_plug_cb *plug = NULL; + sector_t sectors; int sectors_handled; int max_sectors; - int sectors; md_write_start(mddev, bio); @@ -1118,8 +1217,9 @@ static void __make_request(struct mddev *mddev, struct bio *bio) while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && bio->bi_iter.bi_sector < conf->reshape_progress && bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { - /* IO spans the reshape position. Need to wait for - * reshape to pass + /* + * IO spans the reshape position. Need to wait for reshape to + * pass */ raid10_log(conf->mddev, "wait reshape"); allow_barrier(conf); @@ -1129,8 +1229,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio) sectors); wait_barrier(conf); } + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && - bio_data_dir(bio) == WRITE && (mddev->reshape_backwards ? 
(bio->bi_iter.bi_sector < conf->reshape_safe && bio->bi_iter.bi_sector + sectors > conf->reshape_progress) @@ -1148,98 +1248,6 @@ static void __make_request(struct mddev *mddev, struct bio *bio) conf->reshape_safe = mddev->reshape_position; } - r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); - - r10_bio->master_bio = bio; - r10_bio->sectors = sectors; - - r10_bio->mddev = mddev; - r10_bio->sector = bio->bi_iter.bi_sector; - r10_bio->state = 0; - - /* We might need to issue multiple reads to different - * devices if there are bad blocks around, so we keep - * track of the number of reads in bio->bi_phys_segments. - * If this is 0, there is only one r10_bio and no locking - * will be needed when the request completes. If it is - * non-zero, then it is the number of not-completed requests. - */ - bio->bi_phys_segments = 0; - bio_clear_flag(bio, BIO_SEG_VALID); - - if (rw == READ) { - /* - * read balancing logic: - */ - struct md_rdev *rdev; - int slot; - -read_again: - rdev = read_balance(conf, r10_bio, &max_sectors); - if (!rdev) { - raid_end_bio_io(r10_bio); - return; - } - slot = r10_bio->read_slot; - - read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, - max_sectors); - - r10_bio->devs[slot].bio = read_bio; - r10_bio->devs[slot].rdev = rdev; - - read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + - choose_data_offset(r10_bio, rdev); - read_bio->bi_bdev = rdev->bdev; - read_bio->bi_end_io = raid10_end_read_request; - bio_set_op_attrs(read_bio, op, do_sync); - if (test_bit(FailFast, &rdev->flags) && - test_bit(R10BIO_FailFast, &r10_bio->state)) - read_bio->bi_opf |= MD_FAILFAST; - read_bio->bi_private = r10_bio; - - if (mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), - read_bio, disk_devt(mddev->gendisk), - r10_bio->sector); - if (max_sectors < r10_bio->sectors) { - /* Could not read all from this device, so we will - * need another r10_bio. - */ - sectors_handled = (r10_bio->sector + max_sectors - - bio->bi_iter.bi_sector); - r10_bio->sectors = max_sectors; - spin_lock_irq(&conf->device_lock); - if (bio->bi_phys_segments == 0) - bio->bi_phys_segments = 2; - else - bio->bi_phys_segments++; - spin_unlock_irq(&conf->device_lock); - /* Cannot call generic_make_request directly - * as that will be queued in __generic_make_request - * and subsequent mempool_alloc might block - * waiting for it. so hand bio over to raid10d. 
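The handoff described here goes through the per-array retry list serviced by the raid10d thread. A rough sketch of that mechanism, patterned on the driver's reschedule_retry(); treat the details as illustrative rather than a quote of the file:

static void reschedule_retry_sketch(struct r10bio *r10_bio)
{
	unsigned long flags;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;

	/*
	 * Queue the r10_bio for raid10d instead of recursing into
	 * generic_make_request() from the submitting context, where a
	 * later mempool_alloc() could deadlock against the queued bio.
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}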
- */ - reschedule_retry(r10_bio); - - r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); - - r10_bio->master_bio = bio; - r10_bio->sectors = bio_sectors(bio) - sectors_handled; - r10_bio->state = 0; - r10_bio->mddev = mddev; - r10_bio->sector = bio->bi_iter.bi_sector + - sectors_handled; - goto read_again; - } else - generic_make_request(read_bio); - return; - } - - /* - * WRITE: - */ if (conf->pending_count >= max_queued_requests) { md_wakeup_thread(mddev->thread); raid10_log(mddev, "wait queued"); @@ -1300,8 +1308,7 @@ retry_write: int bad_sectors; int is_bad; - is_bad = is_badblock(rdev, dev_sector, - max_sectors, + is_bad = is_badblock(rdev, dev_sector, max_sectors, &first_bad, &bad_sectors); if (is_bad < 0) { /* Mustn't write here until the bad block @@ -1405,8 +1412,7 @@ retry_write: r10_bio->devs[i].bio = mbio; mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ - choose_data_offset(r10_bio, - rdev)); + choose_data_offset(r10_bio, rdev)); mbio->bi_bdev = rdev->bdev; mbio->bi_end_io = raid10_end_write_request; bio_set_op_attrs(mbio, op, do_sync | do_fua); @@ -1457,8 +1463,7 @@ retry_write: r10_bio->devs[i].repl_bio = mbio; mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + - choose_data_offset( - r10_bio, rdev)); + choose_data_offset(r10_bio, rdev)); mbio->bi_bdev = rdev->bdev; mbio->bi_end_io = raid10_end_write_request; bio_set_op_attrs(mbio, op, do_sync | do_fua); @@ -1503,6 +1508,36 @@ retry_write: one_write_done(r10_bio); } +static void __make_request(struct mddev *mddev, struct bio *bio) +{ + struct r10conf *conf = mddev->private; + struct r10bio *r10_bio; + + r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); + + r10_bio->master_bio = bio; + r10_bio->sectors = bio_sectors(bio); + + r10_bio->mddev = mddev; + r10_bio->sector = bio->bi_iter.bi_sector; + r10_bio->state = 0; + + /* + * We might need to issue multiple reads to different devices if there + * are bad blocks around, so we keep track of the number of reads in + * bio->bi_phys_segments. If this is 0, there is only one r10_bio and + * no locking will be needed when the request completes. If it is + * non-zero, then it is the number of not-completed requests. 
+ */ + bio->bi_phys_segments = 0; + bio_clear_flag(bio, BIO_SEG_VALID); + + if (bio_data_dir(bio) == READ) + raid10_read_request(mddev, bio, r10_bio); + else + raid10_write_request(mddev, bio, r10_bio); +} + static void raid10_make_request(struct mddev *mddev, struct bio *bio) { struct r10conf *conf = mddev->private; diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index d7bfb6fc8aef..0e8ed2c327b0 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1682,8 +1682,7 @@ out: static struct stripe_head * r5c_recovery_alloc_stripe(struct r5conf *conf, - sector_t stripe_sect, - sector_t log_start) + sector_t stripe_sect) { struct stripe_head *sh; @@ -1692,7 +1691,6 @@ r5c_recovery_alloc_stripe(struct r5conf *conf, return NULL; /* no more stripe available */ r5l_recovery_reset_stripe(sh); - sh->log_start = log_start; return sh; } @@ -1862,7 +1860,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, stripe_sect); if (!sh) { - sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos); + sh = r5c_recovery_alloc_stripe(conf, stripe_sect); /* * cannot get stripe from raid5_get_active_stripe * try replay some stripes @@ -1871,7 +1869,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, r5c_recovery_replay_stripes( cached_stripe_list, ctx); sh = r5c_recovery_alloc_stripe( - conf, stripe_sect, ctx->pos); + conf, stripe_sect); } if (!sh) { pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n", @@ -1879,8 +1877,8 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, conf->min_nr_stripes * 2); raid5_set_cache_size(mddev, conf->min_nr_stripes * 2); - sh = r5c_recovery_alloc_stripe( - conf, stripe_sect, ctx->pos); + sh = r5c_recovery_alloc_stripe(conf, + stripe_sect); } if (!sh) { pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. 
Recovery failed.\n", @@ -1894,7 +1892,6 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, if (!test_bit(STRIPE_R5C_CACHING, &sh->state) && test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) { r5l_recovery_replay_one_stripe(conf, sh, ctx); - sh->log_start = ctx->pos; list_move_tail(&sh->lru, cached_stripe_list); } r5l_recovery_load_data(log, sh, ctx, payload, @@ -1933,8 +1930,6 @@ static void r5c_recovery_load_one_stripe(struct r5l_log *log, set_bit(R5_UPTODATE, &dev->flags); } } - list_add_tail(&sh->r5c, &log->stripe_in_journal_list); - atomic_inc(&log->stripe_in_journal_count); } /* @@ -2070,6 +2065,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, struct stripe_head *sh, *next; struct mddev *mddev = log->rdev->mddev; struct page *page; + sector_t next_checkpoint = MaxSector; page = alloc_page(GFP_KERNEL); if (!page) { @@ -2078,6 +2074,8 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, return -ENOMEM; } + WARN_ON(list_empty(&ctx->cached_list)); + list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { struct r5l_meta_block *mb; int i; @@ -2123,12 +2121,15 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_WRITE, REQ_FUA, false); sh->log_start = ctx->pos; + list_add_tail(&sh->r5c, &log->stripe_in_journal_list); + atomic_inc(&log->stripe_in_journal_count); ctx->pos = write_pos; ctx->seq += 1; - + next_checkpoint = sh->log_start; list_del_init(&sh->lru); raid5_release_stripe(sh); } + log->next_checkpoint = next_checkpoint; __free_page(page); return 0; } @@ -2139,7 +2140,6 @@ static int r5l_recovery_log(struct r5l_log *log) struct r5l_recovery_ctx ctx; int ret; sector_t pos; - struct stripe_head *sh; ctx.pos = log->last_checkpoint; ctx.seq = log->last_cp_seq; @@ -2164,16 +2164,13 @@ static int r5l_recovery_log(struct r5l_log *log) log->next_checkpoint = ctx.pos; r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++); ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); - } else { - sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru); - log->next_checkpoint = sh->log_start; } if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0)) pr_debug("md/raid:%s: starting from clean shutdown\n", mdname(mddev)); else { - pr_debug("md/raid:%s: recoverying %d data-only stripes and %d data-parity stripes\n", + pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n", mdname(mddev), ctx.data_only_stripes, ctx.data_parity_stripes); @@ -2418,9 +2415,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf, if (do_wakeup) wake_up(&conf->wait_for_overlap); - if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) - return; - spin_lock_irq(&conf->log->stripe_in_journal_lock); list_del_init(&sh->r5c); spin_unlock_irq(&conf->log->stripe_in_journal_lock); @@ -2639,14 +2633,16 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) spin_lock_init(&log->stripe_in_journal_lock); atomic_set(&log->stripe_in_journal_count, 0); + rcu_assign_pointer(conf->log, log); + if (r5l_load_log(log)) goto error; - rcu_assign_pointer(conf->log, log); set_bit(MD_HAS_JOURNAL, &conf->mddev->flags); return 0; error: + rcu_assign_pointer(conf->log, NULL); md_unregister_thread(&log->reclaim_thread); reclaim_thread: mempool_destroy(log->meta_pool); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 06d7279bdd04..36c13e4be9c9 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -62,6 +62,8 @@ #include "raid0.h" #include "bitmap.h" +#define 
UNSUPPORTED_MDDEV_FLAGS (1L << MD_FAILFAST_SUPPORTED) + #define cpu_to_group(cpu) cpu_to_node(cpu) #define ANY_GROUP NUMA_NO_NODE @@ -7829,8 +7831,9 @@ static void *raid5_takeover_raid1(struct mddev *mddev) mddev->new_chunk_sectors = chunksect; ret = setup_conf(mddev); - if (!IS_ERR_VALUE(ret)) - clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); + if (!IS_ERR(ret)) + mddev_clear_unsupported_flags(mddev, + UNSUPPORTED_MDDEV_FLAGS); return ret; } diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index 0ea4efb3de66..ebb5e391b800 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -30,8 +30,9 @@ #include "cec-priv.h" -static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx); -static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx); +static void cec_fill_msg_report_features(struct cec_adapter *adap, + struct cec_msg *msg, + unsigned int la_idx); /* * 400 ms is the time it takes for one 16 byte message to be @@ -288,10 +289,10 @@ static void cec_data_cancel(struct cec_data *data) /* Mark it as an error */ data->msg.tx_ts = ktime_get_ns(); - data->msg.tx_status = CEC_TX_STATUS_ERROR | - CEC_TX_STATUS_MAX_RETRIES; + data->msg.tx_status |= CEC_TX_STATUS_ERROR | + CEC_TX_STATUS_MAX_RETRIES; + data->msg.tx_error_cnt++; data->attempts = 0; - data->msg.tx_error_cnt = 1; /* Queue transmitted message for monitoring purposes */ cec_queue_msg_monitor(data->adap, &data->msg, 1); @@ -851,7 +852,7 @@ static const u8 cec_msg_size[256] = { [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED, [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED, [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST, - [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST, + [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST, [CEC_MSG_CDC_MESSAGE] = 2 | BCAST, }; @@ -1250,30 +1251,49 @@ configured: for (i = 1; i < las->num_log_addrs; i++) las->log_addr[i] = CEC_LOG_ADDR_INVALID; } + for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) + las->log_addr[i] = CEC_LOG_ADDR_INVALID; adap->is_configured = true; adap->is_configuring = false; cec_post_state_event(adap); - mutex_unlock(&adap->lock); + /* + * Now post the Report Features and Report Physical Address broadcast + * messages. Note that these are non-blocking transmits, meaning that + * they are just queued up and once adap->lock is unlocked the main + * thread will kick in and start transmitting these. + * + * If after this function is done (but before one or more of these + * messages are actually transmitted) the CEC adapter is unconfigured, + * then any remaining messages will be dropped by the main thread. 
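To make the queueing concrete: cec_transmit_msg_fh(adap, &msg, NULL, false) only enqueues the message for the main thread. A broadcast such as Report Physical Address is assembled roughly as below; this sketches the effect of the uapi helper per the CEC 2.0 operand layout, and the helper internals shown are an assumption:

static void fill_report_phys_addr_sketch(struct cec_msg *msg, __u16 phys_addr,
					 __u8 prim_devtype)
{
	msg->len = 5;
	msg->msg[0] |= 0x0f;			/* low nibble: broadcast destination */
	msg->msg[1] = CEC_MSG_REPORT_PHYSICAL_ADDR;
	msg->msg[2] = phys_addr >> 8;		/* e.g. 0x10 0x00 for 1.0.0.0 */
	msg->msg[3] = phys_addr & 0xff;
	msg->msg[4] = prim_devtype;
}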
+ */ for (i = 0; i < las->num_log_addrs; i++) { + struct cec_msg msg = {}; + if (las->log_addr[i] == CEC_LOG_ADDR_INVALID || (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY)) continue; - /* - * Report Features must come first according - * to CEC 2.0 - */ - if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED) - cec_report_features(adap, i); - cec_report_phys_addr(adap, i); + msg.msg[0] = (las->log_addr[i] << 4) | 0x0f; + + /* Report Features must come first according to CEC 2.0 */ + if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED && + adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) { + cec_fill_msg_report_features(adap, &msg, i); + cec_transmit_msg_fh(adap, &msg, NULL, false); + } + + /* Report Physical Address */ + cec_msg_report_physical_addr(&msg, adap->phys_addr, + las->primary_device_type[i]); + dprintk(2, "config: la %d pa %x.%x.%x.%x\n", + las->log_addr[i], + cec_phys_addr_exp(adap->phys_addr)); + cec_transmit_msg_fh(adap, &msg, NULL, false); } - for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) - las->log_addr[i] = CEC_LOG_ADDR_INVALID; - mutex_lock(&adap->lock); adap->kthread_config = NULL; - mutex_unlock(&adap->lock); complete(&adap->config_completion); + mutex_unlock(&adap->lock); return 0; unconfigure: @@ -1526,52 +1546,32 @@ EXPORT_SYMBOL_GPL(cec_s_log_addrs); /* High-level core CEC message handling */ -/* Transmit the Report Features message */ -static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx) +/* Fill in the Report Features message */ +static void cec_fill_msg_report_features(struct cec_adapter *adap, + struct cec_msg *msg, + unsigned int la_idx) { - struct cec_msg msg = { }; const struct cec_log_addrs *las = &adap->log_addrs; const u8 *features = las->features[la_idx]; bool op_is_dev_features = false; unsigned int idx; - /* This is 2.0 and up only */ - if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0) - return 0; - /* Report Features */ - msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f; - msg.len = 4; - msg.msg[1] = CEC_MSG_REPORT_FEATURES; - msg.msg[2] = adap->log_addrs.cec_version; - msg.msg[3] = las->all_device_types[la_idx]; + msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f; + msg->len = 4; + msg->msg[1] = CEC_MSG_REPORT_FEATURES; + msg->msg[2] = adap->log_addrs.cec_version; + msg->msg[3] = las->all_device_types[la_idx]; /* Write RC Profiles first, then Device Features */ for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) { - msg.msg[msg.len++] = features[idx]; + msg->msg[msg->len++] = features[idx]; if ((features[idx] & CEC_OP_FEAT_EXT) == 0) { if (op_is_dev_features) break; op_is_dev_features = true; } } - return cec_transmit_msg(adap, &msg, false); -} - -/* Transmit the Report Physical Address message */ -static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx) -{ - const struct cec_log_addrs *las = &adap->log_addrs; - struct cec_msg msg = { }; - - /* Report Physical Address */ - msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f; - cec_msg_report_physical_addr(&msg, adap->phys_addr, - las->primary_device_type[la_idx]); - dprintk(2, "config: la %d pa %x.%x.%x.%x\n", - las->log_addr[la_idx], - cec_phys_addr_exp(adap->phys_addr)); - return cec_transmit_msg(adap, &msg, false); } /* Transmit the Feature Abort message */ @@ -1777,9 +1777,10 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, } case CEC_MSG_GIVE_FEATURES: - if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) - return cec_report_features(adap, la_idx); - return 0; + if (adap->log_addrs.cec_version < 
CEC_OP_CEC_VERSION_2_0) + return cec_feature_abort(adap, msg); + cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx); + return cec_transmit_msg(adap, &tx_cec_msg, false); default: /* diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index bc5e8cfe7ca2..8f11d7e45993 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@ -719,6 +719,9 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h, skb_copy_from_linear_data(h->priv->ule_skb, dest_addr, ETH_ALEN); skb_pull(h->priv->ule_skb, ETH_ALEN); + } else { + /* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */ + eth_zero_addr(dest_addr); } /* Handle ULE Extension Headers. */ @@ -750,16 +753,8 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h, if (!h->priv->ule_bridged) { skb_push(h->priv->ule_skb, ETH_HLEN); h->ethh = (struct ethhdr *)h->priv->ule_skb->data; - if (!h->priv->ule_dbit) { - /* - * dest_addr buffer is only valid if - * h->priv->ule_dbit == 0 - */ - memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN); - eth_zero_addr(h->ethh->h_source); - } else /* zeroize source and dest */ - memset(h->ethh, 0, ETH_ALEN * 2); - + memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN); + eth_zero_addr(h->ethh->h_source); h->ethh->h_proto = htons(h->priv->ule_sndu_type); } /* else: skb is in correct state; nothing to do. */ diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index b31fa6fae009..b979ea148251 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -655,6 +655,7 @@ config VIDEO_S5K6A3 config VIDEO_S5K4ECGX tristate "Samsung S5K4ECGX sensor support" depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + select CRC32 ---help--- This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M camera sensor with an embedded SoC image signal processor. 
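Looking back at the dvb_net hunk above, the new else branch establishes a simple invariant: dest_addr is always initialised, zeroed whenever the ULE D-bit suppressed the address, so the header fill can copy it unconditionally. A standalone sketch of the resulting fill, with hypothetical local names:

#include <linux/etherdevice.h>
#include <linux/if_ether.h>

static void fill_ule_eth_header(struct ethhdr *ethh, const u8 *dest_addr,
				u16 sndu_type)
{
	/* dest_addr was zeroed when the D-bit was set, so no D-bit
	 * test is needed at this point any more. */
	memcpy(ethh->h_dest, dest_addr, ETH_ALEN);
	eth_zero_addr(ethh->h_source);
	ethh->h_proto = htons(sndu_type);
}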
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c index 59872b31f832..f4e92bdfe192 100644 --- a/drivers/media/i2c/smiapp/smiapp-core.c +++ b/drivers/media/i2c/smiapp/smiapp-core.c @@ -2741,9 +2741,7 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_ops = { * I2C Driver */ -#ifdef CONFIG_PM - -static int smiapp_suspend(struct device *dev) +static int __maybe_unused smiapp_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *subdev = i2c_get_clientdata(client); @@ -2768,7 +2766,7 @@ static int smiapp_suspend(struct device *dev) return 0; } -static int smiapp_resume(struct device *dev) +static int __maybe_unused smiapp_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *subdev = i2c_get_clientdata(client); @@ -2783,13 +2781,6 @@ static int smiapp_resume(struct device *dev) return rval; } -#else - -#define smiapp_suspend NULL -#define smiapp_resume NULL - -#endif /* CONFIG_PM */ - static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev) { struct smiapp_hwconfig *hwcfg; @@ -2913,13 +2904,9 @@ static int smiapp_probe(struct i2c_client *client, if (IS_ERR(sensor->xshutdown)) return PTR_ERR(sensor->xshutdown); - pm_runtime_enable(&client->dev); - - rval = pm_runtime_get_sync(&client->dev); - if (rval < 0) { - rval = -ENODEV; - goto out_power_off; - } + rval = smiapp_power_on(&client->dev); + if (rval < 0) + return rval; rval = smiapp_identify_module(sensor); if (rval) { @@ -3100,6 +3087,9 @@ static int smiapp_probe(struct i2c_client *client, if (rval < 0) goto out_media_entity_cleanup; + pm_runtime_set_active(&client->dev); + pm_runtime_get_noresume(&client->dev); + pm_runtime_enable(&client->dev); pm_runtime_set_autosuspend_delay(&client->dev, 1000); pm_runtime_use_autosuspend(&client->dev); pm_runtime_put_autosuspend(&client->dev); @@ -3113,8 +3103,7 @@ out_cleanup: smiapp_cleanup(sensor); out_power_off: - pm_runtime_put(&client->dev); - pm_runtime_disable(&client->dev); + smiapp_power_off(&client->dev); return rval; } @@ -3127,8 +3116,10 @@ static int smiapp_remove(struct i2c_client *client) v4l2_async_unregister_subdev(subdev); - pm_runtime_suspend(&client->dev); pm_runtime_disable(&client->dev); + if (!pm_runtime_status_suspended(&client->dev)) + smiapp_power_off(&client->dev); + pm_runtime_set_suspended(&client->dev); for (i = 0; i < sensor->ssds_used; i++) { v4l2_device_unregister_subdev(&sensor->ssds[i].sd); diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 3a0fe8cc64e9..48646a7f3fb0 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -291,8 +291,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd) tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode); tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input); - /* Svideo should enable YCrCb output and disable GPCL output - * For Composite and TV, it should be the reverse + /* + * Setup the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For + * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK + * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the + * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set + * INTREQ/GPCL/VBLK to logic 1. 
*/ val = tvp5150_read(sd, TVP5150_MISC_CTL); if (val < 0) { @@ -301,9 +305,9 @@ } if (decoder->input == TVP5150_SVIDEO) - val = (val & ~0x40) | 0x10; + val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK; else - val = (val & ~0x10) | 0x40; + val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL; tvp5150_write(sd, TVP5150_MISC_CTL, val); }; @@ -455,7 +459,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = { },{ /* Automatic offset and AGC enabled */ TVP5150_ANAL_CHL_CTL, 0x15 },{ /* Activate YCrCb output 0x9 or 0xd ? */ - TVP5150_MISC_CTL, 0x6f + TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL | + TVP5150_MISC_CTL_INTREQ_OE | + TVP5150_MISC_CTL_YCBCR_OE | + TVP5150_MISC_CTL_SYNC_OE | + TVP5150_MISC_CTL_VBLANK | + TVP5150_MISC_CTL_CLOCK_OE, },{ /* Activates video std autodetection for all standards */ TVP5150_AUTOSW_MSK, 0x0 },{ /* Default format: 0x47. For 4:2:2: 0x40 */ @@ -861,8 +870,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd, f = &format->format; - tvp5150_reset(sd, 0); - f->width = decoder->rect.width; f->height = decoder->rect.height / 2; @@ -1051,21 +1058,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = { static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable) { struct tvp5150 *decoder = to_tvp5150(sd); - /* Output format: 8-bit ITU-R BT.656 with embedded syncs */ - int val = 0x09; - - /* Output format: 8-bit 4:2:2 YUV with discrete sync */ - if (decoder->mbus_type == V4L2_MBUS_PARALLEL) - val = 0x0d; + int val; - /* Initializes TVP5150 to its default values */ - /* # set PCLK (27MHz) */ - tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00); + /* Enable or disable the video output signals. */ + val = tvp5150_read(sd, TVP5150_MISC_CTL); + if (val < 0) + return val; + + val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE | + TVP5150_MISC_CTL_CLOCK_OE); + + if (enable) { + /* + * Enable the YCbCr and clock outputs. In discrete sync mode + * (non-BT.656) additionally enable the sync outputs.
+ */ + val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE; + if (decoder->mbus_type == V4L2_MBUS_PARALLEL) + val |= TVP5150_MISC_CTL_SYNC_OE; + } - if (enable) - tvp5150_write(sd, TVP5150_MISC_CTL, val); - else - tvp5150_write(sd, TVP5150_MISC_CTL, 0x00); + tvp5150_write(sd, TVP5150_MISC_CTL, val); return 0; } @@ -1524,7 +1537,6 @@ static int tvp5150_probe(struct i2c_client *c, res = core->hdl.error; goto err; } - v4l2_ctrl_handler_setup(&core->hdl); /* Default is no cropping */ core->rect.top = 0; @@ -1535,6 +1547,8 @@ static int tvp5150_probe(struct i2c_client *c, core->rect.left = 0; core->rect.width = TVP5150_H_MAX; + tvp5150_reset(sd, 0); /* Calls v4l2_ctrl_handler_setup() */ + res = v4l2_async_register_subdev(sd); if (res < 0) goto err; diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h index 25a994944918..30a48c28d05a 100644 --- a/drivers/media/i2c/tvp5150_reg.h +++ b/drivers/media/i2c/tvp5150_reg.h @@ -9,6 +9,15 @@ #define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */ #define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */ #define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */ +#define TVP5150_MISC_CTL_VBLK_GPCL BIT(7) +#define TVP5150_MISC_CTL_GPCL BIT(6) +#define TVP5150_MISC_CTL_INTREQ_OE BIT(5) +#define TVP5150_MISC_CTL_HVLK BIT(4) +#define TVP5150_MISC_CTL_YCBCR_OE BIT(3) +#define TVP5150_MISC_CTL_SYNC_OE BIT(2) +#define TVP5150_MISC_CTL_VBLANK BIT(1) +#define TVP5150_MISC_CTL_CLOCK_OE BIT(0) + #define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */ /* Reserved 05h */ diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c index 979634000597..d5c911c09e2b 100644 --- a/drivers/media/pci/cobalt/cobalt-driver.c +++ b/drivers/media/pci/cobalt/cobalt-driver.c @@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev) static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev) { free_irq(pci_dev->irq, (void *)cobalt); - - if (cobalt->msi_enabled) - pci_disable_msi(pci_dev); + pci_free_irq_vectors(pci_dev); } static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev, @@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev, from being generated. */ cobalt_set_interrupt(cobalt, false); - if (pci_enable_msi_range(pci_dev, 1, 1) < 1) { + if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) { cobalt_err("Could not enable MSI\n"); - cobalt->msi_enabled = false; ret = -EIO; goto err_release; } msi_config_show(cobalt, pci_dev); - cobalt->msi_enabled = true; /* Register IRQ */ if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED, diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h index ed00dc9d9399..00f773ec359a 100644 --- a/drivers/media/pci/cobalt/cobalt-driver.h +++ b/drivers/media/pci/cobalt/cobalt-driver.h @@ -287,8 +287,6 @@ struct cobalt { u32 irq_none; u32 irq_full_fifo; - bool msi_enabled; - /* omnitek dma */ int dma_channels; int first_fifo_channel; diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c index 07fa08be9e99..d54ebe7e0215 100644 --- a/drivers/media/usb/dvb-usb/pctv452e.c +++ b/drivers/media/usb/dvb-usb/pctv452e.c @@ -97,14 +97,13 @@ struct pctv452e_state { u8 c; /* transaction counter, wraps around... 
*/ u8 initialized; /* set to 1 if 0x15 has been sent */ u16 last_rc_key; - - unsigned char data[80]; }; static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, unsigned int write_len, unsigned int read_len) { struct pctv452e_state *state = (struct pctv452e_state *)d->priv; + u8 *buf; u8 id; unsigned int rlen; int ret; @@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, return -EIO; } - mutex_lock(&state->ca_mutex); + buf = kmalloc(64, GFP_KERNEL); + if (!buf) + return -ENOMEM; + id = state->c++; - state->data[0] = SYNC_BYTE_OUT; - state->data[1] = id; - state->data[2] = cmd; - state->data[3] = write_len; + buf[0] = SYNC_BYTE_OUT; + buf[1] = id; + buf[2] = cmd; + buf[3] = write_len; - memcpy(state->data + 4, data, write_len); + memcpy(buf + 4, data, write_len); rlen = (read_len > 0) ? 64 : 0; - ret = dvb_usb_generic_rw(d, state->data, 4 + write_len, - state->data, rlen, /* delay_ms */ 0); + ret = dvb_usb_generic_rw(d, buf, 4 + write_len, + buf, rlen, /* delay_ms */ 0); if (0 != ret) goto failed; ret = -EIO; - if (SYNC_BYTE_IN != state->data[0] || id != state->data[1]) + if (SYNC_BYTE_IN != buf[0] || id != buf[1]) goto failed; - memcpy(data, state->data + 4, read_len); + memcpy(data, buf + 4, read_len); - mutex_unlock(&state->ca_mutex); + kfree(buf); return 0; failed: err("CI error %d; %02X %02X %02X -> %*ph.", - ret, SYNC_BYTE_OUT, id, cmd, 3, state->data); + ret, SYNC_BYTE_OUT, id, cmd, 3, buf); - mutex_unlock(&state->ca_mutex); + kfree(buf); return ret; } @@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr, u8 *rcv_buf, u8 rcv_len) { struct pctv452e_state *state = (struct pctv452e_state *)d->priv; + u8 *buf; u8 id; int ret; - mutex_lock(&state->ca_mutex); + buf = kmalloc(64, GFP_KERNEL); + if (!buf) + return -ENOMEM; + id = state->c++; ret = -EINVAL; if (snd_len > 64 - 7 || rcv_len > 64 - 7) goto failed; - state->data[0] = SYNC_BYTE_OUT; - state->data[1] = id; - state->data[2] = PCTV_CMD_I2C; - state->data[3] = snd_len + 3; - state->data[4] = addr << 1; - state->data[5] = snd_len; - state->data[6] = rcv_len; + buf[0] = SYNC_BYTE_OUT; + buf[1] = id; + buf[2] = PCTV_CMD_I2C; + buf[3] = snd_len + 3; + buf[4] = addr << 1; + buf[5] = snd_len; + buf[6] = rcv_len; - memcpy(state->data + 7, snd_buf, snd_len); + memcpy(buf + 7, snd_buf, snd_len); - ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len, - state->data, /* rcv_len */ 64, + ret = dvb_usb_generic_rw(d, buf, 7 + snd_len, + buf, /* rcv_len */ 64, /* delay_ms */ 0); if (ret < 0) goto failed; /* TT USB protocol error. */ ret = -EIO; - if (SYNC_BYTE_IN != state->data[0] || id != state->data[1]) + if (SYNC_BYTE_IN != buf[0] || id != buf[1]) goto failed; /* I2C device didn't respond as expected. 
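The pattern behind these pctv452e changes: the transfer buffer moves from the long-lived device state (which forced ca_mutex serialisation) to a per-call heap allocation, which is DMA-safe for USB and shares no state between callers. Reduced to a skeleton, with a hypothetical helper name and error handling trimmed:

static int usb_roundtrip_sketch(struct dvb_usb_device *d,
				const u8 *snd, unsigned int snd_len)
{
	u8 *buf;
	int ret;

	buf = kmalloc(64, GFP_KERNEL);	/* heap memory is usable for USB DMA */
	if (!buf)
		return -ENOMEM;

	memcpy(buf, snd, snd_len);
	ret = dvb_usb_generic_rw(d, buf, snd_len, buf, 64, 0);

	kfree(buf);			/* every exit path frees the buffer */
	return ret;
}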
*/ ret = -EREMOTEIO; - if (state->data[5] < snd_len || state->data[6] < rcv_len) + if (buf[5] < snd_len || buf[6] < rcv_len) goto failed; - memcpy(rcv_buf, state->data + 7, rcv_len); - mutex_unlock(&state->ca_mutex); + memcpy(rcv_buf, buf + 7, rcv_len); + kfree(buf); return rcv_len; failed: err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph", ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len, - 7, state->data); + 7, buf); - mutex_unlock(&state->ca_mutex); + kfree(buf); return ret; } @@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter) static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) { struct pctv452e_state *state = (struct pctv452e_state *)d->priv; - u8 *rx; + u8 *b0, *rx; int ret; info("%s: %d\n", __func__, i); @@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) if (state->initialized) return 0; - rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL); - if (!rx) + b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL); + if (!b0) return -ENOMEM; - mutex_lock(&state->ca_mutex); + rx = b0 + 5; + /* hmm where shoud this should go? */ ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE); if (ret != 0) @@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) __func__, ret); /* this is a one-time initialization, dont know where to put */ - state->data[0] = 0xaa; - state->data[1] = state->c++; - state->data[2] = PCTV_CMD_RESET; - state->data[3] = 1; - state->data[4] = 0; + b0[0] = 0xaa; + b0[1] = state->c++; + b0[2] = PCTV_CMD_RESET; + b0[3] = 1; + b0[4] = 0; /* reset board */ - ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0); + ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0); if (ret) goto ret; - state->data[1] = state->c++; - state->data[4] = 1; + b0[1] = state->c++; + b0[4] = 1; /* reset board (again?) 
*/ - ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0); + ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0); if (ret) goto ret; state->initialized = 1; ret: - mutex_unlock(&state->ca_mutex); - kfree(rx); + kfree(b0); return ret; } static int pctv452e_rc_query(struct dvb_usb_device *d) { struct pctv452e_state *state = (struct pctv452e_state *)d->priv; + u8 *b, *rx; int ret, i; u8 id; - mutex_lock(&state->ca_mutex); + b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL); + if (!b) + return -ENOMEM; + + rx = b + CMD_BUFFER_SIZE; + id = state->c++; /* prepare command header */ - state->data[0] = SYNC_BYTE_OUT; - state->data[1] = id; - state->data[2] = PCTV_CMD_IR; - state->data[3] = 0; + b[0] = SYNC_BYTE_OUT; + b[1] = id; + b[2] = PCTV_CMD_IR; + b[3] = 0; /* send ir request */ - ret = dvb_usb_generic_rw(d, state->data, 4, - state->data, PCTV_ANSWER_LEN, 0); + ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0); if (ret != 0) goto ret; if (debug > 3) { - info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data); - for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++) - info(" %02x", state->data[i + 3]); + info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx); + for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++) + info(" %02x", rx[i+3]); info("\n"); } - if ((state->data[3] == 9) && (state->data[12] & 0x01)) { + if ((rx[3] == 9) && (rx[12] & 0x01)) { /* got a "press" event */ - state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]); + state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]); if (debug > 2) info("%s: cmd=0x%02x sys=0x%02x\n", - __func__, state->data[6], state->data[7]); + __func__, rx[6], rx[7]); rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0); } else if (state->last_rc_key) { @@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d) state->last_rc_key = 0; } ret: - mutex_unlock(&state->ca_mutex); + kfree(b); return ret; } diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index a0547dbf9806..76382c858c35 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card, struct ms_id_register id_reg; if (!(*mrq)) { - memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL, + memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg, sizeof(struct ms_id_register)); *mrq = &card->current_mrq; return 0; diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 18e05ca7584f..3600c9993a98 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -152,6 +152,9 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev) { int ret; + if (!cldev->bus->hbm_f_os_supported) + return; + ret = mei_cldev_enable(cldev); if (ret) return; diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index c6c051b52f55..c6217a4993ad 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -180,6 +180,8 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf, dev->hbm_f_ev_supported); pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n", dev->hbm_f_fa_supported); + pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n", + dev->hbm_f_os_supported); } pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n", diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index dd7f15a65eed..25b4a1ba522d 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -989,6 +989,10 
@@ static void mei_hbm_config_features(struct mei_device *dev) /* Fixed Address Client Support */ if (dev->version.major_version >= HBM_MAJOR_VERSION_FA) dev->hbm_f_fa_supported = 1; + + /* OS ver message Support */ + if (dev->version.major_version >= HBM_MAJOR_VERSION_OS) + dev->hbm_f_os_supported = 1; } /** diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 9daf3f9aed25..e1e4d47d4d7d 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -76,6 +76,12 @@ #define HBM_MINOR_VERSION_FA 0 #define HBM_MAJOR_VERSION_FA 2 +/* + * MEI version with OS ver message support + */ +#define HBM_MINOR_VERSION_OS 0 +#define HBM_MAJOR_VERSION_OS 2 + /* Host bus message command opcode */ #define MEI_HBM_CMD_OP_MSK 0x7f /* Host bus message command RESPONSE */ diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 699693cd8c59..8dadb98662a9 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -406,6 +406,7 @@ const char *mei_pg_state_str(enum mei_pg_state state); * @hbm_f_ev_supported : hbm feature event notification * @hbm_f_fa_supported : hbm feature fixed address client * @hbm_f_ie_supported : hbm feature immediate reply to enum request + * @hbm_f_os_supported : hbm feature support OS ver message * * @me_clients_rwsem: rw lock over me_clients list * @me_clients : list of FW clients @@ -487,6 +488,7 @@ struct mei_device { unsigned int hbm_f_ev_supported:1; unsigned int hbm_f_fa_supported:1; unsigned int hbm_f_ie_supported:1; + unsigned int hbm_f_os_supported:1; struct rw_semaphore me_clients_rwsem; struct list_head me_clients; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index b11c3455b040..e6ea8503f40c 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -506,9 +506,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, } } while (busy); - if (host->ops->card_busy && send_status) - return mmc_switch_status(card); - return 0; } @@ -577,24 +574,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, if (!use_busy_signal) goto out; - /* Switch to new timing before poll and check switch status. */ - if (timing) - mmc_set_timing(host, timing); - /*If SPI or used HW busy detection above, then we don't need to poll. */ if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) || - mmc_host_is_spi(host)) { - if (send_status) - err = mmc_switch_status(card); + mmc_host_is_spi(host)) goto out_tim; - } /* Let's try to poll to find out when the command is completed. */ err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err); + if (err) + goto out; out_tim: - if (err && timing) - mmc_set_timing(host, old_timing); + /* Switch to new timing before check switch status. 
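Condensing the reworked tail of __mmc_switch(): poll for busy at the old timing, then switch the host timing, then verify the switch status at the new timing, rolling back on failure. A simplified sketch of just that ordering (error paths trimmed):

static int switch_timing_then_check(struct mmc_card *card,
				    struct mmc_host *host,
				    unsigned int timing,
				    unsigned int old_timing,
				    bool send_status)
{
	int err = 0;

	if (timing)
		mmc_set_timing(host, timing);		/* switch first ... */
	if (send_status) {
		err = mmc_switch_status(card);		/* ... check at new timing */
		if (err && timing)
			mmc_set_timing(host, old_timing); /* roll back on failure */
	}
	return err;
}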
*/ + if (timing) + mmc_set_timing(host, timing); + + if (send_status) { + err = mmc_switch_status(card); + if (err && timing) + mmc_set_timing(host, old_timing); + } out: mmc_retune_release(host); diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index b44306b886cb..73db08558e4d 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -3354,10 +3354,11 @@ int dw_mci_runtime_resume(struct device *dev) if (!slot) continue; - if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) { + if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) dw_mci_set_ios(slot->mmc, &slot->mmc->ios); - dw_mci_setup_bus(slot, true); - } + + /* Force setup bus to guarantee available clock output */ + dw_mci_setup_bus(slot, true); } /* Now that slots are all setup, we can enable card detect */ diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index b352760c041e..09739352834c 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -578,13 +578,15 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) { struct meson_host *host = dev_id; struct mmc_request *mrq; - struct mmc_command *cmd = host->cmd; + struct mmc_command *cmd; u32 irq_en, status, raw_status; irqreturn_t ret = IRQ_HANDLED; if (WARN_ON(!host)) return IRQ_NONE; + cmd = host->cmd; + mrq = host->mrq; if (WARN_ON(!mrq)) @@ -670,10 +672,10 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id) int ret = IRQ_HANDLED; if (WARN_ON(!mrq)) - ret = IRQ_NONE; + return IRQ_NONE; if (WARN_ON(!cmd)) - ret = IRQ_NONE; + return IRQ_NONE; data = cmd->data; if (data) { diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 44ecebd1ea8c..c8b8ac66ff7e 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); cmd1 = cmd->arg; + if (cmd->opcode == MMC_STOP_TRANSMISSION) + cmd0 |= BM_SSP_CMD0_APPEND_8CYC; + if (host->sdio_irq_en) { ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; @@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) ssp->base + HW_SSP_BLOCK_SIZE); } - if ((cmd->opcode == MMC_STOP_TRANSMISSION) || - (cmd->opcode == SD_IO_RW_EXTENDED)) + if (cmd->opcode == SD_IO_RW_EXTENDED) cmd0 |= BM_SSP_CMD0_APPEND_8CYC; cmd1 = cmd->arg; diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 160f695cc09c..278a5a435ab7 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -395,7 +395,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev) /* Power on the SDHCI controller and its children */ acpi_device_fix_up_power(device); list_for_each_entry(child, &device->children, node) - acpi_device_fix_up_power(child); + if (child->status.present && child->status.enabled) + acpi_device_fix_up_power(child); if (acpi_bus_get_status(device) || !device->status.present) return -ENODEV; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 353a9ddf6b97..9ce5dcb4abd0 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -426,6 +426,7 @@ config MTD_NAND_ORION config MTD_NAND_OXNAS tristate "NAND Flash support for Oxford Semiconductor SoC" + depends on HAS_IOMEM help This enables the NAND flash controller on Oxford Semiconductor SoCs. 
@@ -540,7 +541,7 @@ config MTD_NAND_FSMC Flexible Static Memory Controller (FSMC) config MTD_NAND_XWAY - tristate "Support for NAND on Lantiq XWAY SoC" + bool "Support for NAND on Lantiq XWAY SoC" depends on LANTIQ && SOC_TYPE_XWAY help Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index 5553a5d9efd1..846a66c1b133 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c @@ -775,7 +775,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) init_completion(&host->comp_controller); host->irq = platform_get_irq(pdev, 0); - if ((host->irq < 0) || (host->irq >= NR_IRQS)) { + if (host->irq < 0) { dev_err(&pdev->dev, "failed to get platform irq\n"); res = -EINVAL; goto err_exit3; diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c index 28c7f474be77..4a5e948c62df 100644 --- a/drivers/mtd/nand/tango_nand.c +++ b/drivers/mtd/nand/tango_nand.c @@ -632,11 +632,13 @@ static int tango_nand_probe(struct platform_device *pdev) if (IS_ERR(nfc->pbus_base)) return PTR_ERR(nfc->pbus_base); + writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE); + clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); - nfc->chan = dma_request_chan(&pdev->dev, "nfc_sbox"); + nfc->chan = dma_request_chan(&pdev->dev, "rxtx"); if (IS_ERR(nfc->chan)) return PTR_ERR(nfc->chan); diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c index 1f2948c0c458..895101a5e686 100644 --- a/drivers/mtd/nand/xway_nand.c +++ b/drivers/mtd/nand/xway_nand.c @@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = { { .compatible = "lantiq,nand-xway" }, {}, }; -MODULE_DEVICE_TABLE(of, xway_nand_match); static struct platform_driver xway_nand_driver = { .probe = xway_nand_probe, @@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = { }, }; -module_platform_driver(xway_nand_driver); - -MODULE_LICENSE("GPL"); +builtin_platform_driver(xway_nand_driver); diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index b8c293373ecc..a306de4318d7 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -190,7 +190,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev) */ static int ipddp_create(struct ipddp_route *new_rt) { - struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL); + struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL); if (rt == NULL) return -ENOMEM; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 8f5e93cb7975..0e0df0ba288c 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -813,7 +813,7 @@ static int at91_poll(struct napi_struct *napi, int quota) u32 reg_ier = AT91_IRQ_ERR_FRAME; reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next); - napi_complete(napi); + napi_complete_done(napi, work_done); at91_write(priv, AT91_IER, reg_ier); } diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index e3dccd3200d5..606b7d8ffe13 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -1070,7 +1070,7 @@ static int c_can_poll(struct napi_struct *napi, int quota) end: if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* enable all IRQs if we are not in bus off state */ if (priv->can.state != CAN_STATE_BUS_OFF) c_can_irq_control(priv, true); diff --git a/drivers/net/can/c_can/c_can_pci.c 
b/drivers/net/can/c_can/c_can_pci.c index 7be393c96b1a..cf7c18947189 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev, dev->irq = pdev->irq; priv->base = addr; + priv->device = &pdev->dev; if (!c_can_pci_data->freq) { dev_err(&pdev->dev, "no clock frequency defined\n"); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 8d6208c0b400..611d16a7061d 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -279,25 +279,45 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt, return 0; } +/* Checks the validity of predefined bitrate settings */ +static int can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt, + const u32 *bitrate_const, + const unsigned int bitrate_const_cnt) +{ + struct can_priv *priv = netdev_priv(dev); + unsigned int i; + + for (i = 0; i < bitrate_const_cnt; i++) { + if (bt->bitrate == bitrate_const[i]) + break; + } + + if (i >= priv->bitrate_const_cnt) + return -EINVAL; + + return 0; +} + static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, - const struct can_bittiming_const *btc) + const struct can_bittiming_const *btc, + const u32 *bitrate_const, + const unsigned int bitrate_const_cnt) { int err; - /* Check if the CAN device has bit-timing parameters */ - if (!btc) - return -EOPNOTSUPP; - /* * Depending on the given can_bittiming parameter structure the CAN * timing parameters are calculated based on the provided bitrate OR * alternatively the CAN timing parameters (tq, prop_seg, etc.) are * provided directly which are then checked and fixed up. */ - if (!bt->tq && bt->bitrate) + if (!bt->tq && bt->bitrate && btc) err = can_calc_bittiming(dev, bt, btc); - else if (bt->tq && !bt->bitrate) + else if (bt->tq && !bt->bitrate && btc) err = can_fixup_bittiming(dev, bt, btc); + else if (!bt->tq && bt->bitrate && bitrate_const) + err = can_validate_bitrate(dev, bt, bitrate_const, + bitrate_const_cnt); else err = -EINVAL; @@ -872,8 +892,20 @@ static int can_changelink(struct net_device *dev, /* Do not allow changing bittiming while running */ if (dev->flags & IFF_UP) return -EBUSY; + + /* Calculate bittiming parameters based on + * bittiming_const if set, otherwise pass bitrate + * directly via do_set_bitrate(). Bail out if neither + * is given. + */ + if (!priv->bittiming_const && !priv->do_set_bittiming) + return -EOPNOTSUPP; + memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); - err = can_get_bittiming(dev, &bt, priv->bittiming_const); + err = can_get_bittiming(dev, &bt, + priv->bittiming_const, + priv->bitrate_const, + priv->bitrate_const_cnt); if (err) return err; memcpy(&priv->bittiming, &bt, sizeof(bt)); @@ -943,9 +975,21 @@ static int can_changelink(struct net_device *dev, /* Do not allow changing bittiming while running */ if (dev->flags & IFF_UP) return -EBUSY; + + /* Calculate bittiming parameters based on + * data_bittiming_const if set, otherwise pass bitrate + * directly via do_set_bitrate(). Bail out if neither + * is given. 
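A related invariant is enforced in the register_candev() hunk further down: optional pairs such as bitrate_const and bitrate_const_cnt must be supplied together or not at all, using the `!x != !y` idiom. In isolation:

/* Fails exactly when one of the pair is given without the other. */
static int check_optional_pair(const u32 *table, unsigned int table_cnt)
{
	if (!table != !table_cnt)	/* boolean XOR of the two conditions */
		return -EINVAL;
	return 0;			/* both set, or both unset */
}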
+ */ + if (!priv->data_bittiming_const && !priv->do_set_data_bittiming) + return -EOPNOTSUPP; + memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(dbt)); - err = can_get_bittiming(dev, &dbt, priv->data_bittiming_const); + err = can_get_bittiming(dev, &dbt, + priv->data_bittiming_const, + priv->data_bitrate_const, + priv->data_bitrate_const_cnt); if (err) return err; memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); @@ -958,6 +1002,30 @@ static int can_changelink(struct net_device *dev, } } + if (data[IFLA_CAN_TERMINATION]) { + const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]); + const unsigned int num_term = priv->termination_const_cnt; + unsigned int i; + + if (!priv->do_set_termination) + return -EOPNOTSUPP; + + /* check whether given value is supported by the interface */ + for (i = 0; i < num_term; i++) { + if (termval == priv->termination_const[i]) + break; + } + if (i >= num_term) + return -EINVAL; + + /* Finally, set the termination value */ + err = priv->do_set_termination(dev, termval); + if (err) + return err; + + priv->termination = termval; + } + return 0; } @@ -980,6 +1048,17 @@ static size_t can_get_size(const struct net_device *dev) size += nla_total_size(sizeof(struct can_bittiming)); if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */ size += nla_total_size(sizeof(struct can_bittiming_const)); + if (priv->termination_const) { + size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */ + size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */ + priv->termination_const_cnt); + } + if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */ + size += nla_total_size(sizeof(*priv->bitrate_const) * + priv->bitrate_const_cnt); + if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */ + size += nla_total_size(sizeof(*priv->data_bitrate_const) * + priv->data_bitrate_const_cnt); return size; } @@ -1018,7 +1097,28 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) (priv->data_bittiming_const && nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST, sizeof(*priv->data_bittiming_const), - priv->data_bittiming_const))) + priv->data_bittiming_const)) || + + (priv->termination_const && + (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) || + nla_put(skb, IFLA_CAN_TERMINATION_CONST, + sizeof(*priv->termination_const) * + priv->termination_const_cnt, + priv->termination_const))) || + + (priv->bitrate_const && + nla_put(skb, IFLA_CAN_BITRATE_CONST, + sizeof(*priv->bitrate_const) * + priv->bitrate_const_cnt, + priv->bitrate_const)) || + + (priv->data_bitrate_const && + nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST, + sizeof(*priv->data_bitrate_const) * + priv->data_bitrate_const_cnt, + priv->data_bitrate_const)) + ) + return -EMSGSIZE; return 0; @@ -1073,6 +1173,22 @@ static struct rtnl_link_ops can_link_ops __read_mostly = { */ int register_candev(struct net_device *dev) { + struct can_priv *priv = netdev_priv(dev); + + /* Ensure termination_const, termination_const_cnt and + * do_set_termination consistency. All must be either set or + * unset. 
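[Annotation] The termination and fixed-bitrate support added to drivers/net/can/dev.c here is driven entirely by fields a driver fills in before register_candev(); the core then validates netlink input against the advertised tables and rejects inconsistent combinations. A minimal sketch of the driver side, assuming a hypothetical device whose termination resistor is switched by a single callback (all my_* names are invented for illustration):

#include <linux/can/dev.h>

static const u16 my_termination_const[] = { 0, 120 };	/* ohms */
static const u32 my_bitrate_const[] = { 125000, 250000, 500000, 1000000 };

/* Hypothetical: select the on-board termination resistor; 'term' is
 * guaranteed by can_changelink() to be one of my_termination_const[] */
static int my_set_termination(struct net_device *ndev, u16 term)
{
	return 0;
}

/* Hypothetical: program one of the supported fixed rates;
 * priv->bittiming.bitrate was already validated by the core */
static int my_set_bittiming(struct net_device *ndev)
{
	return 0;
}

static int my_probe_setup(struct net_device *ndev)
{
	struct can_priv *priv = netdev_priv(ndev);

	priv->termination_const = my_termination_const;
	priv->termination_const_cnt = ARRAY_SIZE(my_termination_const);
	priv->do_set_termination = my_set_termination;

	/* With bitrate_const set and no bittiming_const, can_changelink()
	 * takes the can_validate_bitrate() path above instead of
	 * calculating timing parameters */
	priv->bitrate_const = my_bitrate_const;
	priv->bitrate_const_cnt = ARRAY_SIZE(my_bitrate_const);
	priv->do_set_bittiming = my_set_bittiming;

	/* register_candev() now returns -EINVAL if any of these
	 * field groups is only partially populated */
	return register_candev(ndev);
}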
+ */ + if ((!priv->termination_const != !priv->termination_const_cnt) || + (!priv->termination_const != !priv->do_set_termination)) + return -EINVAL; + + if (!priv->bitrate_const != !priv->bitrate_const_cnt) + return -EINVAL; + + if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt) + return -EINVAL; + dev->rtnl_link_ops = &can_link_ops; return register_netdev(dev); } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 16f7cadda5c3..43cfce8b076b 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -703,7 +703,7 @@ static int flexcan_poll(struct napi_struct *napi, int quota) work_done += flexcan_poll_bus_err(dev, reg_esr); if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* enable IRQs */ flexcan_write(FLEXCAN_IFLAG_DEFAULT, ®s->imask1); flexcan_write(priv->reg_ctrl_default, ®s->ctrl); diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 368bb0710d8f..138f5ae75c0b 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -578,7 +578,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done); if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); ifi_canfd_irq_enable(ndev, 1); } diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index f13bb8d9bb84..2ba1a81500c1 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1475,7 +1475,7 @@ static int ican3_napi(struct napi_struct *napi, int budget) /* We have processed all packets that the adapter had, but it * was less than our budget, stop polling */ if (received < budget) - napi_complete(napi); + napi_complete_done(napi, received); spin_lock_irqsave(&mod->lock, flags); diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 195f15edb32e..7a6554efd42b 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -730,7 +730,7 @@ static int m_can_poll(struct napi_struct *napi, int quota) work_done += m_can_do_rx_poll(dev, (quota - work_done)); if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); m_can_enable_all_interrupts(priv); } diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 788459f6bf5c..caed4e6960f8 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -695,7 +695,7 @@ static int rcar_can_rx_poll(struct napi_struct *napi, int quota) } /* All packets processed */ if (num_pkts < quota) { - napi_complete(napi); + napi_complete_done(napi, num_pkts); priv->ier |= RCAR_CAN_IER_RXFIE; writeb(priv->ier, &priv->regs->ier); } diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 43cdd5544b0c..4ef07d97156d 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1512,7 +1512,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota) /* All packets processed */ if (num_pkts < quota) { - napi_complete(napi); + napi_complete_done(napi, num_pkts); /* Enable Rx FIFO interrupts */ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFIE); diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c index cdc0c7433a4b..4d4492884e0b 100644 --- a/drivers/net/can/softing/softing_cs.c +++ b/drivers/net/can/softing/softing_cs.c @@ -310,7 +310,7 @@ pcmcia_bad: pcmcia_failed: 
pcmcia_disable_device(pcmcia); pcmcia->priv = NULL; - return ret ?: -ENODEV; + return ret; } static const struct pcmcia_device_id softingcs_ids[] = { diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 680d1ff07a55..6749b1829469 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev) netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll, HECC_DEF_NAPI_WEIGHT); - clk_enable(priv->clk); + err = clk_prepare_enable(priv->clk); + if (err) { + dev_err(&pdev->dev, "clk_prepare_enable() failed\n"); + goto probe_exit_clk; + } + err = register_candev(ndev); if (err) { dev_err(&pdev->dev, "register_candev() failed\n"); @@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev) struct ti_hecc_priv *priv = netdev_priv(ndev); unregister_candev(ndev); - clk_disable(priv->clk); + clk_disable_unprepare(priv->clk); clk_put(priv->clk); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); iounmap(priv->base); @@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state) hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR); priv->can.state = CAN_STATE_SLEEPING; - clk_disable(priv->clk); + clk_disable_unprepare(priv->clk); return 0; } @@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct ti_hecc_priv *priv = netdev_priv(dev); + int err; - clk_enable(priv->clk); + err = clk_prepare_enable(priv->clk); + if (err) + return err; hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR); priv->can.state = CAN_STATE_ERROR_ACTIVE; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index c71a03593595..89aec07c225f 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -726,7 +726,7 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) can_led_event(ndev, CAN_LED_EVENT_RX); if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); ier = priv->read_reg(priv, XCAN_IER_OFFSET); ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); priv->write_reg(priv, XCAN_IER_OFFSET, ier); diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 8346e4f9737a..a3c941632217 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o -obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o +obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o +bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o obj-y += b53/ diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 5102a3701a1a..8cf4801994e8 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1308,7 +1308,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, } EXPORT_SYMBOL(b53_fdb_dump); -int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge) +int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) { struct b53_device *dev = ds->priv; s8 cpu_port = ds->dst->cpu_port; @@ -1326,11 +1326,10 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge) b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); } - dev->ports[port].bridge_dev = bridge; b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); b53_for_each_port(dev, i) { - if (dev->ports[i].bridge_dev != bridge) + if (ds->ports[i].bridge_dev != br) continue; /* Add this local port to the remote port 
VLAN control @@ -1354,10 +1353,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge) } EXPORT_SYMBOL(b53_br_join); -void b53_br_leave(struct dsa_switch *ds, int port) +void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) { struct b53_device *dev = ds->priv; - struct net_device *bridge = dev->ports[port].bridge_dev; struct b53_vlan *vl = &dev->vlans[0]; s8 cpu_port = ds->dst->cpu_port; unsigned int i; @@ -1367,7 +1365,7 @@ void b53_br_leave(struct dsa_switch *ds, int port) b53_for_each_port(dev, i) { /* Don't touch the remaining ports */ - if (dev->ports[i].bridge_dev != bridge) + if (ds->ports[i].bridge_dev != br) continue; b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); @@ -1382,7 +1380,6 @@ void b53_br_leave(struct dsa_switch *ds, int port) b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); dev->ports[port].vlan_ctl_mask = pvlan; - dev->ports[port].bridge_dev = NULL; if (is5325(dev) || is5365(dev)) pvid = 1; @@ -1453,6 +1450,71 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds) return DSA_TAG_PROTO_NONE; } +int b53_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, bool ingress) +{ + struct b53_device *dev = ds->priv; + u16 reg, loc; + + if (ingress) + loc = B53_IG_MIR_CTL; + else + loc = B53_EG_MIR_CTL; + + b53_read16(dev, B53_MGMT_PAGE, loc, ®); + reg &= ~MIRROR_MASK; + reg |= BIT(port); + b53_write16(dev, B53_MGMT_PAGE, loc, reg); + + b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); + reg &= ~CAP_PORT_MASK; + reg |= mirror->to_local_port; + reg |= MIRROR_EN; + b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); + + return 0; +} +EXPORT_SYMBOL(b53_mirror_add); + +void b53_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + struct b53_device *dev = ds->priv; + bool loc_disable = false, other_loc_disable = false; + u16 reg, loc; + + if (mirror->ingress) + loc = B53_IG_MIR_CTL; + else + loc = B53_EG_MIR_CTL; + + /* Update the desired ingress/egress register */ + b53_read16(dev, B53_MGMT_PAGE, loc, ®); + reg &= ~BIT(port); + if (!(reg & MIRROR_MASK)) + loc_disable = true; + b53_write16(dev, B53_MGMT_PAGE, loc, reg); + + /* Now look at the other one to know if we can disable mirroring + * entirely + */ + if (mirror->ingress) + b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, ®); + else + b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, ®); + if (!(reg & MIRROR_MASK)) + other_loc_disable = true; + + b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); + /* Both no longer have ports, let's disable mirroring */ + if (loc_disable && other_loc_disable) { + reg &= ~MIRROR_EN; + reg &= ~mirror->to_local_port; + } + b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); +} +EXPORT_SYMBOL(b53_mirror_del); + static const struct dsa_switch_ops b53_switch_ops = { .get_tag_protocol = b53_get_tag_protocol, .setup = b53_setup, @@ -1477,6 +1539,8 @@ static const struct dsa_switch_ops b53_switch_ops = { .port_fdb_dump = b53_fdb_dump, .port_fdb_add = b53_fdb_add, .port_fdb_del = b53_fdb_del, + .port_mirror_add = b53_mirror_add, + .port_mirror_del = b53_mirror_del, }; struct b53_chip_data { @@ -1685,6 +1749,18 @@ static const struct b53_chip_data b53_switch_chips[] = { .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, }, + { + .chip_id = BCM7278_DEVICE_ID, + .dev_name = "BCM7278", + .vlans = 4096, + .enabled_ports = 0x1ff, + .arl_entries= 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + 
.duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, }; static int b53_switch_init(struct b53_device *dev) @@ -1778,14 +1854,15 @@ struct b53_device *b53_switch_alloc(struct device *base, struct dsa_switch *ds; struct b53_device *dev; - ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL); + ds = dsa_switch_alloc(base, DSA_MAX_PORTS); if (!ds) return NULL; - dev = (struct b53_device *)(ds + 1); + dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL); + if (!dev) + return NULL; ds->priv = dev; - ds->dev = base; dev->dev = base; dev->ds = ds; @@ -1882,7 +1959,7 @@ int b53_switch_register(struct b53_device *dev) pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev); - return dsa_register_switch(dev->ds, dev->ds->dev->of_node); + return dsa_register_switch(dev->ds, dev->ds->dev); } EXPORT_SYMBOL(b53_switch_register); diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c index 477a16b5660a..fa7556f5d4fb 100644 --- a/drivers/net/dsa/b53/b53_mdio.c +++ b/drivers/net/dsa/b53/b53_mdio.c @@ -375,18 +375,7 @@ static struct mdio_driver b53_mdio_driver = { .of_match_table = b53_of_match, }, }; - -static int __init b53_mdio_driver_register(void) -{ - return mdio_driver_register(&b53_mdio_driver); -} -module_init(b53_mdio_driver_register); - -static void __exit b53_mdio_driver_unregister(void) -{ - mdio_driver_unregister(&b53_mdio_driver); -} -module_exit(b53_mdio_driver_unregister); +mdio_module_driver(b53_mdio_driver); MODULE_DESCRIPTION("B53 MDIO access driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 86f125d55aaf..a9dc90a01438 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -62,6 +62,7 @@ enum { BCM53019_DEVICE_ID = 0x53019, BCM58XX_DEVICE_ID = 0x5800, BCM7445_DEVICE_ID = 0x7445, + BCM7278_DEVICE_ID = 0x7278, }; #define B53_N_PORTS 9 @@ -69,7 +70,6 @@ enum { struct b53_port { u16 vlan_ctl_mask; - struct net_device *bridge_dev; }; struct b53_vlan { @@ -179,7 +179,8 @@ static inline int is5301x(struct b53_device *dev) static inline int is58xx(struct b53_device *dev) { return dev->chip_id == BCM58XX_DEVICE_ID || - dev->chip_id == BCM7445_DEVICE_ID; + dev->chip_id == BCM7445_DEVICE_ID || + dev->chip_id == BCM7278_DEVICE_ID; } #define B53_CPU_PORT_25 5 @@ -380,7 +381,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data); void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); int b53_get_sset_count(struct dsa_switch *ds); int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); -void b53_br_leave(struct dsa_switch *ds, int port); +void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); void b53_br_fast_age(struct dsa_switch *ds, int port); int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering); @@ -406,5 +407,9 @@ int b53_fdb_del(struct dsa_switch *ds, int port, int b53_fdb_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_fdb *fdb, int (*cb)(struct switchdev_obj *obj)); +int b53_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, bool ingress); +void b53_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror); #endif diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index dac0af4e2cd0..9fd24c418fa4 100644 --- 
a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -206,6 +206,38 @@ #define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */ #define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */ +/* Mirror capture control register (16 bit) */ +#define B53_MIR_CAP_CTL 0x10 +#define CAP_PORT_MASK 0xf +#define BLK_NOT_MIR BIT(14) +#define MIRROR_EN BIT(15) + +/* Ingress mirror control register (16 bit) */ +#define B53_IG_MIR_CTL 0x12 +#define MIRROR_MASK 0x1ff +#define DIV_EN BIT(13) +#define MIRROR_FILTER_MASK 0x3 +#define MIRROR_FILTER_SHIFT 14 +#define MIRROR_ALL 0 +#define MIRROR_DA 1 +#define MIRROR_SA 2 + +/* Ingress mirror divider register (16 bit) */ +#define B53_IG_MIR_DIV 0x14 +#define IN_MIRROR_DIV_MASK 0x3ff + +/* Ingress mirror MAC address register (48 bit) */ +#define B53_IG_MIR_MAC 0x16 + +/* Egress mirror control register (16 bit) */ +#define B53_EG_MIR_CTL 0x1C + +/* Egress mirror divider register (16 bit) */ +#define B53_EG_MIR_DIV 0x1E + +/* Egress mirror MAC address register (48 bit) */ +#define B53_EG_MIR_MAC 0x20 + /* Device ID register (8 or 32 bit) */ #define B53_DEVICE_ID 0x30 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 31d017086f8b..2be963252ca5 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -61,30 +61,10 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) } } -static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) +static void bcm_sf2_brcm_hdr_setup(struct bcm_sf2_priv *priv, int port) { - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 reg, val; - /* Enable the port memories */ - reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); - reg &= ~P_TXQ_PSM_VDD(port); - core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); - - /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ - reg = core_readl(priv, CORE_IMP_CTL); - reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN); - reg &= ~(RX_DIS | TX_DIS); - core_writel(priv, reg, CORE_IMP_CTL); - - /* Enable forwarding */ - core_writel(priv, SW_FWDG_EN, CORE_SWMODE); - - /* Enable IMP port in dumb mode */ - reg = core_readl(priv, CORE_SWITCH_CTRL); - reg |= MII_DUMB_FWDG_EN; - core_writel(priv, reg, CORE_SWITCH_CTRL); - /* Resolve which bit controls the Broadcom tag */ switch (port) { case 8: @@ -119,11 +99,43 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS); reg &= ~(1 << port); core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS); +} + +static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + u32 reg, offset; + + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_IMP; + else + offset = CORE_STS_OVERRIDE_IMP2; + + /* Enable the port memories */ + reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); + reg &= ~P_TXQ_PSM_VDD(port); + core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + + /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ + reg = core_readl(priv, CORE_IMP_CTL); + reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN); + reg &= ~(RX_DIS | TX_DIS); + core_writel(priv, reg, CORE_IMP_CTL); + + /* Enable forwarding */ + core_writel(priv, SW_FWDG_EN, CORE_SWMODE); + + /* Enable IMP port in dumb mode */ + reg = core_readl(priv, CORE_SWITCH_CTRL); + reg |= MII_DUMB_FWDG_EN; + core_writel(priv, reg, CORE_SWITCH_CTRL); + + bcm_sf2_brcm_hdr_setup(priv, port); /* Force link status for IMP port */ - reg = core_readl(priv, CORE_STS_OVERRIDE_IMP); + reg = core_readl(priv, offset); reg |= (MII_SW_OR | 
LINK_STS); - core_writel(priv, reg, CORE_STS_OVERRIDE_IMP); + core_writel(priv, reg, offset); } static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable) @@ -217,6 +229,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->dst[ds->index].cpu_port; + unsigned int i; u32 reg; /* Clear the memory power down */ @@ -224,6 +237,18 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, reg &= ~P_TXQ_PSM_VDD(port); core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + /* Enable Broadcom tags for that port if requested */ + if (priv->brcm_tag_mask & BIT(port)) + bcm_sf2_brcm_hdr_setup(priv, port); + + /* Configure Traffic Class to QoS mapping, allow each priority to map + * to a different queue number + */ + reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port)); + for (i = 0; i < 8; i++) + reg |= i << (PRT_TO_QID_SHIFT * i); + core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port)); + /* Clear the Rx and Tx disable bits and set to no spanning tree */ core_writel(priv, 0, CORE_G_PCTL_PORT(port)); @@ -503,6 +528,9 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, if (mode == PHY_INTERFACE_MODE_MOCA) priv->moca_port = port_num; + + if (of_property_read_bool(port, "brcm,use-bcm-hdr")) + priv->brcm_tag_mask |= 1 << port_num; } } @@ -591,7 +619,12 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, struct ethtool_eee *p = &priv->port_sts[port].eee; u32 id_mode_dis = 0, port_mode; const char *str = NULL; - u32 reg; + u32 reg, offset; + + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); + else + offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); switch (phydev->interface) { case PHY_INTERFACE_MODE_RGMII: @@ -662,7 +695,7 @@ force_link: if (phydev->duplex == DUPLEX_FULL) reg |= DUPLX_MODE; - core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); + core_writel(priv, reg, offset); if (!phydev->is_pseudo_fixed_link) p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); @@ -672,9 +705,14 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, struct fixed_phy_status *status) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - u32 duplex, pause; + u32 duplex, pause, offset; u32 reg; + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); + else + offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); + duplex = core_readl(priv, CORE_DUPSTS); pause = core_readl(priv, CORE_PAUSESTS); @@ -703,13 +741,13 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, status->duplex = !!(duplex & (1 << port)); } - reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port)); + reg = core_readl(priv, offset); reg |= SW_OVERRIDE; if (status->link) reg |= LINK_STS; else reg &= ~LINK_STS; - core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); + core_writel(priv, reg, offset); if ((pause & (1 << port)) && (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) { @@ -1007,12 +1045,80 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .port_fdb_dump = b53_fdb_dump, .port_fdb_add = b53_fdb_add, .port_fdb_del = b53_fdb_del, + .get_rxnfc = bcm_sf2_get_rxnfc, + .set_rxnfc = bcm_sf2_set_rxnfc, + .port_mirror_add = b53_mirror_add, + .port_mirror_del = b53_mirror_del, +}; + +struct bcm_sf2_of_data { + u32 type; + const u16 *reg_offsets; + unsigned int core_reg_align; +}; + +/* Register offsets for the SWITCH_REG_* block */ +static const u16 bcm_sf2_7445_reg_offsets[] = { + [REG_SWITCH_CNTRL] 
= 0x00, + [REG_SWITCH_STATUS] = 0x04, + [REG_DIR_DATA_WRITE] = 0x08, + [REG_DIR_DATA_READ] = 0x0C, + [REG_SWITCH_REVISION] = 0x18, + [REG_PHY_REVISION] = 0x1C, + [REG_SPHY_CNTRL] = 0x2C, + [REG_RGMII_0_CNTRL] = 0x34, + [REG_RGMII_1_CNTRL] = 0x40, + [REG_RGMII_2_CNTRL] = 0x4c, + [REG_LED_0_CNTRL] = 0x90, + [REG_LED_1_CNTRL] = 0x94, + [REG_LED_2_CNTRL] = 0x98, +}; + +static const struct bcm_sf2_of_data bcm_sf2_7445_data = { + .type = BCM7445_DEVICE_ID, + .core_reg_align = 0, + .reg_offsets = bcm_sf2_7445_reg_offsets, +}; + +static const u16 bcm_sf2_7278_reg_offsets[] = { + [REG_SWITCH_CNTRL] = 0x00, + [REG_SWITCH_STATUS] = 0x04, + [REG_DIR_DATA_WRITE] = 0x08, + [REG_DIR_DATA_READ] = 0x0c, + [REG_SWITCH_REVISION] = 0x10, + [REG_PHY_REVISION] = 0x14, + [REG_SPHY_CNTRL] = 0x24, + [REG_RGMII_0_CNTRL] = 0xe0, + [REG_RGMII_1_CNTRL] = 0xec, + [REG_RGMII_2_CNTRL] = 0xf8, + [REG_LED_0_CNTRL] = 0x40, + [REG_LED_1_CNTRL] = 0x4c, + [REG_LED_2_CNTRL] = 0x58, +}; + +static const struct bcm_sf2_of_data bcm_sf2_7278_data = { + .type = BCM7278_DEVICE_ID, + .core_reg_align = 1, + .reg_offsets = bcm_sf2_7278_reg_offsets, }; +static const struct of_device_id bcm_sf2_of_match[] = { + { .compatible = "brcm,bcm7445-switch-v4.0", + .data = &bcm_sf2_7445_data + }, + { .compatible = "brcm,bcm7278-switch-v4.0", + .data = &bcm_sf2_7278_data + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, bcm_sf2_of_match); + static int bcm_sf2_sw_probe(struct platform_device *pdev) { const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; struct device_node *dn = pdev->dev.of_node; + const struct of_device_id *of_id = NULL; + const struct bcm_sf2_of_data *data; struct b53_platform_data *pdata; struct dsa_switch_ops *ops; struct bcm_sf2_priv *priv; @@ -1040,11 +1146,22 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) if (!pdata) return -ENOMEM; + of_id = of_match_node(bcm_sf2_of_match, dn); + if (!of_id || !of_id->data) + return -EINVAL; + + data = of_id->data; + + /* Set SWITCH_REG register offsets and SWITCH_CORE align factor */ + priv->type = data->type; + priv->reg_offsets = data->reg_offsets; + priv->core_reg_align = data->core_reg_align; + /* Auto-detection using standard registers will not work, so * provide an indication of what kind of device we are for * b53_common to work with */ - pdata->chip_id = BCM7445_DEVICE_ID; + pdata->chip_id = priv->type; dev->pdata = pdata; priv->dev = dev; @@ -1055,6 +1172,12 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) spin_lock_init(&priv->indir_lock); mutex_init(&priv->stats_mutex); + mutex_init(&priv->cfp.lock); + + /* CFP rule #0 cannot be used for specific classifications, flag it as + * permanently used + */ + set_bit(0, priv->cfp.used); bcm_sf2_identify_ports(priv, dn->child); @@ -1084,6 +1207,12 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) return ret; } + ret = bcm_sf2_cfp_rst(priv); + if (ret) { + pr_err("failed to reset CFP\n"); + goto out_mdio; + } + /* Disable all interrupts and request them */ bcm_sf2_intr_disable(priv); @@ -1190,11 +1319,6 @@ static int bcm_sf2_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops, bcm_sf2_suspend, bcm_sf2_resume); -static const struct of_device_id bcm_sf2_of_match[] = { - { .compatible = "brcm,bcm7445-switch-v4.0" }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, bcm_sf2_of_match); static struct platform_driver bcm_sf2_driver = { .probe = bcm_sf2_sw_probe, diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 44692673e1d5..7d3030e04f11 100644 --- 
a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -52,6 +52,13 @@ struct bcm_sf2_port_status { struct ethtool_eee eee; }; +struct bcm_sf2_cfp_priv { + /* Mutex protecting concurrent accesses to the CFP registers */ + struct mutex lock; + DECLARE_BITMAP(used, CFP_NUM_RULES); + unsigned int rules_cnt; +}; + struct bcm_sf2_priv { /* Base registers, keep those in order with BCM_SF2_REGS_NAME */ void __iomem *core; @@ -61,6 +68,11 @@ struct bcm_sf2_priv { void __iomem *fcb; void __iomem *acb; + /* Register offsets indirection tables */ + u32 type; + const u16 *reg_offsets; + unsigned int core_reg_align; + /* spinlock protecting access to the indirect registers */ spinlock_t indir_lock; @@ -95,6 +107,12 @@ struct bcm_sf2_priv { struct device_node *master_mii_dn; struct mii_bus *slave_mii_bus; struct mii_bus *master_mii_bus; + + /* Bitmask of ports needing BRCM tags */ + unsigned int brcm_tag_mask; + + /* CFP rules context */ + struct bcm_sf2_cfp_priv cfp; }; static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds) @@ -104,6 +122,11 @@ static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds) return dev->priv; } +static inline u32 bcm_sf2_mangle_addr(struct bcm_sf2_priv *priv, u32 off) +{ + return off << priv->core_reg_align; +} + #define SF2_IO_MACRO(name) \ static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \ { \ @@ -125,7 +148,7 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \ { \ u32 indir, dir; \ spin_lock(&priv->indir_lock); \ - dir = __raw_readl(priv->name + off); \ + dir = name##_readl(priv, off); \ indir = reg_readl(priv, REG_DIR_DATA_READ); \ spin_unlock(&priv->indir_lock); \ return (u64)indir << 32 | dir; \ @@ -135,7 +158,7 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \ { \ spin_lock(&priv->indir_lock); \ reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \ - __raw_writel(lower_32_bits(val), priv->name + off); \ + name##_writel(priv, lower_32_bits(val), off); \ spin_unlock(&priv->indir_lock); \ } @@ -153,8 +176,28 @@ static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \ priv->irq##which##_mask |= (mask); \ } \ -SF2_IO_MACRO(core); -SF2_IO_MACRO(reg); +static inline u32 core_readl(struct bcm_sf2_priv *priv, u32 off) +{ + u32 tmp = bcm_sf2_mangle_addr(priv, off); + return __raw_readl(priv->core + tmp); +} + +static inline void core_writel(struct bcm_sf2_priv *priv, u32 val, u32 off) +{ + u32 tmp = bcm_sf2_mangle_addr(priv, off); + __raw_writel(val, priv->core + tmp); +} + +static inline u32 reg_readl(struct bcm_sf2_priv *priv, u16 off) +{ + return __raw_readl(priv->reg + priv->reg_offsets[off]); +} + +static inline void reg_writel(struct bcm_sf2_priv *priv, u32 val, u16 off) +{ + __raw_writel(val, priv->reg + priv->reg_offsets[off]); +} + SF2_IO64_MACRO(core); SF2_IO_MACRO(intrl2_0); SF2_IO_MACRO(intrl2_1); @@ -164,4 +207,11 @@ SF2_IO_MACRO(acb); SWITCH_INTR_L2(0); SWITCH_INTR_L2(1); +/* RXNFC */ +int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc, u32 *rule_locs); +int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc); +int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv); + #endif /* __BCM_SF2_H */ diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c new file mode 100644 index 000000000000..c71be3e0dc2d --- /dev/null +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -0,0 +1,613 @@ +/* + * Broadcom Starfighter 2 DSA switch CFP support + * + * Copyright (C) 2016, Broadcom + * + * This 
program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/list.h> +#include <net/dsa.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/in.h> +#include <linux/bitmap.h> + +#include "bcm_sf2.h" +#include "bcm_sf2_regs.h" + +struct cfp_udf_layout { + u8 slices[UDF_NUM_SLICES]; + u32 mask_value; + +}; + +/* UDF slices layout for a TCPv4/UDPv4 specification */ +static const struct cfp_udf_layout udf_tcpip4_layout = { + .slices = { + /* End of L2, byte offset 12, src IP[0:15] */ + CFG_UDF_EOL2 | 6, + /* End of L2, byte offset 14, src IP[16:31] */ + CFG_UDF_EOL2 | 7, + /* End of L2, byte offset 16, dst IP[0:15] */ + CFG_UDF_EOL2 | 8, + /* End of L2, byte offset 18, dst IP[16:31] */ + CFG_UDF_EOL2 | 9, + /* End of L3, byte offset 0, src port */ + CFG_UDF_EOL3 | 0, + /* End of L3, byte offset 2, dst port */ + CFG_UDF_EOL3 | 1, + 0, 0, 0 + }, + .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG, +}; + +static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout) +{ + unsigned int i, count = 0; + + for (i = 0; i < UDF_NUM_SLICES; i++) { + if (layout[i] != 0) + count++; + } + + return count; +} + +static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv, + unsigned int slice_num, + const u8 *layout) +{ + u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET; + unsigned int i; + + for (i = 0; i < UDF_NUM_SLICES; i++) + core_writel(priv, layout[i], offset + i * 4); +} + +static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op) +{ + unsigned int timeout = 1000; + u32 reg; + + reg = core_readl(priv, CORE_CFP_ACC); + reg &= ~(OP_SEL_MASK | RAM_SEL_MASK); + reg |= OP_STR_DONE | op; + core_writel(priv, reg, CORE_CFP_ACC); + + do { + reg = core_readl(priv, CORE_CFP_ACC); + if (!(reg & OP_STR_DONE)) + break; + + cpu_relax(); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + +static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv, + unsigned int addr) +{ + u32 reg; + + WARN_ON(addr >= CFP_NUM_RULES); + + reg = core_readl(priv, CORE_CFP_ACC); + reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT); + reg |= addr << XCESS_ADDR_SHIFT; + core_writel(priv, reg, CORE_CFP_ACC); +} + +static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv) +{ + /* Entry #0 is reserved */ + return CFP_NUM_RULES - 1; +} + +static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, + struct ethtool_rx_flow_spec *fs) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct ethtool_tcpip4_spec *v4_spec; + const struct cfp_udf_layout *layout; + unsigned int slice_num, rule_index; + unsigned int queue_num, port_num; + u8 ip_proto, ip_frag; + u8 num_udf; + u32 reg; + int ret; + + /* Check for unsupported extensions */ + if ((fs->flow_type & FLOW_EXT) && + (fs->m_ext.vlan_etype || fs->m_ext.data[1])) + return -EINVAL; + + if (fs->location != RX_CLS_LOC_ANY && + test_bit(fs->location, priv->cfp.used)) + return -EBUSY; + + if (fs->location != RX_CLS_LOC_ANY && + fs->location > bcm_sf2_cfp_rule_size(priv)) + return -EINVAL; + + ip_frag = be32_to_cpu(fs->m_ext.data[0]); + + /* We do not support discarding packets, check that the + * destination port is enabled and that we are within the + * number of ports supported by the switch + */ + port_num = fs->ring_cookie / 8; + + if (fs->ring_cookie == 
RX_CLS_FLOW_DISC || + !(BIT(port_num) & ds->enabled_port_mask) || + port_num >= priv->hw_params.num_ports) + return -EINVAL; + + switch (fs->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + ip_proto = IPPROTO_TCP; + v4_spec = &fs->h_u.tcp_ip4_spec; + break; + case UDP_V4_FLOW: + ip_proto = IPPROTO_UDP; + v4_spec = &fs->h_u.udp_ip4_spec; + break; + default: + return -EINVAL; + } + + /* We only use one UDF slice for now */ + slice_num = 1; + layout = &udf_tcpip4_layout; + num_udf = bcm_sf2_get_num_udf_slices(layout->slices); + + /* Apply the UDF layout for this filter */ + bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices); + + /* Apply to all packets received through this port */ + core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7)); + + /* S-Tag status [31:30] + * C-Tag status [29:28] + * L2 framing [27:26] + * L3 framing [25:24] + * IP ToS [23:16] + * IP proto [15:08] + * IP Fragm [7] + * Non 1st frag [6] + * IP Authen [5] + * TTL range [4:3] + * PPPoE session [2] + * Reserved [1] + * UDF_Valid[8] [0] + */ + core_writel(priv, v4_spec->tos << 16 | ip_proto << 8 | ip_frag << 7, + CORE_CFP_DATA_PORT(6)); + + /* UDF_Valid[7:0] [31:24] + * S-Tag [23:8] + * C-Tag [7:0] + */ + core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5)); + + /* C-Tag [31:24] + * UDF_n_A8 [23:8] + * UDF_n_A7 [7:0] + */ + core_writel(priv, 0, CORE_CFP_DATA_PORT(4)); + + /* UDF_n_A7 [31:24] + * UDF_n_A6 [23:8] + * UDF_n_A5 [7:0] + */ + core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8, + CORE_CFP_DATA_PORT(3)); + + /* UDF_n_A5 [31:24] + * UDF_n_A4 [23:8] + * UDF_n_A3 [7:0] + */ + reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 | + (u32)be16_to_cpu(v4_spec->psrc) << 8 | + (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8; + core_writel(priv, reg, CORE_CFP_DATA_PORT(2)); + + /* UDF_n_A3 [31:24] + * UDF_n_A2 [23:8] + * UDF_n_A1 [7:0] + */ + reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 | + (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 | + (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8; + core_writel(priv, reg, CORE_CFP_DATA_PORT(1)); + + /* UDF_n_A1 [31:24] + * UDF_n_A0 [23:8] + * Reserved [7:4] + * Slice ID [3:2] + * Slice valid [1:0] + */ + reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 | + (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 | + SLICE_NUM(slice_num) | SLICE_VALID; + core_writel(priv, reg, CORE_CFP_DATA_PORT(0)); + + /* Source port map match */ + core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7)); + + /* Mask with the specific layout for IPv4 packets */ + core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6)); + + /* Mask all but valid UDFs */ + core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5)); + + /* Mask all */ + core_writel(priv, 0, CORE_CFP_MASK_PORT(4)); + + /* All other UDFs should be matched with the filter */ + core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3)); + core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2)); + core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1)); + core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0)); + + /* Locate the first rule available */ + if (fs->location == RX_CLS_LOC_ANY) + rule_index = find_first_zero_bit(priv->cfp.used, + bcm_sf2_cfp_rule_size(priv)); + else + rule_index = fs->location; + + /* Insert into TCAM now */ + bcm_sf2_cfp_rule_addr_set(priv, rule_index); + + ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); + if (ret) { + pr_err("TCAM entry at addr %d failed\n", rule_index); + return ret; + } + + /* Replace ARL derived destination with DST_MAP derived, define + * which port and queue this should be 
forwarded to. + * + * We have a small oddity where Port 6 just does not have a + * valid bit here (so we subtract by one). + */ + queue_num = fs->ring_cookie % 8; + if (port_num >= 7) + port_num -= 1; + + reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) | + CHANGE_TC | queue_num << NEW_TC_SHIFT; + + core_writel(priv, reg, CORE_ACT_POL_DATA0); + + /* Set classification ID that needs to be put in Broadcom tag */ + core_writel(priv, rule_index << CHAIN_ID_SHIFT, + CORE_ACT_POL_DATA1); + + core_writel(priv, 0, CORE_ACT_POL_DATA2); + + /* Configure policer RAM now */ + ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM); + if (ret) { + pr_err("Policer entry at %d failed\n", rule_index); + return ret; + } + + /* Disable the policer */ + core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0); + + /* Now the rate meter */ + ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM); + if (ret) { + pr_err("Meter entry at %d failed\n", rule_index); + return ret; + } + + /* Turn on CFP for this rule now */ + reg = core_readl(priv, CORE_CFP_CTL_REG); + reg |= BIT(port); + core_writel(priv, reg, CORE_CFP_CTL_REG); + + /* Flag the rule as being used and return it */ + set_bit(rule_index, priv->cfp.used); + fs->location = rule_index; + + return 0; +} + +static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, + u32 loc) +{ + int ret; + u32 reg; + + /* Refuse deletion of unused rules, and the default reserved rule */ + if (!test_bit(loc, priv->cfp.used) || loc == 0) + return -EINVAL; + + /* Indicate which rule we want to read */ + bcm_sf2_cfp_rule_addr_set(priv, loc); + + ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL); + if (ret) + return ret; + + /* Clear its valid bits */ + reg = core_readl(priv, CORE_CFP_DATA_PORT(0)); + reg &= ~SLICE_VALID; + core_writel(priv, reg, CORE_CFP_DATA_PORT(0)); + + /* Write back this entry into the TCAM now */ + ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); + if (ret) + return ret; + + clear_bit(loc, priv->cfp.used); + + return 0; +} + +static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow) +{ + unsigned int i; + + for (i = 0; i < sizeof(flow->m_u); i++) + flow->m_u.hdata[i] ^= 0xff; + + flow->m_ext.vlan_etype ^= cpu_to_be16(~0); + flow->m_ext.vlan_tci ^= cpu_to_be16(~0); + flow->m_ext.data[0] ^= cpu_to_be32(~0); + flow->m_ext.data[1] ^= cpu_to_be32(~0); +} + +static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port, + struct ethtool_rxnfc *nfc, bool search) +{ + struct ethtool_tcpip4_spec *v4_spec; + unsigned int queue_num; + u16 src_dst_port; + u32 reg, ipv4; + int ret; + + if (!search) { + bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location); + + ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM); + if (ret) + return ret; + + reg = core_readl(priv, CORE_ACT_POL_DATA0); + + ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL); + if (ret) + return ret; + } else { + reg = core_readl(priv, CORE_ACT_POL_DATA0); + } + + /* Extract the destination port */ + nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) & + DST_MAP_IB_MASK) - 1; + + /* There is no Port 6, so we compensate for that here */ + if (nfc->fs.ring_cookie >= 6) + nfc->fs.ring_cookie++; + nfc->fs.ring_cookie *= 8; + + /* Extract the destination queue */ + queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK; + nfc->fs.ring_cookie += queue_num; + + /* Extract the IP protocol */ + reg = core_readl(priv, CORE_CFP_DATA_PORT(6)); + switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) { + case IPPROTO_TCP: + nfc->fs.flow_type = TCP_V4_FLOW; + v4_spec = 
&nfc->fs.h_u.tcp_ip4_spec; + break; + case IPPROTO_UDP: + nfc->fs.flow_type = UDP_V4_FLOW; + v4_spec = &nfc->fs.h_u.udp_ip4_spec; + break; + default: + /* Clear to exit the search process */ + if (search) + core_readl(priv, CORE_CFP_DATA_PORT(7)); + return -EINVAL; + } + + v4_spec->tos = (reg >> 16) & IPPROTO_MASK; + nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> 7) & 1); + + reg = core_readl(priv, CORE_CFP_DATA_PORT(3)); + /* src port [15:8] */ + src_dst_port = reg << 8; + + reg = core_readl(priv, CORE_CFP_DATA_PORT(2)); + /* src port [7:0] */ + src_dst_port |= (reg >> 24); + + v4_spec->pdst = cpu_to_be16(src_dst_port); + nfc->fs.m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + v4_spec->psrc = cpu_to_be16((u16)(reg >> 8)); + nfc->fs.m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); + + /* IPv4 dst [15:8] */ + ipv4 = (u16)(reg & 0xff) << 8; + reg = core_readl(priv, CORE_CFP_DATA_PORT(1)); + /* IPv4 dst [31:16] */ + ipv4 |= (u32)((reg >> 8) & 0xffffff) << 16; + /* IPv4 dst [7:0] */ + ipv4 |= (reg >> 24) & 0xff; + v4_spec->ip4dst = cpu_to_be32(ipv4); + nfc->fs.m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); + + /* IPv4 src [15:8] */ + ipv4 = (u16)(reg & 0xff) << 8; + reg = core_readl(priv, CORE_CFP_DATA_PORT(0)); + + if (!(reg & SLICE_VALID)) + return -EINVAL; + + /* IPv4 src [7:0] */ + ipv4 |= (reg >> 24) & 0xff; + /* IPv4 src [31:16] */ + ipv4 |= ((reg >> 8) & 0xffffff) << 16; + v4_spec->ip4src = cpu_to_be32(ipv4); + nfc->fs.m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); + + /* Read last to avoid next entry clobbering the results during search + * operations + */ + reg = core_readl(priv, CORE_CFP_DATA_PORT(7)); + if (!(reg & 1 << port)) + return -EINVAL; + + bcm_sf2_invert_masks(&nfc->fs); + + /* Put the TCAM size here */ + nfc->data = bcm_sf2_cfp_rule_size(priv); + + return 0; +} + +/* We implement the search doing a TCAM search operation */ +static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv, + int port, struct ethtool_rxnfc *nfc, + u32 *rule_locs) +{ + unsigned int index = 1, rules_cnt = 0; + int ret; + u32 reg; + + /* Do not poll on OP_STR_DONE to be self-clearing for search + * operations, we cannot use bcm_sf2_cfp_op here because it completes + * on clearing OP_STR_DONE which won't clear until the entire search + * operation is over. 
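[Annotation] From userspace, the new get/set_rxnfc hooks sit behind the standard ethtool flow-classification ioctls, so no driver-specific tooling is needed. A hedged sketch of inserting one of the TCPv4 rules bcm_sf2_cfp_rule_set() accepts (interface name and addresses are invented; per the code above, ring_cookie packs the destination as switch-port * 8 + queue):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
	struct ifreq ifr = {};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.168.1.2");
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(5001);
	nfc.fs.m_u.tcp_ip4_spec.ip4dst = 0xffffffff;	/* match all 32 bits */
	nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;
	nfc.fs.ring_cookie = 1 * 8 + 0;		/* switch port 1, queue 0 */
	nfc.fs.location = RX_CLS_LOC_ANY;	/* let the driver pick a free slot */

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	return 0;
}

This is the same operation `ethtool -N <iface> flow-type tcp4 ...` issues under the hood; on success the driver writes the chosen rule index back into fs.location.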
+ */ + reg = core_readl(priv, CORE_CFP_ACC); + reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT); + reg |= index << XCESS_ADDR_SHIFT; + reg &= ~(OP_SEL_MASK | RAM_SEL_MASK); + reg |= OP_SEL_SEARCH | TCAM_SEL | OP_STR_DONE; + core_writel(priv, reg, CORE_CFP_ACC); + + do { + /* Wait for results to be ready */ + reg = core_readl(priv, CORE_CFP_ACC); + + /* Extract the address we are searching */ + index = reg >> XCESS_ADDR_SHIFT; + index &= XCESS_ADDR_MASK; + + /* We have a valid search result, so flag it accordingly */ + if (reg & SEARCH_STS) { + ret = bcm_sf2_cfp_rule_get(priv, port, nfc, true); + if (ret) + continue; + + rule_locs[rules_cnt] = index; + rules_cnt++; + } + + /* Search is over break out */ + if (!(reg & OP_STR_DONE)) + break; + + } while (index < CFP_NUM_RULES); + + /* Put the TCAM size here */ + nfc->data = bcm_sf2_cfp_rule_size(priv); + nfc->rule_cnt = rules_cnt; + + return 0; +} + +int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc, u32 *rule_locs) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + int ret = 0; + + mutex_lock(&priv->cfp.lock); + + switch (nfc->cmd) { + case ETHTOOL_GRXCLSRLCNT: + /* Subtract the default, unusable rule */ + nfc->rule_cnt = bitmap_weight(priv->cfp.used, + CFP_NUM_RULES) - 1; + /* We support specifying rule locations */ + nfc->data |= RX_CLS_LOC_SPECIAL; + break; + case ETHTOOL_GRXCLSRULE: + ret = bcm_sf2_cfp_rule_get(priv, port, nfc, false); + break; + case ETHTOOL_GRXCLSRLALL: + ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + mutex_unlock(&priv->cfp.lock); + + return ret; +} + +int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + int ret = 0; + + mutex_lock(&priv->cfp.lock); + + switch (nfc->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs); + break; + + case ETHTOOL_SRXCLSRLDEL: + ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + mutex_unlock(&priv->cfp.lock); + + return ret; +} + +int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv) +{ + unsigned int timeout = 1000; + u32 reg; + + reg = core_readl(priv, CORE_CFP_ACC); + reg |= TCAM_RESET; + core_writel(priv, reg, CORE_CFP_ACC); + + do { + reg = core_readl(priv, CORE_CFP_ACC); + if (!(reg & TCAM_RESET)) + break; + + cpu_relax(); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 838fe373cd6f..26052450091e 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -12,22 +12,36 @@ #define __BCM_SF2_REGS_H /* Register set relative to 'REG' */ -#define REG_SWITCH_CNTRL 0x00 -#define MDIO_MASTER_SEL (1 << 0) -#define REG_SWITCH_STATUS 0x04 -#define REG_DIR_DATA_WRITE 0x08 -#define REG_DIR_DATA_READ 0x0C +enum bcm_sf2_reg_offs { + REG_SWITCH_CNTRL = 0, + REG_SWITCH_STATUS, + REG_DIR_DATA_WRITE, + REG_DIR_DATA_READ, + REG_SWITCH_REVISION, + REG_PHY_REVISION, + REG_SPHY_CNTRL, + REG_RGMII_0_CNTRL, + REG_RGMII_1_CNTRL, + REG_RGMII_2_CNTRL, + REG_LED_0_CNTRL, + REG_LED_1_CNTRL, + REG_LED_2_CNTRL, + REG_SWITCH_REG_MAX, +}; + +/* Relative to REG_SWITCH_CNTRL */ +#define MDIO_MASTER_SEL (1 << 0) -#define REG_SWITCH_REVISION 0x18 +/* Relative to REG_SWITCH_REVISION */ #define SF2_REV_MASK 0xffff #define SWITCH_TOP_REV_SHIFT 16 #define SWITCH_TOP_REV_MASK 0xffff -#define REG_PHY_REVISION 
0x1C +/* Relative to REG_PHY_REVISION */ #define PHY_REVISION_MASK 0xffff -#define REG_SPHY_CNTRL 0x2C +/* Relative to REG_SPHY_CNTRL */ #define IDDQ_BIAS (1 << 0) #define EXT_PWR_DOWN (1 << 1) #define FORCE_DLL_EN (1 << 2) @@ -37,13 +51,8 @@ #define PHY_PHYAD_SHIFT 8 #define PHY_PHYAD_MASK 0x1F -#define REG_RGMII_0_BASE 0x34 -#define REG_RGMII_CNTRL 0x00 -#define REG_RGMII_IB_STATUS 0x04 -#define REG_RGMII_RX_CLOCK_DELAY_CNTRL 0x08 -#define REG_RGMII_CNTRL_SIZE 0x0C -#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_BASE + \ - ((x) * REG_RGMII_CNTRL_SIZE)) +#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_CNTRL + (x)) + /* Relative to REG_RGMII_CNTRL */ #define RGMII_MODE_EN (1 << 0) #define ID_MODE_DIS (1 << 1) @@ -61,8 +70,8 @@ #define LPI_COUNT_SHIFT 9 #define LPI_COUNT_MASK 0x3F -#define REG_LED_CNTRL_BASE 0x90 -#define REG_LED_CNTRL(x) (REG_LED_CNTRL_BASE + (x) * 4) +#define REG_LED_CNTRL(x) (REG_LED_0_CNTRL + (x)) + #define SPDLNK_SRC_SEL (1 << 24) /* Register set relative to 'INTRL2_0' and 'INTRL2_1' */ @@ -125,6 +134,9 @@ #define GMII_SPEED_UP_2G (1 << 6) #define MII_SW_OR (1 << 7) +/* Alternate layout for e.g: 7278 */ +#define CORE_STS_OVERRIDE_IMP2 0x39040 + #define CORE_NEW_CTRL 0x00084 #define IP_MC (1 << 0) #define OUTRANGEERR_DISCARD (1 << 1) @@ -142,6 +154,7 @@ #define SW_LEARN_CNTL(x) (1 << (x)) #define CORE_STS_OVERRIDE_GMIIP_PORT(x) (0x160 + (x) * 4) +#define CORE_STS_OVERRIDE_GMIIP2_PORT(x) (0x39000 + (x) * 8) #define LINK_STS (1 << 0) #define DUPLX_MODE (1 << 1) #define SPEED_SHIFT 2 @@ -225,6 +238,10 @@ #define P_TXQ_PSM_VDD(x) (P_TXQ_PSM_VDD_MASK << \ ((x) * P_TXQ_PSM_VDD_SHIFT)) +#define CORE_PORT_TC2_QOS_MAP_PORT(x) (0xc1c0 + ((x) * 0x10)) +#define PRT_TO_QID_MASK 0x3 +#define PRT_TO_QID_SHIFT 3 + #define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8)) #define PORT_VLAN_CTRL_MASK 0x1ff @@ -238,4 +255,150 @@ #define CORE_EEE_EN_CTRL 0x24800 #define CORE_EEE_LPI_INDICATE 0x24810 +#define CORE_CFP_ACC 0x28000 +#define OP_STR_DONE (1 << 0) +#define OP_SEL_SHIFT 1 +#define OP_SEL_READ (1 << OP_SEL_SHIFT) +#define OP_SEL_WRITE (2 << OP_SEL_SHIFT) +#define OP_SEL_SEARCH (4 << OP_SEL_SHIFT) +#define OP_SEL_MASK (7 << OP_SEL_SHIFT) +#define CFP_RAM_CLEAR (1 << 4) +#define RAM_SEL_SHIFT 10 +#define TCAM_SEL (1 << RAM_SEL_SHIFT) +#define ACT_POL_RAM (2 << RAM_SEL_SHIFT) +#define RATE_METER_RAM (4 << RAM_SEL_SHIFT) +#define GREEN_STAT_RAM (8 << RAM_SEL_SHIFT) +#define YELLOW_STAT_RAM (16 << RAM_SEL_SHIFT) +#define RED_STAT_RAM (24 << RAM_SEL_SHIFT) +#define RAM_SEL_MASK (0x1f << RAM_SEL_SHIFT) +#define TCAM_RESET (1 << 15) +#define XCESS_ADDR_SHIFT 16 +#define XCESS_ADDR_MASK 0xff +#define SEARCH_STS (1 << 27) +#define RD_STS_SHIFT 28 +#define RD_STS_TCAM (1 << RD_STS_SHIFT) +#define RD_STS_ACT_POL_RAM (2 << RD_STS_SHIFT) +#define RD_STS_RATE_METER_RAM (4 << RD_STS_SHIFT) +#define RD_STS_STAT_RAM (8 << RD_STS_SHIFT) + +#define CORE_CFP_RATE_METER_GLOBAL_CTL 0x28010 + +#define CORE_CFP_DATA_PORT_0 0x28040 +#define CORE_CFP_DATA_PORT(x) (CORE_CFP_DATA_PORT_0 + \ + (x) * 0x10) + +/* UDF_DATA7 */ +#define L3_FRAMING_SHIFT 24 +#define L3_FRAMING_MASK (0x3 << L3_FRAMING_SHIFT) +#define IPPROTO_SHIFT 8 +#define IPPROTO_MASK (0xff << IPPROTO_SHIFT) +#define IP_FRAG (1 << 7) + +/* UDF_DATA0 */ +#define SLICE_VALID 3 +#define SLICE_NUM_SHIFT 2 +#define SLICE_NUM(x) ((x) << SLICE_NUM_SHIFT) + +#define CORE_CFP_MASK_PORT_0 0x280c0 + +#define CORE_CFP_MASK_PORT(x) (CORE_CFP_MASK_PORT_0 + \ + (x) * 0x10) + +#define CORE_ACT_POL_DATA0 0x28140 +#define VLAN_BYP (1 << 0) +#define EAP_BYP (1 << 1) +#define 
STP_BYP (1 << 2) +#define REASON_CODE_SHIFT 3 +#define REASON_CODE_MASK 0x3f +#define LOOP_BK_EN (1 << 9) +#define NEW_TC_SHIFT 10 +#define NEW_TC_MASK 0x7 +#define CHANGE_TC (1 << 13) +#define DST_MAP_IB_SHIFT 14 +#define DST_MAP_IB_MASK 0x1ff +#define CHANGE_FWRD_MAP_IB_SHIFT 24 +#define CHANGE_FWRD_MAP_IB_MASK 0x3 +#define CHANGE_FWRD_MAP_IB_NO_DEST (0 << CHANGE_FWRD_MAP_IB_SHIFT) +#define CHANGE_FWRD_MAP_IB_REM_ARL (1 << CHANGE_FWRD_MAP_IB_SHIFT) +#define CHANGE_FWRD_MAP_IB_REP_ARL (2 << CHANGE_FWRD_MAP_IB_SHIFT) +#define CHANGE_FWRD_MAP_IB_ADD_DST (3 << CHANGE_FWRD_MAP_IB_SHIFT) +#define NEW_DSCP_IB_SHIFT 26 +#define NEW_DSCP_IB_MASK 0x3f + +#define CORE_ACT_POL_DATA1 0x28150 +#define CHANGE_DSCP_IB (1 << 0) +#define DST_MAP_OB_SHIFT 1 +#define DST_MAP_OB_MASK 0x3ff +#define CHANGE_FWRD_MAP_OB_SHIT 11 +#define CHANGE_FWRD_MAP_OB_MASK 0x3 +#define NEW_DSCP_OB_SHIFT 13 +#define NEW_DSCP_OB_MASK 0x3f +#define CHANGE_DSCP_OB (1 << 19) +#define CHAIN_ID_SHIFT 20 +#define CHAIN_ID_MASK 0xff +#define CHANGE_COLOR (1 << 28) +#define NEW_COLOR_SHIFT 29 +#define NEW_COLOR_MASK 0x3 +#define NEW_COLOR_GREEN (0 << NEW_COLOR_SHIFT) +#define NEW_COLOR_YELLOW (1 << NEW_COLOR_SHIFT) +#define NEW_COLOR_RED (2 << NEW_COLOR_SHIFT) +#define RED_DEFAULT (1 << 31) + +#define CORE_ACT_POL_DATA2 0x28160 +#define MAC_LIMIT_BYPASS (1 << 0) +#define CHANGE_TC_O (1 << 1) +#define NEW_TC_O_SHIFT 2 +#define NEW_TC_O_MASK 0x7 +#define SPCP_RMK_DISABLE (1 << 5) +#define CPCP_RMK_DISABLE (1 << 6) +#define DEI_RMK_DISABLE (1 << 7) + +#define CORE_RATE_METER0 0x28180 +#define COLOR_MODE (1 << 0) +#define POLICER_ACTION (1 << 1) +#define COUPLING_FLAG (1 << 2) +#define POLICER_MODE_SHIFT 3 +#define POLICER_MODE_MASK 0x3 +#define POLICER_MODE_RFC2698 (0 << POLICER_MODE_SHIFT) +#define POLICER_MODE_RFC4115 (1 << POLICER_MODE_SHIFT) +#define POLICER_MODE_MEF (2 << POLICER_MODE_SHIFT) +#define POLICER_MODE_DISABLE (3 << POLICER_MODE_SHIFT) + +#define CORE_RATE_METER1 0x28190 +#define EIR_TK_BKT_MASK 0x7fffff + +#define CORE_RATE_METER2 0x281a0 +#define EIR_BKT_SIZE_MASK 0xfffff + +#define CORE_RATE_METER3 0x281b0 +#define EIR_REF_CNT_MASK 0x7ffff + +#define CORE_RATE_METER4 0x281c0 +#define CIR_TK_BKT_MASK 0x7fffff + +#define CORE_RATE_METER5 0x281d0 +#define CIR_BKT_SIZE_MASK 0xfffff + +#define CORE_RATE_METER6 0x281e0 +#define CIR_REF_CNT_MASK 0x7ffff + +#define CORE_CFP_CTL_REG 0x28400 +#define CFP_EN_MAP_MASK 0x1ff + +/* IPv4 slices, 3 of them */ +#define CORE_UDF_0_A_0_8_PORT_0 0x28440 +#define CFG_UDF_OFFSET_MASK 0x1f +#define CFG_UDF_OFFSET_BASE_SHIFT 5 +#define CFG_UDF_SOF (0 << CFG_UDF_OFFSET_BASE_SHIFT) +#define CFG_UDF_EOL2 (2 << CFG_UDF_OFFSET_BASE_SHIFT) +#define CFG_UDF_EOL3 (3 << CFG_UDF_OFFSET_BASE_SHIFT) + +/* Number of slices for IPv4, IPv6 and non-IP */ +#define UDF_NUM_SLICES 9 + +/* Spacing between different slices */ +#define UDF_SLICE_OFFSET 0x40 + +#define CFP_NUM_RULES 256 + #endif /* __BCM_SF2_REGS_H */ diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index eea8e0176e33..bf385377a461 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -222,26 +222,62 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val) return 0; } +static int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val) +{ + return mv88e6xxx_read(chip, addr, reg, val); +} + +static int mv88e6165_phy_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val) +{ + return 
mv88e6xxx_write(chip, addr, reg, val); +} + +static struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip) +{ + struct mv88e6xxx_mdio_bus *mdio_bus; + + mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus, + list); + if (!mdio_bus) + return NULL; + + return mdio_bus->bus; +} + static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy, int reg, u16 *val) { int addr = phy; /* PHY devices addresses start at 0x0 */ + struct mii_bus *bus; + + bus = mv88e6xxx_default_mdio_bus(chip); + if (!bus) + return -EOPNOTSUPP; if (!chip->info->ops->phy_read) return -EOPNOTSUPP; - return chip->info->ops->phy_read(chip, addr, reg, val); + return chip->info->ops->phy_read(chip, bus, addr, reg, val); } static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val) { int addr = phy; /* PHY devices addresses start at 0x0 */ + struct mii_bus *bus; + + bus = mv88e6xxx_default_mdio_bus(chip); + if (!bus) + return -EOPNOTSUPP; if (!chip->info->ops->phy_write) return -EOPNOTSUPP; - return chip->info->ops->phy_write(chip, addr, reg, val); + return chip->info->ops->phy_write(chip, bus, addr, reg, val); } static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page) @@ -611,8 +647,9 @@ static void mv88e6xxx_ppu_state_destroy(struct mv88e6xxx_chip *chip) del_timer_sync(&chip->ppu_timer); } -static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr, - int reg, u16 *val) +static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val) { int err; @@ -625,8 +662,9 @@ static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr, return err; } -static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, int addr, - int reg, u16 val) +static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val) { int err; @@ -664,6 +702,11 @@ static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip) return chip->info->family == MV88E6XXX_FAMILY_6320; } +static bool mv88e6xxx_6341_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6341; +} + static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip) { return chip->info->family == MV88E6XXX_FAMILY_6351; @@ -1209,8 +1252,8 @@ static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port) { - struct net_device *bridge = chip->ports[port].bridge_dev; struct dsa_switch *ds = chip->ds; + struct net_device *bridge = ds->ports[port].bridge_dev; u16 output_ports = 0; int i; @@ -1220,7 +1263,7 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port) } else { for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { /* allow sending frames to every group member */ - if (bridge && chip->ports[i].bridge_dev == bridge) + if (bridge && ds->ports[i].bridge_dev == bridge) output_ports |= BIT(i); /* allow sending frames to CPU port and DSA link(s) */ @@ -1688,7 +1731,8 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid, : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) || - mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) { + mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip) || + mv88e6xxx_6341_family(chip)) { struct mv88e6xxx_vtu_entry vstp; /* Adding a VTU entry requires a valid STU entry. 
As VSTP is not @@ -1782,17 +1826,17 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) continue; - if (chip->ports[i].bridge_dev == - chip->ports[port].bridge_dev) + if (ds->ports[i].bridge_dev == + ds->ports[port].bridge_dev) break; /* same bridge, check next VLAN */ - if (!chip->ports[i].bridge_dev) + if (!ds->ports[i].bridge_dev) continue; netdev_warn(ds->ports[port].netdev, "hardware VLAN %d already used by %s\n", vlan.vid, - netdev_name(chip->ports[i].bridge_dev)); + netdev_name(ds->ports[i].bridge_dev)); err = -EOPNOTSUPP; goto unlock; } @@ -2282,18 +2326,16 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, } static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct net_device *br) { struct mv88e6xxx_chip *chip = ds->priv; int i, err = 0; mutex_lock(&chip->reg_lock); - /* Assign the bridge and remap each port's VLANTable */ - chip->ports[port].bridge_dev = bridge; - + /* Remap each port's VLANTable */ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { - if (chip->ports[i].bridge_dev == bridge) { + if (ds->ports[i].bridge_dev == br) { err = _mv88e6xxx_port_based_vlan_map(chip, i); if (err) break; @@ -2305,19 +2347,17 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, return err; } -static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) +static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, + struct net_device *br) { struct mv88e6xxx_chip *chip = ds->priv; - struct net_device *bridge = chip->ports[port].bridge_dev; int i; mutex_lock(&chip->reg_lock); - /* Unassign the bridge and remap each port's VLANTable */ - chip->ports[port].bridge_dev = NULL; - + /* Remap each port's VLANTable */ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) - if (i == port || chip->ports[i].bridge_dev == bridge) + if (i == port || ds->ports[i].bridge_dev == br) if (_mv88e6xxx_port_based_vlan_map(chip, i)) netdev_warn(ds->ports[i].netdev, "failed to remap\n"); @@ -2543,7 +2583,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || mv88e6xxx_6095_family(chip) || mv88e6xxx_6320_family(chip) || - mv88e6xxx_6185_family(chip)) + mv88e6xxx_6185_family(chip) || mv88e6xxx_6341_family(chip)) reg = PORT_CONTROL_2_MAP_DA; if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) { @@ -2597,7 +2637,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || - mv88e6xxx_6320_family(chip)) { + mv88e6xxx_6320_family(chip) || mv88e6xxx_6341_family(chip)) { /* Port ATU control: disable limiting the number of * address database entries that this port is allowed * to use. 
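The bridging hunks above all funnel into the same remap rule: whenever a port joins or leaves a bridge, _mv88e6xxx_port_based_vlan_map() recomputes which ports it may egress to, and bridge membership is now read from the generic ds->ports[] array instead of a driver-private chip->ports[] copy. A condensed, standalone sketch of that rule follows; struct sw_port, the helper name, and the is_cpu/is_dsa flags are illustrative stand-ins for the driver's real types, and it assumes a switch with at most 16 ports.

/* Sketch: compute the egress mask for one port. A bridged port may
 * send to every other member of the same bridge plus the CPU and DSA
 * link ports; a standalone port may only reach the CPU and DSA ports.
 */
#include <stdbool.h>
#include <stdint.h>

struct sw_port {
	void *bridge_dev;	/* NULL when the port is not bridged */
	bool is_cpu;		/* connected to the host CPU */
	bool is_dsa;		/* inter-switch (DSA) link */
};

static uint16_t port_based_vlan_map(const struct sw_port *ports,
				    int num_ports, int port)
{
	void *bridge = ports[port].bridge_dev;
	uint16_t mask = 0;
	int i;

	for (i = 0; i < num_ports; i++) {
		if (i == port)
			continue;	/* never reflect frames back out */
		if (ports[i].is_cpu || ports[i].is_dsa)
			mask |= 1u << i;	/* CPU/DSA always reachable */
		else if (bridge && ports[i].bridge_dev == bridge)
			mask |= 1u << i;	/* same bridge group */
	}
	return mask;
}

Reading membership out of ds->ports[] this way is also what lets both mv88e6xxx and qca8k, later in this diff, drop their private bridge_dev bookkeeping: the DSA core already tracks the bridge for every port.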
@@ -2821,7 +2861,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) int i; chip->ds = ds; - ds->slave_mii_bus = chip->mdio_bus; + ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip); mutex_lock(&chip->reg_lock); @@ -2878,15 +2918,16 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) { - struct mv88e6xxx_chip *chip = bus->priv; + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; + struct mv88e6xxx_chip *chip = mdio_bus->chip; u16 val; int err; - if (phy >= mv88e6xxx_num_ports(chip)) - return 0xffff; + if (!chip->info->ops->phy_read) + return -EOPNOTSUPP; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_phy_read(chip, phy, reg, &val); + err = chip->info->ops->phy_read(chip, bus, phy, reg, &val); mutex_unlock(&chip->reg_lock); return err ? err : val; @@ -2894,34 +2935,39 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) { - struct mv88e6xxx_chip *chip = bus->priv; + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; + struct mv88e6xxx_chip *chip = mdio_bus->chip; int err; - if (phy >= mv88e6xxx_num_ports(chip)) - return 0xffff; + if (!chip->info->ops->phy_write) + return -EOPNOTSUPP; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_phy_write(chip, phy, reg, val); + err = chip->info->ops->phy_write(chip, bus, phy, reg, val); mutex_unlock(&chip->reg_lock); return err; } static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, - struct device_node *np) + struct device_node *np, + bool external) { static int index; + struct mv88e6xxx_mdio_bus *mdio_bus; struct mii_bus *bus; int err; - if (np) - chip->mdio_np = of_get_child_by_name(np, "mdio"); - - bus = devm_mdiobus_alloc(chip->dev); + bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus)); if (!bus) return -ENOMEM; - bus->priv = (void *)chip; + mdio_bus = bus->priv; + mdio_bus->bus = bus; + mdio_bus->chip = chip; + INIT_LIST_HEAD(&mdio_bus->list); + mdio_bus->external = external; + if (np) { bus->name = np->full_name; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name); @@ -2934,183 +2980,73 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, bus->write = mv88e6xxx_mdio_write; bus->parent = chip->dev; - if (chip->mdio_np) - err = of_mdiobus_register(bus, chip->mdio_np); + if (np) + err = of_mdiobus_register(bus, np); else err = mdiobus_register(bus); if (err) { dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err); - goto out; + return err; } - chip->mdio_bus = bus; - - return 0; - -out: - if (chip->mdio_np) - of_node_put(chip->mdio_np); - - return err; -} - -static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip) - -{ - struct mii_bus *bus = chip->mdio_bus; - - mdiobus_unregister(bus); - - if (chip->mdio_np) - of_node_put(chip->mdio_np); -} -#ifdef CONFIG_NET_DSA_HWMON - -static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp) -{ - struct mv88e6xxx_chip *chip = ds->priv; - u16 val; - int ret; - - *temp = 0; - - mutex_lock(&chip->reg_lock); - - ret = mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x6); - if (ret < 0) - goto error; - - /* Enable temperature sensor */ - ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val); - if (ret < 0) - goto error; - - ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val | (1 << 5)); - if (ret < 0) - goto error; - - /* Wait for temperature to stabilize */ - usleep_range(10000, 12000); - - ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val); - if (ret < 0) - goto error; - - /* Disable temperature 
sensor */ - ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val & ~(1 << 5)); - if (ret < 0) - goto error; - - *temp = ((val & 0x1f) - 5) * 5; - -error: - mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x0); - mutex_unlock(&chip->reg_lock); - return ret; -} - -static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; - u16 val; - int ret; - - *temp = 0; - - mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_phy_page_read(chip, phy, 6, 27, &val); - mutex_unlock(&chip->reg_lock); - if (ret < 0) - return ret; - - *temp = (val & 0xff) - 25; + if (external) + list_add_tail(&mdio_bus->list, &chip->mdios); + else + list_add(&mdio_bus->list, &chip->mdios); return 0; } -static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) -{ - struct mv88e6xxx_chip *chip = ds->priv; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP)) - return -EOPNOTSUPP; - - if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip)) - return mv88e63xx_get_temp(ds, temp); - - return mv88e61xx_get_temp(ds, temp); -} +static const struct of_device_id mv88e6xxx_mdio_external_match[] = { + { .compatible = "marvell,mv88e6xxx-mdio-external", + .data = (void *)true }, + { }, +}; -static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) +static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip, + struct device_node *np) { - struct mv88e6xxx_chip *chip = ds->priv; - int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; - u16 val; - int ret; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) - return -EOPNOTSUPP; - - *temp = 0; + const struct of_device_id *match; + struct device_node *child; + int err; - mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); - mutex_unlock(&chip->reg_lock); - if (ret < 0) - return ret; + /* Always register one mdio bus for the internal/default mdio + * bus. This maybe represented in the device tree, but is + * optional. + */ + child = of_get_child_by_name(np, "mdio"); + err = mv88e6xxx_mdio_register(chip, child, false); + if (err) + return err; - *temp = (((val >> 8) & 0x1f) * 5) - 25; + /* Walk the device tree, and see if there are any other nodes + * which say they are compatible with the external mdio + * bus. + */ + for_each_available_child_of_node(np, child) { + match = of_match_node(mv88e6xxx_mdio_external_match, child); + if (match) { + err = mv88e6xxx_mdio_register(chip, child, true); + if (err) + return err; + } + } return 0; } -static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; - u16 val; - int err; +static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip) - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) - return -EOPNOTSUPP; - - mutex_lock(&chip->reg_lock); - err = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); - if (err) - goto unlock; - temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); - err = mv88e6xxx_phy_page_write(chip, phy, 6, 26, - (val & 0xe0ff) | (temp << 8)); -unlock: - mutex_unlock(&chip->reg_lock); - - return err; -} - -static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) { - struct mv88e6xxx_chip *chip = ds->priv; - int phy = mv88e6xxx_6320_family(chip) ? 
3 : 0; - u16 val; - int ret; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) - return -EOPNOTSUPP; - - *alarm = false; - - mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); - mutex_unlock(&chip->reg_lock); - if (ret < 0) - return ret; + struct mv88e6xxx_mdio_bus *mdio_bus; + struct mii_bus *bus; - *alarm = !!(val & 0x40); + list_for_each_entry(mdio_bus, &chip->mdios, list) { + bus = mdio_bus->bus; - return 0; + mdiobus_unregister(bus); + } } -#endif /* CONFIG_NET_DSA_HWMON */ static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) { @@ -3233,8 +3169,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { static const struct mv88e6xxx_ops mv88e6123_ops = { /* MV88E6XXX_FAMILY_6165 */ .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_read, - .phy_write = mv88e6xxx_write, + .phy_read = mv88e6165_phy_read, + .phy_write = mv88e6165_phy_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, @@ -3280,8 +3216,8 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { static const struct mv88e6xxx_ops mv88e6161_ops = { /* MV88E6XXX_FAMILY_6165 */ .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_read, - .phy_write = mv88e6xxx_write, + .phy_read = mv88e6165_phy_read, + .phy_write = mv88e6165_phy_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, @@ -3305,8 +3241,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { static const struct mv88e6xxx_ops mv88e6165_ops = { /* MV88E6XXX_FAMILY_6165 */ .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_read, - .phy_write = mv88e6xxx_write, + .phy_read = mv88e6165_phy_read, + .phy_write = mv88e6165_phy_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, @@ -3453,6 +3389,8 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { static const struct mv88e6xxx_ops mv88e6190_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3478,6 +3416,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { static const struct mv88e6xxx_ops mv88e6190x_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3503,6 +3443,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { static const struct mv88e6xxx_ops mv88e6191_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3556,6 +3498,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { static const struct mv88e6xxx_ops mv88e6290_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3712,8 +3656,66 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .reset = mv88e6352_g1_reset, }; 
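One design point in the MDIO rework above is the ordering of chip->mdios: mv88e6xxx_mdio_register() inserts the internal bus at the head of the list with list_add() and appends external buses with list_add_tail(), so mv88e6xxx_default_mdio_bus() can simply take the first entry. Below is a self-contained sketch of that invariant using a plain C list instead of the kernel's list.h; the types and names are illustrative only, not the driver's.

/* Sketch: prepend the internal bus, append external ones, so the
 * "default" bus is always the list head no matter how many external
 * buses the device tree declares or in which order they register.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct mdio_entry {
	const char *name;
	bool external;
	struct mdio_entry *next;
};

struct chip {
	struct mdio_entry *mdios;	/* head = default/internal bus */
};

static void register_bus(struct chip *c, struct mdio_entry *e)
{
	if (e->external) {
		/* append, like list_add_tail() */
		struct mdio_entry **p = &c->mdios;

		while (*p)
			p = &(*p)->next;
		e->next = NULL;
		*p = e;
	} else {
		/* prepend, like list_add() */
		e->next = c->mdios;
		c->mdios = e;
	}
}

int main(void)
{
	struct chip c = { NULL };
	struct mdio_entry ext = { "external", true, NULL };
	struct mdio_entry internal = { "internal", false, NULL };

	register_bus(&c, &ext);
	register_bus(&c, &internal);
	assert(!c.mdios->external);	/* head is the internal bus */
	return 0;
}

The per-bus external flag carried in struct mv88e6xxx_mdio_bus is what later makes mv88e6xxx_g2_smi_phy_read() and _write() set GLOBAL2_SMI_PHY_CMD_EXTERNAL in the SMI command, steering those accesses to the externally attached PHYs.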
+static const struct mv88e6xxx_ops mv88e6141_ops = { + /* MV88E6XXX_FAMILY_6341 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, + .set_switch_mac = mv88e6xxx_g2_set_switch_mac, + .phy_read = mv88e6xxx_g2_smi_phy_read, + .phy_write = mv88e6xxx_g2_smi_phy_write, + .port_set_link = mv88e6xxx_port_set_link, + .port_set_duplex = mv88e6xxx_port_set_duplex, + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, + .port_set_speed = mv88e6390_port_set_speed, + .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_ether_type = mv88e6351_port_set_ether_type, + .port_jumbo_config = mv88e6165_port_jumbo_config, + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, + .port_pause_config = mv88e6097_port_pause_config, + .stats_snapshot = mv88e6390_g1_stats_snapshot, + .stats_get_sset_count = mv88e6320_stats_get_sset_count, + .stats_get_strings = mv88e6320_stats_get_strings, + .stats_get_stats = mv88e6390_stats_get_stats, + .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, + .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .reset = mv88e6352_g1_reset, +}; + +static const struct mv88e6xxx_ops mv88e6341_ops = { + /* MV88E6XXX_FAMILY_6341 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, + .set_switch_mac = mv88e6xxx_g2_set_switch_mac, + .phy_read = mv88e6xxx_g2_smi_phy_read, + .phy_write = mv88e6xxx_g2_smi_phy_write, + .port_set_link = mv88e6xxx_port_set_link, + .port_set_duplex = mv88e6xxx_port_set_duplex, + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, + .port_set_speed = mv88e6390_port_set_speed, + .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_ether_type = mv88e6351_port_set_ether_type, + .port_jumbo_config = mv88e6165_port_jumbo_config, + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, + .port_pause_config = mv88e6097_port_pause_config, + .stats_snapshot = mv88e6390_g1_stats_snapshot, + .stats_get_sset_count = mv88e6320_stats_get_sset_count, + .stats_get_strings = mv88e6320_stats_get_strings, + .stats_get_stats = mv88e6390_stats_get_stats, + .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, + .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .reset = mv88e6352_g1_reset, +}; + static const struct mv88e6xxx_ops mv88e6390_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3741,6 +3743,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { static const struct mv88e6xxx_ops mv88e6390x_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3768,6 +3772,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { static const struct mv88e6xxx_ops mv88e6391_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, 
.phy_write = mv88e6xxx_g2_smi_phy_write, @@ -4093,6 +4099,34 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .ops = &mv88e6321_ops, }, + [MV88E6141] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6141, + .family = MV88E6XXX_FAMILY_6341, + .name = "Marvell 88E6341", + .num_databases = 4096, + .num_ports = 6, + .port_base_addr = 0x10, + .global1_addr = 0x1b, + .age_time_coeff = 3750, + .tag_protocol = DSA_TAG_PROTO_EDSA, + .flags = MV88E6XXX_FLAGS_FAMILY_6341, + .ops = &mv88e6141_ops, + }, + + [MV88E6341] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6341, + .family = MV88E6XXX_FAMILY_6341, + .name = "Marvell 88E6341", + .num_databases = 4096, + .num_ports = 6, + .port_base_addr = 0x10, + .global1_addr = 0x1b, + .age_time_coeff = 3750, + .tag_protocol = DSA_TAG_PROTO_EDSA, + .flags = MV88E6XXX_FLAGS_FAMILY_6341, + .ops = &mv88e6341_ops, + }, + [MV88E6350] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6350, .family = MV88E6XXX_FAMILY_6351, @@ -4222,6 +4256,7 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev) chip->dev = dev; mutex_init(&chip->reg_lock); + INIT_LIST_HEAD(&chip->mdios); return chip; } @@ -4296,7 +4331,7 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, mv88e6xxx_phy_init(chip); - err = mv88e6xxx_mdio_register(chip, NULL); + err = mv88e6xxx_mdios_register(chip, NULL); if (err) goto free; @@ -4372,12 +4407,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_sset_count = mv88e6xxx_get_sset_count, .set_eee = mv88e6xxx_set_eee, .get_eee = mv88e6xxx_get_eee, -#ifdef CONFIG_NET_DSA_HWMON - .get_temp = mv88e6xxx_get_temp, - .get_temp_limit = mv88e6xxx_get_temp_limit, - .set_temp_limit = mv88e6xxx_set_temp_limit, - .get_temp_alarm = mv88e6xxx_get_temp_alarm, -#endif .get_eeprom_len = mv88e6xxx_get_eeprom_len, .get_eeprom = mv88e6xxx_get_eeprom, .set_eeprom = mv88e6xxx_set_eeprom, @@ -4407,23 +4436,21 @@ static struct dsa_switch_driver mv88e6xxx_switch_drv = { .ops = &mv88e6xxx_switch_ops, }; -static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip, - struct device_node *np) +static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) { struct device *dev = chip->dev; struct dsa_switch *ds; - ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); + ds = dsa_switch_alloc(dev, DSA_MAX_PORTS); if (!ds) return -ENOMEM; - ds->dev = dev; ds->priv = chip; ds->ops = &mv88e6xxx_switch_ops; dev_set_drvdata(dev, ds); - return dsa_register_switch(ds, np); + return dsa_register_switch(ds, dev); } static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip) @@ -4503,18 +4530,18 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) } } - err = mv88e6xxx_mdio_register(chip, np); + err = mv88e6xxx_mdios_register(chip, np); if (err) goto out_g2_irq; - err = mv88e6xxx_register_switch(chip, np); + err = mv88e6xxx_register_switch(chip); if (err) goto out_mdio; return 0; out_mdio: - mv88e6xxx_mdio_unregister(chip); + mv88e6xxx_mdios_unregister(chip); out_g2_irq: if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT) && chip->irq > 0) mv88e6xxx_g2_irq_free(chip); @@ -4535,7 +4562,7 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_phy_destroy(chip); mv88e6xxx_unregister_switch(chip); - mv88e6xxx_mdio_unregister(chip); + mv88e6xxx_mdios_unregister(chip); if (chip->irq > 0) { if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT)) diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 3e77071949ab..353e26bea3c3 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ 
b/drivers/net/dsa/mv88e6xxx/global2.c @@ -218,7 +218,8 @@ static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip) } /* Offset 0x14: EEPROM Command - * Offset 0x15: EEPROM Data + * Offset 0x15: EEPROM Data (for 16-bit data access) + * Offset 0x15: EEPROM Addr (for 8-bit data access) */ static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) @@ -239,6 +240,50 @@ static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd) return mv88e6xxx_g2_eeprom_wait(chip); } +static int mv88e6xxx_g2_eeprom_read8(struct mv88e6xxx_chip *chip, + u16 addr, u8 *data) +{ + u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ; + int err; + + err = mv88e6xxx_g2_eeprom_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr); + if (err) + return err; + + err = mv88e6xxx_g2_eeprom_cmd(chip, cmd); + if (err) + return err; + + err = mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_CMD, &cmd); + if (err) + return err; + + *data = cmd & 0xff; + + return 0; +} + +static int mv88e6xxx_g2_eeprom_write8(struct mv88e6xxx_chip *chip, + u16 addr, u8 data) +{ + u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | GLOBAL2_EEPROM_CMD_WRITE_EN; + int err; + + err = mv88e6xxx_g2_eeprom_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr); + if (err) + return err; + + return mv88e6xxx_g2_eeprom_cmd(chip, cmd | data); +} + static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip, u8 addr, u16 *data) { @@ -273,6 +318,52 @@ static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip, return mv88e6xxx_g2_eeprom_cmd(chip, cmd); } +int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data) +{ + unsigned int offset = eeprom->offset; + unsigned int len = eeprom->len; + int err; + + eeprom->len = 0; + + while (len) { + err = mv88e6xxx_g2_eeprom_read8(chip, offset, data); + if (err) + return err; + + eeprom->len++; + offset++; + data++; + len--; + } + + return 0; +} + +int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data) +{ + unsigned int offset = eeprom->offset; + unsigned int len = eeprom->len; + int err; + + eeprom->len = 0; + + while (len) { + err = mv88e6xxx_g2_eeprom_write8(chip, offset, *data); + if (err) + return err; + + eeprom->len++; + offset++; + data++; + len--; + } + + return 0; +} + int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data) { @@ -410,12 +501,17 @@ static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd) return mv88e6xxx_g2_smi_phy_wait(chip); } -int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 *val) +int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val) { u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg; + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; int err; + if (mdio_bus->external) + cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL; + err = mv88e6xxx_g2_smi_phy_wait(chip); if (err) return err; @@ -427,12 +523,17 @@ int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg, return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val); } -int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 val) +int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val) { u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg; + struct mv88e6xxx_mdio_bus *mdio_bus = 
bus->priv; int err; + if (mdio_bus->external) + cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL; + err = mv88e6xxx_g2_smi_phy_wait(chip); if (err) return err; diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 9aefb7d8b0ad..00e635279ba1 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -23,15 +23,24 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) return 0; } -int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 *val); -int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 val); +int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val); +int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val); int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr); + +int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data); +int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data); + int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data); int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data); + int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); @@ -50,12 +59,14 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) } static inline int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, int addr, int reg, u16 *val) { return -EOPNOTSUPP; } static inline int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, int addr, int reg, u16 val) { return -EOPNOTSUPP; @@ -67,6 +78,20 @@ static inline int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, return -EOPNOTSUPP; } +static inline int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + return -EOPNOTSUPP; +} + static inline int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data) diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index a224d66dafd9..9c5c0472b211 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -87,6 +87,7 @@ #define PORT_SWITCH_ID_PROD_NUM_6131 0x106 #define PORT_SWITCH_ID_PROD_NUM_6320 0x115 #define PORT_SWITCH_ID_PROD_NUM_6123 0x121 +#define PORT_SWITCH_ID_PROD_NUM_6141 0x340 #define PORT_SWITCH_ID_PROD_NUM_6161 0x161 #define PORT_SWITCH_ID_PROD_NUM_6165 0x165 #define PORT_SWITCH_ID_PROD_NUM_6171 0x171 @@ -100,6 +101,7 @@ #define PORT_SWITCH_ID_PROD_NUM_6240 0x240 #define PORT_SWITCH_ID_PROD_NUM_6290 0x290 #define PORT_SWITCH_ID_PROD_NUM_6321 0x310 +#define PORT_SWITCH_ID_PROD_NUM_6341 0x341 #define PORT_SWITCH_ID_PROD_NUM_6352 0x352 #define PORT_SWITCH_ID_PROD_NUM_6350 0x371 #define PORT_SWITCH_ID_PROD_NUM_6351 0x375 @@ -382,10 +384,12 @@ #define GLOBAL2_EEPROM_CMD_WRITE_EN BIT(10) #define GLOBAL2_EEPROM_CMD_ADDR_MASK 0xff #define GLOBAL2_EEPROM_DATA 0x15 +#define GLOBAL2_EEPROM_ADDR 0x15 /* 6390, 6341 */ #define GLOBAL2_PTP_AVB_OP 0x16 #define GLOBAL2_PTP_AVB_DATA 
0x17 #define GLOBAL2_SMI_PHY_CMD 0x18 #define GLOBAL2_SMI_PHY_CMD_BUSY BIT(15) +#define GLOBAL2_SMI_PHY_CMD_EXTERNAL BIT(13) #define GLOBAL2_SMI_PHY_CMD_MODE_22 BIT(12) #define GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA ((0x1 << 10) | \ GLOBAL2_SMI_PHY_CMD_MODE_22 | \ @@ -418,6 +422,7 @@ enum mv88e6xxx_model { MV88E6097, MV88E6123, MV88E6131, + MV88E6141, MV88E6161, MV88E6165, MV88E6171, @@ -432,6 +437,7 @@ enum mv88e6xxx_model { MV88E6290, MV88E6320, MV88E6321, + MV88E6341, MV88E6350, MV88E6351, MV88E6352, @@ -447,6 +453,7 @@ enum mv88e6xxx_family { MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */ MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */ MV88E6XXX_FAMILY_6320, /* 6320 6321 */ + MV88E6XXX_FAMILY_6341, /* 6141 6341 */ MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */ MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */ MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */ @@ -496,12 +503,6 @@ enum mv88e6xxx_cap { */ MV88E6XXX_CAP_STU, - /* Internal temperature sensor. - * Available from any enabled port's PHY register 26, page 6. - */ - MV88E6XXX_CAP_TEMP, - MV88E6XXX_CAP_TEMP_LIMIT, - /* VLAN Table Unit. * The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP. */ @@ -532,8 +533,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT) #define MV88E6XXX_FLAG_STU BIT_ULL(MV88E6XXX_CAP_STU) -#define MV88E6XXX_FLAG_TEMP BIT_ULL(MV88E6XXX_CAP_TEMP) -#define MV88E6XXX_FLAG_TEMP_LIMIT BIT_ULL(MV88E6XXX_CAP_TEMP_LIMIT) #define MV88E6XXX_FLAG_VTU BIT_ULL(MV88E6XXX_CAP_VTU) /* Ingress Rate Limit unit */ @@ -585,7 +584,6 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ @@ -604,13 +602,25 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_TEMP | \ - MV88E6XXX_FLAG_TEMP_LIMIT | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ MV88E6XXX_FLAGS_PVT) +#define MV88E6XXX_FLAGS_FAMILY_6341 \ + (MV88E6XXX_FLAG_EEE | \ + MV88E6XXX_FLAG_G1_ATU_FID | \ + MV88E6XXX_FLAG_G1_VTU_FID | \ + MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_INT | \ + MV88E6XXX_FLAG_G2_POT | \ + MV88E6XXX_FLAG_STU | \ + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_MULTI_CHIP | \ + MV88E6XXX_FLAGS_PVT | \ + MV88E6XXX_FLAGS_SERDES) + #define MV88E6XXX_FLAGS_FAMILY_6351 \ (MV88E6XXX_FLAG_G1_ATU_FID | \ MV88E6XXX_FLAG_G1_VTU_FID | \ @@ -620,7 +630,6 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ @@ -636,8 +645,6 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_TEMP | \ - MV88E6XXX_FLAG_TEMP_LIMIT | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ @@ -650,8 +657,6 @@ struct mv88e6xxx_ops; (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_TEMP | \ - MV88E6XXX_FLAG_TEMP_LIMIT | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ @@ -690,10 +695,6 @@ struct mv88e6xxx_vtu_entry { struct mv88e6xxx_bus_ops; -struct mv88e6xxx_priv_port { - struct net_device *bridge_dev; -}; - struct mv88e6xxx_irq { u16 masked; struct irq_chip chip; @@ -734,8 +735,6 @@ struct mv88e6xxx_chip { */ 
struct mutex stats_mutex; - struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS]; - /* A switch may have a GPIO line tied to its reset pin. Parse * this from the device tree, and use it before performing * switch soft reset. @@ -745,11 +744,8 @@ struct mv88e6xxx_chip { /* set to size of eeprom if supported by the switch */ int eeprom_len; - /* Device node for the MDIO bus */ - struct device_node *mdio_np; - - /* And the MDIO bus itself */ - struct mii_bus *mdio_bus; + /* List of mdio busses */ + struct list_head mdios; /* There can be two interrupt controllers, which are chained * off a GPIO as interrupt source @@ -765,6 +761,13 @@ struct mv88e6xxx_bus_ops { int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); }; +struct mv88e6xxx_mdio_bus { + struct mii_bus *bus; + struct mv88e6xxx_chip *chip; + struct list_head list; + bool external; +}; + struct mv88e6xxx_ops { int (*get_eeprom)(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data); @@ -773,10 +776,12 @@ struct mv88e6xxx_ops { int (*set_switch_mac)(struct mv88e6xxx_chip *chip, u8 *addr); - int (*phy_read)(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 *val); - int (*phy_write)(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 val); + int (*phy_read)(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val); + int (*phy_write)(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val); /* PHY Polling Unit (PPU) operations */ int (*ppu_enable)(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 54d270d59eb0..a4fd4ccf7b67 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -746,17 +746,14 @@ qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) } static int -qca8k_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) +qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int port_mask = BIT(QCA8K_CPU_PORT); int i; - priv->port_sts[port].bridge_dev = bridge; - for (i = 1; i < QCA8K_NUM_PORTS; i++) { - if (priv->port_sts[i].bridge_dev != bridge) + if (ds->ports[i].bridge_dev != br) continue; /* Add this port to the portvlan mask of the other ports * in the bridge @@ -775,14 +772,13 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port, } static void -qca8k_port_bridge_leave(struct dsa_switch *ds, int port) +qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int i; for (i = 1; i < QCA8K_NUM_PORTS; i++) { - if (priv->port_sts[i].bridge_dev != - priv->port_sts[port].bridge_dev) + if (ds->ports[i].bridge_dev != br) continue; /* Remove this port from the portvlan mask of the other ports * in the bridge @@ -791,7 +787,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port) QCA8K_PORT_LOOKUP_CTRL(i), BIT(port)); } - priv->port_sts[port].bridge_dev = NULL; + /* Set the cpu port to be the only one in the portvlan mask of * this port */ @@ -954,17 +950,16 @@ qca8k_sw_probe(struct mdio_device *mdiodev) if (id != QCA8K_ID_QCA8337) return -ENODEV; - priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL); + priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS); if (!priv->ds) return -ENOMEM; priv->ds->priv = priv; - priv->ds->dev = &mdiodev->dev; priv->ds->ops = &qca8k_switch_ops; mutex_init(&priv->reg_mutex); dev_set_drvdata(&mdiodev->dev, priv); - return 
dsa_register_switch(priv->ds, priv->ds->dev->of_node); + return dsa_register_switch(priv->ds, &mdiodev->dev); } static void diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 201464719531..1ed4fac6cd6d 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -157,7 +157,6 @@ enum qca8k_fdb_cmd { struct ar8xxx_port_status { struct ethtool_eee eee; - struct net_device *bridge_dev; int enabled; }; diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 1f2de4e8207c..2c80611b94ae 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -41,7 +41,48 @@ #define DRV_NAME "dummy" #define DRV_VERSION "1.0" +#undef pr_fmt +#define pr_fmt(fmt) DRV_NAME ": " fmt + static int numdummies = 1; +static int num_vfs; + +struct vf_data_storage { + u8 vf_mac[ETH_ALEN]; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + __be16 vlan_proto; + u16 min_tx_rate; + u16 max_tx_rate; + u8 spoofchk_enabled; + bool rss_query_enabled; + u8 trusted; + int link_state; +}; + +struct dummy_priv { + struct vf_data_storage *vfinfo; +}; + +static int dummy_num_vf(struct device *dev) +{ + return num_vfs; +} + +static struct bus_type dummy_bus = { + .name = "dummy", + .num_vf = dummy_num_vf, +}; + +static void release_dummy_parent(struct device *dev) +{ +} + +static struct device dummy_parent = { + .init_name = "dummy", + .bus = &dummy_bus, + .release = release_dummy_parent, +}; /* fake multicast ability */ static void set_multicast_list(struct net_device *dev) @@ -90,10 +131,25 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev) static int dummy_dev_init(struct net_device *dev) { + struct dummy_priv *priv = netdev_priv(dev); + dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); if (!dev->dstats) return -ENOMEM; + priv->vfinfo = NULL; + + if (!num_vfs) + return 0; + + dev->dev.parent = &dummy_parent; + priv->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), + GFP_KERNEL); + if (!priv->vfinfo) { + free_percpu(dev->dstats); + return -ENOMEM; + } + return 0; } @@ -111,6 +167,117 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier) return 0; } +static int dummy_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (!is_valid_ether_addr(mac) || (vf >= num_vfs)) + return -EINVAL; + + memcpy(priv->vfinfo[vf].vf_mac, mac, ETH_ALEN); + + return 0; +} + +static int dummy_set_vf_vlan(struct net_device *dev, int vf, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if ((vf >= num_vfs) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + + priv->vfinfo[vf].pf_vlan = vlan; + priv->vfinfo[vf].pf_qos = qos; + priv->vfinfo[vf].vlan_proto = vlan_proto; + + return 0; +} + +static int dummy_set_vf_rate(struct net_device *dev, int vf, int min, int max) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].min_tx_rate = min; + priv->vfinfo[vf].max_tx_rate = max; + + return 0; +} + +static int dummy_set_vf_spoofchk(struct net_device *dev, int vf, bool val) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].spoofchk_enabled = val; + + return 0; +} + +static int dummy_set_vf_rss_query_en(struct net_device *dev, int vf, bool val) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].rss_query_enabled = val; + + return 0; +} + +static int 
dummy_set_vf_trust(struct net_device *dev, int vf, bool val) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].trusted = val; + + return 0; +} + +static int dummy_get_vf_config(struct net_device *dev, + int vf, struct ifla_vf_info *ivi) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + ivi->vf = vf; + memcpy(&ivi->mac, priv->vfinfo[vf].vf_mac, ETH_ALEN); + ivi->vlan = priv->vfinfo[vf].pf_vlan; + ivi->qos = priv->vfinfo[vf].pf_qos; + ivi->spoofchk = priv->vfinfo[vf].spoofchk_enabled; + ivi->linkstate = priv->vfinfo[vf].link_state; + ivi->min_tx_rate = priv->vfinfo[vf].min_tx_rate; + ivi->max_tx_rate = priv->vfinfo[vf].max_tx_rate; + ivi->rss_query_en = priv->vfinfo[vf].rss_query_enabled; + ivi->trusted = priv->vfinfo[vf].trusted; + ivi->vlan_proto = priv->vfinfo[vf].vlan_proto; + + return 0; +} + +static int dummy_set_vf_link_state(struct net_device *dev, int vf, int state) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].link_state = state; + + return 0; +} + static const struct net_device_ops dummy_netdev_ops = { .ndo_init = dummy_dev_init, .ndo_uninit = dummy_dev_uninit, @@ -120,6 +287,14 @@ static const struct net_device_ops dummy_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_get_stats64 = dummy_get_stats64, .ndo_change_carrier = dummy_change_carrier, + .ndo_set_vf_mac = dummy_set_vf_mac, + .ndo_set_vf_vlan = dummy_set_vf_vlan, + .ndo_set_vf_rate = dummy_set_vf_rate, + .ndo_set_vf_spoofchk = dummy_set_vf_spoofchk, + .ndo_set_vf_trust = dummy_set_vf_trust, + .ndo_get_vf_config = dummy_get_vf_config, + .ndo_set_vf_link_state = dummy_set_vf_link_state, + .ndo_set_vf_rss_query_en = dummy_set_vf_rss_query_en, }; static void dummy_get_drvinfo(struct net_device *dev, @@ -133,6 +308,14 @@ static const struct ethtool_ops dummy_ethtool_ops = { .get_drvinfo = dummy_get_drvinfo, }; +static void dummy_free_netdev(struct net_device *dev) +{ + struct dummy_priv *priv = netdev_priv(dev); + + kfree(priv->vfinfo); + free_netdev(dev); +} + static void dummy_setup(struct net_device *dev) { ether_setup(dev); @@ -140,7 +323,7 @@ static void dummy_setup(struct net_device *dev) /* Initialize the device structure. */ dev->netdev_ops = &dummy_netdev_ops; dev->ethtool_ops = &dummy_ethtool_ops; - dev->destructor = free_netdev; + dev->destructor = dummy_free_netdev; /* Fill in device structure with ethernet-generic values. 
*/ dev->flags |= IFF_NOARP; @@ -171,6 +354,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[]) static struct rtnl_link_ops dummy_link_ops __read_mostly = { .kind = DRV_NAME, + .priv_size = sizeof(struct dummy_priv), .setup = dummy_setup, .validate = dummy_validate, }; @@ -179,12 +363,16 @@ static struct rtnl_link_ops dummy_link_ops __read_mostly = { module_param(numdummies, int, 0); MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices"); +module_param(num_vfs, int, 0); +MODULE_PARM_DESC(num_vfs, "Number of dummy VFs per dummy device"); + static int __init dummy_init_one(void) { struct net_device *dev_dummy; int err; - dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_UNKNOWN, dummy_setup); + dev_dummy = alloc_netdev(sizeof(struct dummy_priv), + "dummy%d", NET_NAME_UNKNOWN, dummy_setup); if (!dev_dummy) return -ENOMEM; @@ -203,6 +391,21 @@ static int __init dummy_init_module(void) { int i, err = 0; + if (num_vfs) { + err = bus_register(&dummy_bus); + if (err < 0) { + pr_err("registering dummy bus failed\n"); + return err; + } + + err = device_register(&dummy_parent); + if (err < 0) { + pr_err("registering dummy parent device failed\n"); + bus_unregister(&dummy_bus); + return err; + } + } + rtnl_lock(); err = __rtnl_link_register(&dummy_link_ops); if (err < 0) @@ -218,12 +421,22 @@ static int __init dummy_init_module(void) out: rtnl_unlock(); + if (err && num_vfs) { + device_unregister(&dummy_parent); + bus_unregister(&dummy_bus); + } + return err; } static void __exit dummy_cleanup_module(void) { rtnl_link_unregister(&dummy_link_ops); + + if (num_vfs) { + device_unregister(&dummy_parent); + bus_unregister(&dummy_bus); + } } module_init(dummy_init_module); diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 1986ad17950a..084a6d58543a 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -1753,7 +1753,7 @@ typhoon_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); iowrite32(TYPHOON_INTR_NONE, tp->ioaddr + TYPHOON_REG_INTR_MASK); typhoon_post_pci_writes(tp->ioaddr); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index e4c28fed61d5..8c08f9deef92 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -29,6 +29,7 @@ source "drivers/net/ethernet/amazon/Kconfig" source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/apm/Kconfig" source "drivers/net/ethernet/apple/Kconfig" +source "drivers/net/ethernet/aquantia/Kconfig" source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" source "drivers/net/ethernet/aurora/Kconfig" @@ -170,7 +171,6 @@ source "drivers/net/ethernet/sgi/Kconfig" source "drivers/net/ethernet/smsc/Kconfig" source "drivers/net/ethernet/stmicro/Kconfig" source "drivers/net/ethernet/sun/Kconfig" -source "drivers/net/ethernet/synopsys/Kconfig" source "drivers/net/ethernet/tehuti/Kconfig" source "drivers/net/ethernet/ti/Kconfig" source "drivers/net/ethernet/tile/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 24330f4885a9..26dce5bf2c18 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_NET_VENDOR_AMAZON) += amazon/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_XGENE) += apm/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ +obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/ obj-$(CONFIG_NET_VENDOR_ARC) += 
arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/ @@ -81,7 +82,6 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/ obj-$(CONFIG_NET_VENDOR_SUN) += sun/ -obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/ obj-$(CONFIG_NET_VENDOR_TI) += ti/ obj-$(CONFIG_TILE_NET) += tile/ diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index 88164529b52a..a81731303730 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1274,7 +1274,7 @@ static int bfin_mac_poll(struct napi_struct *napi, int budget) } if (i < budget) { - napi_complete(napi); + napi_complete_done(napi, i); if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags)) enable_irq(IRQ_MAC_RX); } diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 831bab352f8e..87a11b9f0ea5 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3575,7 +3575,7 @@ static int et131x_poll(struct napi_struct *napi, int budget) et131x_handle_send_pkts(adapter); if (work_done < budget) { - napi_complete(&adapter->napi); + napi_complete_done(&adapter->napi, work_done); et131x_enable_interrupts(adapter); } diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 25864bff25ee..527908c7e384 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -513,7 +513,7 @@ static int tse_poll(struct napi_struct *napi, int budget) if (rxcomplete < budget) { - napi_complete(napi); + napi_complete_done(napi, rxcomplete); netdev_dbg(priv->dev, "NAPI Complete, did %d packets with budget %d\n", diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 5b7ba25e0065..8a280e7d66bd 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -891,6 +891,8 @@ #define PCS_V1_WINDOW_SELECT 0x03fc #define PCS_V2_WINDOW_DEF 0x9060 #define PCS_V2_WINDOW_SELECT 0x9064 +#define PCS_V2_RV_WINDOW_DEF 0x1060 +#define PCS_V2_RV_WINDOW_SELECT 0x1064 /* PCS register entry bit positions and sizes */ #define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index aaf0350076a9..a7d16db5c4b2 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); spin_lock_irqsave(&pdata->xpcs_lock, flags); - XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index); + XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); mmd_data = XPCS16_IOREAD(pdata, offset); spin_unlock_irqrestore(&pdata->xpcs_lock, flags); @@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); spin_lock_irqsave(&pdata->xpcs_lock, flags); - XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index); + XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); XPCS16_IOWRITE(pdata, offset, mmd_data); spin_unlock_irqrestore(&pdata->xpcs_lock, flags); } @@ -3407,8 +3407,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata) /* Flush Tx queues */ ret = 
xgbe_flush_tx_queues(pdata); - if (ret) + if (ret) { + netdev_err(pdata->netdev, "error flushing TX queues\n"); return ret; + } /* * Initialize DMA related features diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index f8648e4dbca3..3aa457c8ca21 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1070,7 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata) DBGPR("-->xgbe_start\n"); - hw_if->init(pdata); + ret = hw_if->init(pdata); + if (ret) + return ret; xgbe_napi_enable(pdata, 1); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index e76b7f65b805..c2730f15bd8b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -265,6 +265,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct xgbe_prv_data *pdata; struct device *dev = &pdev->dev; void __iomem * const *iomap_table; + struct pci_dev *rdev; unsigned int ma_lo, ma_hi; unsigned int reg; int bar_mask; @@ -326,8 +327,20 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (netif_msg_probe(pdata)) dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs); + /* Set the PCS indirect addressing definition registers */ + rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); + if (rdev && + (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) { + pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT; + } else { + pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; + } + pci_dev_put(rdev); + /* Configure the PCS indirect addressing support */ - reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF); + reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg); pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET); pdata->xpcs_window <<= 6; pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index f52a9bd05bac..00108815b55e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -955,6 +955,8 @@ struct xgbe_prv_data { /* XPCS indirect addressing lock */ spinlock_t xpcs_lock; + unsigned int xpcs_window_def_reg; + unsigned int xpcs_window_sel_reg; unsigned int xpcs_window; unsigned int xpcs_window_size; unsigned int xpcs_window_mask; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index de59db6c5841..ab43c52723df 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -840,7 +840,7 @@ static int xgene_enet_napi(struct napi_struct *napi, const int budget) processed = xgene_enet_process_ring(ring, budget); if (processed != budget) { - napi_complete(napi); + napi_complete_done(napi, processed); enable_irq(ring->irq); } diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig new file mode 100644 index 000000000000..cdf78e069a39 --- /dev/null +++ b/drivers/net/ethernet/aquantia/Kconfig @@ -0,0 +1,24 @@ +# +# aQuantia device configuration +# + +config NET_VENDOR_AQUANTIA + bool "aQuantia devices" + default y + ---help--- + Set this to y if you have an Ethernet network card that uses the aQuantia + AQC107/AQC108 chipset. 
+ + This option does not build any drivers; it causes the aQuantia + drivers that can be built to appear in the list of Ethernet drivers. + + +if NET_VENDOR_AQUANTIA + +config AQTION + tristate "aQuantia AQtion(tm) Support" + depends on PCI && X86_64 + ---help--- + This enables support for the aQuantia AQtion(tm) Ethernet card. + +endif # NET_VENDOR_AQUANTIA diff --git a/drivers/net/ethernet/aquantia/Makefile b/drivers/net/ethernet/aquantia/Makefile new file mode 100644 index 000000000000..4f4897b689b2 --- /dev/null +++ b/drivers/net/ethernet/aquantia/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the aQuantia device drivers. +# + +obj-$(CONFIG_AQTION) += atlantic/ diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile new file mode 100644 index 000000000000..e4ae696920ef --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/Makefile @@ -0,0 +1,42 @@ +################################################################################ +# +# aQuantia Ethernet Controller AQtion Linux Driver +# Copyright(c) 2014-2017 aQuantia Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see <http://www.gnu.org/licenses/>. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: <rdc-drv@aquantia.com> +# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA +# +################################################################################ + +# +# Makefile for the AQtion(tm) Ethernet driver +# + +obj-$(CONFIG_AQTION) += atlantic.o + +atlantic-objs := aq_main.o \ + aq_nic.o \ + aq_pci_func.o \ + aq_vec.o \ + aq_ring.o \ + aq_hw_utils.o \ + aq_ethtool.o \ + hw_atl/hw_atl_a0.o \ + hw_atl/hw_atl_b0.o \ + hw_atl/hw_atl_utils.o \ + hw_atl/hw_atl_llh.o diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h new file mode 100644 index 000000000000..5f99237a9d52 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -0,0 +1,77 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_cfg.h: Definition of configuration parameters and constants. 
*/ + +#ifndef AQ_CFG_H +#define AQ_CFG_H + +#define AQ_CFG_VECS_DEF 4U +#define AQ_CFG_TCS_DEF 1U + +#define AQ_CFG_TXDS_DEF 4096U +#define AQ_CFG_RXDS_DEF 1024U + +#define AQ_CFG_IS_POLLING_DEF 0U + +#define AQ_CFG_FORCE_LEGACY_INT 0U + +#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U +#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU +#define AQ_CFG_IRQ_MASK 0x1FFU + +#define AQ_CFG_VECS_MAX 8U +#define AQ_CFG_TCS_MAX 8U + +#define AQ_CFG_TX_FRAME_MAX (16U * 1024U) +#define AQ_CFG_RX_FRAME_MAX (4U * 1024U) + +/* LRO */ +#define AQ_CFG_IS_LRO_DEF 1U + +/* RSS */ +#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 128U +#define AQ_CFG_RSS_HASHKEY_SIZE 320U + +#define AQ_CFG_IS_RSS_DEF 1U +#define AQ_CFG_NUM_RSS_QUEUES_DEF AQ_CFG_VECS_DEF +#define AQ_CFG_RSS_BASE_CPU_NUM_DEF 0U + +#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U +#define AQ_CFG_PCI_FUNC_PORTS 2U + +#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ) +#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ)) + +#define AQ_CFG_SKB_FRAGS_MAX 32U + +#define AQ_CFG_NAPI_WEIGHT 64U + +#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U + +/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/ + +#define AQ_CFG_FC_MODE 3U + +#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */ + +#define AQ_CFG_IS_AUTONEG_DEF 1U +#define AQ_CFG_MTU_DEF 1514U + +#define AQ_CFG_LOCK_TRYS 100U + +#define AQ_CFG_DRV_AUTHOR "aQuantia" +#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver" +#define AQ_CFG_DRV_NAME "aquantia" +#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\ + __stringify(NIC_MINOR_DRIVER_VERSION)"."\ + __stringify(NIC_BUILD_DRIVER_VERSION)"."\ + __stringify(NIC_REVISION_DRIVER_VERSION) + +#endif /* AQ_CFG_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h new file mode 100644 index 000000000000..9eb5e222a234 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -0,0 +1,23 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_common.h: Basic includes for all files in the project. */ + +#ifndef AQ_COMMON_H +#define AQ_COMMON_H + +#include <linux/etherdevice.h> +#include <linux/pci.h> + +#include "ver.h" +#include "aq_nic.h" +#include "aq_cfg.h" +#include "aq_utils.h" + +#endif /* AQ_COMMON_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c new file mode 100644 index 000000000000..a761e91471df --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -0,0 +1,262 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ethtool.c: Definition of ethtool related functions. 
*/ + +#include "aq_ethtool.h" +#include "aq_nic.h" + +static void aq_ethtool_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *p) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 regs_count = aq_nic_get_regs_count(aq_nic); + + memset(p, 0, regs_count * sizeof(u32)); + aq_nic_get_regs(aq_nic, regs, p); +} + +static int aq_ethtool_get_regs_len(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 regs_count = aq_nic_get_regs_count(aq_nic); + + return regs_count * sizeof(u32); +} + +static u32 aq_ethtool_get_link(struct net_device *ndev) +{ + return ethtool_op_get_link(ndev); +} + +static int aq_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + aq_nic_get_link_ksettings(aq_nic, cmd); + cmd->base.speed = netif_carrier_ok(ndev) ? + aq_nic_get_link_speed(aq_nic) : 0U; + + return 0; +} + +static int +aq_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + return aq_nic_set_link_ksettings(aq_nic, cmd); +} + +/* here "5U" is the number of queue[#] stats lines (InPackets+...+InErrors) */ +static const unsigned int aq_ethtool_stat_queue_lines = 5U; +static const unsigned int aq_ethtool_stat_queue_chars = + 5U * ETH_GSTRING_LEN; +static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { + "InPackets", + "InUCast", + "InMCast", + "InBCast", + "InErrors", + "OutPackets", + "OutUCast", + "OutMCast", + "OutBCast", + "InUCastOctects", + "OutUCastOctects", + "InMCastOctects", + "OutMCastOctects", + "InBCastOctects", + "OutBCastOctects", + "InOctects", + "OutOctects", + "InPacketsDma", + "OutPacketsDma", + "InOctetsDma", + "OutOctetsDma", + "InDroppedDma", + "Queue[0] InPackets", + "Queue[0] OutPackets", + "Queue[0] InJumboPackets", + "Queue[0] InLroPackets", + "Queue[0] InErrors", + "Queue[1] InPackets", + "Queue[1] OutPackets", + "Queue[1] InJumboPackets", + "Queue[1] InLroPackets", + "Queue[1] InErrors", + "Queue[2] InPackets", + "Queue[2] OutPackets", + "Queue[2] InJumboPackets", + "Queue[2] InLroPackets", + "Queue[2] InErrors", + "Queue[3] InPackets", + "Queue[3] OutPackets", + "Queue[3] InJumboPackets", + "Queue[3] InLroPackets", + "Queue[3] InErrors", + "Queue[4] InPackets", + "Queue[4] OutPackets", + "Queue[4] InJumboPackets", + "Queue[4] InLroPackets", + "Queue[4] InErrors", + "Queue[5] InPackets", + "Queue[5] OutPackets", + "Queue[5] InJumboPackets", + "Queue[5] InLroPackets", + "Queue[5] InErrors", + "Queue[6] InPackets", + "Queue[6] OutPackets", + "Queue[6] InJumboPackets", + "Queue[6] InLroPackets", + "Queue[6] InErrors", + "Queue[7] InPackets", + "Queue[7] OutPackets", + "Queue[7] InJumboPackets", + "Queue[7] InLroPackets", + "Queue[7] InErrors", +}; + +static void aq_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + +/* ASSERT: Need to add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */ + BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8); + memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64)); + aq_nic_get_stats(aq_nic, data); +} + +static void aq_ethtool_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *drvinfo) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct pci_dev *pdev = to_pci_dev(ndev->dev.parent); + u32 firmware_version = aq_nic_get_fw_version(aq_nic); + u32 regs_count =
aq_nic_get_regs_count(aq_nic); + + strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver)); + strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version)); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%u.%u.%u", firmware_version >> 24, + (firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU); + + strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "", + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) - + (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = regs_count; + drvinfo->eedump_len = 0; +} + +static void aq_ethtool_get_strings(struct net_device *ndev, + u32 stringset, u8 *data) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + if (stringset == ETH_SS_STATS) + memcpy(data, *aq_ethtool_stat_names, + sizeof(aq_ethtool_stat_names) - + (AQ_CFG_VECS_MAX - cfg->vecs) * + aq_ethtool_stat_queue_chars); +} + +static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) +{ + int ret = 0; + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + switch (stringset) { + case ETH_SS_STATS: + ret = ARRAY_SIZE(aq_ethtool_stat_names) - + (AQ_CFG_VECS_MAX - cfg->vecs) * + aq_ethtool_stat_queue_lines; + break; + default: + ret = -EOPNOTSUPP; + } + return ret; +} + +static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev) +{ + return AQ_CFG_RSS_INDIRECTION_TABLE_MAX; +} + +static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + return sizeof(cfg->aq_rss.hash_secret_key); +} + +static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + unsigned int i = 0U; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ + if (indir) { + for (i = 0; i < AQ_CFG_RSS_INDIRECTION_TABLE_MAX; i++) + indir[i] = cfg->aq_rss.indirection_table[i]; + } + if (key) + memcpy(key, cfg->aq_rss.hash_secret_key, + sizeof(cfg->aq_rss.hash_secret_key)); + return 0; +} + +static int aq_ethtool_get_rxnfc(struct net_device *ndev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = cfg->vecs; + break; + + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +const struct ethtool_ops aq_ethtool_ops = { + .get_link = aq_ethtool_get_link, + .get_regs_len = aq_ethtool_get_regs_len, + .get_regs = aq_ethtool_get_regs, + .get_drvinfo = aq_ethtool_get_drvinfo, + .get_strings = aq_ethtool_get_strings, + .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, + .get_rxfh_key_size = aq_ethtool_get_rss_key_size, + .get_rxfh = aq_ethtool_get_rss, + .get_rxnfc = aq_ethtool_get_rxnfc, + .get_sset_count = aq_ethtool_get_sset_count, + .get_ethtool_stats = aq_ethtool_stats, + .get_link_ksettings = aq_ethtool_get_link_ksettings, + .set_link_ksettings = aq_ethtool_set_link_ksettings, +}; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h new file mode 100644 index 000000000000..21c126eeb5eb --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h @@ -0,0 +1,19 @@ +/* + * 
aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ethtool.h: Declaration of ethtool related functions. */ + +#ifndef AQ_ETHTOOL_H +#define AQ_ETHTOOL_H + +#include "aq_common.h" + +extern const struct ethtool_ops aq_ethtool_ops; + +#endif /* AQ_ETHTOOL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h new file mode 100644 index 000000000000..fce0fd3f23ff --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -0,0 +1,177 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific + * functions. + */ + +#ifndef AQ_HW_H +#define AQ_HW_H + +#include "aq_common.h" + +/* NIC H/W capabilities */ +struct aq_hw_caps_s { + u64 hw_features; + u64 link_speed_msk; + unsigned int hw_priv_flags; + u32 rxds; + u32 txds; + u32 txhwb_alignment; + u32 irq_mask; + u32 vecs; + u32 mtu; + u32 mac_regs_count; + u8 ports; + u8 msix_irqs; + u8 tcs; + u8 rxd_alignment; + u8 rxd_size; + u8 txd_alignment; + u8 txd_size; + u8 tx_rings; + u8 rx_rings; + bool flow_control; + bool is_64_dma; + u32 fw_ver_expected; +}; + +struct aq_hw_link_status_s { + unsigned int mbps; +}; + +#define AQ_HW_IRQ_INVALID 0U +#define AQ_HW_IRQ_LEGACY 1U +#define AQ_HW_IRQ_MSI 2U +#define AQ_HW_IRQ_MSIX 3U + +#define AQ_HW_POWER_STATE_D0 0U +#define AQ_HW_POWER_STATE_D3 3U + +#define AQ_HW_FLAG_STARTED 0x00000004U +#define AQ_HW_FLAG_STOPPING 0x00000008U +#define AQ_HW_FLAG_RESETTING 0x00000010U +#define AQ_HW_FLAG_CLOSING 0x00000020U +#define AQ_HW_LINK_DOWN 0x04000000U +#define AQ_HW_FLAG_ERR_UNPLUG 0x40000000U +#define AQ_HW_FLAG_ERR_HW 0x80000000U + +#define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG) + +struct aq_hw_s { + struct aq_obj_s header; + struct aq_nic_cfg_s *aq_nic_cfg; + struct aq_pci_func_s *aq_pci_func; + void __iomem *mmio; + unsigned int not_ff_addr; + struct aq_hw_link_status_s aq_link_status; +}; + +struct aq_ring_s; +struct aq_ring_param_s; +struct aq_nic_cfg_s; +struct sk_buff; + +struct aq_hw_ops { + struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func, + unsigned int port, struct aq_hw_ops *ops); + + void (*destroy)(struct aq_hw_s *self); + + int (*get_hw_caps)(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps); + + int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, + unsigned int frags); + + int (*hw_ring_rx_receive)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_rx_fill)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, + unsigned int sw_tail_old); + + int (*hw_ring_tx_head_update)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_get_mac_permanent)(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u8 *mac); + + int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr); + + int (*hw_get_link_status)(struct aq_hw_s *self, + struct aq_hw_link_status_s *link_status); + + int (*hw_set_link_speed)(struct aq_hw_s
*self, u32 speed); + + int (*hw_reset)(struct aq_hw_s *self); + + int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg, + u8 *mac_addr); + + int (*hw_start)(struct aq_hw_s *self); + + int (*hw_stop)(struct aq_hw_s *self); + + int (*hw_ring_tx_init)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param); + + int (*hw_ring_tx_start)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_tx_stop)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_rx_init)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param); + + int (*hw_ring_rx_start)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_rx_stop)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_irq_enable)(struct aq_hw_s *self, u64 mask); + + int (*hw_irq_disable)(struct aq_hw_s *self, u64 mask); + + int (*hw_irq_read)(struct aq_hw_s *self, u64 *mask); + + int (*hw_packet_filter_set)(struct aq_hw_s *self, + unsigned int packet_filter); + + int (*hw_multicast_list_set)(struct aq_hw_s *self, + u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX] + [ETH_ALEN], + u32 count); + + int (*hw_interrupt_moderation_set)(struct aq_hw_s *self, + bool itr_enabled); + + int (*hw_rss_set)(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params); + + int (*hw_rss_hash_set)(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params); + + int (*hw_get_regs)(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); + + int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, + unsigned int *p_count); + + int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); + + int (*hw_deinit)(struct aq_hw_s *self); + + int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state); +}; + +#endif /* AQ_HW_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c new file mode 100644 index 000000000000..5f13465995f6 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c @@ -0,0 +1,68 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_hw_utils.c: Definitions of helper functions used across + * hardware layer. 
+ */ + +#include "aq_hw_utils.h" +#include "aq_hw.h" + +void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, + u32 shift, u32 val) +{ + if (msk ^ ~0) { + u32 reg_old, reg_new; + + reg_old = aq_hw_read_reg(aq_hw, addr); + reg_new = (reg_old & (~msk)) | (val << shift); + + if (reg_old != reg_new) + aq_hw_write_reg(aq_hw, addr, reg_new); + } else { + aq_hw_write_reg(aq_hw, addr, val); + } +} + +u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift) +{ + return ((aq_hw_read_reg(aq_hw, addr) & msk) >> shift); +} + +u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg) +{ + u32 value = readl(hw->mmio + reg); + + if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr)) + aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG); + + return value; +} + +void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value) +{ + writel(value, hw->mmio + reg); +} + +int aq_hw_err_from_flags(struct aq_hw_s *hw) +{ + int err = 0; + + if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + err = -ENXIO; + goto err_exit; + } + if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) { + err = -EIO; + goto err_exit; + } + +err_exit: + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h new file mode 100644 index 000000000000..03b72ddbffb9 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h @@ -0,0 +1,47 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_hw_utils.h: Declaration of helper functions used across hardware + * layer. + */ + +#ifndef AQ_HW_UTILS_H +#define AQ_HW_UTILS_H + +#include "aq_common.h" + +#ifndef HIDWORD +#define LODWORD(_qw) ((u32)(_qw)) +#define HIDWORD(_qw) ((u32)(((_qw) >> 32) & 0xffffffff)) +#endif + +#define AQ_HW_SLEEP(_US_) mdelay(_US_) + +#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \ +do { \ + unsigned int AQ_HW_WAIT_FOR_i; \ + for (AQ_HW_WAIT_FOR_i = _N_; (!(_B_)) && (AQ_HW_WAIT_FOR_i);\ + --AQ_HW_WAIT_FOR_i) {\ + udelay(_US_); \ + } \ + if (!AQ_HW_WAIT_FOR_i) {\ + err = -ETIME; \ + } \ +} while (0) + +struct aq_hw_s; + +void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, + u32 shift, u32 val); +u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift); +u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg); +void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value); +int aq_hw_err_from_flags(struct aq_hw_s *hw); + +#endif /* AQ_HW_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c new file mode 100644 index 000000000000..c17c70adef0d --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -0,0 +1,273 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_main.c: Main file for aQuantia Linux driver. 
*/ + +#include "aq_main.h" +#include "aq_nic.h" +#include "aq_pci_func.h" +#include "aq_ethtool.h" +#include "hw_atl/hw_atl_a0.h" +#include "hw_atl/hw_atl_b0.h" + +#include <linux/netdevice.h> +#include <linux/module.h> + +static const struct pci_device_id aq_pci_tbl[] = { + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), }, + {} +}; + +MODULE_DEVICE_TABLE(pci, aq_pci_tbl); + +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(AQ_CFG_DRV_VERSION); +MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR); +MODULE_DESCRIPTION(AQ_CFG_DRV_DESC); + +static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev) +{ + struct aq_hw_ops *ops = NULL; + + ops = hw_atl_a0_get_ops_by_id(pdev); + if (!ops) + ops = hw_atl_b0_get_ops_by_id(pdev); + + return ops; +} + +static int aq_ndev_open(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = NULL; + int err = 0; + + aq_nic = aq_nic_alloc_hot(ndev); + if (!aq_nic) { + err = -ENOMEM; + goto err_exit; + } + err = aq_nic_init(aq_nic); + if (err < 0) + goto err_exit; + err = aq_nic_start(aq_nic); + if (err < 0) + goto err_exit; + +err_exit: + if (err < 0) + aq_nic_deinit(aq_nic); + return err; +} + +static int aq_ndev_close(struct net_device *ndev) +{ + int err = 0; + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + err = aq_nic_stop(aq_nic); + if (err < 0) + goto err_exit; + aq_nic_deinit(aq_nic); + aq_nic_free_hot_resources(aq_nic); + +err_exit: + return err; +} + +static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + err = aq_nic_xmit(aq_nic, skb); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + if (new_mtu == ndev->mtu) { + err = 0; + goto err_exit; + } + if (new_mtu < 68) { + err = -EINVAL; + goto err_exit; + } + err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN); + if (err < 0) + goto err_exit; + ndev->mtu = new_mtu; + + if (netif_running(ndev)) { + aq_ndev_close(ndev); + aq_ndev_open(ndev); + } + +err_exit: + return err; +} + +static int aq_ndev_set_features(struct net_device *ndev, + netdev_features_t features) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic); + bool is_lro = false; + + if (aq_cfg->hw_features & NETIF_F_LRO) { + is_lro = features & NETIF_F_LRO; + + if (aq_cfg->is_lro != is_lro) { + aq_cfg->is_lro = is_lro; + + if (netif_running(ndev)) { + aq_ndev_close(ndev); + aq_ndev_open(ndev); + } + } + } + + return 0; +} + +static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + err = eth_mac_addr(ndev, addr); + if (err < 0) + goto err_exit; + err = aq_nic_set_mac(aq_nic, ndev); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static void aq_ndev_set_multicast_settings(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + err = aq_nic_set_packet_filter(aq_nic, ndev->flags); + if (err < 0) + goto err_exit; + + if (netdev_mc_count(ndev)) { + err = aq_nic_set_multicast_list(aq_nic, ndev); + if (err < 0) + goto err_exit; + } + +err_exit:; +} + +static const struct net_device_ops aq_ndev_ops = { + .ndo_open = aq_ndev_open, + 
.ndo_stop = aq_ndev_close, + .ndo_start_xmit = aq_ndev_start_xmit, + .ndo_set_rx_mode = aq_ndev_set_multicast_settings, + .ndo_change_mtu = aq_ndev_change_mtu, + .ndo_set_mac_address = aq_ndev_set_mac_address, + .ndo_set_features = aq_ndev_set_features +}; + +static int aq_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + struct aq_hw_ops *aq_hw_ops = NULL; + struct aq_pci_func_s *aq_pci_func = NULL; + int err = 0; + + err = pci_enable_device(pdev); + if (err < 0) + goto err_exit; + aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev); + aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev, + &aq_ndev_ops, &aq_ethtool_ops); + if (!aq_pci_func) { + err = -ENOMEM; + goto err_exit; + } + err = aq_pci_func_init(aq_pci_func); + if (err < 0) + goto err_exit; + +err_exit: + if (err < 0) { + if (aq_pci_func) + aq_pci_func_free(aq_pci_func); + } + return err; +} + +static void aq_pci_remove(struct pci_dev *pdev) +{ + struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); + + aq_pci_func_deinit(aq_pci_func); + aq_pci_func_free(aq_pci_func); +} + +static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg) +{ + struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); + + return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg); +} + +static int aq_pci_resume(struct pci_dev *pdev) +{ + struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); + pm_message_t pm_msg = PMSG_RESTORE; + + return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg); +} + +static struct pci_driver aq_pci_ops = { + .name = AQ_CFG_DRV_NAME, + .id_table = aq_pci_tbl, + .probe = aq_pci_probe, + .remove = aq_pci_remove, + .suspend = aq_pci_suspend, + .resume = aq_pci_resume, +}; + +static int __init aq_module_init(void) +{ + int err = 0; + + err = pci_register_driver(&aq_pci_ops); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static void __exit aq_module_exit(void) +{ + pci_unregister_driver(&aq_pci_ops); +} + +module_init(aq_module_init); +module_exit(aq_module_exit); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h new file mode 100644 index 000000000000..9748e7e575e0 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h @@ -0,0 +1,17 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_main.h: Main file for aQuantia Linux driver. */ + +#ifndef AQ_MAIN_H +#define AQ_MAIN_H + +#include "aq_common.h" + +#endif /* AQ_MAIN_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c new file mode 100644 index 000000000000..bed25abd2889 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -0,0 +1,952 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_nic.c: Definition of common code for NIC. 
*/ + +#include "aq_nic.h" +#include "aq_ring.h" +#include "aq_vec.h" +#include "aq_hw.h" +#include "aq_pci_func.h" +#include "aq_nic_internal.h" + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/timer.h> +#include <linux/cpu.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <net/ip.h> + +static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + struct aq_rss_parameters *rss_params = &cfg->aq_rss; + int i = 0; + + static u8 rss_key[40] = { + 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d, + 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18, + 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8, + 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70, + 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c + }; + + rss_params->hash_secret_key_size = sizeof(rss_key); + memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key)); + rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX; + + for (i = rss_params->indirection_table_size; i--;) + rss_params->indirection_table[i] = i & (num_rss_queues - 1); +} + +/* Fills aq_nic_cfg with valid defaults */ +static void aq_nic_cfg_init_defaults(struct aq_nic_s *self) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + + cfg->aq_hw_caps = &self->aq_hw_caps; + + cfg->vecs = AQ_CFG_VECS_DEF; + cfg->tcs = AQ_CFG_TCS_DEF; + + cfg->rxds = AQ_CFG_RXDS_DEF; + cfg->txds = AQ_CFG_TXDS_DEF; + + cfg->is_polling = AQ_CFG_IS_POLLING_DEF; + + cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF; + cfg->itr = cfg->is_interrupt_moderation ? + AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U; + + cfg->is_rss = AQ_CFG_IS_RSS_DEF; + cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; + cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF; + cfg->flow_control = AQ_CFG_FC_MODE; + + cfg->mtu = AQ_CFG_MTU_DEF; + cfg->link_speed_msk = AQ_CFG_SPEED_MSK; + cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF; + + cfg->is_lro = AQ_CFG_IS_LRO_DEF; + + cfg->vlan_id = 0U; + + aq_nic_rss_init(self, cfg->num_rss_queues); +} + +/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */ +int aq_nic_cfg_start(struct aq_nic_s *self) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + + /*descriptors */ + cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds); + cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds); + + /*rss rings */ + cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs); + cfg->vecs = min(cfg->vecs, num_online_cpus()); + /* cfg->vecs should be power of 2 for RSS */ + if (cfg->vecs >= 8U) + cfg->vecs = 8U; + else if (cfg->vecs >= 4U) + cfg->vecs = 4U; + else if (cfg->vecs >= 2U) + cfg->vecs = 2U; + else + cfg->vecs = 1U; + + cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func); + + if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) || + (self->aq_hw_caps.vecs == 1U) || + (cfg->vecs == 1U)) { + cfg->is_rss = 0U; + cfg->vecs = 1U; + } + + cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk; + cfg->hw_features = self->aq_hw_caps.hw_features; + return 0; +} + +static void aq_nic_service_timer_cb(unsigned long param) +{ + struct aq_nic_s *self = (struct aq_nic_s *)param; + struct net_device *ndev = aq_nic_get_ndev(self); + int err = 0; + bool is_busy = false; + unsigned int i = 0U; + struct aq_hw_link_status_s link_status; + struct aq_ring_stats_rx_s stats_rx; + struct aq_ring_stats_tx_s stats_tx; + + atomic_inc(&self->header.busy_count); + is_busy = true; + if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) + goto err_exit; + + err = 
self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status); + if (err < 0) + goto err_exit; + + self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, + self->aq_nic_cfg.is_interrupt_moderation); + + if (memcmp(&link_status, &self->link_status, sizeof(link_status))) { + if (link_status.mbps) { + aq_utils_obj_set(&self->header.flags, + AQ_NIC_FLAG_STARTED); + aq_utils_obj_clear(&self->header.flags, + AQ_NIC_LINK_DOWN); + netif_carrier_on(self->ndev); + } else { + netif_carrier_off(self->ndev); + aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); + } + + self->link_status = link_status; + } + + memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); + memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); + for (i = AQ_DIMOF(self->aq_vec); i--;) { + if (self->aq_vec[i]) + aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx); + } + + ndev->stats.rx_packets = stats_rx.packets; + ndev->stats.rx_bytes = stats_rx.bytes; + ndev->stats.rx_errors = stats_rx.errors; + ndev->stats.tx_packets = stats_tx.packets; + ndev->stats.tx_bytes = stats_tx.bytes; + ndev->stats.tx_errors = stats_tx.errors; + +err_exit: + if (is_busy) + atomic_dec(&self->header.busy_count); + mod_timer(&self->service_timer, + jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); +} + +static void aq_nic_polling_timer_cb(unsigned long param) +{ + struct aq_nic_s *self = (struct aq_nic_s *)param; + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_isr(i, (void *)aq_vec); + + mod_timer(&self->polling_timer, jiffies + + AQ_CFG_POLLING_TIMER_INTERVAL); +} + +static struct net_device *aq_nic_ndev_alloc(void) +{ + return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX); +} + +struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, + const struct ethtool_ops *et_ops, + struct device *dev, + struct aq_pci_func_s *aq_pci_func, + unsigned int port, + const struct aq_hw_ops *aq_hw_ops) +{ + struct net_device *ndev = NULL; + struct aq_nic_s *self = NULL; + int err = 0; + + ndev = aq_nic_ndev_alloc(); + self = netdev_priv(ndev); + if (!self) { + err = -EINVAL; + goto err_exit; + } + + ndev->netdev_ops = ndev_ops; + ndev->ethtool_ops = et_ops; + + SET_NETDEV_DEV(ndev, dev); + + ndev->if_port = port; + self->ndev = ndev; + + self->aq_pci_func = aq_pci_func; + + self->aq_hw_ops = *aq_hw_ops; + self->port = (u8)port; + + self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, + &self->aq_hw_ops); + err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); + if (err < 0) + goto err_exit; + + aq_nic_cfg_init_defaults(self); + +err_exit: + if (err < 0) { + aq_nic_free_hot_resources(self); + self = NULL; + } + return self; +} + +int aq_nic_ndev_register(struct aq_nic_s *self) +{ + int err = 0; + unsigned int i = 0U; + + if (!self->ndev) { + err = -EINVAL; + goto err_exit; + } + err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw, + self->aq_nic_cfg.aq_hw_caps, + self->ndev->dev_addr); + if (err < 0) + goto err_exit; + +#if defined(AQ_CFG_MAC_ADDR_PERMANENT) + { + static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT; + + ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent); + } +#endif + err = register_netdev(self->ndev); + if (err < 0) + goto err_exit; + + self->is_ndev_registered = true; + netif_carrier_off(self->ndev); + + for (i = AQ_CFG_VECS_MAX; i--;) + aq_nic_ndev_queue_stop(self, i); + +err_exit: + return err; +} + +int aq_nic_ndev_init(struct aq_nic_s *self) +{ + struct 
aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps; + struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg; + + self->ndev->hw_features |= aq_hw_caps->hw_features; + self->ndev->features = aq_hw_caps->hw_features; + self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; + self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; + + return 0; +} + +void aq_nic_ndev_free(struct aq_nic_s *self) +{ + if (!self->ndev) + goto err_exit; + + if (self->is_ndev_registered) + unregister_netdev(self->ndev); + + if (self->aq_hw) + self->aq_hw_ops.destroy(self->aq_hw); + + free_netdev(self->ndev); + +err_exit:; +} + +struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev) +{ + struct aq_nic_s *self = NULL; + int err = 0; + + if (!ndev) { + err = -EINVAL; + goto err_exit; + } + self = netdev_priv(ndev); + + if (!self) { + err = -EINVAL; + goto err_exit; + } + if (netif_running(ndev)) { + unsigned int i; + + for (i = AQ_CFG_VECS_MAX; i--;) + netif_stop_subqueue(ndev, i); + } + + for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; + self->aq_vecs++) { + self->aq_vec[self->aq_vecs] = + aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg); + if (!self->aq_vec[self->aq_vecs]) { + err = -ENOMEM; + goto err_exit; + } + } + +err_exit: + if (err < 0) { + aq_nic_free_hot_resources(self); + self = NULL; + } + return self; +} + +void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx, + struct aq_ring_s *ring) +{ + self->aq_ring_tx[idx] = ring; +} + +struct device *aq_nic_get_dev(struct aq_nic_s *self) +{ + return self->ndev->dev.parent; +} + +struct net_device *aq_nic_get_ndev(struct aq_nic_s *self) +{ + return self->ndev; +} + +int aq_nic_init(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + int err = 0; + unsigned int i = 0U; + + self->power_state = AQ_HW_POWER_STATE_D0; + err = self->aq_hw_ops.hw_reset(self->aq_hw); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg, + aq_nic_get_ndev(self)->dev_addr); + if (err < 0) + goto err_exit; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw); + +err_exit: + return err; +} + +void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx) +{ + netif_start_subqueue(self->ndev, idx); +} + +void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx) +{ + netif_stop_subqueue(self->ndev, idx); +} + +int aq_nic_start(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + int err = 0; + unsigned int i = 0U; + + err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, + self->mc_list.ar, + self->mc_list.count); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, + self->packet_filter); + if (err < 0) + goto err_exit; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + err = aq_vec_start(aq_vec); + if (err < 0) + goto err_exit; + } + + err = self->aq_hw_ops.hw_start(self->aq_hw); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, + self->aq_nic_cfg.is_interrupt_moderation); + if (err < 0) + goto err_exit; + setup_timer(&self->service_timer, &aq_nic_service_timer_cb, + (unsigned long)self); + mod_timer(&self->service_timer, jiffies + + AQ_CFG_SERVICE_TIMER_INTERVAL); + + if (self->aq_nic_cfg.is_polling) { + setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb, + (unsigned long)self); + mod_timer(&self->polling_timer, jiffies + + 
AQ_CFG_POLLING_TIMER_INTERVAL); + } else { + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + err = aq_pci_func_alloc_irq(self->aq_pci_func, i, + self->ndev->name, aq_vec, + aq_vec_get_affinity_mask(aq_vec)); + if (err < 0) + goto err_exit; + } + + err = self->aq_hw_ops.hw_irq_enable(self->aq_hw, + AQ_CFG_IRQ_MASK); + if (err < 0) + goto err_exit; + } + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_nic_ndev_queue_start(self, i); + + err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); + if (err < 0) + goto err_exit; + + err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static unsigned int aq_nic_map_skb_frag(struct aq_nic_s *self, + struct sk_buff *skb, + struct aq_ring_buff_s *dx) +{ + unsigned int ret = 0U; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int frag_count = 0U; + + dx->flags = 0U; + dx->len = skb_headlen(skb); + dx->pa = dma_map_single(aq_nic_get_dev(self), skb->data, dx->len, + DMA_TO_DEVICE); + dx->len_pkt = skb->len; + dx->is_sop = 1U; + dx->is_mapped = 1U; + + ++ret; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + dx->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U; + dx->is_tcp_cso = + (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U; + dx->is_udp_cso = + (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U; + } + + for (; nr_frags--; ++frag_count) { + unsigned int frag_len; + dma_addr_t frag_pa; + skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; + + frag_len = skb_frag_size(frag); + + frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0, + frag_len, DMA_TO_DEVICE); + + while (frag_len > AQ_CFG_TX_FRAME_MAX) { + ++dx; + ++ret; + dx->flags = 0U; + dx->len = AQ_CFG_TX_FRAME_MAX; + dx->pa = frag_pa; + dx->is_mapped = 1U; + + frag_len -= AQ_CFG_TX_FRAME_MAX; + frag_pa += AQ_CFG_TX_FRAME_MAX; + } + + ++dx; + ++ret; + + dx->flags = 0U; + dx->len = frag_len; + dx->pa = frag_pa; + dx->is_mapped = 1U; + } + + dx->is_eop = 1U; + dx->skb = skb; + + return ret; +} + +static unsigned int aq_nic_map_skb_lso(struct aq_nic_s *self, + struct sk_buff *skb, + struct aq_ring_buff_s *dx) +{ + dx->flags = 0U; + dx->len_pkt = skb->len; + dx->len_l2 = ETH_HLEN; + dx->len_l3 = ip_hdrlen(skb); + dx->len_l4 = tcp_hdrlen(skb); + dx->mss = skb_shinfo(skb)->gso_size; + dx->is_txc = 1U; + return 1U; +} + +static unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, + struct aq_ring_buff_s *dx) +{ + unsigned int ret = 0U; + + if (unlikely(skb_is_gso(skb))) { + ret = aq_nic_map_skb_lso(self, skb, dx); + ++dx; + } + + ret += aq_nic_map_skb_frag(self, skb, dx); + + return ret; +} + +int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) +__releases(&ring->lock) +__acquires(&ring->lock) +{ + struct aq_ring_s *ring = NULL; + unsigned int frags = 0U; + unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; + unsigned int tc = 0U; + unsigned int trys = AQ_CFG_LOCK_TRYS; + int err = 0; + bool is_nic_in_bad_state; + bool is_busy = false; + struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX]; + + frags = skb_shinfo(skb)->nr_frags + 1; + + ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)]; + + atomic_inc(&self->header.busy_count); + is_busy = true; + + if (frags > AQ_CFG_SKB_FRAGS_MAX) { + dev_kfree_skb_any(skb); + goto err_exit; + } + + is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags, + AQ_NIC_FLAGS_IS_NOT_TX_READY) || + (aq_ring_avail_dx(ring) < 
+ AQ_CFG_SKB_FRAGS_MAX); + + if (is_nic_in_bad_state) { + aq_nic_ndev_queue_stop(self, ring->idx); + err = NETDEV_TX_BUSY; + goto err_exit; + } + + do { + if (spin_trylock(&ring->header.lock)) { + frags = aq_nic_map_skb(self, skb, &buffers[0]); + + aq_ring_tx_append_buffs(ring, &buffers[0], frags); + + err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw, + ring, frags); + if (err >= 0) { + if (aq_ring_avail_dx(ring) < + AQ_CFG_SKB_FRAGS_MAX + 1) + aq_nic_ndev_queue_stop(self, ring->idx); + } + spin_unlock(&ring->header.lock); + + if (err >= 0) { + ++ring->stats.tx.packets; + ring->stats.tx.bytes += skb->len; + } + break; + } + } while (--trys); + + if (!trys) { + err = NETDEV_TX_BUSY; + goto err_exit; + } + +err_exit: + if (is_busy) + atomic_dec(&self->header.busy_count); + return err; +} + +int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) +{ + int err = 0; + + err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags); + if (err < 0) + goto err_exit; + + self->packet_filter = flags; + +err_exit: + return err; +} + +int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) +{ + struct netdev_hw_addr *ha = NULL; + unsigned int i = 0U; + + self->mc_list.count = 0U; + + netdev_for_each_mc_addr(ha, ndev) { + ether_addr_copy(self->mc_list.ar[i++], ha->addr); + ++self->mc_list.count; + } + + return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, + self->mc_list.ar, + self->mc_list.count); +} + +int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) +{ + int err = 0; + + if (new_mtu > self->aq_hw_caps.mtu) { + err = -EINVAL; + goto err_exit; + } + self->aq_nic_cfg.mtu = new_mtu; + +err_exit: + return err; +} + +int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) +{ + return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr); +} + +unsigned int aq_nic_get_link_speed(struct aq_nic_s *self) +{ + return self->link_status.mbps; +} + +int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p) +{ + u32 *regs_buff = p; + int err = 0; + + regs->version = 1; + + err = self->aq_hw_ops.hw_get_regs(self->aq_hw, + &self->aq_hw_caps, regs_buff); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +int aq_nic_get_regs_count(struct aq_nic_s *self) +{ + return self->aq_hw_caps.mac_regs_count; +} + +void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) +{ + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + unsigned int count = 0U; + int err = 0; + + err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count); + if (err < 0) + goto err_exit; + + data += count; + count = 0U; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + data += count; + aq_vec_get_sw_stats(aq_vec, data, &count); + } + +err_exit:; + (void)err; +} + +void aq_nic_get_link_ksettings(struct aq_nic_s *self, + struct ethtool_link_ksettings *cmd) +{ + u32 supported, advertising; + + cmd->base.port = PORT_TP; + /* This driver supports only 10G capable adapters, so DUPLEX_FULL */ + cmd->base.duplex = DUPLEX_FULL; + cmd->base.autoneg = self->aq_nic_cfg.is_autoneg; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G) ? + ADVERTISED_10000baseT_Full : 0U; + supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G) ? 
+ ADVERTISED_1000baseT_Full : 0U; + supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M) ? + ADVERTISED_100baseT_Full : 0U; + supported |= self->aq_hw_caps.flow_control ? SUPPORTED_Pause : 0; + supported |= SUPPORTED_Autoneg; + supported |= SUPPORTED_TP; + + advertising = (self->aq_nic_cfg.is_autoneg) ? + ADVERTISED_Autoneg : 0U; + advertising |= + (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G) ? + ADVERTISED_10000baseT_Full : 0U; + advertising |= + (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G) ? + ADVERTISED_1000baseT_Full : 0U; + + advertising |= + (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M) ? + ADVERTISED_100baseT_Full : 0U; + advertising |= (self->aq_nic_cfg.flow_control) ? + ADVERTISED_Pause : 0U; + advertising |= ADVERTISED_TP; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); +} + +int aq_nic_set_link_ksettings(struct aq_nic_s *self, + const struct ethtool_link_ksettings *cmd) +{ + u32 speed = 0U; + u32 rate = 0U; + int err = 0; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + rate = self->aq_hw_caps.link_speed_msk; + self->aq_nic_cfg.is_autoneg = true; + } else { + speed = cmd->base.speed; + + switch (speed) { + case SPEED_100: + rate = AQ_NIC_RATE_100M; + break; + + case SPEED_1000: + rate = AQ_NIC_RATE_1G; + break; + + case SPEED_2500: + rate = AQ_NIC_RATE_2GS; + break; + + case SPEED_5000: + rate = AQ_NIC_RATE_5G; + break; + + case SPEED_10000: + rate = AQ_NIC_RATE_10G; + break; + + default: + err = -1; + goto err_exit; + break; + } + if (!(self->aq_hw_caps.link_speed_msk & rate)) { + err = -1; + goto err_exit; + } + + self->aq_nic_cfg.is_autoneg = false; + } + + err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate); + if (err < 0) + goto err_exit; + + self->aq_nic_cfg.link_speed_msk = rate; + +err_exit: + return err; +} + +struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self) +{ + return &self->aq_nic_cfg; +} + +u32 aq_nic_get_fw_version(struct aq_nic_s *self) +{ + u32 fw_version = 0U; + + self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version); + + return fw_version; +} + +int aq_nic_stop(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_nic_ndev_queue_stop(self, i); + + del_timer_sync(&self->service_timer); + + self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK); + + if (self->aq_nic_cfg.is_polling) + del_timer_sync(&self->polling_timer); + else + aq_pci_func_free_irqs(self->aq_pci_func); + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_stop(aq_vec); + + return self->aq_hw_ops.hw_stop(self->aq_hw); +} + +void aq_nic_deinit(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_deinit(aq_vec); + + if (self->power_state == AQ_HW_POWER_STATE_D0) { + (void)self->aq_hw_ops.hw_deinit(self->aq_hw); + } else { + (void)self->aq_hw_ops.hw_set_power(self->aq_hw, + self->power_state); + } + +err_exit:; +} + +void aq_nic_free_hot_resources(struct aq_nic_s *self) +{ + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = AQ_DIMOF(self->aq_vec); i--;) { + if (self->aq_vec[i]) + aq_vec_free(self->aq_vec[i]); + } + +err_exit:; +} + +int 
aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg) +{ + int err = 0; + + if (!netif_running(self->ndev)) { + err = 0; + goto err_exit; + } + rtnl_lock(); + if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) { + self->power_state = AQ_HW_POWER_STATE_D3; + netif_device_detach(self->ndev); + netif_tx_stop_all_queues(self->ndev); + + err = aq_nic_stop(self); + if (err < 0) + goto err_exit; + + aq_nic_deinit(self); + } else { + err = aq_nic_init(self); + if (err < 0) + goto err_exit; + + err = aq_nic_start(self); + if (err < 0) + goto err_exit; + + netif_device_attach(self->ndev); + netif_tx_start_all_queues(self->ndev); + } + rtnl_unlock(); + +err_exit: + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h new file mode 100644 index 000000000000..7fc2a5ecb2b7 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -0,0 +1,110 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_nic.h: Declaration of common code for NIC. */ + +#ifndef AQ_NIC_H +#define AQ_NIC_H + +#include "aq_common.h" +#include "aq_rss.h" + +struct aq_ring_s; +struct aq_pci_func_s; +struct aq_hw_ops; + +#define AQ_NIC_FC_OFF 0U +#define AQ_NIC_FC_TX 1U +#define AQ_NIC_FC_RX 2U +#define AQ_NIC_FC_FULL 3U +#define AQ_NIC_FC_AUTO 4U + +#define AQ_NIC_RATE_10G BIT(0) +#define AQ_NIC_RATE_5G BIT(1) +#define AQ_NIC_RATE_5GSR BIT(2) +#define AQ_NIC_RATE_2GS BIT(3) +#define AQ_NIC_RATE_1G BIT(4) +#define AQ_NIC_RATE_100M BIT(5) + +struct aq_nic_cfg_s { + struct aq_hw_caps_s *aq_hw_caps; + u64 hw_features; + u32 rxds; /* rx ring size, descriptors # */ + u32 txds; /* tx ring size, descriptors # */ + u32 vecs; /* vecs==allocated irqs */ + u32 irq_type; + u32 itr; + u32 num_rss_queues; + u32 mtu; + u32 ucp_0x364; + u32 flow_control; + u32 link_speed_msk; + u32 vlan_id; + u16 is_mc_list_enabled; + u16 mc_list_count; + bool is_autoneg; + bool is_interrupt_moderation; + bool is_polling; + bool is_rss; + bool is_lro; + u8 tcs; + struct aq_rss_parameters aq_rss; +}; + +#define AQ_NIC_FLAG_STARTED 0x00000004U +#define AQ_NIC_FLAG_STOPPING 0x00000008U +#define AQ_NIC_FLAG_RESETTING 0x00000010U +#define AQ_NIC_FLAG_CLOSING 0x00000020U +#define AQ_NIC_LINK_DOWN 0x04000000U +#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U +#define AQ_NIC_FLAG_ERR_HW 0x80000000U + +#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \ + ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_)) + +struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, + const struct ethtool_ops *et_ops, + struct device *dev, + struct aq_pci_func_s *aq_pci_func, + unsigned int port, + const struct aq_hw_ops *aq_hw_ops); +int aq_nic_ndev_init(struct aq_nic_s *self); +struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev); +void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx, + struct aq_ring_s *ring); +struct device *aq_nic_get_dev(struct aq_nic_s *self); +struct net_device *aq_nic_get_ndev(struct aq_nic_s *self); +int aq_nic_init(struct aq_nic_s *self); +int aq_nic_cfg_start(struct aq_nic_s *self); +int aq_nic_ndev_register(struct aq_nic_s *self); +void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx); +void aq_nic_ndev_queue_stop(struct aq_nic_s *self, 
unsigned int idx); +void aq_nic_ndev_free(struct aq_nic_s *self); +int aq_nic_start(struct aq_nic_s *self); +int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); +int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p); +int aq_nic_get_regs_count(struct aq_nic_s *self); +void aq_nic_get_stats(struct aq_nic_s *self, u64 *data); +int aq_nic_stop(struct aq_nic_s *self); +void aq_nic_deinit(struct aq_nic_s *self); +void aq_nic_free_hot_resources(struct aq_nic_s *self); +int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu); +int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev); +int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags); +int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev); +unsigned int aq_nic_get_link_speed(struct aq_nic_s *self); +void aq_nic_get_link_ksettings(struct aq_nic_s *self, + struct ethtool_link_ksettings *cmd); +int aq_nic_set_link_ksettings(struct aq_nic_s *self, + const struct ethtool_link_ksettings *cmd); +struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); +u32 aq_nic_get_fw_version(struct aq_nic_s *self); +int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); + +#endif /* AQ_NIC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h new file mode 100644 index 000000000000..f81738a71c42 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h @@ -0,0 +1,46 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_nic_internal.h: Definition of private object structure. */ + +#ifndef AQ_NIC_INTERNAL_H +#define AQ_NIC_INTERNAL_H + +struct aq_nic_s { + struct aq_obj_s header; + struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX]; + struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX]; + struct aq_hw_s *aq_hw; + struct net_device *ndev; + struct aq_pci_func_s *aq_pci_func; + unsigned int aq_vecs; + unsigned int packet_filter; + unsigned int power_state; + bool is_ndev_registered; + u8 port; + struct aq_hw_ops aq_hw_ops; + struct aq_hw_caps_s aq_hw_caps; + struct aq_nic_cfg_s aq_nic_cfg; + struct timer_list service_timer; + struct timer_list polling_timer; + struct aq_hw_link_status_s link_status; + struct { + u32 count; + u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN]; + } mc_list; +}; + +#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \ + AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \ + AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW) + +#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \ + AQ_NIC_LINK_DOWN) + +#endif /* AQ_NIC_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c new file mode 100644 index 000000000000..da4bc09dac51 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -0,0 +1,343 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ */ + +/* File aq_pci_func.c: Definition of PCI functions. */ + +#include "aq_pci_func.h" +#include "aq_nic.h" +#include "aq_vec.h" +#include "aq_hw.h" +#include <linux/interrupt.h> + +struct aq_pci_func_s { + struct pci_dev *pdev; + struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS]; + void __iomem *mmio; + void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS]; + resource_size_t mmio_pa; + unsigned int msix_entry_mask; + unsigned int irq_type; + unsigned int ports; + bool is_pci_enabled; + bool is_regions; + bool is_pci_using_dac; + struct aq_hw_caps_s aq_hw_caps; + struct msix_entry msix_entry[AQ_CFG_PCI_FUNC_MSIX_IRQS]; +}; + +struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, + struct pci_dev *pdev, + const struct net_device_ops *ndev_ops, + const struct ethtool_ops *eth_ops) +{ + struct aq_pci_func_s *self = NULL; + int err = 0; + unsigned int port = 0U; + + if (!aq_hw_ops) { + err = -EFAULT; + goto err_exit; + } + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + + pci_set_drvdata(pdev, self); + self->pdev = pdev; + + err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); + if (err < 0) + goto err_exit; + + self->ports = self->aq_hw_caps.ports; + + for (port = 0; port < self->ports; ++port) { + struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, + &pdev->dev, self, + port, aq_hw_ops); + + if (!aq_nic) { + err = -ENOMEM; + goto err_exit; + } + self->port[port] = aq_nic; + } + +err_exit: + if (err < 0) { + if (self) + aq_pci_func_free(self); + self = NULL; + } + + (void)err; + return self; +} + +int aq_pci_func_init(struct aq_pci_func_s *self) +{ + int err = 0; + unsigned int bar = 0U; + unsigned int port = 0U; + unsigned int i = 0U; + + err = pci_enable_device(self->pdev); + if (err < 0) + goto err_exit; + + self->is_pci_enabled = true; + + err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64)); + self->is_pci_using_dac = 1; + } + if (err) { + err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(self->pdev, + DMA_BIT_MASK(32)); + self->is_pci_using_dac = 0; + } + if (err != 0) { + err = -ENOSR; + goto err_exit; + } + + err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio"); + if (err < 0) + goto err_exit; + + self->is_regions = true; + + pci_set_master(self->pdev); + + for (bar = 0; bar < 4; ++bar) { + if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) { + resource_size_t reg_sz; + + self->mmio_pa = pci_resource_start(self->pdev, bar); + if (self->mmio_pa == 0U) { + err = -EIO; + goto err_exit; + } + + reg_sz = pci_resource_len(self->pdev, bar); + if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) { + err = -EIO; + goto err_exit; + } + + self->mmio = ioremap_nocache(self->mmio_pa, reg_sz); + if (!self->mmio) { + err = -EIO; + goto err_exit; + } + break; + } + } + + for (i = 0; i < self->aq_hw_caps.msix_irqs; i++) + self->msix_entry[i].entry = i; + + /*enable interrupts */ +#if AQ_CFG_FORCE_LEGACY_INT + self->irq_type = AQ_HW_IRQ_LEGACY; +#else + err = pci_enable_msix(self->pdev, self->msix_entry, + self->aq_hw_caps.msix_irqs); + + if (err >= 0) { + self->irq_type = AQ_HW_IRQ_MSIX; + } else { + err = pci_enable_msi(self->pdev); + + if (err >= 0) { + self->irq_type = AQ_HW_IRQ_MSI; + } else { + self->irq_type = AQ_HW_IRQ_LEGACY; + err = 0; + } + } +#endif + + /* net device init */ + for (port = 0; port < self->ports; ++port) { + if (!self->port[port]) + continue; + + err = 
aq_nic_cfg_start(self->port[port]); + if (err < 0) + goto err_exit; + + err = aq_nic_ndev_init(self->port[port]); + if (err < 0) + goto err_exit; + + err = aq_nic_ndev_register(self->port[port]); + if (err < 0) + goto err_exit; + } + +err_exit: + if (err < 0) + aq_pci_func_deinit(self); + return err; +} + +int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i, + char *name, void *aq_vec, cpumask_t *affinity_mask) +{ + int err = 0; + + switch (self->irq_type) { + case AQ_HW_IRQ_MSIX: + err = request_irq(self->msix_entry[i].vector, aq_vec_isr, 0, + name, aq_vec); + break; + + case AQ_HW_IRQ_MSI: + err = request_irq(self->pdev->irq, aq_vec_isr, 0, name, aq_vec); + break; + + case AQ_HW_IRQ_LEGACY: + err = request_irq(self->pdev->irq, aq_vec_isr_legacy, + IRQF_SHARED, name, aq_vec); + break; + + default: + err = -EFAULT; + break; + } + + if (err >= 0) { + self->msix_entry_mask |= (1 << i); + self->aq_vec[i] = aq_vec; + + if (self->irq_type == AQ_HW_IRQ_MSIX) + irq_set_affinity_hint(self->msix_entry[i].vector, + affinity_mask); + } + + return err; +} + +void aq_pci_func_free_irqs(struct aq_pci_func_s *self) +{ + unsigned int i = 0U; + + for (i = 32U; i--;) { + if (!((1U << i) & self->msix_entry_mask)) + continue; + + switch (self->irq_type) { + case AQ_HW_IRQ_MSIX: + irq_set_affinity_hint(self->msix_entry[i].vector, NULL); + free_irq(self->msix_entry[i].vector, self->aq_vec[i]); + break; + + case AQ_HW_IRQ_MSI: + free_irq(self->pdev->irq, self->aq_vec[i]); + break; + + case AQ_HW_IRQ_LEGACY: + free_irq(self->pdev->irq, self->aq_vec[i]); + break; + + default: + break; + } + + self->msix_entry_mask &= ~(1U << i); + } +} + +void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self) +{ + return self->mmio; +} + +unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self) +{ + return self->irq_type; +} + +void aq_pci_func_deinit(struct aq_pci_func_s *self) +{ + if (!self) + goto err_exit; + + aq_pci_func_free_irqs(self); + + switch (self->irq_type) { + case AQ_HW_IRQ_MSI: + pci_disable_msi(self->pdev); + break; + + case AQ_HW_IRQ_MSIX: + pci_disable_msix(self->pdev); + break; + + case AQ_HW_IRQ_LEGACY: + break; + + default: + break; + } + + if (self->is_regions) + pci_release_regions(self->pdev); + + if (self->is_pci_enabled) + pci_disable_device(self->pdev); + +err_exit:; +} + +void aq_pci_func_free(struct aq_pci_func_s *self) +{ + unsigned int port = 0U; + + if (!self) + goto err_exit; + + for (port = 0; port < self->ports; ++port) { + if (!self->port[port]) + continue; + + aq_nic_ndev_free(self->port[port]); + } + + kfree(self); + +err_exit:; +} + +int aq_pci_func_change_pm_state(struct aq_pci_func_s *self, + pm_message_t *pm_msg) +{ + int err = 0; + unsigned int port = 0U; + + if (!self) { + err = -EFAULT; + goto err_exit; + } + for (port = 0; port < self->ports; ++port) { + if (!self->port[port]) + continue; + + (void)aq_nic_change_pm_state(self->port[port], pm_msg); + } + +err_exit: + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h new file mode 100644 index 000000000000..ecb033791203 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h @@ -0,0 +1,34 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. 
All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_pci_func.h: Declaration of PCI functions. */ + +#ifndef AQ_PCI_FUNC_H +#define AQ_PCI_FUNC_H + +#include "aq_common.h" + +struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops, + struct pci_dev *pdev, + const struct net_device_ops *ndev_ops, + const struct ethtool_ops *eth_ops); +int aq_pci_func_init(struct aq_pci_func_s *self); +int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i, + char *name, void *aq_vec, + cpumask_t *affinity_mask); +void aq_pci_func_free_irqs(struct aq_pci_func_s *self); +int aq_pci_func_start(struct aq_pci_func_s *self); +void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self); +unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self); +void aq_pci_func_deinit(struct aq_pci_func_s *self); +void aq_pci_func_free(struct aq_pci_func_s *self); +int aq_pci_func_change_pm_state(struct aq_pci_func_s *self, + pm_message_t *pm_msg); + +#endif /* AQ_PCI_FUNC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c new file mode 100644 index 000000000000..dea9e9bbb8e7 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -0,0 +1,375 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ring.c: Definition of functions for Rx/Tx rings. 
*/ + +#include "aq_ring.h" +#include "aq_nic.h" +#include "aq_hw.h" + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic) +{ + int err = 0; + + self->buff_ring = + kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL); + + if (!self->buff_ring) { + err = -ENOMEM; + goto err_exit; + } + self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), + self->size * self->dx_size, + &self->dx_ring_pa, GFP_KERNEL); + if (!self->dx_ring) { + err = -ENOMEM; + goto err_exit; + } + +err_exit: + if (err < 0) { + aq_ring_free(self); + self = NULL; + } + return self; +} + +struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + + self->aq_nic = aq_nic; + self->idx = idx; + self->size = aq_nic_cfg->txds; + self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size; + + self = aq_ring_alloc(self, aq_nic); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + +err_exit: + if (err < 0) { + aq_ring_free(self); + self = NULL; + } + return self; +} + +struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + + self->aq_nic = aq_nic; + self->idx = idx; + self->size = aq_nic_cfg->rxds; + self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; + + self = aq_ring_alloc(self, aq_nic); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + +err_exit: + if (err < 0) { + aq_ring_free(self); + self = NULL; + } + return self; +} + +int aq_ring_init(struct aq_ring_s *self) +{ + self->hw_head = 0; + self->sw_head = 0; + self->sw_tail = 0; + return 0; +} + +void aq_ring_tx_append_buffs(struct aq_ring_s *self, + struct aq_ring_buff_s *buffer, + unsigned int buffers) +{ + if (likely(self->sw_tail + buffers < self->size)) { + memcpy(&self->buff_ring[self->sw_tail], buffer, + sizeof(buffer[0]) * buffers); + } else { + unsigned int first_part = self->size - self->sw_tail; + unsigned int second_part = buffers - first_part; + + memcpy(&self->buff_ring[self->sw_tail], buffer, + sizeof(buffer[0]) * first_part); + + memcpy(&self->buff_ring[0], &buffer[first_part], + sizeof(buffer[0]) * second_part); + } +} + +int aq_ring_tx_clean(struct aq_ring_s *self) +{ + struct device *dev = aq_nic_get_dev(self->aq_nic); + + for (; self->sw_head != self->hw_head; + self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + + if (likely(buff->is_mapped)) { + if (unlikely(buff->is_sop)) + dma_unmap_single(dev, buff->pa, buff->len, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, buff->pa, buff->len, + DMA_TO_DEVICE); + } + + if (unlikely(buff->is_eop)) + dev_kfree_skb_any(buff->skb); + } + + if (aq_ring_avail_dx(self) > AQ_CFG_SKB_FRAGS_MAX) + aq_nic_ndev_queue_start(self->aq_nic, self->idx); + + return 0; +} + +static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, + unsigned int t) +{ + return (h < t) ? 
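+/* aq_ring_dx_in_range() answers "does index i lie between head h and
+ * tail t on the circular ring?", handling wrap-around: e.g. with
+ * size 8, h = 6, t = 2 the in-range indices are {7, 0, 1} */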
((h < i) && (i < t)) : ((h < i) || (i < t)); +} + +#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget) +{ + struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); + int err = 0; + bool is_rsc_completed = true; + + for (; (self->sw_head != self->hw_head) && budget; + self->sw_head = aq_ring_next_dx(self, self->sw_head), + --budget, ++(*work_done)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + struct sk_buff *skb = NULL; + unsigned int next_ = 0U; + unsigned int i = 0U; + struct aq_ring_buff_s *buff_ = NULL; + + if (buff->is_error) { + __free_pages(buff->page, 0); + continue; + } + + if (buff->is_cleaned) + continue; + + if (!buff->is_eop) { + for (next_ = buff->next, + buff_ = &self->buff_ring[next_]; true; + next_ = buff_->next, + buff_ = &self->buff_ring[next_]) { + is_rsc_completed = + aq_ring_dx_in_range(self->sw_head, + next_, + self->hw_head); + + if (unlikely(!is_rsc_completed)) { + is_rsc_completed = false; + break; + } + + if (buff_->is_eop) + break; + } + + if (!is_rsc_completed) { + err = 0; + goto err_exit; + } + } + + /* for single fragment packets use build_skb() */ + if (buff->is_eop) { + skb = build_skb(page_address(buff->page), + buff->len + AQ_SKB_ALIGN); + if (unlikely(!skb)) { + err = -ENOMEM; + goto err_exit; + } + + skb->dev = ndev; + skb_put(skb, buff->len); + } else { + skb = netdev_alloc_skb(ndev, ETH_HLEN); + if (unlikely(!skb)) { + err = -ENOMEM; + goto err_exit; + } + skb_put(skb, ETH_HLEN); + memcpy(skb->data, page_address(buff->page), ETH_HLEN); + + skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN, + buff->len - ETH_HLEN, + SKB_TRUESIZE(buff->len - ETH_HLEN)); + + for (i = 1U, next_ = buff->next, + buff_ = &self->buff_ring[next_]; true; + next_ = buff_->next, + buff_ = &self->buff_ring[next_], ++i) { + skb_add_rx_frag(skb, i, buff_->page, 0, + buff_->len, + SKB_TRUESIZE(buff->len - + ETH_HLEN)); + buff_->is_cleaned = 1; + + if (buff_->is_eop) + break; + } + } + + skb->protocol = eth_type_trans(skb, ndev); + if (unlikely(buff->is_cso_err)) { + ++self->stats.rx.errors; + __skb_mark_checksum_bad(skb); + } else { + if (buff->is_ip_cso) { + __skb_incr_checksum_unnecessary(skb); + if (buff->is_udp_cso || buff->is_tcp_cso) + __skb_incr_checksum_unnecessary(skb); + } else { + skb->ip_summed = CHECKSUM_NONE; + } + } + + skb_set_hash(skb, buff->rss_hash, + buff->is_hash_l4 ? 
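+/* tell the stack what the RSS hash covers: an L4 (4-tuple) hash can be
+ * reused for e.g. flow steering, otherwise no hash type is claimed */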
PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_NONE); + + skb_record_rx_queue(skb, self->idx); + + netif_receive_skb(skb); + + ++self->stats.rx.packets; + self->stats.rx.bytes += skb->len; + } + +err_exit: + return err; +} + +int aq_ring_rx_fill(struct aq_ring_s *self) +{ + struct aq_ring_buff_s *buff = NULL; + int err = 0; + int i = 0; + + for (i = aq_ring_avail_dx(self); i--; + self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) { + buff = &self->buff_ring[self->sw_tail]; + + buff->flags = 0U; + buff->len = AQ_CFG_RX_FRAME_MAX; + + buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD | + __GFP_COMP, 0); + if (!buff->page) { + err = -ENOMEM; + goto err_exit; + } + + buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic), + buff->page, 0, + AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE); + + if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) { + err = -ENOMEM; + goto err_exit; + } + + buff = NULL; + } + +err_exit: + if (err < 0) { + if (buff && buff->page) + __free_pages(buff->page, 0); + } + + return err; +} + +void aq_ring_rx_deinit(struct aq_ring_s *self) +{ + if (!self) + goto err_exit; + + for (; self->sw_head != self->sw_tail; + self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + + dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa, + AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE); + + __free_pages(buff->page, 0); + } + +err_exit:; +} + +void aq_ring_tx_deinit(struct aq_ring_s *self) +{ + if (!self) + goto err_exit; + + for (; self->sw_head != self->sw_tail; + self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + struct device *ndev = aq_nic_get_dev(self->aq_nic); + + if (likely(buff->is_mapped)) { + if (unlikely(buff->is_sop)) { + dma_unmap_single(ndev, buff->pa, buff->len, + DMA_TO_DEVICE); + } else { + dma_unmap_page(ndev, buff->pa, buff->len, + DMA_TO_DEVICE); + } + } + + if (unlikely(buff->is_eop)) + dev_kfree_skb_any(buff->skb); + } +err_exit:; +} + +void aq_ring_free(struct aq_ring_s *self) +{ + if (!self) + goto err_exit; + + kfree(self->buff_ring); + + if (self->dx_ring) + dma_free_coherent(aq_nic_get_dev(self->aq_nic), + self->size * self->dx_size, self->dx_ring, + self->dx_ring_pa); + +err_exit:; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h new file mode 100644 index 000000000000..0ac3f9e7bee6 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -0,0 +1,157 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */ + +#ifndef AQ_RING_H +#define AQ_RING_H + +#include "aq_common.h" + +struct page; + +/* TxC SOP DX EOP + * +----------+----------+----------+----------- + * 8bytes|len l3,l4 | pa | pa | pa + * +----------+----------+----------+----------- + * 4/8bytes|len pkt |len pkt | | skb + * +----------+----------+----------+----------- + * 4/8bytes|is_txc |len,flags |len |len,is_eop + * +----------+----------+----------+----------- + * + * This aq_ring_buff_s doesn't have endianness dependency. + * It is __packed for cache line optimizations. 
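+ *
+ * The union below overlays the per-role views (RX, EOP, DX, SOP, TxC)
+ * on a single buffer slot, so each slot is read according to the
+ * descriptor type it tracks.  Ring math keeps one descriptor slot
+ * unused to tell a full ring from an empty one; with size = 8,
+ * sw_head = 2 and sw_tail = 5, aq_ring_avail_dx() below yields
+ * (8 - 1) - 5 + 2 = 4 free slots.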
+ */ +struct __packed aq_ring_buff_s { + union { + /* RX */ + struct { + u32 rss_hash; + u16 next; + u8 is_hash_l4; + u8 rsvd1; + struct page *page; + }; + /* EOP */ + struct { + dma_addr_t pa_eop; + struct sk_buff *skb; + }; + /* DX */ + struct { + dma_addr_t pa; + }; + /* SOP */ + struct { + dma_addr_t pa_sop; + u32 len_pkt_sop; + }; + /* TxC */ + struct { + u32 mss; + u8 len_l2; + u8 len_l3; + u8 len_l4; + u8 rsvd2; + u32 len_pkt; + }; + }; + union { + struct { + u32 len:16; + u32 is_ip_cso:1; + u32 is_udp_cso:1; + u32 is_tcp_cso:1; + u32 is_cso_err:1; + u32 is_sop:1; + u32 is_eop:1; + u32 is_txc:1; + u32 is_mapped:1; + u32 is_cleaned:1; + u32 is_error:1; + u32 rsvd3:6; + }; + u32 flags; + }; +}; + +struct aq_ring_stats_rx_s { + u64 errors; + u64 packets; + u64 bytes; + u64 lro_packets; + u64 jumbo_packets; +}; + +struct aq_ring_stats_tx_s { + u64 errors; + u64 packets; + u64 bytes; +}; + +union aq_ring_stats_s { + struct aq_ring_stats_rx_s rx; + struct aq_ring_stats_tx_s tx; +}; + +struct aq_ring_s { + struct aq_obj_s header; + struct aq_ring_buff_s *buff_ring; + u8 *dx_ring; /* descriptors ring, dma shared mem */ + struct aq_nic_s *aq_nic; + unsigned int idx; /* for HW layer registers operations */ + unsigned int hw_head; + unsigned int sw_head; + unsigned int sw_tail; + unsigned int size; /* descriptors number */ + unsigned int dx_size; /* TX or RX descriptor size, */ + /* stored here for fater math */ + union aq_ring_stats_s stats; + dma_addr_t dx_ring_pa; +}; + +struct aq_ring_param_s { + unsigned int vec_idx; + unsigned int cpu; + cpumask_t affinity_mask; +}; + +static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self, + unsigned int dx) +{ + return (++dx >= self->size) ? 0U : dx; +} + +static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self) +{ + return (((self->sw_tail >= self->sw_head)) ? + (self->size - 1) - self->sw_tail + self->sw_head : + self->sw_head - self->sw_tail - 1); +} + +struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); +struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); +int aq_ring_init(struct aq_ring_s *self); +void aq_ring_tx_deinit(struct aq_ring_s *self); +void aq_ring_rx_deinit(struct aq_ring_s *self); +void aq_ring_free(struct aq_ring_s *self); +void aq_ring_tx_append_buffs(struct aq_ring_s *ring, + struct aq_ring_buff_s *buffer, + unsigned int buffers); +int aq_ring_tx_clean(struct aq_ring_s *self); +int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget); +int aq_ring_rx_fill(struct aq_ring_s *self); + +#endif /* AQ_RING_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_rss.h b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h new file mode 100644 index 000000000000..1db6eb20a8f2 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h @@ -0,0 +1,26 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_rss.h: Receive Side Scaling definitions. 
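+ *
+ * The indirection table holds one queue index per hash bucket and the
+ * secret key seeds the RSS hash.  hw_atl_*_hw_rss_set() reduces each
+ * entry modulo the active queue count and packs it 3 bits wide: entry i
+ * lands at bit offset (i * 3) % 16 of 16-bit word (i * 3) / 16, written
+ * through a u32 access so fields may straddle word boundaries.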
*/ + +#ifndef AQ_RSS_H +#define AQ_RSS_H + +#include "aq_common.h" +#include "aq_cfg.h" + +struct aq_rss_parameters { + u16 base_cpu_number; + u16 indirection_table_size; + u16 hash_secret_key_size; + u32 hash_secret_key[AQ_CFG_RSS_HASHKEY_SIZE / sizeof(u32)]; + u8 indirection_table[AQ_CFG_RSS_INDIRECTION_TABLE_MAX]; +}; + +#endif /* AQ_RSS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h new file mode 100644 index 000000000000..4446bd90fd86 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h @@ -0,0 +1,50 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_utils.h: Useful macro and structures used in all layers of driver. */ + +#ifndef AQ_UTILS_H +#define AQ_UTILS_H + +#include "aq_common.h" + +#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_) + +struct aq_obj_s { + spinlock_t lock; /* spinlock for nic/rings processing */ + atomic_t flags; + atomic_t busy_count; +}; + +static inline void aq_utils_obj_set(atomic_t *flags, u32 mask) +{ + unsigned long flags_old, flags_new; + + do { + flags_old = atomic_read(flags); + flags_new = flags_old | (mask); + } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old); +} + +static inline void aq_utils_obj_clear(atomic_t *flags, u32 mask) +{ + unsigned long flags_old, flags_new; + + do { + flags_old = atomic_read(flags); + flags_new = flags_old & ~(mask); + } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old); +} + +static inline bool aq_utils_obj_test(atomic_t *flags, u32 mask) +{ + return atomic_read(flags) & mask; +} + +#endif /* AQ_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c new file mode 100644 index 000000000000..cb30a6396a70 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -0,0 +1,392 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings. + * Definition of functions for Rx and Tx rings. Friendly module for aq_nic. 
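+ *
+ * One aq_vec_s couples a Tx/Rx ring pair per traffic class with a single
+ * napi_struct and interrupt vector, so aq_vec_poll() drains Tx
+ * completions and Rx work for all of its rings from one softirq.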
+ */ + +#include "aq_vec.h" +#include "aq_nic.h" +#include "aq_ring.h" +#include "aq_hw.h" + +#include <linux/netdevice.h> + +struct aq_vec_s { + struct aq_obj_s header; + struct aq_hw_ops *aq_hw_ops; + struct aq_hw_s *aq_hw; + struct aq_nic_s *aq_nic; + unsigned int tx_rings; + unsigned int rx_rings; + struct aq_ring_param_s aq_ring_param; + struct napi_struct napi; + struct aq_ring_s ring[AQ_CFG_TCS_MAX][2]; +}; + +#define AQ_VEC_TX_ID 0 +#define AQ_VEC_RX_ID 1 + +static int aq_vec_poll(struct napi_struct *napi, int budget) +__releases(&self->lock) +__acquires(&self->lock) +{ + struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); + struct aq_ring_s *ring = NULL; + int work_done = 0; + int err = 0; + unsigned int i = 0U; + unsigned int sw_tail_old = 0U; + bool was_tx_cleaned = false; + + if (!self) { + err = -EINVAL; + } else if (spin_trylock(&self->header.lock)) { + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + if (self->aq_hw_ops->hw_ring_tx_head_update) { + err = self->aq_hw_ops->hw_ring_tx_head_update( + self->aq_hw, + &ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + } + + if (ring[AQ_VEC_TX_ID].sw_head != + ring[AQ_VEC_TX_ID].hw_head) { + err = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + was_tx_cleaned = true; + } + + err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw, + &ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + if (ring[AQ_VEC_RX_ID].sw_head != + ring[AQ_VEC_RX_ID].hw_head) { + err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID], + &work_done, + budget - work_done); + if (err < 0) + goto err_exit; + + sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail; + + err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_fill( + self->aq_hw, + &ring[AQ_VEC_RX_ID], sw_tail_old); + if (err < 0) + goto err_exit; + } + } + + if (was_tx_cleaned) + work_done = budget; + + if (work_done < budget) { + napi_complete_done(napi, work_done); + self->aq_hw_ops->hw_irq_enable(self->aq_hw, + 1U << self->aq_ring_param.vec_idx); + } + +err_exit: + spin_unlock(&self->header.lock); + } + + return work_done; +} + +struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + struct aq_vec_s *self = NULL; + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + int err = 0; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + + self->aq_nic = aq_nic; + self->aq_ring_param.vec_idx = idx; + self->aq_ring_param.cpu = + idx + aq_nic_cfg->aq_rss.base_cpu_number; + + cpumask_set_cpu(self->aq_ring_param.cpu, + &self->aq_ring_param.affinity_mask); + + self->tx_rings = 0; + self->rx_rings = 0; + + netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, + aq_vec_poll, AQ_CFG_NAPI_WEIGHT); + + for (i = 0; i < aq_nic_cfg->tcs; ++i) { + unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->nic, + self->tx_rings, + self->aq_ring_param.vec_idx); + + ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic, + idx_ring, aq_nic_cfg); + if (!ring) { + err = -ENOMEM; + goto err_exit; + } + + ++self->tx_rings; + + aq_nic_set_tx_ring(aq_nic, idx_ring, ring); + + ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic, + idx_ring, aq_nic_cfg); + if (!ring) { + err = -ENOMEM; + goto err_exit; + } + + ++self->rx_rings; + } + +err_exit: + if (err < 0) { + aq_vec_free(self); + self = NULL; + } + return self; +} + +int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops, + struct 
aq_hw_s *aq_hw) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + int err = 0; + + self->aq_hw_ops = aq_hw_ops; + self->aq_hw = aq_hw; + + spin_lock_init(&self->header.lock); + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + err = aq_ring_init(&ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw, + &ring[AQ_VEC_TX_ID], + &self->aq_ring_param); + if (err < 0) + goto err_exit; + + err = aq_ring_init(&ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw, + &ring[AQ_VEC_RX_ID], + &self->aq_ring_param); + if (err < 0) + goto err_exit; + + err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw, + &ring[AQ_VEC_RX_ID], 0U); + if (err < 0) + goto err_exit; + } + +err_exit: + return err; +} + +int aq_vec_start(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + int err = 0; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw, + &ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw, + &ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + } + + napi_enable(&self->napi); + +err_exit: + return err; +} + +void aq_vec_stop(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw, + &ring[AQ_VEC_TX_ID]); + + self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw, + &ring[AQ_VEC_RX_ID]); + } + + napi_disable(&self->napi); +} + +void aq_vec_deinit(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + aq_ring_tx_deinit(&ring[AQ_VEC_TX_ID]); + aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]); + } +err_exit:; +} + +void aq_vec_free(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + aq_ring_free(&ring[AQ_VEC_TX_ID]); + aq_ring_free(&ring[AQ_VEC_RX_ID]); + } + + netif_napi_del(&self->napi); + + kfree(self); + +err_exit:; +} + +irqreturn_t aq_vec_isr(int irq, void *private) +{ + struct aq_vec_s *self = private; + int err = 0; + + if (!self) { + err = -EINVAL; + goto err_exit; + } + napi_schedule(&self->napi); + +err_exit: + return err >= 0 ? IRQ_HANDLED : IRQ_NONE; +} + +irqreturn_t aq_vec_isr_legacy(int irq, void *private) +{ + struct aq_vec_s *self = private; + u64 irq_mask = 0U; + irqreturn_t err = 0; + + if (!self) { + err = -EINVAL; + goto err_exit; + } + err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask); + if (err < 0) + goto err_exit; + + if (irq_mask) { + self->aq_hw_ops->hw_irq_disable(self->aq_hw, + 1U << self->aq_ring_param.vec_idx); + napi_schedule(&self->napi); + } else { + self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U); + err = IRQ_NONE; + } + +err_exit: + return err >= 0 ? 
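+/* translate the driver-style status into the IRQ core contract:
+ * a non-negative value means the interrupt was ours and handled */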
IRQ_HANDLED : IRQ_NONE; +} + +cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self) +{ + return &self->aq_ring_param.affinity_mask; +} + +void aq_vec_add_stats(struct aq_vec_s *self, + struct aq_ring_stats_rx_s *stats_rx, + struct aq_ring_stats_tx_s *stats_tx) +{ + struct aq_ring_s *ring = NULL; + unsigned int r = 0U; + + for (r = 0U, ring = self->ring[0]; + self->tx_rings > r; ++r, ring = self->ring[r]) { + struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx; + struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx; + + stats_rx->packets += rx->packets; + stats_rx->bytes += rx->bytes; + stats_rx->errors += rx->errors; + stats_rx->jumbo_packets += rx->jumbo_packets; + stats_rx->lro_packets += rx->lro_packets; + + stats_tx->packets += tx->packets; + stats_tx->bytes += tx->bytes; + stats_tx->errors += tx->errors; + } +} + +int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count) +{ + unsigned int count = 0U; + struct aq_ring_stats_rx_s stats_rx; + struct aq_ring_stats_tx_s stats_tx; + + memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); + memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); + aq_vec_add_stats(self, &stats_rx, &stats_tx); + + data[count] += stats_rx.packets; + data[++count] += stats_tx.packets; + data[++count] += stats_rx.jumbo_packets; + data[++count] += stats_rx.lro_packets; + data[++count] += stats_rx.errors; + + if (p_count) + *p_count = ++count; + + return 0; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h new file mode 100644 index 000000000000..6c68b184236c --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h @@ -0,0 +1,42 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_vec.h: Definition of common structures for vector of Rx and Tx rings. + * Declaration of functions for Rx and Tx rings. + */ + +#ifndef AQ_VEC_H +#define AQ_VEC_H + +#include "aq_common.h" +#include <linux/irqreturn.h> + +struct aq_hw_s; +struct aq_hw_ops; +struct aq_ring_stats_rx_s; +struct aq_ring_stats_tx_s; + +irqreturn_t aq_vec_isr(int irq, void *private); +irqreturn_t aq_vec_isr_legacy(int irq, void *private); +struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); +int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops, + struct aq_hw_s *aq_hw); +void aq_vec_deinit(struct aq_vec_s *self); +void aq_vec_free(struct aq_vec_s *self); +int aq_vec_start(struct aq_vec_s *self); +void aq_vec_stop(struct aq_vec_s *self); +cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self); +int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, + unsigned int *p_count); +void aq_vec_add_stats(struct aq_vec_s *self, + struct aq_ring_stats_rx_s *stats_rx, + struct aq_ring_stats_tx_s *stats_tx); + +#endif /* AQ_VEC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c new file mode 100644 index 000000000000..1f388054a6c7 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -0,0 +1,905 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. 
All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */ + +#include "../aq_hw.h" +#include "../aq_hw_utils.h" +#include "../aq_ring.h" +#include "hw_atl_a0.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" +#include "hw_atl_a0_internal.h" + +static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); + return 0; +} + +static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func, + unsigned int port, + struct aq_hw_ops *ops) +{ + struct hw_atl_s *self = NULL; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) + goto err_exit; + + self->base.aq_pci_func = aq_pci_func; + + self->base.not_ff_addr = 0x10U; + +err_exit: + return (struct aq_hw_s *)self; +} + +static void hw_atl_a0_destroy(struct aq_hw_s *self) +{ + kfree(self); +} + +static int hw_atl_a0_hw_reset(struct aq_hw_s *self) +{ + int err = 0; + + glb_glb_reg_res_dis_set(self, 1U); + pci_pci_reg_res_dis_set(self, 0U); + rx_rx_reg_res_dis_set(self, 0U); + tx_tx_reg_res_dis_set(self, 0U); + + HW_ATL_FLUSH(); + glb_soft_res_set(self, 1); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + itr_irq_reg_res_dis_set(self, 0U); + itr_res_irq_set(self, 1U); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self) +{ + u32 tc = 0U; + u32 buff_size = 0U; + unsigned int i_priority = 0U; + bool is_rx_flow_control = false; + + /* TPS Descriptor rate init */ + tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); + tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); + + /* TPS VM init */ + tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); + + /* TPS TC credits init */ + tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + tps_tx_pkt_shed_data_arb_mode_set(self, 0U); + + tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); + tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); + tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); + tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); + + /* Tx buf size */ + buff_size = HW_ATL_A0_TXBUF_MAX; + + tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); + tpb_tx_buff_hi_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 66U) / + 100U, tc); + tpb_tx_buff_lo_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 50U) / + 100U, tc); + + /* QoS Rx buf size per TC */ + tc = 0; + is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control); + buff_size = HW_ATL_A0_RXBUF_MAX; + + rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); + rpb_rx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 66U) / + 100U, tc); + rpb_rx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 50U) / + 100U, tc); + rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 
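+/* generate XOFF (pause) on this TC only when RX flow control is active;
+ * the hi/lo thresholds above are seemingly in 32-byte units (hence
+ * buff_size * 1024 / 32) at 66% and 50% fill, e.g. for
+ * HW_ATL_A0_RXBUF_MAX = 320: hi = 320 * 32 * 66 / 100 = 6758 */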
1U : 0U, tc); + + /* QoS 802.1p priority -> TC mapping */ + for (i_priority = 8U; i_priority--;) + rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + struct aq_nic_cfg_s *cfg = NULL; + int err = 0; + unsigned int i = 0U; + unsigned int addr = 0U; + + cfg = self->aq_nic_cfg; + + for (i = 10, addr = 0U; i--; ++addr) { + u32 key_data = cfg->is_rss ? + __swab32(rss_params->hash_secret_key[i]) : 0U; + rpf_rss_key_wr_data_set(self, key_data); + rpf_rss_key_addr_set(self, addr); + rpf_rss_key_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + u8 *indirection_table = rss_params->indirection_table; + u32 i = 0U; + u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); + int err = 0; + u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX * + HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)]; + + memset(bitary, 0, sizeof(bitary)); + + for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--; ) { + (*(u32 *)(bitary + ((i * 3U) / 16U))) |= + ((indirection_table[i] % num_rss_queues) << + ((i * 3U) & 0xFU)); + } + + for (i = AQ_DIMOF(bitary); i--;) { + rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); + rpf_rss_redir_tbl_addr_set(self, i); + rpf_rss_redir_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + + /* TX checksums offloads*/ + tpo_ipv4header_crc_offload_en_set(self, 1); + tpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* RX checksums offloads*/ + rpo_ipv4header_crc_offload_en_set(self, 1); + rpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* LSO offloads*/ + tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); + if (err < 0) + goto err_exit; + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self) +{ + thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); + + /* Tx interrupts */ + tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ? + 0x00010000U : 0x00000000U); + tdm_tx_dca_en_set(self, 0U); + tdm_tx_dca_mode_set(self, 0U); + + tpb_tx_path_scp_ins_en_set(self, 1U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self) +{ + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + int i; + + /* Rx TC/RSS number config */ + rpb_rpf_rx_traf_class_mode_set(self, 1U); + + /* Rx flow control */ + rpb_rx_flow_ctl_mode_set(self, 1U); + + /* RSS Ring selection */ + reg_rx_flr_rss_control1set(self, cfg->is_rss ? + 0xB3333333U : 0x00000000U); + + /* Multicast filters */ + for (i = HW_ATL_A0_MAC_MAX; i--;) { + rpfl2_uc_flr_en_set(self, (i == 0U) ? 
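+/* only L2 unicast filter slot 0 - the station MAC programmed by
+ * hw_atl_a0_hw_mac_addr_set() below - starts out enabled */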
1U : 0U, i); + rpfl2unicast_flr_act_set(self, 1U, i); + } + + reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); + reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); + + /* Vlan filters */ + rpf_vlan_outer_etht_set(self, 0x88A8U); + rpf_vlan_inner_etht_set(self, 0x8100U); + rpf_vlan_prom_mode_en_set(self, 1); + + /* Rx Interrupts */ + rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + rpfl2broadcast_flr_act_set(self, 1U); + rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); + + rdm_rx_dca_en_set(self, 0U); + rdm_rx_dca_mode_set(self, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) +{ + int err = 0; + unsigned int h = 0U; + unsigned int l = 0U; + + if (!mac_addr) { + err = -EINVAL; + goto err_exit; + } + h = (mac_addr[0] << 8) | (mac_addr[1]); + l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC); + rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC); + rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC); + rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_init(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg, + u8 *mac_addr) +{ + static u32 aq_hw_atl_igcr_table_[4][2] = { + { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */ + { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */ + { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */ + { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */ + }; + + int err = 0; + + self->aq_nic_cfg = aq_nic_cfg; + + hw_atl_utils_hw_chip_features_init(self, + &PHAL_ATLANTIC_A0->chip_features); + + hw_atl_a0_hw_init_tx_path(self); + hw_atl_a0_hw_init_rx_path(self); + + hw_atl_a0_hw_mac_addr_set(self, mac_addr); + + hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk); + + reg_tx_dma_debug_ctl_set(self, 0x800000b8U); + reg_tx_dma_debug_ctl_set(self, 0x000000b8U); + + hw_atl_a0_hw_qos_set(self); + hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss); + hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); + + err = aq_hw_err_from_flags(self); + if (err < 0) + goto err_exit; + + /* Interrupts */ + reg_irq_glb_ctl_set(self, + aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] + [(aq_nic_cfg->vecs > 1U) ? 
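+/* aq_hw_atl_igcr_table_ is indexed [irq_type][multi-vector]: column 0
+ * holds the single-vector setting, column 1 the multi-vector one */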
+ 1 : 0]); + + itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); + + /* Interrupts */ + reg_gen_irq_map_set(self, + ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) | + ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) | + ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) | + ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U); + + hw_atl_a0_hw_offload_set(self, aq_nic_cfg); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_start(struct aq_hw_s *self) +{ + tpb_tx_buff_en_set(self, 1); + rpb_rx_buff_en_set(self, 1); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); + return 0; +} + +static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int frags) +{ + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_txd_s *txd = NULL; + unsigned int buff_pa_len = 0U; + unsigned int pkt_len = 0U; + unsigned int frag_count = 0U; + bool is_gso = false; + + buff = &ring->buff_ring[ring->sw_tail]; + pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt; + + for (frag_count = 0; frag_count < frags; frag_count++) { + txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail * + HW_ATL_A0_TXD_SIZE]; + txd->ctl = 0; + txd->ctl2 = 0; + txd->buf_addr = 0; + + buff = &ring->buff_ring[ring->sw_tail]; + + if (buff->is_txc) { + txd->ctl |= (buff->len_l3 << 31) | + (buff->len_l2 << 24) | + HW_ATL_A0_TXD_CTL_CMD_TCP | + HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC; + txd->ctl2 |= (buff->mss << 16) | + (buff->len_l4 << 8) | + (buff->len_l3 >> 1); + + pkt_len -= (buff->len_l4 + + buff->len_l3 + + buff->len_l2); + is_gso = true; + } else { + buff_pa_len = buff->len; + + txd->buf_addr = buff->pa; + txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN & + ((u32)buff_pa_len << 4)); + txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD; + /* PAY_LEN */ + txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14); + + if (is_gso) { + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO; + txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN; + } + + /* Tx checksum offloads */ + if (buff->is_ip_cso) + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO; + + if (buff->is_udp_cso || buff->is_tcp_cso) + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO; + + if (unlikely(buff->is_eop)) { + txd->ctl |= HW_ATL_A0_TXD_CTL_EOP; + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB; + } + } + + ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail); + } + + hw_atl_a0_hw_tx_ring_tail_update(self, ring); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + rdm_rx_desc_en_set(self, false, aq_ring->idx); + + rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + + reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, + aq_ring->idx); + + reg_rx_dma_desc_base_addressmswset(self, + dma_desc_addr_msw, aq_ring->idx); + + rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + rdm_rx_desc_data_buff_size_set(self, + AQ_CFG_RX_FRAME_MAX / 1024U, + 
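+/* the data buffer size register apparently takes 1 KiB units,
+ * hence AQ_CFG_RX_FRAME_MAX / 1024 */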
aq_ring->idx); + + rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); + rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx); + + /* Rx ring set mode */ + + /* Mapping interrupt vector */ + itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_rx_set(self, true, aq_ring->idx); + + rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, + aq_ring->idx); + + reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, + aq_ring->idx); + + tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring); + + /* Set Tx threshold */ + tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx); + + /* Mapping interrupt vector */ + itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_tx_set(self, true, aq_ring->idx); + + tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int sw_tail_old) +{ + for (; sw_tail_old != ring->sw_tail; + sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) { + struct hw_atl_rxd_s *rxd = + (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old * + HW_ATL_A0_RXD_SIZE]; + + struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old]; + + rxd->buf_addr = buff->pa; + rxd->hdr_addr = 0U; + } + + reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + int err = 0; + unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx); + + if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + err = -ENXIO; + goto err_exit; + } + ring->hw_head = hw_head_; + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + struct device *ndev = aq_nic_get_dev(ring->aq_nic); + + for (; ring->hw_head != ring->sw_tail; + ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) { + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) + &ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE]; + + unsigned int is_err = 1U; + unsigned int is_rx_check_sum_enabled = 0U; + unsigned int pkt_type = 0U; + + if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */ + if ((1U << 4) & + reg_rx_dma_desc_status_get(self, ring->idx)) { + rdm_rx_desc_en_set(self, false, ring->idx); + rdm_rx_desc_res_set(self, true, ring->idx); + rdm_rx_desc_res_set(self, false, ring->idx); + rdm_rx_desc_en_set(self, true, ring->idx); + } + + if (ring->hw_head || + (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) { + break; + } else if (!(rxd_wb->status & 0x1U)) { + struct hw_atl_rxd_wb_s *rxd_wb1 = + (struct hw_atl_rxd_wb_s *) + (&ring->dx_ring[(1U) * + HW_ATL_A0_RXD_SIZE]); + + if 
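+/* this branch appears to work around an A0 write-back quirk: when
+ * descriptor 0 is still marked not-done but descriptor 1 has already
+ * completed, a done status (len 1514, status 3) is synthesized for
+ * descriptor 0 so the ring can make progress */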
((rxd_wb1->status & 0x1U)) { + rxd_wb->pkt_len = 1514U; + rxd_wb->status = 3U; + } else { + break; + } + } + } + + buff = &ring->buff_ring[ring->hw_head]; + + if (0x3U != (rxd_wb->status & 0x3U)) + rxd_wb->status |= 4; + + is_err = (0x0000001CU & rxd_wb->status); + is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); + pkt_type = 0xFFU & (rxd_wb->type >> 4); + + if (is_rx_check_sum_enabled) { + if (0x0U == (pkt_type & 0x3U)) + buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1; + + if (0x4U == (pkt_type & 0x1CU)) + buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1; + else if (0x0U == (pkt_type & 0x1CU)) + buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1; + } + + is_err &= ~0x18U; + is_err &= ~0x04U; + + dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); + + if (is_err || rxd_wb->type & 0x1000U) { + /* status error or DMA error */ + buff->is_error = 1U; + } else { + if (self->aq_nic_cfg->is_rss) { + /* last 4 byte */ + u16 rss_type = rxd_wb->type & 0xFU; + + if (rss_type && rss_type < 0x8U) { + buff->is_hash_l4 = (rss_type == 0x4 || + rss_type == 0x5); + buff->rss_hash = rxd_wb->rss_hash; + } + } + + if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) { + buff->len = (rxd_wb->pkt_len & + (AQ_CFG_RX_FRAME_MAX - 1U)); + buff->len = buff->len ? + buff->len : AQ_CFG_RX_FRAME_MAX; + buff->next = 0U; + buff->is_eop = 1U; + } else { + /* jumbo */ + buff->next = aq_ring_next_dx(ring, + ring->hw_head); + ++ring->stats.rx.jumbo_packets; + } + } + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_setlsw_set(self, LODWORD(mask) | + (1U << HW_ATL_A0_ERR_INT)); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_clearlsw_set(self, LODWORD(mask)); + itr_irq_status_clearlsw_set(self, LODWORD(mask)); + + if ((1U << 16) & reg_gen_irq_status_get(self)) + + atomic_inc(&PHAL_ATLANTIC_A0->dpc); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask) +{ + *mask = itr_irq_statuslsw_get(self); + return aq_hw_err_from_flags(self); +} + +#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U) + +static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self, + unsigned int packet_filter) +{ + unsigned int i = 0U; + + rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); + rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0); + rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); + + self->aq_nic_cfg->is_mc_list_enabled = + IS_FILTER_ENABLED(IFF_MULTICAST); + + for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i) + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled && + (i <= self->aq_nic_cfg->mc_list_count)) ? 
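+/* multicast addresses live in the unicast filter slots
+ * HW_ATL_A0_MAC_MIN..HW_ATL_A0_MAC_MAX (programmed by
+ * hw_atl_a0_hw_multicast_list_set() below), so enable exactly the
+ * slots backing the current list */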
+ 1U : 0U, i); + + return aq_hw_err_from_flags(self); +} + +#undef IS_FILTER_ENABLED + +static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, + u8 ar_mac + [AQ_CFG_MULTICAST_ADDRESS_MAX] + [ETH_ALEN], + u32 count) +{ + int err = 0; + + if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) { + err = EBADRQC; + goto err_exit; + } + for (self->aq_nic_cfg->mc_list_count = 0U; + self->aq_nic_cfg->mc_list_count < count; + ++self->aq_nic_cfg->mc_list_count) { + u32 i = self->aq_nic_cfg->mc_list_count; + u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); + u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | + (ar_mac[i][4] << 8) | ar_mac[i][5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i); + + rpfl2unicast_dest_addresslsw_set(self, + l, HW_ATL_A0_MAC_MIN + i); + + rpfl2unicast_dest_addressmsw_set(self, + h, HW_ATL_A0_MAC_MIN + i); + + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled), + HW_ATL_A0_MAC_MIN + i); + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, + bool itr_enabled) +{ + unsigned int i = 0U; + + if (itr_enabled && self->aq_nic_cfg->itr) { + if (self->aq_nic_cfg->itr != 0xFFFFU) { + u32 itr_ = (self->aq_nic_cfg->itr >> 1); + + itr_ = min(AQ_CFG_IRQ_MASK, itr_); + + PHAL_ATLANTIC_A0->itr_rx = 0x80000000U | + (itr_ << 0x10); + } else { + u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U); + + if (n < self->aq_link_status.mbps) { + PHAL_ATLANTIC_A0->itr_rx = 0U; + } else { + static unsigned int hw_timers_tbl_[] = { + 0x01CU, /* 10Gbit */ + 0x039U, /* 5Gbit */ + 0x039U, /* 5Gbit 5GS */ + 0x073U, /* 2.5Gbit */ + 0x120U, /* 1Gbit */ + 0x1FFU, /* 100Mbit */ + }; + + unsigned int speed_index = + hw_atl_utils_mbps_2_speed_index( + self->aq_link_status.mbps); + + PHAL_ATLANTIC_A0->itr_rx = + 0x80000000U | + (hw_timers_tbl_[speed_index] << 0x10U); + } + + aq_hw_write_reg(self, 0x00002A00U, 0x40000000U); + aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U); + } + } else { + PHAL_ATLANTIC_A0->itr_rx = 0U; + } + + for (i = HW_ATL_A0_RINGS_MAX; i--;) + reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_stop(struct aq_hw_s *self) +{ + hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed) +{ + int err = 0; + + err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static struct aq_hw_ops hw_atl_ops_ = { + .create = hw_atl_a0_create, + .destroy = hw_atl_a0_destroy, + .get_hw_caps = hw_atl_a0_get_hw_caps, + + .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent, + .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set, + .hw_get_link_status = hw_atl_utils_mpi_get_link_status, + .hw_set_link_speed = hw_atl_a0_hw_set_speed, + .hw_init = hw_atl_a0_hw_init, + .hw_deinit = hw_atl_utils_hw_deinit, + .hw_set_power = hw_atl_utils_hw_set_power, + .hw_reset = hw_atl_a0_hw_reset, + .hw_start = hw_atl_a0_hw_start, + .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start, + .hw_ring_tx_stop = hw_atl_a0_hw_ring_tx_stop, + 
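+/* aq_hw_ops is the hardware-abstraction vtable: the generic aq_nic and
+ * aq_vec layers only call through it, and the probe path picks a table
+ * via the *_get_ops_by_id() helpers, roughly:
+ *
+ *	struct aq_hw_ops *ops = hw_atl_a0_get_ops_by_id(pdev);
+ *	struct aq_hw_s *hw = ops ? ops->create(aq_pci_func, port, ops)
+ *				 : NULL;
+ */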
.hw_ring_rx_start = hw_atl_a0_hw_ring_rx_start, + .hw_ring_rx_stop = hw_atl_a0_hw_ring_rx_stop, + .hw_stop = hw_atl_a0_hw_stop, + + .hw_ring_tx_xmit = hw_atl_a0_hw_ring_tx_xmit, + .hw_ring_tx_head_update = hw_atl_a0_hw_ring_tx_head_update, + + .hw_ring_rx_receive = hw_atl_a0_hw_ring_rx_receive, + .hw_ring_rx_fill = hw_atl_a0_hw_ring_rx_fill, + + .hw_irq_enable = hw_atl_a0_hw_irq_enable, + .hw_irq_disable = hw_atl_a0_hw_irq_disable, + .hw_irq_read = hw_atl_a0_hw_irq_read, + + .hw_ring_rx_init = hw_atl_a0_hw_ring_rx_init, + .hw_ring_tx_init = hw_atl_a0_hw_ring_tx_init, + .hw_packet_filter_set = hw_atl_a0_hw_packet_filter_set, + .hw_multicast_list_set = hw_atl_a0_hw_multicast_list_set, + .hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set, + .hw_rss_set = hw_atl_a0_hw_rss_set, + .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set, + .hw_get_regs = hw_atl_utils_hw_get_regs, + .hw_get_hw_stats = hw_atl_utils_get_hw_stats, + .hw_get_fw_version = hw_atl_utils_get_fw_version, +}; + +struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev) +{ + bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA); + bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) || + (pdev->device == HW_ATL_DEVICE_ID_D100) || + (pdev->device == HW_ATL_DEVICE_ID_D107) || + (pdev->device == HW_ATL_DEVICE_ID_D108) || + (pdev->device == HW_ATL_DEVICE_ID_D109)); + + bool is_rev_ok = (pdev->revision == 1U); + + return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h new file mode 100644 index 000000000000..6e1d527954c9 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h @@ -0,0 +1,34 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_a0.h: Declaration of abstract interface for Atlantic hardware + * specific functions. + */ + +#ifndef HW_ATL_A0_H +#define HW_ATL_A0_H + +#include "../aq_common.h" + +#ifndef PCI_VENDOR_ID_AQUANTIA + +#define PCI_VENDOR_ID_AQUANTIA 0x1D6A +#define HW_ATL_DEVICE_ID_0001 0x0001 +#define HW_ATL_DEVICE_ID_D100 0xD100 +#define HW_ATL_DEVICE_ID_D107 0xD107 +#define HW_ATL_DEVICE_ID_D108 0xD108 +#define HW_ATL_DEVICE_ID_D109 0xD109 + +#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter" + +#endif + +struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev); + +#endif /* HW_ATL_A0_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h new file mode 100644 index 000000000000..1093ea18823a --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h @@ -0,0 +1,155 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_a0_internal.h: Definition of Atlantic A0 chip specific + * constants. 
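+ *
+ * The descriptor layouts below are __packed and 16 bytes each, matching
+ * HW_ATL_A0_TXD_SIZE / HW_ATL_A0_RXD_SIZE (u64 + 2 x u32 for Tx,
+ * 2 x u64 for Rx).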
+ */ + +#ifndef HW_ATL_A0_INTERNAL_H +#define HW_ATL_A0_INTERNAL_H + +#include "../aq_common.h" + +#define HW_ATL_A0_MTU_JUMBO 9014U + +#define HW_ATL_A0_TX_RINGS 4U +#define HW_ATL_A0_RX_RINGS 4U + +#define HW_ATL_A0_RINGS_MAX 32U +#define HW_ATL_A0_TXD_SIZE 16U +#define HW_ATL_A0_RXD_SIZE 16U + +#define HW_ATL_A0_MAC 0U +#define HW_ATL_A0_MAC_MIN 1U +#define HW_ATL_A0_MAC_MAX 33U + +/* interrupts */ +#define HW_ATL_A0_ERR_INT 8U +#define HW_ATL_A0_INT_MASK 0xFFFFFFFFU + +#define HW_ATL_A0_TXD_CTL2_LEN 0xFFFFC000U +#define HW_ATL_A0_TXD_CTL2_CTX_EN 0x00002000U +#define HW_ATL_A0_TXD_CTL2_CTX_IDX 0x00001000U + +#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD 0x00000001U +#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC 0x00000002U +#define HW_ATL_A0_TXD_CTL_BLEN 0x000FFFF0U +#define HW_ATL_A0_TXD_CTL_DD 0x00100000U +#define HW_ATL_A0_TXD_CTL_EOP 0x00200000U + +#define HW_ATL_A0_TXD_CTL_CMD_X 0x3FC00000U + +#define HW_ATL_A0_TXD_CTL_CMD_VLAN BIT(22) +#define HW_ATL_A0_TXD_CTL_CMD_FCS BIT(23) +#define HW_ATL_A0_TXD_CTL_CMD_IPCSO BIT(24) +#define HW_ATL_A0_TXD_CTL_CMD_TUCSO BIT(25) +#define HW_ATL_A0_TXD_CTL_CMD_LSO BIT(26) +#define HW_ATL_A0_TXD_CTL_CMD_WB BIT(27) +#define HW_ATL_A0_TXD_CTL_CMD_VXLAN BIT(28) + +#define HW_ATL_A0_TXD_CTL_CMD_IPV6 BIT(21) +#define HW_ATL_A0_TXD_CTL_CMD_TCP BIT(22) + +#define HW_ATL_A0_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_A0_MPI_STATE_ADR 0x036CU + +#define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_A0_MPI_SPEED_SHIFT 16U + +#define HW_ATL_A0_RATE_10G BIT(0) +#define HW_ATL_A0_RATE_5G BIT(1) +#define HW_ATL_A0_RATE_2G5 BIT(3) +#define HW_ATL_A0_RATE_1G BIT(4) +#define HW_ATL_A0_RATE_100M BIT(5) + +#define HW_ATL_A0_TXBUF_MAX 160U +#define HW_ATL_A0_RXBUF_MAX 320U + +#define HW_ATL_A0_RSS_REDIRECTION_MAX 64U +#define HW_ATL_A0_RSS_REDIRECTION_BITS 3U + +#define HW_ATL_A0_TC_MAX 1U +#define HW_ATL_A0_RSS_MAX 8U + +#define HW_ATL_A0_FW_SEMA_RAM 0x2U + +#define HW_ATL_A0_RXD_DD 0x1U +#define HW_ATL_A0_RXD_NCEA0 0x1U + +#define HW_ATL_A0_RXD_WB_STAT2_EOP 0x0002U + +#define HW_ATL_A0_UCP_0X370_REG 0x370U + +#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U + +/* Hardware tx descriptor */ +struct __packed hw_atl_txd_s { + u64 buf_addr; + u32 ctl; + u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */ +}; + +/* Hardware tx context descriptor */ +struct __packed hw_atl_txc_s { + u32 rsvd; + u32 len; + u32 ctl; + u32 len2; +}; + +/* Hardware rx descriptor */ +struct __packed hw_atl_rxd_s { + u64 buf_addr; + u64 hdr_addr; +}; + +/* Hardware rx descriptor writeback */ +struct __packed hw_atl_rxd_wb_s { + u32 type; + u32 rss_hash; + u16 status; + u16 pkt_len; + u16 next_desc_ptr; + u16 vlan; +}; + +/* HW layer capabilities */ +static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = { + .ports = 1U, + .is_64_dma = true, + .msix_irqs = 4U, + .irq_mask = ~0U, + .vecs = HW_ATL_A0_RSS_MAX, + .tcs = HW_ATL_A0_TC_MAX, + .rxd_alignment = 1U, + .rxd_size = HW_ATL_A0_RXD_SIZE, + .rxds = 248U, + .txd_alignment = 1U, + .txd_size = HW_ATL_A0_TXD_SIZE, + .txds = 8U * 1024U, + .txhwb_alignment = 4096U, + .tx_rings = HW_ATL_A0_TX_RINGS, + .rx_rings = HW_ATL_A0_RX_RINGS, + .hw_features = NETIF_F_HW_CSUM | + NETIF_F_RXHASH | + NETIF_F_SG | + NETIF_F_TSO, + .hw_priv_flags = IFF_UNICAST_FLT, + .link_speed_msk = (HW_ATL_A0_RATE_10G | + HW_ATL_A0_RATE_5G | + HW_ATL_A0_RATE_2G5 | + HW_ATL_A0_RATE_1G | + HW_ATL_A0_RATE_100M), + .flow_control = true, + .mtu = HW_ATL_A0_MTU_JUMBO, + .mac_regs_count = 88, + .fw_ver_expected = HW_ATL_A0_FW_VER_EXPECTED, +}; + +#endif /* HW_ATL_A0_INTERNAL_H */ diff 
--git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c new file mode 100644 index 000000000000..e7e694f693bd --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -0,0 +1,958 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */ + +#include "../aq_hw.h" +#include "../aq_hw_utils.h" +#include "../aq_ring.h" +#include "hw_atl_b0.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" +#include "hw_atl_b0_internal.h" + +static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); + return 0; +} + +static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func, + unsigned int port, + struct aq_hw_ops *ops) +{ + struct hw_atl_s *self = NULL; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) + goto err_exit; + + self->base.aq_pci_func = aq_pci_func; + + self->base.not_ff_addr = 0x10U; + +err_exit: + return (struct aq_hw_s *)self; +} + +static void hw_atl_b0_destroy(struct aq_hw_s *self) +{ + kfree(self); +} + +static int hw_atl_b0_hw_reset(struct aq_hw_s *self) +{ + int err = 0; + + glb_glb_reg_res_dis_set(self, 1U); + pci_pci_reg_res_dis_set(self, 0U); + rx_rx_reg_res_dis_set(self, 0U); + tx_tx_reg_res_dis_set(self, 0U); + + HW_ATL_FLUSH(); + glb_soft_res_set(self, 1); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + itr_irq_reg_res_dis_set(self, 0U); + itr_res_irq_set(self, 1U); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) +{ + u32 tc = 0U; + u32 buff_size = 0U; + unsigned int i_priority = 0U; + bool is_rx_flow_control = false; + + /* TPS Descriptor rate init */ + tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); + tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); + + /* TPS VM init */ + tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); + + /* TPS TC credits init */ + tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + tps_tx_pkt_shed_data_arb_mode_set(self, 0U); + + tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); + tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); + tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); + tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); + + /* Tx buf size */ + buff_size = HW_ATL_B0_TXBUF_MAX; + + tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); + tpb_tx_buff_hi_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 66U) / + 100U, tc); + tpb_tx_buff_lo_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 50U) / + 100U, tc); + + /* QoS Rx buf size per TC */ + tc = 0; + is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control); + buff_size = HW_ATL_B0_RXBUF_MAX; + + rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); + rpb_rx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 66U) / + 100U, tc); + 
rpb_rx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 50U) / + 100U, tc); + rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc); + + /* QoS 802.1p priority -> TC mapping */ + for (i_priority = 8U; i_priority--;) + rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + struct aq_nic_cfg_s *cfg = NULL; + int err = 0; + unsigned int i = 0U; + unsigned int addr = 0U; + + cfg = self->aq_nic_cfg; + + for (i = 10, addr = 0U; i--; ++addr) { + u32 key_data = cfg->is_rss ? + __swab32(rss_params->hash_secret_key[i]) : 0U; + rpf_rss_key_wr_data_set(self, key_data); + rpf_rss_key_addr_set(self, addr); + rpf_rss_key_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + u8 *indirection_table = rss_params->indirection_table; + u32 i = 0U; + u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); + int err = 0; + u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX * + HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)]; + + memset(bitary, 0, sizeof(bitary)); + + for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) { + (*(u32 *)(bitary + ((i * 3U) / 16U))) |= + ((indirection_table[i] % num_rss_queues) << + ((i * 3U) & 0xFU)); + } + + for (i = AQ_DIMOF(bitary); i--;) { + rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); + rpf_rss_redir_tbl_addr_set(self, i); + rpf_rss_redir_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + unsigned int i; + + /* TX checksums offloads */ + tpo_ipv4header_crc_offload_en_set(self, 1); + tpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* RX checksums offloads */ + rpo_ipv4header_crc_offload_en_set(self, 1); + rpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* LSO offloads */ + tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); + if (err < 0) + goto err_exit; + + /* LRO offloads */ + { + unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U : + ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U : + ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0U)); + + for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++) + rpo_lro_max_num_of_descriptors_set(self, val, i); + + rpo_lro_time_base_divider_set(self, 0x61AU); + rpo_lro_inactive_interval_set(self, 0); + rpo_lro_max_coalescing_interval_set(self, 2); + + rpo_lro_qsessions_lim_set(self, 1U); + + rpo_lro_total_desc_lim_set(self, 2U); + + rpo_lro_patch_optimization_en_set(self, 0U); + + rpo_lro_min_pay_of_first_pkt_set(self, 10U); + + rpo_lro_pkt_lim_set(self, 1U); + + rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); + } + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) +{ + thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); + + /* Tx interrupts */ + tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
+ 0x00010000U : 0x00000000U); + tdm_tx_dca_en_set(self, 0U); + tdm_tx_dca_mode_set(self, 0U); + + tpb_tx_path_scp_ins_en_set(self, 1U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self) +{ + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + int i; + + /* Rx TC/RSS number config */ + rpb_rpf_rx_traf_class_mode_set(self, 1U); + + /* Rx flow control */ + rpb_rx_flow_ctl_mode_set(self, 1U); + + /* RSS Ring selection */ + reg_rx_flr_rss_control1set(self, cfg->is_rss ? + 0xB3333333U : 0x00000000U); + + /* Multicast filters */ + for (i = HW_ATL_B0_MAC_MAX; i--;) { + rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i); + rpfl2unicast_flr_act_set(self, 1U, i); + } + + reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); + reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); + + /* Vlan filters */ + rpf_vlan_outer_etht_set(self, 0x88A8U); + rpf_vlan_inner_etht_set(self, 0x8100U); + + if (cfg->vlan_id) { + rpf_vlan_flr_act_set(self, 1U, 0U); + rpf_vlan_id_flr_set(self, 0U, 0U); + rpf_vlan_flr_en_set(self, 0U, 0U); + + rpf_vlan_accept_untagged_packets_set(self, 1U); + rpf_vlan_untagged_act_set(self, 1U); + + rpf_vlan_flr_act_set(self, 1U, 1U); + rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U); + rpf_vlan_flr_en_set(self, 1U, 1U); + } else { + rpf_vlan_prom_mode_en_set(self, 1); + } + + /* Rx Interrupts */ + rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + aq_hw_write_reg(self, 0x00005040U, + IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U); + + rpfl2broadcast_flr_act_set(self, 1U); + rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); + + rdm_rx_dca_en_set(self, 0U); + rdm_rx_dca_mode_set(self, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) +{ + int err = 0; + unsigned int h = 0U; + unsigned int l = 0U; + + if (!mac_addr) { + err = -EINVAL; + goto err_exit; + } + h = (mac_addr[0] << 8) | (mac_addr[1]); + l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC); + rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC); + rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC); + rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_init(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg, + u8 *mac_addr) +{ + static u32 aq_hw_atl_igcr_table_[4][2] = { + { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */ + { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */ + { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */ + { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */ + }; + + int err = 0; + + self->aq_nic_cfg = aq_nic_cfg; + + hw_atl_utils_hw_chip_features_init(self, + &PHAL_ATLANTIC_B0->chip_features); + + hw_atl_b0_hw_init_tx_path(self); + hw_atl_b0_hw_init_rx_path(self); + + hw_atl_b0_hw_mac_addr_set(self, mac_addr); + + hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk); + + hw_atl_b0_hw_qos_set(self); + hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); + hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); + + err = aq_hw_err_from_flags(self); + if (err < 0) + goto err_exit; + + /* Interrupts */ + reg_irq_glb_ctl_set(self, + aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] + [(aq_nic_cfg->vecs > 1U) ? 
+ 1 : 0]); + + itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); + + /* Interrupts */ + reg_gen_irq_map_set(self, + ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) | + ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U); + + hw_atl_b0_hw_offload_set(self, aq_nic_cfg); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_start(struct aq_hw_s *self) +{ + tpb_tx_buff_en_set(self, 1); + rpb_rx_buff_en_set(self, 1); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); + return 0; +} + +static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int frags) +{ + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_txd_s *txd = NULL; + unsigned int buff_pa_len = 0U; + unsigned int pkt_len = 0U; + unsigned int frag_count = 0U; + bool is_gso = false; + + buff = &ring->buff_ring[ring->sw_tail]; + pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt; + + for (frag_count = 0; frag_count < frags; frag_count++) { + txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail * + HW_ATL_B0_TXD_SIZE]; + txd->ctl = 0; + txd->ctl2 = 0; + txd->buf_addr = 0; + + buff = &ring->buff_ring[ring->sw_tail]; + + if (buff->is_txc) { + txd->ctl |= (buff->len_l3 << 31) | + (buff->len_l2 << 24) | + HW_ATL_B0_TXD_CTL_CMD_TCP | + HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC; + txd->ctl2 |= (buff->mss << 16) | + (buff->len_l4 << 8) | + (buff->len_l3 >> 1); + + pkt_len -= (buff->len_l4 + + buff->len_l3 + + buff->len_l2); + is_gso = true; + } else { + buff_pa_len = buff->len; + + txd->buf_addr = buff->pa; + txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN & + ((u32)buff_pa_len << 4)); + txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD; + /* PAY_LEN */ + txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14); + + if (is_gso) { + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO; + txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN; + } + + /* Tx checksum offloads */ + if (buff->is_ip_cso) + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO; + + if (buff->is_udp_cso || buff->is_tcp_cso) + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO; + + if (unlikely(buff->is_eop)) { + txd->ctl |= HW_ATL_B0_TXD_CTL_EOP; + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB; + } + } + + ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail); + } + + hw_atl_b0_hw_tx_ring_tail_update(self, ring); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + rdm_rx_desc_en_set(self, false, aq_ring->idx); + + rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + + reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, + aq_ring->idx); + + reg_rx_dma_desc_base_addressmswset(self, + dma_desc_addr_msw, aq_ring->idx); + + rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + rdm_rx_desc_data_buff_size_set(self, + AQ_CFG_RX_FRAME_MAX / 1024U, + aq_ring->idx); + + rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); + 
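/* Zero header buffer size plus the split disable below: header + * splitting is off, so each frame lands entirely in the data buffer. + */ +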
rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx); + + /* Rx ring set mode */ + + /* Mapping interrupt vector */ + itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_rx_set(self, true, aq_ring->idx); + + rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, + aq_ring->idx); + + reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, + aq_ring->idx); + + tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring); + + /* Set Tx threshold */ + tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx); + + /* Mapping interrupt vector */ + itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_tx_set(self, true, aq_ring->idx); + + tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int sw_tail_old) +{ + for (; sw_tail_old != ring->sw_tail; + sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) { + struct hw_atl_rxd_s *rxd = + (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old * + HW_ATL_B0_RXD_SIZE]; + + struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old]; + + rxd->buf_addr = buff->pa; + rxd->hdr_addr = 0U; + } + + reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + int err = 0; + unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx); + + if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + err = -ENXIO; + goto err_exit; + } + ring->hw_head = hw_head_; + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + struct device *ndev = aq_nic_get_dev(ring->aq_nic); + + for (; ring->hw_head != ring->sw_tail; + ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) { + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) + &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE]; + + unsigned int is_err = 1U; + unsigned int is_rx_check_sum_enabled = 0U; + unsigned int pkt_type = 0U; + + if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */ + break; + } + + buff = &ring->buff_ring[ring->hw_head]; + + is_err = (0x0000003CU & rxd_wb->status); + + is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); + is_err &= ~0x20U; /* exclude validity bit */ + + pkt_type = 0xFFU & (rxd_wb->type >> 4); + + if (is_rx_check_sum_enabled) { + if (0x0U == (pkt_type & 0x3U)) + buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U; + + if (0x4U == (pkt_type & 0x1CU)) + buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; + else if (0x0U == (pkt_type & 0x1CU)) + buff->is_tcp_cso = buff->is_cso_err ? 
0U : 1U; + } + + is_err &= ~0x18U; + + dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); + + if (is_err || rxd_wb->type & 0x1000U) { + /* status error or DMA error */ + buff->is_error = 1U; + } else { + if (self->aq_nic_cfg->is_rss) { + /* last 4 byte */ + u16 rss_type = rxd_wb->type & 0xFU; + + if (rss_type && rss_type < 0x8U) { + buff->is_hash_l4 = (rss_type == 0x4 || + rss_type == 0x5); + buff->rss_hash = rxd_wb->rss_hash; + } + } + + if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { + buff->len = (rxd_wb->pkt_len & + (AQ_CFG_RX_FRAME_MAX - 1U)); + buff->len = buff->len ? + buff->len : AQ_CFG_RX_FRAME_MAX; + buff->next = 0U; + buff->is_eop = 1U; + } else { + if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT & + rxd_wb->status) { + /* LRO */ + buff->next = rxd_wb->next_desc_ptr; + ++ring->stats.rx.lro_packets; + } else { + /* jumbo */ + buff->next = + aq_ring_next_dx(ring, + ring->hw_head); + ++ring->stats.rx.jumbo_packets; + } + } + } + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_setlsw_set(self, LODWORD(mask)); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_clearlsw_set(self, LODWORD(mask)); + itr_irq_status_clearlsw_set(self, LODWORD(mask)); + + atomic_inc(&PHAL_ATLANTIC_B0->dpc); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask) +{ + *mask = itr_irq_statuslsw_get(self); + return aq_hw_err_from_flags(self); +} + +#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U) + +static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, + unsigned int packet_filter) +{ + unsigned int i = 0U; + + rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); + rpfl2multicast_flr_en_set(self, + IS_FILTER_ENABLED(IFF_MULTICAST), 0); + + rpfl2_accept_all_mc_packets_set(self, + IS_FILTER_ENABLED(IFF_ALLMULTI)); + + rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); + + self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST); + + for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i) + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled && + (i <= self->aq_nic_cfg->mc_list_count)) ? 
+ 1U : 0U, i); + + return aq_hw_err_from_flags(self); +} + +#undef IS_FILTER_ENABLED + +static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, + u8 ar_mac + [AQ_CFG_MULTICAST_ADDRESS_MAX] + [ETH_ALEN], + u32 count) +{ + int err = 0; + + if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) { + err = -EBADRQC; + goto err_exit; + } + for (self->aq_nic_cfg->mc_list_count = 0U; + self->aq_nic_cfg->mc_list_count < count; + ++self->aq_nic_cfg->mc_list_count) { + u32 i = self->aq_nic_cfg->mc_list_count; + u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); + u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | + (ar_mac[i][4] << 8) | ar_mac[i][5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i); + + rpfl2unicast_dest_addresslsw_set(self, + l, HW_ATL_B0_MAC_MIN + i); + + rpfl2unicast_dest_addressmsw_set(self, + h, HW_ATL_B0_MAC_MIN + i); + + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled), + HW_ATL_B0_MAC_MIN + i); + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self, + bool itr_enabled) +{ + unsigned int i = 0U; + + if (itr_enabled && self->aq_nic_cfg->itr) { + tdm_tx_desc_wr_wb_irq_en_set(self, 0U); + tdm_tdm_intr_moder_en_set(self, 1U); + rdm_rx_desc_wr_wb_irq_en_set(self, 0U); + rdm_rdm_intr_moder_en_set(self, 1U); + + PHAL_ATLANTIC_B0->itr_tx = 2U; + PHAL_ATLANTIC_B0->itr_rx = 2U; + + if (self->aq_nic_cfg->itr != 0xFFFFU) { + unsigned int max_timer = self->aq_nic_cfg->itr / 2U; + unsigned int min_timer = self->aq_nic_cfg->itr / 32U; + + max_timer = min(0x1FFU, max_timer); + min_timer = min(0xFFU, min_timer); + + PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U; + PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U; + PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U; + PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U; + } else { + static unsigned int hw_atl_b0_timers_table_tx_[][2] = { + {0xffU, 0xffU}, /* 10Gbit */ + {0xffU, 0x1ffU}, /* 5Gbit */ + {0xffU, 0x1ffU}, /* 5Gbit 5GS */ + {0xffU, 0x1ffU}, /* 2.5Gbit */ + {0xffU, 0x1ffU}, /* 1Gbit */ + {0xffU, 0x1ffU}, /* 100Mbit */ + }; + + static unsigned int hw_atl_b0_timers_table_rx_[][2] = { + {0x6U, 0x38U},/* 10Gbit */ + {0xCU, 0x70U},/* 5Gbit */ + {0xCU, 0x70U},/* 5Gbit 5GS */ + {0x18U, 0xE0U},/* 2.5Gbit */ + {0x30U, 0x80U},/* 1Gbit */ + {0x4U, 0x50U},/* 100Mbit */ + }; + + unsigned int speed_index = + hw_atl_utils_mbps_2_speed_index( + self->aq_link_status.mbps); + + PHAL_ATLANTIC_B0->itr_tx |= + hw_atl_b0_timers_table_tx_[speed_index] + [0] << 0x8U; /* set min timer value */ + PHAL_ATLANTIC_B0->itr_tx |= + hw_atl_b0_timers_table_tx_[speed_index] + [1] << 0x10U; /* set max timer value */ + + PHAL_ATLANTIC_B0->itr_rx |= + hw_atl_b0_timers_table_rx_[speed_index] + [0] << 0x8U; /* set min timer value */ + PHAL_ATLANTIC_B0->itr_rx |= + hw_atl_b0_timers_table_rx_[speed_index] + [1] << 0x10U; /* set max timer value */ + } + } else { + tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + tdm_tdm_intr_moder_en_set(self, 0U); + rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + rdm_rdm_intr_moder_en_set(self, 0U); + PHAL_ATLANTIC_B0->itr_tx = 0U; + PHAL_ATLANTIC_B0->itr_rx = 0U; + } + + for (i = HW_ATL_B0_RINGS_MAX; i--;) { + reg_tx_intr_moder_ctrl_set(self, + PHAL_ATLANTIC_B0->itr_tx, i); + reg_rx_intr_moder_ctrl_set(self, + PHAL_ATLANTIC_B0->itr_rx, i); + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_stop(struct aq_hw_s *self) +{ + hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK); + return aq_hw_err_from_flags(self); +} + +static int 
hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed) +{ + int err = 0; + + err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static struct aq_hw_ops hw_atl_ops_ = { + .create = hw_atl_b0_create, + .destroy = hw_atl_b0_destroy, + .get_hw_caps = hw_atl_b0_get_hw_caps, + + .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent, + .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, + .hw_get_link_status = hw_atl_utils_mpi_get_link_status, + .hw_set_link_speed = hw_atl_b0_hw_set_speed, + .hw_init = hw_atl_b0_hw_init, + .hw_deinit = hw_atl_utils_hw_deinit, + .hw_set_power = hw_atl_utils_hw_set_power, + .hw_reset = hw_atl_b0_hw_reset, + .hw_start = hw_atl_b0_hw_start, + .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start, + .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop, + .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start, + .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop, + .hw_stop = hw_atl_b0_hw_stop, + + .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit, + .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update, + + .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive, + .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill, + + .hw_irq_enable = hw_atl_b0_hw_irq_enable, + .hw_irq_disable = hw_atl_b0_hw_irq_disable, + .hw_irq_read = hw_atl_b0_hw_irq_read, + + .hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init, + .hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init, + .hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set, + .hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set, + .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set, + .hw_rss_set = hw_atl_b0_hw_rss_set, + .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, + .hw_get_regs = hw_atl_utils_hw_get_regs, + .hw_get_hw_stats = hw_atl_utils_get_hw_stats, + .hw_get_fw_version = hw_atl_utils_get_fw_version, +}; + +struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev) +{ + bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA); + bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) || + (pdev->device == HW_ATL_DEVICE_ID_D100) || + (pdev->device == HW_ATL_DEVICE_ID_D107) || + (pdev->device == HW_ATL_DEVICE_ID_D108) || + (pdev->device == HW_ATL_DEVICE_ID_D109)); + + bool is_rev_ok = (pdev->revision == 2U); + + return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h new file mode 100644 index 000000000000..a1e1bce6c1f3 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -0,0 +1,34 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware + * specific functions. 
+ */ + +#ifndef HW_ATL_B0_H +#define HW_ATL_B0_H + +#include "../aq_common.h" + +#ifndef PCI_VENDOR_ID_AQUANTIA + +#define PCI_VENDOR_ID_AQUANTIA 0x1D6A +#define HW_ATL_DEVICE_ID_0001 0x0001 +#define HW_ATL_DEVICE_ID_D100 0xD100 +#define HW_ATL_DEVICE_ID_D107 0xD107 +#define HW_ATL_DEVICE_ID_D108 0xD108 +#define HW_ATL_DEVICE_ID_D109 0xD109 + +#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter" + +#endif + +struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev); + +#endif /* HW_ATL_B0_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h new file mode 100644 index 000000000000..8bdee3ddd5a0 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -0,0 +1,207 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific + * constants. + */ + +#ifndef HW_ATL_B0_INTERNAL_H +#define HW_ATL_B0_INTERNAL_H + +#include "../aq_common.h" + +#define HW_ATL_B0_MTU_JUMBO (16000U) +#define HW_ATL_B0_MTU 1514U + +#define HW_ATL_B0_TX_RINGS 4U +#define HW_ATL_B0_RX_RINGS 4U + +#define HW_ATL_B0_RINGS_MAX 32U +#define HW_ATL_B0_TXD_SIZE (16U) +#define HW_ATL_B0_RXD_SIZE (16U) + +#define HW_ATL_B0_MAC 0U +#define HW_ATL_B0_MAC_MIN 1U +#define HW_ATL_B0_MAC_MAX 33U + +/* UCAST/MCAST filters */ +#define HW_ATL_B0_UCAST_FILTERS_MAX 38 +#define HW_ATL_B0_MCAST_FILTERS_MAX 8 + +/* interrupts */ +#define HW_ATL_B0_ERR_INT 8U +#define HW_ATL_B0_INT_MASK (0xFFFFFFFFU) + +#define HW_ATL_B0_TXD_CTL2_LEN (0xFFFFC000) +#define HW_ATL_B0_TXD_CTL2_CTX_EN (0x00002000) +#define HW_ATL_B0_TXD_CTL2_CTX_IDX (0x00001000) + +#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD (0x00000001) +#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC (0x00000002) +#define HW_ATL_B0_TXD_CTL_BLEN (0x000FFFF0) +#define HW_ATL_B0_TXD_CTL_DD (0x00100000) +#define HW_ATL_B0_TXD_CTL_EOP (0x00200000) + +#define HW_ATL_B0_TXD_CTL_CMD_X (0x3FC00000) + +#define HW_ATL_B0_TXD_CTL_CMD_VLAN BIT(22) +#define HW_ATL_B0_TXD_CTL_CMD_FCS BIT(23) +#define HW_ATL_B0_TXD_CTL_CMD_IPCSO BIT(24) +#define HW_ATL_B0_TXD_CTL_CMD_TUCSO BIT(25) +#define HW_ATL_B0_TXD_CTL_CMD_LSO BIT(26) +#define HW_ATL_B0_TXD_CTL_CMD_WB BIT(27) +#define HW_ATL_B0_TXD_CTL_CMD_VXLAN BIT(28) + +#define HW_ATL_B0_TXD_CTL_CMD_IPV6 BIT(21) +#define HW_ATL_B0_TXD_CTL_CMD_TCP BIT(22) + +#define HW_ATL_B0_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_B0_MPI_STATE_ADR 0x036CU + +#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_B0_MPI_SPEED_SHIFT 16U + +#define HW_ATL_B0_RATE_10G BIT(0) +#define HW_ATL_B0_RATE_5G BIT(1) +#define HW_ATL_B0_RATE_2G5 BIT(3) +#define HW_ATL_B0_RATE_1G BIT(4) +#define HW_ATL_B0_RATE_100M BIT(5) + +#define HW_ATL_B0_TXBUF_MAX 160U +#define HW_ATL_B0_RXBUF_MAX 320U + +#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U +#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U +#define HW_ATL_B0_RSS_HASHKEY_BITS 320U + +#define HW_ATL_B0_TCRSS_4_8 1 +#define HW_ATL_B0_TC_MAX 1U +#define HW_ATL_B0_RSS_MAX 8U + +#define HW_ATL_B0_LRO_RXD_MAX 2U +#define HW_ATL_B0_RS_SLIP_ENABLED 0U + +/* (256k -1(max pay_len) - 54(header)) */ +#define HAL_ATL_B0_LSO_MAX_SEGMENT_SIZE 262089U + +/* (256k -1(max pay_len) - 74(header)) */ +#define 
HAL_ATL_B0_LSO_IPV6_MAX_SEGMENT_SIZE 262069U + +#define HW_ATL_B0_CHIP_REVISION_B0 0xA0U +#define HW_ATL_B0_CHIP_REVISION_UNKNOWN 0xFFU + +#define HW_ATL_B0_FW_SEMA_RAM 0x2U + +#define HW_ATL_B0_TXC_LEN_TUNLEN (0x0000FF00) +#define HW_ATL_B0_TXC_LEN_OUTLEN (0xFFFF0000) + +#define HW_ATL_B0_TXC_CTL_DESC_TYPE (0x00000007) +#define HW_ATL_B0_TXC_CTL_CTX_ID (0x00000008) +#define HW_ATL_B0_TXC_CTL_VLAN (0x000FFFF0) +#define HW_ATL_B0_TXC_CTL_CMD (0x00F00000) +#define HW_ATL_B0_TXC_CTL_L2LEN (0x7F000000) + +#define HW_ATL_B0_TXC_CTL_L3LEN (0x80000000) /* L3LEN lsb */ +#define HW_ATL_B0_TXC_LEN2_L3LEN (0x000000FF) /* L3LEN upper bits */ +#define HW_ATL_B0_TXC_LEN2_L4LEN (0x0000FF00) +#define HW_ATL_B0_TXC_LEN2_MSSLEN (0xFFFF0000) + +#define HW_ATL_B0_RXD_DD (0x1) +#define HW_ATL_B0_RXD_NCEA0 (0x1) + +#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE (0x0000000F) +#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE (0x00000FF0) +#define HW_ATL_B0_RXD_WB_STAT_RXCTRL (0x00180000) +#define HW_ATL_B0_RXD_WB_STAT_SPLHDR (0x00200000) +#define HW_ATL_B0_RXD_WB_STAT_HDRLEN (0xFFC00000) + +#define HW_ATL_B0_RXD_WB_STAT2_DD (0x0001) +#define HW_ATL_B0_RXD_WB_STAT2_EOP (0x0002) +#define HW_ATL_B0_RXD_WB_STAT2_RXSTAT (0x003C) +#define HW_ATL_B0_RXD_WB_STAT2_MACERR (0x0004) +#define HW_ATL_B0_RXD_WB_STAT2_IP4ERR (0x0008) +#define HW_ATL_B0_RXD_WB_STAT2_TCPUPDERR (0x0010) +#define HW_ATL_B0_RXD_WB_STAT2_RXESTAT (0x0FC0) +#define HW_ATL_B0_RXD_WB_STAT2_RSCCNT (0xF000) + +#define L2_FILTER_ACTION_DISCARD (0x0) +#define L2_FILTER_ACTION_HOST (0x1) + +#define HW_ATL_B0_UCP_0X370_REG (0x370) + +#define HW_ATL_B0_FLUSH() AQ_HW_READ_REG(self, 0x10) + +#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U + +/* Hardware tx descriptor */ +struct __packed hw_atl_txd_s { + u64 buf_addr; + u32 ctl; + u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */ +}; + +/* Hardware tx context descriptor */ +struct __packed hw_atl_txc_s { + u32 rsvd; + u32 len; + u32 ctl; + u32 len2; +}; + +/* Hardware rx descriptor */ +struct __packed hw_atl_rxd_s { + u64 buf_addr; + u64 hdr_addr; +}; + +/* Hardware rx descriptor writeback */ +struct __packed hw_atl_rxd_wb_s { + u32 type; + u32 rss_hash; + u16 status; + u16 pkt_len; + u16 next_desc_ptr; + u16 vlan; +}; + +/* HW layer capabilities */ +static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = { + .ports = 1U, + .is_64_dma = true, + .msix_irqs = 4U, + .irq_mask = ~0U, + .vecs = HW_ATL_B0_RSS_MAX, + .tcs = HW_ATL_B0_TC_MAX, + .rxd_alignment = 1U, + .rxd_size = HW_ATL_B0_RXD_SIZE, + .rxds = 8U * 1024U, + .txd_alignment = 1U, + .txd_size = HW_ATL_B0_TXD_SIZE, + .txds = 8U * 1024U, + .txhwb_alignment = 4096U, + .tx_rings = HW_ATL_B0_TX_RINGS, + .rx_rings = HW_ATL_B0_RX_RINGS, + .hw_features = NETIF_F_HW_CSUM | + NETIF_F_RXHASH | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_LRO, + .hw_priv_flags = IFF_UNICAST_FLT, + .link_speed_msk = (HW_ATL_B0_RATE_10G | + HW_ATL_B0_RATE_5G | + HW_ATL_B0_RATE_2G5 | + HW_ATL_B0_RATE_1G | + HW_ATL_B0_RATE_100M), + .flow_control = true, + .mtu = HW_ATL_B0_MTU_JUMBO, + .mac_regs_count = 88, + .fw_ver_expected = HW_ATL_B0_FW_VER_EXPECTED, +}; + +#endif /* HW_ATL_B0_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c new file mode 100644 index 000000000000..3de651afa8c7 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -0,0 +1,1394 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation.
All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_llh.c: Definitions of bitfield and register access functions for + * Atlantic registers. + */ + +#include "hw_atl_llh.h" +#include "hw_atl_llh_internal.h" +#include "../aq_hw_utils.h" + +/* global */ +void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore) +{ + aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem); +} + +u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore) +{ + return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore)); +} + +void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr, + glb_reg_res_dis_msk, + glb_reg_res_dis_shift, + glb_reg_res_dis); +} + +void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res) +{ + aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk, + glb_soft_res_shift, soft_res); +} + +u32 glb_soft_res_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr, + glb_soft_res_msk, + glb_soft_res_shift); +} + +u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr); +} + +u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, glb_mif_id_adr); +} + +/* stats */ +u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr); +} + +u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr); +} + +u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr); +} + +u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr); +} + +u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr); +} + +u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr); +} + +u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr); +} + +u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr); +} + +u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr); +} + +/* interrupt */ +void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw) +{ + aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw); +} + +void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx) +{ +/* register address for bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; 
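+ /* Layout (as encoded in the tables of these itr_irq_map_* helpers): + * each 32-bit IMR register at 0x2100 + 4 * (i / 2) packs two rings per + * direction - tx in bits 31:24 and 23:16, rx in bits 15:8 and 7:0 - + * where the top bit of each byte enables the mapping and the low five + * bits select the interrupt vector. + */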
+ +/* bitmask for bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_msk[32] = { + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U + }; + +/* lower bit position of bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_shift[32] = { + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_rxren_adr[rx], + itr_imr_rxren_msk[rx], + itr_imr_rxren_shift[rx], + irq_map_en_rx); +} + +void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx) +{ +/* register address for bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_msk[32] = { + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U + }; + +/* lower bit position of bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_shift[32] = { + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_txten_adr[tx], + itr_imr_txten_msk[tx], + itr_imr_txten_shift[tx], + irq_map_en_tx); +} + +void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx) +{ +/* register address for bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_msk[32] = { + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU + }; + +/* lower bit 
position of bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_shift[32] = { + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_rxr_adr[rx], + itr_imr_rxr_msk[rx], + itr_imr_rxr_shift[rx], + irq_map_rx); +} + +void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx) +{ +/* register address for bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_msk[32] = { + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U + }; + +/* lower bit position of bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_shift[32] = { + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_txt_adr[tx], + itr_imr_txt_msk[tx], + itr_imr_txt_shift[tx], + irq_map_tx); +} + +void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw) +{ + aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw); +} + +void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw) +{ + aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw); +} + +void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr, + itr_reg_res_dsbl_msk, + itr_reg_res_dsbl_shift, irq_reg_res_dis); +} + +void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_status_clearlsw) +{ + aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw); +} + +u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, itr_isrlsw_adr); +} + +u32 itr_res_irq_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk, + itr_res_shift); +} + +void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq) +{ + aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk, + itr_res_shift, res_irq); +} + +/* rdm */ +void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca), + rdm_dcadcpuid_msk, + rdm_dcadcpuid_shift, cpuid); +} + +void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk, + rdm_dca_en_shift, rx_dca_en); +} + +void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk, + rdm_dca_mode_shift, rx_dca_mode); +} + +void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_data_buff_size, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, 
rdm_descddata_size_adr(descriptor), + rdm_descddata_size_msk, + rdm_descddata_size_shift, + rx_desc_data_buff_size); +} + +void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca), + rdm_dcaddesc_en_msk, + rdm_dcaddesc_en_shift, + rx_desc_dca_en); +} + +void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor), + rdm_descden_msk, + rdm_descden_shift, + rx_desc_en); +} + +void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_buff_size, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor), + rdm_descdhdr_size_msk, + rdm_descdhdr_size_shift, + rx_desc_head_buff_size); +} + +void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_splitting, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor), + rdm_descdhdr_split_msk, + rdm_descdhdr_split_shift, + rx_desc_head_splitting); +} + +u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor), + rdm_descdhd_msk, rdm_descdhd_shift); +} + +void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor), + rdm_descdlen_msk, rdm_descdlen_shift, + rx_desc_len); +} + +void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor), + rdm_descdreset_msk, rdm_descdreset_shift, + rx_desc_res); +} + +void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 rx_desc_wr_wb_irq_en) +{ + aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr, + rdm_int_desc_wrb_en_msk, + rdm_int_desc_wrb_en_shift, + rx_desc_wr_wb_irq_en); +} + +void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca), + rdm_dcadhdr_en_msk, + rdm_dcadhdr_en_shift, + rx_head_dca_en); +} + +void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca), + rdm_dcadpay_en_msk, rdm_dcadpay_en_shift, + rx_pld_dca_en); +} + +void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en) +{ + aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr, + rdm_int_rim_en_msk, + rdm_int_rim_en_shift, + rdm_intr_moder_en); +} + +/* reg */ +void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx) +{ + aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map); +} + +u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, gen_intr_stat_adr); +} + +void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl) +{ + aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl); +} + +void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle) +{ + aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr); +} + +void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrlsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor), + rx_dma_desc_base_addrlsw); +} + +void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrmsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor), + rx_dma_desc_base_addrmsw); +} + +u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + 
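/* Raw read of the per-ring Rx DMA descriptor status register. */ +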
return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor)); +} + +void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_tail_ptr, u32 descriptor) +{ + aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor), + rx_dma_desc_tail_ptr); +} + +void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk) +{ + aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk); +} + +void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, + u32 filter) +{ + aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr); +} + +void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1) +{ + aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1); +} + +void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2) +{ + aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2); +} + +void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 rx_intr_moderation_ctl, + u32 queue) +{ + aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue), + rx_intr_moderation_ctl); +} + +void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl) +{ + aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl); +} + +void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrlsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor), + tx_dma_desc_base_addrlsw); +} + +void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrmsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor), + tx_dma_desc_base_addrmsw); +} + +void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_tail_ptr, u32 descriptor) +{ + aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor), + tx_dma_desc_tail_ptr); +} + +void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue) +{ + aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue), + tx_intr_moderation_ctl); +} + +/* RPB: rx packet buffer */ +void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk) +{ + aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr, + rpb_dma_sys_lbk_msk, + rpb_dma_sys_lbk_shift, dma_sys_lbk); +} + +void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, + u32 rx_traf_class_mode) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr, + rpb_rpf_rx_tc_mode_msk, + rpb_rpf_rx_tc_mode_shift, + rx_traf_class_mode); +} + +void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk, + rpb_rx_buf_en_shift, rx_buff_en); +} + +void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer), + rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift, + rx_buff_hi_threshold_per_tc); +} + +void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_lo_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer), + rpb_rxblo_thresh_msk, + rpb_rxblo_thresh_shift, + rx_buff_lo_threshold_per_tc); +} + +void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr, + rpb_rx_fc_mode_msk, + rpb_rx_fc_mode_shift, rx_flow_ctl_mode); +} + +void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_pkt_buff_size_per_tc, u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, 
rpb_rxbbuf_size_adr(buffer), + rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift, + rx_pkt_buff_size_per_tc); +} + +void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer), + rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift, + rx_xoff_en_per_tc); +} + +/* rpf */ + +void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_count_threshold) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr, + rpfl2bc_thresh_msk, + rpfl2bc_thresh_shift, + l2broadcast_count_threshold); +} + +void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk, + rpfl2bc_en_shift, l2broadcast_en); +} + +void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk, + rpfl2bc_act_shift, l2broadcast_flr_act); +} + +void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter), + rpfl2mc_enf_msk, + rpfl2mc_enf_shift, l2multicast_flr_en); +} + +void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, + u32 l2promiscuous_mode_en) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr, + rpfl2promis_mode_msk, + rpfl2promis_mode_shift, + l2promiscuous_mode_en); +} + +void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter), + rpfl2uc_actf_msk, rpfl2uc_actf_shift, + l2unicast_flr_act); +} + +void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter), + rpfl2uc_enf_msk, + rpfl2uc_enf_shift, l2unicast_flr_en); +} + +void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addresslsw, + u32 filter) +{ + aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter), + l2unicast_dest_addresslsw); +} + +void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addressmsw, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter), + rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift, + l2unicast_dest_addressmsw); +} + +void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, + u32 l2_accept_all_mc_packets) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr, + rpfl2mc_accept_all_msk, + rpfl2mc_accept_all_shift, + l2_accept_all_mc_packets); +} + +void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, + u32 user_priority_tc_map, u32 tc) +{ +/* register address for bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_adr[8] = { + 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U, + 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U + }; + +/* bitmask for bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_msk[8] = { + 0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U, + 0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U + }; + +/* lower bit position of bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_shft[8] = { + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U + }; + + aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc], + rpf_rpb_rx_tc_upt_msk[tc], + rpf_rpb_rx_tc_upt_shft[tc], + user_priority_tc_map); +} + +void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr, + rpf_rss_key_addr_msk, + rpf_rss_key_addr_shift, + rss_key_addr); +} + +void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 
rss_key_wr_data) +{ + aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr, + rss_key_wr_data); +} + +u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr, + rpf_rss_key_wr_eni_msk, + rpf_rss_key_wr_eni_shift); +} + +void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr, + rpf_rss_key_wr_eni_msk, + rpf_rss_key_wr_eni_shift, + rss_key_wr_en); +} + +void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr, + rpf_rss_redir_addr_msk, + rpf_rss_redir_addr_shift, rss_redir_tbl_addr); +} + +void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_wr_data) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr, + rpf_rss_redir_wr_data_msk, + rpf_rss_redir_wr_data_shift, + rss_redir_tbl_wr_data); +} + +u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr, + rpf_rss_redir_wr_eni_msk, + rpf_rss_redir_wr_eni_shift); +} + +void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr, + rpf_rss_redir_wr_eni_msk, + rpf_rss_redir_wr_eni_shift, rss_redir_wr_en); +} + +void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk) +{ + aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr, + rpf_tpo_rpf_sys_lbk_msk, + rpf_tpo_rpf_sys_lbk_shift, + tpo_to_rpf_sys_lbk); +} + +void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr, + rpf_vl_inner_tpid_msk, + rpf_vl_inner_tpid_shift, + vlan_inner_etht); +} + +void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr, + rpf_vl_outer_tpid_msk, + rpf_vl_outer_tpid_shift, + vlan_outer_etht); +} + +void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr, + rpf_vl_promis_mode_msk, + rpf_vl_promis_mode_shift, + vlan_prom_mode_en); +} + +void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, + u32 vlan_accept_untagged_packets) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr, + rpf_vl_accept_untagged_mode_msk, + rpf_vl_accept_untagged_mode_shift, + vlan_accept_untagged_packets); +} + +void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr, + rpf_vl_untagged_act_msk, + rpf_vl_untagged_act_shift, + vlan_untagged_act); +} + +void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter), + rpf_vl_en_f_msk, + rpf_vl_en_f_shift, + vlan_flr_en); +} + +void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter), + rpf_vl_act_f_msk, + rpf_vl_act_f_shift, + vlan_flr_act); +} + +void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter), + rpf_vl_id_f_msk, + rpf_vl_id_f_shift, + vlan_id_flr); +} + +void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter), + rpf_et_enf_msk, + rpf_et_enf_shift, etht_flr_en); +} + +void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority_en, u32 filter) +{ + 
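/* Toggle user-priority matching for this ethertype filter entry. */ +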
aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter), + rpf_et_upfen_msk, rpf_et_upfen_shift, + etht_user_priority_en); +} + +void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter), + rpf_et_rxqfen_msk, rpf_et_rxqfen_shift, + etht_rx_queue_en); +} + +void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter), + rpf_et_upf_msk, + rpf_et_upf_shift, etht_user_priority); +} + +void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter), + rpf_et_rxqf_msk, + rpf_et_rxqf_shift, etht_rx_queue); +} + +void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter), + rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift, + etht_mgt_queue); +} + +void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter), + rpf_et_actf_msk, + rpf_et_actf_shift, etht_flr_act); +} + +void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter), + rpf_et_valf_msk, + rpf_et_valf_shift, etht_flr); +} + +/* RPO: rx packet offload */ +void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr, + rpo_ipv4chk_en_msk, + rpo_ipv4chk_en_shift, + ipv4header_crc_offload_en); +} + +void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, + u32 rx_desc_vlan_stripping, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor), + rpo_descdvl_strip_msk, + rpo_descdvl_strip_shift, + rx_desc_vlan_stripping); +} + +void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk, + rpol4chk_en_shift, tcp_udp_crc_offload_en); +} + +void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en) +{ + aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en); +} + +void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, + u32 lro_patch_optimization_en) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr, + rpo_lro_ptopt_en_msk, + rpo_lro_ptopt_en_shift, + lro_patch_optimization_en); +} + +void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, + u32 lro_qsessions_lim) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr, + rpo_lro_qses_lmt_msk, + rpo_lro_qses_lmt_shift, + lro_qsessions_lim); +} + +void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr, + rpo_lro_tot_dsc_lmt_msk, + rpo_lro_tot_dsc_lmt_shift, + lro_total_desc_lim); +} + +void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lro_min_pld_of_first_pkt) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr, + rpo_lro_pkt_min_msk, + rpo_lro_pkt_min_shift, + lro_min_pld_of_first_pkt); +} + +void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim) +{ + aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim); +} + +void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, + u32 lro_max_number_of_descriptors, + u32 lro) +{ +/* Register address for bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_adr[32] = { + 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U, + 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U, + 
0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U, + 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U, + 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U, + 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U, + 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU, + 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU + }; + +/* Bitmask for bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_msk[32] = { + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U + }; + +/* Lower bit position of bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_shift[32] = { + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U + }; + + aq_hw_write_reg_bit(aq_hw, rpo_lro_ldes_max_adr[lro], + rpo_lro_ldes_max_msk[lro], + rpo_lro_ldes_max_shift[lro], + lro_max_number_of_descriptors); +} + +void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, + u32 lro_time_base_divider) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr, + rpo_lro_tb_div_msk, + rpo_lro_tb_div_shift, + lro_time_base_divider); +} + +void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, + u32 lro_inactive_interval) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr, + rpo_lro_ina_ival_msk, + rpo_lro_ina_ival_shift, + lro_inactive_interval); +} + +void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, + u32 lro_max_coalescing_interval) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr, + rpo_lro_max_ival_msk, + rpo_lro_max_ival_shift, + lro_max_coalescing_interval); +} + +/* rx */ +void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr, + rx_reg_res_dsbl_msk, + rx_reg_res_dsbl_shift, + rx_reg_res_dis); +} + +/* tdm */ +void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca), + tdm_dcadcpuid_msk, + tdm_dcadcpuid_shift, cpuid); +} + +void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, + u32 large_send_offload_en) +{ + aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en); +} + +void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk, + tdm_dca_en_shift, tx_dca_en); +} + +void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk, + tdm_dca_mode_shift, tx_dca_mode); +} + +void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca), + tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift, + tx_desc_dca_en); +} + +void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor), + tdm_descden_msk, + tdm_descden_shift, + tx_desc_en); +} + +u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor), + tdm_descdhd_msk, tdm_descdhd_shift); +} + +void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, 
tdm_descdlen_adr(descriptor), + tdm_descdlen_msk, + tdm_descdlen_shift, + tx_desc_len); +} + +void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_irq_en) +{ + aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr, + tdm_int_desc_wrb_en_msk, + tdm_int_desc_wrb_en_shift, + tx_desc_wr_wb_irq_en); +} + +void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_threshold, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor), + tdm_descdwrb_thresh_msk, + tdm_descdwrb_thresh_shift, + tx_desc_wr_wb_threshold); +} + +void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 tdm_irq_moderation_en) +{ + aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr, + tdm_int_mod_en_msk, + tdm_int_mod_en_shift, + tdm_irq_moderation_en); +} + +/* thm */ +void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_first_pkt) +{ + aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr, + thm_lso_tcp_flag_first_msk, + thm_lso_tcp_flag_first_shift, + lso_tcp_flag_of_first_pkt); +} + +void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_last_pkt) +{ + aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr, + thm_lso_tcp_flag_last_msk, + thm_lso_tcp_flag_last_shift, + lso_tcp_flag_of_last_pkt); +} + +void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_middle_pkt) +{ + aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr, + thm_lso_tcp_flag_mid_msk, + thm_lso_tcp_flag_mid_shift, + lso_tcp_flag_of_middle_pkt); +} + +/* TPB: tx packet buffer */ +void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en) +{ + aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk, + tpb_tx_buf_en_shift, tx_buff_en); +} + +void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_hi_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer), + tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift, + tx_buff_hi_threshold_per_tc); +} + +void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_lo_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer), + tpb_txblo_thresh_msk, tpb_txblo_thresh_shift, + tx_buff_lo_threshold_per_tc); +} + +void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en) +{ + aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr, + tpb_dma_sys_lbk_msk, + tpb_dma_sys_lbk_shift, + tx_dma_sys_lbk_en); +} + +void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_buff_size_per_tc, u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer), + tpb_txbbuf_size_msk, + tpb_txbbuf_size_shift, + tx_pkt_buff_size_per_tc); +} + +void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en) +{ + aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr, + tpb_tx_scp_ins_en_msk, + tpb_tx_scp_ins_en_shift, + tx_path_scp_ins_en); +} + +/* TPO: tx packet offload */ +void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr, + tpo_ipv4chk_en_msk, + tpo_ipv4chk_en_shift, + ipv4header_crc_offload_en); +} + +void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr, + tpol4chk_en_msk, + tpol4chk_en_shift, + tcp_udp_crc_offload_en); +} + +void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en) +{ + aq_hw_write_reg_bit(aq_hw, 
tpo_pkt_sys_lbk_adr, + tpo_pkt_sys_lbk_msk, + tpo_pkt_sys_lbk_shift, + tx_pkt_sys_lbk_en); +} + +/* TPS: tx packet scheduler */ +void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_data_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr, + tps_data_tc_arb_mode_msk, + tps_data_tc_arb_mode_shift, + tx_pkt_shed_data_arb_mode); +} + +void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, + u32 curr_time_res) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr, + tps_desc_rate_ta_rst_msk, + tps_desc_rate_ta_rst_shift, + curr_time_res); +} + +void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_rate_lim) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr, + tps_desc_rate_lim_msk, + tps_desc_rate_lim_shift, + tx_pkt_shed_desc_rate_lim); +} + +void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr, + tps_desc_tc_arb_mode_msk, + tps_desc_tc_arb_mode_shift, + tx_pkt_shed_desc_tc_arb_mode); +} + +void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_max_credit, + u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc), + tps_desc_tctcredit_max_msk, + tps_desc_tctcredit_max_shift, + tx_pkt_shed_desc_tc_max_credit); +} + +void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc), + tps_desc_tctweight_msk, + tps_desc_tctweight_shift, + tx_pkt_shed_desc_tc_weight); +} + +void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_vm_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr, + tps_desc_vm_arb_mode_msk, + tps_desc_vm_arb_mode_shift, + tx_pkt_shed_desc_vm_arb_mode); +} + +void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_max_credit, + u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc), + tps_data_tctcredit_max_msk, + tps_data_tctcredit_max_shift, + tx_pkt_shed_tc_data_max_credit); +} + +void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_weight, u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc), + tps_data_tctweight_msk, + tps_data_tctweight_shift, + tx_pkt_shed_tc_data_weight); +} + +/* tx */ +void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr, + tx_reg_res_dsbl_msk, + tx_reg_res_dsbl_shift, tx_reg_res_dis); +} + +/* msm */ +u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr, + msm_reg_access_busy_msk, + msm_reg_access_busy_shift); +} + +void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, + u32 reg_addr_for_indirect_addr) +{ + aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr, + msm_reg_addr_msk, + msm_reg_addr_shift, + reg_addr_for_indirect_addr); +} + +void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe) +{ + aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr, + msm_reg_rd_strobe_msk, + msm_reg_rd_strobe_shift, + reg_rd_strobe); +} + +u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr); +} + +void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data) +{ + aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data); +} + +void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 
reg_wr_strobe) +{ + aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr, + msm_reg_wr_strobe_msk, + msm_reg_wr_strobe_shift, + reg_wr_strobe); +} + +/* pci */ +void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr, + pci_reg_res_dsbl_msk, + pci_reg_res_dsbl_shift, + pci_reg_res_dis); +} + +void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp, + u32 scratch_scp) +{ + aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp), + glb_cpu_scratch_scp); +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h new file mode 100644 index 000000000000..ed1085b95adb --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -0,0 +1,677 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_llh.h: Declarations of bitfield and register access functions for + * Atlantic registers. + */ + +#ifndef HW_ATL_LLH_H +#define HW_ATL_LLH_H + +#include <linux/types.h> + +struct aq_hw_s; + +/* global */ + +/* set global microprocessor semaphore */ +void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, + u32 semaphore); + +/* get global microprocessor semaphore */ +u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore); + +/* set global register reset disable */ +void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis); + +/* set soft reset */ +void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res); + +/* get soft reset */ +u32 glb_soft_res_get(struct aq_hw_s *aq_hw); + +/* stats */ + +u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw); + +/* get rx dma good octet counter lsw */ +u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good packet counter lsw */ +u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good octet counter lsw */ +u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good packet counter lsw */ +u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good octet counter msw */ +u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good packet counter msw */ +u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good octet counter msw */ +u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good packet counter msw */ +u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); + +/* get msm rx errors counter register */ +u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx unicast frames counter register */ +u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx multicast frames counter register */ +u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx broadcast frames counter register */ +u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx broadcast octets counter register 1 */ +u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm rx unicast octets counter register 0 */ +u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); + +/* 
get rx dma statistics counter 7 */ +u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw); + +/* get msm tx errors counter register */ +u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx unicast frames counter register */ +u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx multicast frames counter register */ +u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx broadcast frames counter register */ +u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx multicast octets counter register 1 */ +u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm tx broadcast octets counter register 1 */ +u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm tx unicast octets counter register 0 */ +u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); + +/* get global mif identification */ +u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw); + +/* interrupt */ + +/* set interrupt auto mask lsw */ +void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw); + +/* set interrupt mapping enable rx */ +void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx); + +/* set interrupt mapping enable tx */ +void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx); + +/* set interrupt mapping rx */ +void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx); + +/* set interrupt mapping tx */ +void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx); + +/* set interrupt mask clear lsw */ +void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw); + +/* set interrupt mask set lsw */ +void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw); + +/* set interrupt register reset disable */ +void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis); + +/* set interrupt status clear lsw */ +void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_status_clearlsw); + +/* get interrupt status lsw */ +u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw); + +/* get reset interrupt */ +u32 itr_res_irq_get(struct aq_hw_s *aq_hw); + +/* set reset interrupt */ +void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq); + +/* rdm */ + +/* set cpu id */ +void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); + +/* set rx dca enable */ +void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en); + +/* set rx dca mode */ +void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode); + +/* set rx descriptor data buffer size */ +void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_data_buff_size, + u32 descriptor); + +/* set rx descriptor dca enable */ +void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, + u32 dca); + +/* set rx descriptor enable */ +void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, + u32 descriptor); + +/* set rx descriptor header splitting */ +void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_splitting, + u32 descriptor); + +/* get rx descriptor head pointer */ +u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set rx descriptor length */ +void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, + u32 descriptor); + +/* set rx descriptor write-back interrupt enable */ +void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 rx_desc_wr_wb_irq_en); + +/* set rx header dca enable */ +void 
rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, + u32 dca); + +/* set rx payload dca enable */ +void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca); + +/* set rx descriptor header buffer size */ +void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_buff_size, + u32 descriptor); + +/* set rx descriptor reset */ +void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, + u32 descriptor); + +/* Set RDM Interrupt Moderation Enable */ +void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en); + +/* reg */ + +/* set general interrupt mapping register */ +void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx); + +/* get general interrupt status register */ +u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw); + +/* set interrupt global control register */ +void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl); + +/* set interrupt throttle register */ +void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle); + +/* set rx dma descriptor base address lsw */ +void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrlsw, + u32 descriptor); + +/* set rx dma descriptor base address msw */ +void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrmsw, + u32 descriptor); + +/* get rx dma descriptor status register */ +u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set rx dma descriptor tail pointer register */ +void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_tail_ptr, + u32 descriptor); + +/* set rx filter multicast filter mask register */ +void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, + u32 rx_flr_mcst_flr_msk); + +/* set rx filter multicast filter register */ +void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, + u32 filter); + +/* set rx filter rss control register 1 */ +void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, + u32 rx_flr_rss_control1); + +/* Set RX Filter Control Register 2 */ +void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2); + +/* Set RX Interrupt Moderation Control Register */ +void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 rx_intr_moderation_ctl, + u32 queue); + +/* set tx dma debug control */ +void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl); + +/* set tx dma descriptor base address lsw */ +void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrlsw, + u32 descriptor); + +/* set tx dma descriptor base address msw */ +void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrmsw, + u32 descriptor); + +/* set tx dma descriptor tail pointer register */ +void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_tail_ptr, + u32 descriptor); + +/* Set TX Interrupt Moderation Control Register */ +void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue); + +/* set global microprocessor scratch pad */ +void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, + u32 glb_cpu_scratch_scp, u32 scratch_scp); + +/* rpb */ + +/* set dma system loopback */ +void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk); + +/* set rx traffic class mode */ +void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, + u32 rx_traf_class_mode); + +/* set rx buffer enable */ +void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, 
u32 rx_buff_en); + +/* set rx buffer high threshold (per tc) */ +void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer); + +/* set rx buffer low threshold (per tc) */ +void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_lo_threshold_per_tc, + u32 buffer); + +/* set rx flow control mode */ +void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode); + +/* set rx packet buffer size (per tc) */ +void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_pkt_buff_size_per_tc, + u32 buffer); + +/* set rx xoff enable (per tc) */ +void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, + u32 buffer); + +/* rpf */ + +/* set l2 broadcast count threshold */ +void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_count_threshold); + +/* set l2 broadcast enable */ +void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en); + +/* set l2 broadcast filter action */ +void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_flr_act); + +/* set l2 multicast filter enable */ +void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en, + u32 filter); + +/* set l2 promiscuous mode enable */ +void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, + u32 l2promiscuous_mode_en); + +/* set l2 unicast filter action */ +void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act, + u32 filter); + +/* set l2 unicast filter enable */ +void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, + u32 filter); + +/* set l2 unicast destination address lsw */ +void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addresslsw, + u32 filter); + +/* set l2 unicast destination address msw */ +void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addressmsw, + u32 filter); + +/* Set L2 Accept all Multicast packets */ +void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, + u32 l2_accept_all_mc_packets); + +/* set user-priority tc mapping */ +void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, + u32 user_priority_tc_map, u32 tc); + +/* set rss key address */ +void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr); + +/* set rss key write data */ +void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data); + +/* get rss key write enable */ +u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw); + +/* set rss key write enable */ +void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en); + +/* set rss redirection table address */ +void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_addr); + +/* set rss redirection table write data */ +void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_wr_data); + +/* get rss redirection write enable */ +u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw); + +/* set rss redirection write enable */ +void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en); + +/* set tpo to rpf system loopback */ +void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, + u32 tpo_to_rpf_sys_lbk); + +/* set vlan inner ethertype */ +void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht); + +/* set vlan outer ethertype */ +void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht); + +/* set vlan promiscuous mode enable */ +void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en); 
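+
+/* Usage sketch (editorial illustration, not part of this patch): the RSS
+ * redirection accessors above are assumed to drive an address/data/strobe
+ * sequence in which hardware clears the write-enable bit once the entry
+ * has been committed. The helper name and poll budget below are
+ * hypothetical, and the sketch assumes <linux/delay.h> and <linux/errno.h>.
+ *
+ *	static int rss_redir_entry_write(struct aq_hw_s *aq_hw,
+ *					 u32 addr, u32 data)
+ *	{
+ *		int busy_wait = 1000;
+ *
+ *		rpf_rss_redir_tbl_wr_data_set(aq_hw, data);
+ *		rpf_rss_redir_tbl_addr_set(aq_hw, addr);
+ *		rpf_rss_redir_wr_en_set(aq_hw, 1U);
+ *		while (rpf_rss_redir_wr_en_get(aq_hw) && --busy_wait)
+ *			udelay(10);
+ *
+ *		return busy_wait ? 0 : -ETIME;
+ *	}
+ */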
+ +/* Set VLAN untagged action */ +void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act); + +/* Set VLAN accept untagged packets */ +void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, + u32 vlan_accept_untagged_packets); + +/* Set VLAN filter enable */ +void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter); + +/* Set VLAN Filter Action */ +void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act, + u32 filter); + +/* Set VLAN ID Filter */ +void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter); + +/* set ethertype filter enable */ +void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter); + +/* set ethertype user-priority enable */ +void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority_en, u32 filter); + +/* set ethertype rx queue enable */ +void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en, + u32 filter); + +/* set ethertype rx queue */ +void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, + u32 filter); + +/* set ethertype user-priority */ +void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority, + u32 filter); + +/* set ethertype management queue */ +void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, + u32 filter); + +/* set ethertype filter action */ +void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, + u32 filter); + +/* set ethertype filter */ +void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter); + +/* rpo */ + +/* set ipv4 header checksum offload enable */ +void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en); + +/* set rx descriptor vlan stripping */ +void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, + u32 rx_desc_vlan_stripping, + u32 descriptor); + +/* set tcp/udp checksum offload enable */ +void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en); + +/* Set LRO Patch Optimization Enable. 
*/ +void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, + u32 lro_patch_optimization_en); + +/* Set Large Receive Offload Enable */ +void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en); + +/* Set LRO Q Sessions Limit */ +void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim); + +/* Set LRO Total Descriptor Limit */ +void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim); + +/* Set LRO Min Payload of First Packet */ +void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lro_min_pld_of_first_pkt); + +/* Set LRO Packet Limit */ +void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim); + +/* Set LRO Max Number of Descriptors */ +void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, + u32 lro_max_desc_num, u32 lro); + +/* Set LRO Time Base Divider */ +void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, + u32 lro_time_base_divider); + +/*Set LRO Inactive Interval */ +void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, + u32 lro_inactive_interval); + +/*Set LRO Max Coalescing Interval */ +void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, + u32 lro_max_coalescing_interval); + +/* rx */ + +/* set rx register reset disable */ +void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis); + +/* tdm */ + +/* set cpu id */ +void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); + +/* set large send offload enable */ +void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, + u32 large_send_offload_en); + +/* set tx descriptor enable */ +void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor); + +/* set tx dca enable */ +void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en); + +/* set tx dca mode */ +void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode); + +/* set tx descriptor dca enable */ +void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca); + +/* get tx descriptor head pointer */ +u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set tx descriptor length */ +void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, + u32 descriptor); + +/* set tx descriptor write-back interrupt enable */ +void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_irq_en); + +/* set tx descriptor write-back threshold */ +void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_threshold, + u32 descriptor); + +/* Set TDM Interrupt Moderation Enable */ +void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 tdm_irq_moderation_en); +/* thm */ + +/* set lso tcp flag of first packet */ +void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_first_pkt); + +/* set lso tcp flag of last packet */ +void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_last_pkt); + +/* set lso tcp flag of middle packet */ +void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_middle_pkt); + +/* tpb */ + +/* set tx buffer enable */ +void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en); + +/* set tx buffer high threshold (per tc) */ +void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_hi_threshold_per_tc, + u32 buffer); + +/* set tx buffer low threshold (per tc) */ +void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_lo_threshold_per_tc, + u32 buffer); + +/* set tx dma system loopback enable */ +void 
tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en); + +/* set tx packet buffer size (per tc) */ +void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_buff_size_per_tc, u32 buffer); + +/* set tx path pad insert enable */ +void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en); + +/* tpo */ + +/* set ipv4 header checksum offload enable */ +void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en); + +/* set tcp/udp checksum offload enable */ +void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en); + +/* set tx pkt system loopback enable */ +void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en); + +/* tps */ + +/* set tx packet scheduler data arbitration mode */ +void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_data_arb_mode); + +/* set tx packet scheduler descriptor rate current time reset */ +void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, + u32 curr_time_res); + +/* set tx packet scheduler descriptor rate limit */ +void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_rate_lim); + +/* set tx packet scheduler descriptor tc arbitration mode */ +void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_arb_mode); + +/* set tx packet scheduler descriptor tc max credit */ +void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_max_credit, + u32 tc); + +/* set tx packet scheduler descriptor tc weight */ +void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, + u32 tc); + +/* set tx packet scheduler descriptor vm arbitration mode */ +void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_vm_arb_mode); + +/* set tx packet scheduler tc data max credit */ +void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_max_credit, + u32 tc); + +/* set tx packet scheduler tc data weight */ +void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_weight, + u32 tc); + +/* tx */ + +/* set tx register reset disable */ +void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis); + +/* msm */ + +/* get register access status */ +u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw); + +/* set register address for indirect address */ +void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, + u32 reg_addr_for_indirect_addr); + +/* set register read strobe */ +void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe); + +/* get register read data */ +u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw); + +/* set register write data */ +void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data); + +/* set register write strobe */ +void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe); + +/* pci */ + +/* set pci register reset disable */ +void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis); + +#endif /* HW_ATL_LLH_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h new file mode 100644 index 000000000000..5527fc0e5942 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -0,0 +1,2375 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 
aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_llh_internal.h: Preprocessor definitions + * for Atlantic registers. + */ + +#ifndef HW_ATL_LLH_INTERNAL_H +#define HW_ATL_LLH_INTERNAL_H + +/* global microprocessor semaphore definitions + * base address: 0x000003a0 + * parameter: semaphore {s} | stride size 0x4 | range [0, 15] + */ +#define glb_cpu_sem_adr(semaphore) (0x000003a0u + (semaphore) * 0x4) +/* register address for bitfield rx dma good octet counter lsw [1f:0] */ +#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808 +/* register address for bitfield rx dma good packet counter lsw [1f:0] */ +#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800 +/* register address for bitfield tx dma good octet counter lsw [1f:0] */ +#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808 +/* register address for bitfield tx dma good packet counter lsw [1f:0] */ +#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800 + +/* register address for bitfield rx dma good octet counter msw [3f:20] */ +#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c +/* register address for bitfield rx dma good packet counter msw [3f:20] */ +#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804 +/* register address for bitfield tx dma good octet counter msw [3f:20] */ +#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c +/* register address for bitfield tx dma good packet counter msw [3f:20] */ +#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804 + +/* preprocessor definitions for msm rx errors counter register */ +#define mac_msm_rx_errs_cnt_adr 0x00000120u + +/* preprocessor definitions for msm rx unicast frames counter register */ +#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u + +/* preprocessor definitions for msm rx multicast frames counter register */ +#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u + +/* preprocessor definitions for msm rx broadcast frames counter register */ +#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u + +/* preprocessor definitions for msm rx broadcast octets counter register 1 */ +#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u + +/* preprocessor definitions for msm rx broadcast octets counter register 2 */ +#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u + +/* preprocessor definitions for msm rx unicast octets counter register 0 */ +#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u + +/* preprocessor definitions for rx dma statistics counter 7 */ +#define rx_dma_stat_counter7_adr 0x00006818u + +/* preprocessor definitions for msm tx unicast frames counter register */ +#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u + +/* preprocessor definitions for msm tx multicast frames counter register */ +#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u + +/* preprocessor definitions for global mif identification */ +#define glb_mif_id_adr 0x0000001cu + +/* register address for bitfield iamr_lsw[1f:0] */ +#define itr_iamrlsw_adr 0x00002090 +/* register address for bitfield rx dma drop packet counter [1f:0] */ +#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818 + +/* register address for bitfield imcr_lsw[1f:0] */ +#define itr_imcrlsw_adr 0x00002070 +/* register address for bitfield imsr_lsw[1f:0] */ +#define itr_imsrlsw_adr 0x00002060 +/* register address for bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_adr 
0x00002300 +/* bitmask for bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_msk 0x20000000 +/* lower bit position of bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_shift 29 +/* register address for bitfield iscr_lsw[1f:0] */ +#define itr_iscrlsw_adr 0x00002050 +/* register address for bitfield isr_lsw[1f:0] */ +#define itr_isrlsw_adr 0x00002000 +/* register address for bitfield itr_reset */ +#define itr_res_adr 0x00002300 +/* bitmask for bitfield itr_reset */ +#define itr_res_msk 0x80000000 +/* lower bit position of bitfield itr_reset */ +#define itr_res_shift 31 +/* register address for bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_msk 0x000000ff +/* lower bit position of bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_shift 0 +/* register address for bitfield dca_en */ +#define rdm_dca_en_adr 0x00006180 + +/* rx dca_en bitfield definitions + * preprocessor definitions for the bitfield "dca_en". + * port="pif_rdm_dca_en_i" + */ + +/* register address for bitfield dca_en */ +#define rdm_dca_en_adr 0x00006180 +/* bitmask for bitfield dca_en */ +#define rdm_dca_en_msk 0x80000000 +/* inverted bitmask for bitfield dca_en */ +#define rdm_dca_en_mskn 0x7fffffff +/* lower bit position of bitfield dca_en */ +#define rdm_dca_en_shift 31 +/* width of bitfield dca_en */ +#define rdm_dca_en_width 1 +/* default value of bitfield dca_en */ +#define rdm_dca_en_default 0x1 + +/* rx dca_mode[3:0] bitfield definitions + * preprocessor definitions for the bitfield "dca_mode[3:0]". + * port="pif_rdm_dca_mode_i[3:0]" + */ + +/* register address for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_adr 0x00006180 +/* bitmask for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_msk 0x0000000f +/* inverted bitmask for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_mskn 0xfffffff0 +/* lower bit position of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_shift 0 +/* width of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_width 4 +/* default value of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_default 0x0 + +/* rx desc{d}_data_size[4:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_data_size_i[4:0]" + */ + +/* register address for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_msk 0x0000001f +/* inverted bitmask for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_mskn 0xffffffe0 +/* lower bit position of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_shift 0 +/* width of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_width 5 +/* default value of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_default 0x0 + +/* rx dca{d}_desc_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_desc_en". 
+ * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_desc_en_i[0]" + */ + +/* register address for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_msk 0x80000000 +/* inverted bitmask for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_mskn 0x7fffffff +/* lower bit position of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_shift 31 +/* width of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_width 1 +/* default value of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_default 0x0 + +/* rx desc{d}_en bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_en". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc_en_i[0]" + */ + +/* register address for bitfield desc{d}_en */ +#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_en */ +#define rdm_descden_msk 0x80000000 +/* inverted bitmask for bitfield desc{d}_en */ +#define rdm_descden_mskn 0x7fffffff +/* lower bit position of bitfield desc{d}_en */ +#define rdm_descden_shift 31 +/* width of bitfield desc{d}_en */ +#define rdm_descden_width 1 +/* default value of bitfield desc{d}_en */ +#define rdm_descden_default 0x0 + +/* rx desc{d}_hdr_size[4:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_hdr_size_i[4:0]" + */ + +/* register address for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_msk 0x00001f00 +/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_mskn 0xffffe0ff +/* lower bit position of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_shift 8 +/* width of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_width 5 +/* default value of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_default 0x0 + +/* rx desc{d}_hdr_split bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hdr_split". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc_hdr_split_i[0]" + */ + +/* register address for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_msk 0x10000000 +/* inverted bitmask for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_mskn 0xefffffff +/* lower bit position of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_shift 28 +/* width of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_width 1 +/* default value of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_default 0x0 + +/* rx desc{d}_hd[c:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". 
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="rdm_pif_desc0_hd_o[12:0]" + */ + +/* register address for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_msk 0x00001fff +/* inverted bitmask for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_mskn 0xffffe000 +/* lower bit position of bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_shift 0 +/* width of bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_width 13 + +/* rx desc{d}_len[9:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_len[9:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_len_i[9:0]" + */ + +/* register address for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_msk 0x00001ff8 +/* inverted bitmask for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_mskn 0xffffe007 +/* lower bit position of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_shift 3 +/* width of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_width 10 +/* default value of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_default 0x0 + +/* rx desc{d}_reset bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_reset". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_q_pf_res_i[0]" + */ + +/* register address for bitfield desc{d}_reset */ +#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_reset */ +#define rdm_descdreset_msk 0x02000000 +/* inverted bitmask for bitfield desc{d}_reset */ +#define rdm_descdreset_mskn 0xfdffffff +/* lower bit position of bitfield desc{d}_reset */ +#define rdm_descdreset_shift 25 +/* width of bitfield desc{d}_reset */ +#define rdm_descdreset_width 1 +/* default value of bitfield desc{d}_reset */ +#define rdm_descdreset_default 0x0 + +/* rx int_desc_wrb_en bitfield definitions + * preprocessor definitions for the bitfield "int_desc_wrb_en". + * port="pif_rdm_int_desc_wrb_en_i" + */ + +/* register address for bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_adr 0x00005a30 +/* bitmask for bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_msk 0x00000004 +/* inverted bitmask for bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_mskn 0xfffffffb +/* lower bit position of bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_shift 2 +/* width of bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_width 1 +/* default value of bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_default 0x0 + +/* rx dca{d}_hdr_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_hdr_en". 
+ * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_hdr_en_i[0]" + */ + +/* register address for bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_msk 0x40000000 +/* inverted bitmask for bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_mskn 0xbfffffff +/* lower bit position of bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_shift 30 +/* width of bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_width 1 +/* default value of bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_default 0x0 + +/* rx dca{d}_pay_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_pay_en". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_pay_en_i[0]" + */ + +/* register address for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_msk 0x20000000 +/* inverted bitmask for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_mskn 0xdfffffff +/* lower bit position of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_shift 29 +/* width of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_width 1 +/* default value of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_default 0x0 + +/* RX rdm_int_rim_en Bitfield Definitions + * Preprocessor definitions for the bitfield "rdm_int_rim_en". + * PORT="pif_rdm_int_rim_en_i" + */ + +/* Register address for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_adr 0x00005A30 +/* Bitmask for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_msk 0x00000008 +/* Inverted bitmask for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_mskn 0xFFFFFFF7 +/* Lower bit position of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_shift 3 +/* Width of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_width 1 +/* Default value of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_default 0x0 + +/* general interrupt mapping register definitions + * preprocessor definitions for general interrupt mapping register + * base address: 0x00002180 + * parameter: regidx {f} | stride size 0x4 | range [0, 3] + */ +#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4) + +/* general interrupt status register definitions + * preprocessor definitions for general interrupt status register + * address: 0x000021A0 + */ + +#define gen_intr_stat_adr 0x000021A4U + +/* interrupt global control register definitions + * preprocessor definitions for interrupt global control register + * address: 0x00002300 + */ +#define intr_glb_ctl_adr 0x00002300u + +/* interrupt throttle register definitions + * preprocessor definitions for interrupt throttle register + * base address: 0x00002800 + * parameter: throttle {t} | stride size 0x4 | range [0, 31] + */ +#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4) + +/* rx dma descriptor base address lsw definitions + * preprocessor definitions for rx dma descriptor base address lsw + * base address: 0x00005b00 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_base_addrlsw_adr(descriptor) \ +(0x00005b00u + (descriptor) * 0x20) + +/* rx dma descriptor base address msw definitions + * preprocessor definitions for rx dma descriptor base address msw + * base address: 0x00005b04 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_base_addrmsw_adr(descriptor) \ +(0x00005b04u + (descriptor) * 0x20) + +/* rx 
dma descriptor status register definitions + * preprocessor definitions for rx dma descriptor status register + * base address: 0x00005b14 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20) + +/* rx dma descriptor tail pointer register definitions + * preprocessor definitions for rx dma descriptor tail pointer register + * base address: 0x00005b10 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20) + +/* rx interrupt moderation control register definitions + * Preprocessor definitions for RX Interrupt Moderation Control Register + * Base Address: 0x00005A40 + * Parameter: RIM {R} | stride size 0x4 | range [0, 31] + */ +#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4) + +/* rx filter multicast filter mask register definitions + * preprocessor definitions for rx filter multicast filter mask register + * address: 0x00005270 + */ +#define rx_flr_mcst_flr_msk_adr 0x00005270u + +/* rx filter multicast filter register definitions + * preprocessor definitions for rx filter multicast filter register + * base address: 0x00005250 + * parameter: filter {f} | stride size 0x4 | range [0, 7] + */ +#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4) + +/* RX Filter RSS Control Register 1 Definitions + * Preprocessor definitions for RX Filter RSS Control Register 1 + * Address: 0x000054C0 + */ +#define rx_flr_rss_control1_adr 0x000054C0u + +/* RX Filter Control Register 2 Definitions + * Preprocessor definitions for RX Filter Control Register 2 + * Address: 0x00005104 + */ +#define rx_flr_control2_adr 0x00005104u + +/* tx tx dma debug control [1f:0] bitfield definitions + * preprocessor definitions for the bitfield "tx dma debug control [1f:0]". + * port="pif_tdm_debug_cntl_i[31:0]" + */ + +/* register address for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_adr 0x00008920 +/* bitmask for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_msk 0xffffffff +/* inverted bitmask for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_mskn 0x00000000 +/* lower bit position of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_shift 0 +/* width of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_width 32 +/* default value of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_default 0x0 + +/* tx dma descriptor base address lsw definitions + * preprocessor definitions for tx dma descriptor base address lsw + * base address: 0x00007c00 + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + */ +#define tx_dma_desc_base_addrlsw_adr(descriptor) \ + (0x00007c00u + (descriptor) * 0x40) + +/* tx dma descriptor tail pointer register definitions + * preprocessor definitions for tx dma descriptor tail pointer register + * base address: 0x00007c10 + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + */ +#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40) + +/* rx dma_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_sys_loopback". 
+ * port="pif_rpb_dma_sys_lbk_i" + */ + +/* register address for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_adr 0x00005000 +/* bitmask for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_msk 0x00000040 +/* inverted bitmask for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_mskn 0xffffffbf +/* lower bit position of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_shift 6 +/* width of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_width 1 +/* default value of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_default 0x0 + +/* rx rx_tc_mode bitfield definitions + * preprocessor definitions for the bitfield "rx_tc_mode". + * port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i" + */ + +/* register address for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_adr 0x00005700 +/* bitmask for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_msk 0x00000100 +/* inverted bitmask for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff +/* lower bit position of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_shift 8 +/* width of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_width 1 +/* default value of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_default 0x0 + +/* rx rx_buf_en bitfield definitions + * preprocessor definitions for the bitfield "rx_buf_en". + * port="pif_rpb_rx_buf_en_i" + */ + +/* register address for bitfield rx_buf_en */ +#define rpb_rx_buf_en_adr 0x00005700 +/* bitmask for bitfield rx_buf_en */ +#define rpb_rx_buf_en_msk 0x00000001 +/* inverted bitmask for bitfield rx_buf_en */ +#define rpb_rx_buf_en_mskn 0xfffffffe +/* lower bit position of bitfield rx_buf_en */ +#define rpb_rx_buf_en_shift 0 +/* width of bitfield rx_buf_en */ +#define rpb_rx_buf_en_width 1 +/* default value of bitfield rx_buf_en */ +#define rpb_rx_buf_en_default 0x0 + +/* rx rx{b}_hi_thresh[d:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_hi_thresh_i[13:0]" + */ + +/* register address for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_msk 0x3fff0000 +/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_mskn 0xc000ffff +/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_shift 16 +/* width of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_width 14 +/* default value of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_default 0x0 + +/* rx rx{b}_lo_thresh[d:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]". 
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_lo_thresh_i[13:0]" + */ + +/* register address for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_msk 0x00003fff +/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_mskn 0xffffc000 +/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_shift 0 +/* width of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_width 14 +/* default value of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_default 0x0 + +/* rx rx_fc_mode[1:0] bitfield definitions + * preprocessor definitions for the bitfield "rx_fc_mode[1:0]". + * port="pif_rpb_rx_fc_mode_i[1:0]" + */ + +/* register address for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_adr 0x00005700 +/* bitmask for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_msk 0x00000030 +/* inverted bitmask for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_mskn 0xffffffcf +/* lower bit position of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_shift 4 +/* width of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_width 2 +/* default value of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_default 0x0 + +/* rx rx{b}_buf_size[8:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_buf_size_i[8:0]" + */ + +/* register address for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_msk 0x000001ff +/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_mskn 0xfffffe00 +/* lower bit position of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_shift 0 +/* width of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_width 9 +/* default value of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_default 0x0 + +/* rx rx{b}_xoff_en bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_xoff_en". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx_xoff_en_i[0]" + */ + +/* register address for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_msk 0x80000000 +/* inverted bitmask for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_mskn 0x7fffffff +/* lower bit position of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_shift 31 +/* width of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_width 1 +/* default value of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_default 0x0 + +/* rx l2_bc_thresh[f:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]". 
+ * port="pif_rpf_l2_bc_thresh_i[15:0]" + */ + +/* register address for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_adr 0x00005100 +/* bitmask for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_msk 0xffff0000 +/* inverted bitmask for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_mskn 0x0000ffff +/* lower bit position of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_shift 16 +/* width of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_width 16 +/* default value of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_default 0x0 + +/* rx l2_bc_en bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_en". + * port="pif_rpf_l2_bc_en_i" + */ + +/* register address for bitfield l2_bc_en */ +#define rpfl2bc_en_adr 0x00005100 +/* bitmask for bitfield l2_bc_en */ +#define rpfl2bc_en_msk 0x00000001 +/* inverted bitmask for bitfield l2_bc_en */ +#define rpfl2bc_en_mskn 0xfffffffe +/* lower bit position of bitfield l2_bc_en */ +#define rpfl2bc_en_shift 0 +/* width of bitfield l2_bc_en */ +#define rpfl2bc_en_width 1 +/* default value of bitfield l2_bc_en */ +#define rpfl2bc_en_default 0x0 + +/* rx l2_bc_act[2:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_act[2:0]". + * port="pif_rpf_l2_bc_act_i[2:0]" + */ + +/* register address for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_adr 0x00005100 +/* bitmask for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_msk 0x00007000 +/* inverted bitmask for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_mskn 0xffff8fff +/* lower bit position of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_shift 12 +/* width of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_width 3 +/* default value of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_default 0x0 + +/* rx l2_mc_en{f} bitfield definitions + * preprocessor definitions for the bitfield "l2_mc_en{f}". + * parameter: filter {f} | stride size 0x4 | range [0, 7] + * port="pif_rpf_l2_mc_en_i[0]" + */ + +/* register address for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4) +/* bitmask for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_msk 0x80000000 +/* inverted bitmask for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_mskn 0x7fffffff +/* lower bit position of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_shift 31 +/* width of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_width 1 +/* default value of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_default 0x0 + +/* rx l2_promis_mode bitfield definitions + * preprocessor definitions for the bitfield "l2_promis_mode". + * port="pif_rpf_l2_promis_mode_i" + */ + +/* register address for bitfield l2_promis_mode */ +#define rpfl2promis_mode_adr 0x00005100 +/* bitmask for bitfield l2_promis_mode */ +#define rpfl2promis_mode_msk 0x00000008 +/* inverted bitmask for bitfield l2_promis_mode */ +#define rpfl2promis_mode_mskn 0xfffffff7 +/* lower bit position of bitfield l2_promis_mode */ +#define rpfl2promis_mode_shift 3 +/* width of bitfield l2_promis_mode */ +#define rpfl2promis_mode_width 1 +/* default value of bitfield l2_promis_mode */ +#define rpfl2promis_mode_default 0x0 + +/* rx l2_uc_act{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]". 
+ * parameter: filter {f} | stride size 0x8 | range [0, 37] + * port="pif_rpf_l2_uc_act0_i[2:0]" + */ + +/* register address for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_msk 0x00070000 +/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_mskn 0xfff8ffff +/* lower bit position of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_shift 16 +/* width of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_width 3 +/* default value of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_default 0x0 + +/* rx l2_uc_en{f} bitfield definitions + * preprocessor definitions for the bitfield "l2_uc_en{f}". + * parameter: filter {f} | stride size 0x8 | range [0, 37] + * port="pif_rpf_l2_uc_en_i[0]" + */ + +/* register address for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_msk 0x80000000 +/* inverted bitmask for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_mskn 0x7fffffff +/* lower bit position of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_shift 31 +/* width of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_width 1 +/* default value of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_default 0x0 + +/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */ +#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8) +/* register address for bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_msk 0x0000ffff +/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_shift 0 + +/* rx l2_mc_accept_all bitfield definitions + * Preprocessor definitions for the bitfield "l2_mc_accept_all". + * PORT="pif_rpf_l2_mc_all_accept_i" + */ + +/* Register address for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_adr 0x00005270 +/* Bitmask for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_msk 0x00004000 +/* Inverted bitmask for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_mskn 0xFFFFBFFF +/* Lower bit position of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_shift 14 +/* Width of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_width 1 +/* Default value of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_default 0x0 + +/* width of bitfield rx_tc_up{t}[2:0] */ +#define rpf_rpb_rx_tc_upt_width 3 +/* default value of bitfield rx_tc_up{t}[2:0] */ +#define rpf_rpb_rx_tc_upt_default 0x0 + +/* rx rss_key_addr[4:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_key_addr[4:0]". + * port="pif_rpf_rss_key_addr_i[4:0]" + */ + +/* register address for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_adr 0x000054d0 +/* bitmask for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_msk 0x0000001f +/* inverted bitmask for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_mskn 0xffffffe0 +/* lower bit position of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_shift 0 +/* width of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_width 5 +/* default value of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_default 0x0 + +/* rx rss_key_wr_data[1f:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]". 
+ * port="pif_rpf_rss_key_wr_data_i[31:0]" + */ + +/* register address for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_adr 0x000054d4 +/* bitmask for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_msk 0xffffffff +/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_mskn 0x00000000 +/* lower bit position of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_shift 0 +/* width of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_width 32 +/* default value of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_default 0x0 + +/* rx rss_key_wr_en_i bitfield definitions + * preprocessor definitions for the bitfield "rss_key_wr_en_i". + * port="pif_rpf_rss_key_wr_en_i" + */ + +/* register address for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_adr 0x000054d0 +/* bitmask for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_msk 0x00000020 +/* inverted bitmask for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_mskn 0xffffffdf +/* lower bit position of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_shift 5 +/* width of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_width 1 +/* default value of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_default 0x0 + +/* rx rss_redir_addr[3:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_addr[3:0]". + * port="pif_rpf_rss_redir_addr_i[3:0]" + */ + +/* register address for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_adr 0x000054e0 +/* bitmask for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_msk 0x0000000f +/* inverted bitmask for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_mskn 0xfffffff0 +/* lower bit position of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_shift 0 +/* width of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_width 4 +/* default value of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_default 0x0 + +/* rx rss_redir_wr_data[f:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]". + * port="pif_rpf_rss_redir_wr_data_i[15:0]" + */ + +/* register address for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_adr 0x000054e4 +/* bitmask for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_msk 0x0000ffff +/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_mskn 0xffff0000 +/* lower bit position of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_shift 0 +/* width of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_width 16 +/* default value of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_default 0x0 + +/* rx rss_redir_wr_en_i bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_wr_en_i". 
+ * port="pif_rpf_rss_redir_wr_en_i" + */ + +/* register address for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_adr 0x000054e0 +/* bitmask for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_msk 0x00000010 +/* inverted bitmask for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_mskn 0xffffffef +/* lower bit position of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_shift 4 +/* width of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_width 1 +/* default value of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_default 0x0 + +/* rx tpo_rpf_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback". + * port="pif_rpf_tpo_pkt_sys_lbk_i" + */ + +/* register address for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_adr 0x00005000 +/* bitmask for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_msk 0x00000100 +/* inverted bitmask for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff +/* lower bit position of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_shift 8 +/* width of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_width 1 +/* default value of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_default 0x0 + +/* rx vl_inner_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]". + * port="pif_rpf_vl_inner_tpid_i[15:0]" + */ + +/* register address for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_adr 0x00005284 +/* bitmask for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_msk 0x0000ffff +/* inverted bitmask for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_mskn 0xffff0000 +/* lower bit position of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_shift 0 +/* width of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_width 16 +/* default value of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_default 0x8100 + +/* rx vl_outer_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]". + * port="pif_rpf_vl_outer_tpid_i[15:0]" + */ + +/* register address for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_adr 0x00005284 +/* bitmask for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_msk 0xffff0000 +/* inverted bitmask for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_mskn 0x0000ffff +/* lower bit position of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_shift 16 +/* width of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_width 16 +/* default value of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_default 0x88a8 + +/* rx vl_promis_mode bitfield definitions + * preprocessor definitions for the bitfield "vl_promis_mode". 
+ * port="pif_rpf_vl_promis_mode_i" + */ + +/* register address for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_adr 0x00005280 +/* bitmask for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_msk 0x00000002 +/* inverted bitmask for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_mskn 0xfffffffd +/* lower bit position of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_shift 1 +/* width of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_width 1 +/* default value of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_default 0x0 + +/* RX vl_accept_untagged_mode Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_accept_untagged_mode". + * PORT="pif_rpf_vl_accept_untagged_i" + */ + +/* Register address for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_adr 0x00005280 +/* Bitmask for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_msk 0x00000004 +/* Inverted bitmask for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB +/* Lower bit position of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_shift 2 +/* Width of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_width 1 +/* Default value of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_default 0x0 + +/* rX vl_untagged_act[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]". + * PORT="pif_rpf_vl_untagged_act_i[2:0]" + */ + +/* Register address for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_adr 0x00005280 +/* Bitmask for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_msk 0x00000038 +/* Inverted bitmask for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_mskn 0xFFFFFFC7 +/* Lower bit position of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_shift 3 +/* Width of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_width 3 +/* Default value of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_default 0x0 + +/* RX vl_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_en_i[0]" + */ + +/* Register address for bitfield vl_en{F} */ +#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_en{F} */ +#define rpf_vl_en_f_msk 0x80000000 +/* Inverted bitmask for bitfield vl_en{F} */ +#define rpf_vl_en_f_mskn 0x7FFFFFFF +/* Lower bit position of bitfield vl_en{F} */ +#define rpf_vl_en_f_shift 31 +/* Width of bitfield vl_en{F} */ +#define rpf_vl_en_f_width 1 +/* Default value of bitfield vl_en{F} */ +#define rpf_vl_en_f_default 0x0 + +/* RX vl_act{F}[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_act{F}[2:0]". 
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_act0_i[2:0]" + */ + +/* Register address for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_msk 0x00070000 +/* Inverted bitmask for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_mskn 0xFFF8FFFF +/* Lower bit position of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_shift 16 +/* Width of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_width 3 +/* Default value of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_default 0x0 + +/* RX vl_id{F}[B:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_id{F}[B:0]". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_id0_i[11:0]" + */ + +/* Register address for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_msk 0x00000FFF +/* Inverted bitmask for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_mskn 0xFFFFF000 +/* Lower bit position of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_shift 0 +/* Width of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_width 12 +/* Default value of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_default 0x0 + +/* RX et_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "et_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_et_en_i[0]" + */ + +/* Register address for bitfield et_en{F} */ +#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4) +/* Bitmask for bitfield et_en{F} */ +#define rpf_et_en_f_msk 0x80000000 +/* Inverted bitmask for bitfield et_en{F} */ +#define rpf_et_en_f_mskn 0x7FFFFFFF +/* Lower bit position of bitfield et_en{F} */ +#define rpf_et_en_f_shift 31 +/* Width of bitfield et_en{F} */ +#define rpf_et_en_f_width 1 +/* Default value of bitfield et_en{F} */ +#define rpf_et_en_f_default 0x0 + +/* rx et_en{f} bitfield definitions + * preprocessor definitions for the bitfield "et_en{f}". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_en_i[0]" + */ + +/* register address for bitfield et_en{f} */ +#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_en{f} */ +#define rpf_et_enf_msk 0x80000000 +/* inverted bitmask for bitfield et_en{f} */ +#define rpf_et_enf_mskn 0x7fffffff +/* lower bit position of bitfield et_en{f} */ +#define rpf_et_enf_shift 31 +/* width of bitfield et_en{f} */ +#define rpf_et_enf_width 1 +/* default value of bitfield et_en{f} */ +#define rpf_et_enf_default 0x0 + +/* rx et_up{f}_en bitfield definitions + * preprocessor definitions for the bitfield "et_up{f}_en". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up_en_i[0]" + */ + +/* register address for bitfield et_up{f}_en */ +#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}_en */ +#define rpf_et_upfen_msk 0x40000000 +/* inverted bitmask for bitfield et_up{f}_en */ +#define rpf_et_upfen_mskn 0xbfffffff +/* lower bit position of bitfield et_up{f}_en */ +#define rpf_et_upfen_shift 30 +/* width of bitfield et_up{f}_en */ +#define rpf_et_upfen_width 1 +/* default value of bitfield et_up{f}_en */ +#define rpf_et_upfen_default 0x0 + +/* rx et_rxq{f}_en bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}_en". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq_en_i[0]" + */ + +/* register address for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_msk 0x20000000 +/* inverted bitmask for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_mskn 0xdfffffff +/* lower bit position of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_shift 29 +/* width of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_width 1 +/* default value of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_default 0x0 + +/* rx et_up{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_up{f}[2:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up0_i[2:0]" + */ + +/* register address for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_msk 0x1c000000 +/* inverted bitmask for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_mskn 0xe3ffffff +/* lower bit position of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_shift 26 +/* width of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_width 3 +/* default value of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_default 0x0 + +/* rx et_rxq{f}[4:0] bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}[4:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq0_i[4:0]" + */ + +/* register address for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_msk 0x01f00000 +/* inverted bitmask for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_mskn 0xfe0fffff +/* lower bit position of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_shift 20 +/* width of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_width 5 +/* default value of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_default 0x0 + +/* rx et_mng_rxq{f} bitfield definitions + * preprocessor definitions for the bitfield "et_mng_rxq{f}". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_mng_rxq_i[0]" + */ + +/* register address for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_msk 0x00080000 +/* inverted bitmask for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_mskn 0xfff7ffff +/* lower bit position of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_shift 19 +/* width of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_width 1 +/* default value of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_default 0x0 + +/* rx et_act{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_act{f}[2:0]". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_act0_i[2:0]" + */ + +/* register address for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_msk 0x00070000 +/* inverted bitmask for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_mskn 0xfff8ffff +/* lower bit position of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_shift 16 +/* width of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_width 3 +/* default value of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_default 0x0 + +/* rx et_val{f}[f:0] bitfield definitions + * preprocessor definitions for the bitfield "et_val{f}[f:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_val0_i[15:0]" + */ + +/* register address for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_msk 0x0000ffff +/* inverted bitmask for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_mskn 0xffff0000 +/* lower bit position of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_shift 0 +/* width of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_width 16 +/* default value of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_default 0x0 + +/* rx ipv4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "ipv4_chk_en". + * port="pif_rpo_ipv4_chk_en_i" + */ + +/* register address for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_adr 0x00005580 +/* bitmask for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_msk 0x00000002 +/* inverted bitmask for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_mskn 0xfffffffd +/* lower bit position of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_shift 1 +/* width of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_width 1 +/* default value of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_default 0x0 + +/* rx desc{d}_vl_strip bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_vl_strip". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rpo_desc_vl_strip_i[0]" + */ + +/* register address for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_msk 0x20000000 +/* inverted bitmask for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_mskn 0xdfffffff +/* lower bit position of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_shift 29 +/* width of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_width 1 +/* default value of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_default 0x0 + +/* rx l4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "l4_chk_en". + * port="pif_rpo_l4_chk_en_i" + */ + +/* register address for bitfield l4_chk_en */ +#define rpol4chk_en_adr 0x00005580 +/* bitmask for bitfield l4_chk_en */ +#define rpol4chk_en_msk 0x00000001 +/* inverted bitmask for bitfield l4_chk_en */ +#define rpol4chk_en_mskn 0xfffffffe +/* lower bit position of bitfield l4_chk_en */ +#define rpol4chk_en_shift 0 +/* width of bitfield l4_chk_en */ +#define rpol4chk_en_width 1 +/* default value of bitfield l4_chk_en */ +#define rpol4chk_en_default 0x0 + +/* rx reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". 
+ * port="pif_rx_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_adr 0x00005000 +/* bitmask for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_default 0x1 + +/* tx dca{d}_cpuid[7:0] bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_tdm_dca0_cpuid_i[7:0]" + */ + +/* register address for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_msk 0x000000ff +/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_mskn 0xffffff00 +/* lower bit position of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_shift 0 +/* width of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_width 8 +/* default value of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_default 0x0 + +/* tx lso_en[1f:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_en[1f:0]". + * port="pif_tdm_lso_en_i[31:0]" + */ + +/* register address for bitfield lso_en[1f:0] */ +#define tdm_lso_en_adr 0x00007810 +/* bitmask for bitfield lso_en[1f:0] */ +#define tdm_lso_en_msk 0xffffffff +/* inverted bitmask for bitfield lso_en[1f:0] */ +#define tdm_lso_en_mskn 0x00000000 +/* lower bit position of bitfield lso_en[1f:0] */ +#define tdm_lso_en_shift 0 +/* width of bitfield lso_en[1f:0] */ +#define tdm_lso_en_width 32 +/* default value of bitfield lso_en[1f:0] */ +#define tdm_lso_en_default 0x0 + +/* tx dca_en bitfield definitions + * preprocessor definitions for the bitfield "dca_en". + * port="pif_tdm_dca_en_i" + */ + +/* register address for bitfield dca_en */ +#define tdm_dca_en_adr 0x00008480 +/* bitmask for bitfield dca_en */ +#define tdm_dca_en_msk 0x80000000 +/* inverted bitmask for bitfield dca_en */ +#define tdm_dca_en_mskn 0x7fffffff +/* lower bit position of bitfield dca_en */ +#define tdm_dca_en_shift 31 +/* width of bitfield dca_en */ +#define tdm_dca_en_width 1 +/* default value of bitfield dca_en */ +#define tdm_dca_en_default 0x1 + +/* tx dca_mode[3:0] bitfield definitions + * preprocessor definitions for the bitfield "dca_mode[3:0]". + * port="pif_tdm_dca_mode_i[3:0]" + */ + +/* register address for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_adr 0x00008480 +/* bitmask for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_msk 0x0000000f +/* inverted bitmask for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_mskn 0xfffffff0 +/* lower bit position of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_shift 0 +/* width of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_width 4 +/* default value of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_default 0x0 + +/* tx dca{d}_desc_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_desc_en". 
+ * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_tdm_dca_desc_en_i[0]" + */ + +/* register address for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_msk 0x80000000 +/* inverted bitmask for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_mskn 0x7fffffff +/* lower bit position of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_shift 31 +/* width of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_width 1 +/* default value of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_default 0x0 + +/* tx desc{d}_en bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_en". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc_en_i[0]" + */ + +/* register address for bitfield desc{d}_en */ +#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_en */ +#define tdm_descden_msk 0x80000000 +/* inverted bitmask for bitfield desc{d}_en */ +#define tdm_descden_mskn 0x7fffffff +/* lower bit position of bitfield desc{d}_en */ +#define tdm_descden_shift 31 +/* width of bitfield desc{d}_en */ +#define tdm_descden_width 1 +/* default value of bitfield desc{d}_en */ +#define tdm_descden_default 0x0 + +/* tx desc{d}_hd[c:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="tdm_pif_desc0_hd_o[12:0]" + */ + +/* register address for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_msk 0x00001fff +/* inverted bitmask for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_mskn 0xffffe000 +/* lower bit position of bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_shift 0 +/* width of bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_width 13 + +/* tx desc{d}_len[9:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_len[9:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc0_len_i[9:0]" + */ + +/* register address for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_msk 0x00001ff8 +/* inverted bitmask for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_mskn 0xffffe007 +/* lower bit position of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_shift 3 +/* width of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_width 10 +/* default value of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_default 0x0 + +/* tx int_desc_wrb_en bitfield definitions + * preprocessor definitions for the bitfield "int_desc_wrb_en". 
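+ *
+ * Usage sketch (illustration only): descriptor write-back interrupts
+ * are enabled globally with this bit and moderated per ring by
+ * desc{d}_wrb_thresh, defined further below. aq_read32()/aq_write32()
+ * are hypothetical register helpers:
+ *
+ *   aq_write32(tdm_int_desc_wrb_en_adr,
+ *              aq_read32(tdm_int_desc_wrb_en_adr) |
+ *              tdm_int_desc_wrb_en_msk);
+ *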
+ * port="pif_tdm_int_desc_wrb_en_i" + */ + +/* register address for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_adr 0x00007b40 +/* bitmask for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_msk 0x00000002 +/* inverted bitmask for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_mskn 0xfffffffd +/* lower bit position of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_shift 1 +/* width of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_width 1 +/* default value of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_default 0x0 + +/* tx desc{d}_wrb_thresh[6:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc0_wrb_thresh_i[6:0]" + */ + +/* register address for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_msk 0x00007f00 +/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_mskn 0xffff80ff +/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_shift 8 +/* width of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_width 7 +/* default value of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_default 0x0 + +/* tx lso_tcp_flag_first[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]". + * port="pif_thm_lso_tcp_flag_first_i[11:0]" + */ + +/* register address for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_adr 0x00007820 +/* bitmask for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_msk 0x00000fff +/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_mskn 0xfffff000 +/* lower bit position of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_shift 0 +/* width of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_width 12 +/* default value of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_default 0x0 + +/* tx lso_tcp_flag_last[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]". + * port="pif_thm_lso_tcp_flag_last_i[11:0]" + */ + +/* register address for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_adr 0x00007824 +/* bitmask for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_msk 0x00000fff +/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_mskn 0xfffff000 +/* lower bit position of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_shift 0 +/* width of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_width 12 +/* default value of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_default 0x0 + +/* tx lso_tcp_flag_mid[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_mid[b:0]". 
+ * port="pif_thm_lso_tcp_flag_mid_i[11:0]" + */ + +/* Register address for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_adr 0x00005598 +/* Bitmask for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_msk 0xFFFFFFFF +/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_mskn 0x00000000 +/* Lower bit position of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_shift 0 +/* Width of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_width 32 +/* Default value of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_default 0x0 + +/* RX lro_en[1F:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_en[1F:0]". + * PORT="pif_rpo_lro_en_i[31:0]" + */ + +/* Register address for bitfield lro_en[1F:0] */ +#define rpo_lro_en_adr 0x00005590 +/* Bitmask for bitfield lro_en[1F:0] */ +#define rpo_lro_en_msk 0xFFFFFFFF +/* Inverted bitmask for bitfield lro_en[1F:0] */ +#define rpo_lro_en_mskn 0x00000000 +/* Lower bit position of bitfield lro_en[1F:0] */ +#define rpo_lro_en_shift 0 +/* Width of bitfield lro_en[1F:0] */ +#define rpo_lro_en_width 32 +/* Default value of bitfield lro_en[1F:0] */ +#define rpo_lro_en_default 0x0 + +/* RX lro_ptopt_en Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_ptopt_en". + * PORT="pif_rpo_lro_ptopt_en_i" + */ + +/* Register address for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_adr 0x00005594 +/* Bitmask for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_msk 0x00008000 +/* Inverted bitmask for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF +/* Lower bit position of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_shift 15 +/* Width of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_width 1 +/* Default value of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_defalt 0x1 + +/* RX lro_q_ses_lmt Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_q_ses_lmt". + * PORT="pif_rpo_lro_q_ses_lmt_i[1:0]" + */ + +/* Register address for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_adr 0x00005594 +/* Bitmask for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_msk 0x00003000 +/* Inverted bitmask for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF +/* Lower bit position of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_shift 12 +/* Width of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_width 2 +/* Default value of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_default 0x1 + +/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]". + * PORT="pif_rpo_lro_tot_dsc_lmt_i[1:0]" + */ + +/* Register address for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_adr 0x00005594 +/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_msk 0x00000060 +/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F +/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_shift 5 +/* Width of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_width 2 +/* Default value of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_defalt 0x1 + +/* RX lro_pkt_min[4:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]". 
+ * PORT="pif_rpo_lro_pkt_min_i[4:0]" + */ + +/* Register address for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_adr 0x00005594 +/* Bitmask for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_msk 0x0000001F +/* Inverted bitmask for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_mskn 0xFFFFFFE0 +/* Lower bit position of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_shift 0 +/* Width of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_width 5 +/* Default value of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_default 0x8 + +/* Width of bitfield lro{L}_des_max[1:0] */ +#define rpo_lro_ldes_max_width 2 +/* Default value of bitfield lro{L}_des_max[1:0] */ +#define rpo_lro_ldes_max_default 0x0 + +/* RX lro_tb_div[11:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_tb_div[11:0]". + * PORT="pif_rpo_lro_tb_div_i[11:0]" + */ + +/* Register address for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_adr 0x00005620 +/* Bitmask for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_msk 0xFFF00000 +/* Inverted bitmask for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_mskn 0x000FFFFF +/* Lower bit position of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_shift 20 +/* Width of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_width 12 +/* Default value of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_default 0xC35 + +/* RX lro_ina_ival[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_ina_ival[9:0]". + * PORT="pif_rpo_lro_ina_ival_i[9:0]" + */ + +/* Register address for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_adr 0x00005620 +/* Bitmask for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_msk 0x000FFC00 +/* Inverted bitmask for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_mskn 0xFFF003FF +/* Lower bit position of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_shift 10 +/* Width of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_width 10 +/* Default value of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_default 0xA + +/* RX lro_max_ival[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_max_ival[9:0]". + * PORT="pif_rpo_lro_max_ival_i[9:0]" + */ + +/* Register address for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_adr 0x00005620 +/* Bitmask for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_msk 0x000003FF +/* Inverted bitmask for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_mskn 0xFFFFFC00 +/* Lower bit position of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_shift 0 +/* Width of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_width 10 +/* Default value of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_default 0x19 + +/* TX dca{D}_cpuid[7:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]". 
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31] + * PORT="pif_tdm_dca0_cpuid_i[7:0]" + */ + +/* Register address for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4) +/* Bitmask for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_msk 0x000000FF +/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_mskn 0xFFFFFF00 +/* Lower bit position of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_shift 0 +/* Width of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_width 8 +/* Default value of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_default 0x0 + +/* TX dca{D}_desc_en Bitfield Definitions + * Preprocessor definitions for the bitfield "dca{D}_desc_en". + * Parameter: DCA {D} | stride size 0x4 | range [0, 31] + * PORT="pif_tdm_dca_desc_en_i[0]" + */ + +/* Register address for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4) +/* Bitmask for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_msk 0x80000000 +/* Inverted bitmask for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF +/* Lower bit position of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_shift 31 +/* Width of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_width 1 +/* Default value of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_default 0x0 + +/* TX desc{D}_en Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_en". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc_en_i[0]" + */ + +/* Register address for bitfield desc{D}_en */ +#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_en */ +#define tdm_desc_den_msk 0x80000000 +/* Inverted bitmask for bitfield desc{D}_en */ +#define tdm_desc_den_mskn 0x7FFFFFFF +/* Lower bit position of bitfield desc{D}_en */ +#define tdm_desc_den_shift 31 +/* Width of bitfield desc{D}_en */ +#define tdm_desc_den_width 1 +/* Default value of bitfield desc{D}_en */ +#define tdm_desc_den_default 0x0 + +/* TX desc{D}_hd[C:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="tdm_pif_desc0_hd_o[12:0]" + */ + +/* Register address for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_msk 0x00001FFF +/* Inverted bitmask for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_mskn 0xFFFFE000 +/* Lower bit position of bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_shift 0 +/* Width of bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_width 13 + +/* TX desc{D}_len[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_len[9:0]". 
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc0_len_i[9:0]" + */ + +/* Register address for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_msk 0x00001FF8 +/* Inverted bitmask for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_mskn 0xFFFFE007 +/* Lower bit position of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_shift 3 +/* Width of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_width 10 +/* Default value of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_default 0x0 + +/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc0_wrb_thresh_i[6:0]" + */ + +/* Register address for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_adr(descriptor) \ + (0x00007C18 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_msk 0x00007F00 +/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF +/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_shift 8 +/* Width of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_width 7 +/* Default value of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_default 0x0 + +/* TX tdm_int_mod_en Bitfield Definitions + * Preprocessor definitions for the bitfield "tdm_int_mod_en". + * PORT="pif_tdm_int_mod_en_i" + */ + +/* Register address for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_adr 0x00007B40 +/* Bitmask for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_msk 0x00000010 +/* Inverted bitmask for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_mskn 0xFFFFFFEF +/* Lower bit position of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_shift 4 +/* Width of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_width 1 +/* Default value of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_default 0x0 + +/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]". + * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]" + */ +/* register address for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_adr 0x00007820 +/* bitmask for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_msk 0x0fff0000 +/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_mskn 0xf000ffff +/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_shift 16 +/* width of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_width 12 +/* default value of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_default 0x0 + +/* tx tx_buf_en bitfield definitions + * preprocessor definitions for the bitfield "tx_buf_en". 
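+ *
+ * Usage sketch (illustration only): the TX packet buffer would
+ * normally be enabled only after the per-buffer sizes and watermarks
+ * defined below have been programmed. With hypothetical
+ * aq_read32()/aq_write32() register helpers:
+ *
+ *   aq_write32(tpb_tx_buf_en_adr,
+ *              aq_read32(tpb_tx_buf_en_adr) | tpb_tx_buf_en_msk);
+ *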
+ * port="pif_tpb_tx_buf_en_i" + */ + +/* register address for bitfield tx_buf_en */ +#define tpb_tx_buf_en_adr 0x00007900 +/* bitmask for bitfield tx_buf_en */ +#define tpb_tx_buf_en_msk 0x00000001 +/* inverted bitmask for bitfield tx_buf_en */ +#define tpb_tx_buf_en_mskn 0xfffffffe +/* lower bit position of bitfield tx_buf_en */ +#define tpb_tx_buf_en_shift 0 +/* width of bitfield tx_buf_en */ +#define tpb_tx_buf_en_width 1 +/* default value of bitfield tx_buf_en */ +#define tpb_tx_buf_en_default 0x0 + +/* tx tx{b}_hi_thresh[c:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_hi_thresh_i[12:0]" + */ + +/* register address for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_msk 0x1fff0000 +/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_mskn 0xe000ffff +/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_shift 16 +/* width of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_width 13 +/* default value of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_default 0x0 + +/* tx tx{b}_lo_thresh[c:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_lo_thresh_i[12:0]" + */ + +/* register address for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_msk 0x00001fff +/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_mskn 0xffffe000 +/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_shift 0 +/* width of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_width 13 +/* default value of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_default 0x0 + +/* tx dma_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_sys_loopback". + * port="pif_tpb_dma_sys_lbk_i" + */ + +/* register address for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_adr 0x00007000 +/* bitmask for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_msk 0x00000040 +/* inverted bitmask for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_mskn 0xffffffbf +/* lower bit position of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_shift 6 +/* width of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_width 1 +/* default value of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_default 0x0 + +/* tx tx{b}_buf_size[7:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]". 
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_buf_size_i[7:0]" + */ + +/* register address for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_msk 0x000000ff +/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_mskn 0xffffff00 +/* lower bit position of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_shift 0 +/* width of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_width 8 +/* default value of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_default 0x0 + +/* tx tx_scp_ins_en bitfield definitions + * preprocessor definitions for the bitfield "tx_scp_ins_en". + * port="pif_tpb_scp_ins_en_i" + */ + +/* register address for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_adr 0x00007900 +/* bitmask for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_msk 0x00000004 +/* inverted bitmask for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_mskn 0xfffffffb +/* lower bit position of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_shift 2 +/* width of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_width 1 +/* default value of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_default 0x0 + +/* tx ipv4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "ipv4_chk_en". + * port="pif_tpo_ipv4_chk_en_i" + */ + +/* register address for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_adr 0x00007800 +/* bitmask for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_msk 0x00000002 +/* inverted bitmask for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_mskn 0xfffffffd +/* lower bit position of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_shift 1 +/* width of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_width 1 +/* default value of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_default 0x0 + +/* tx l4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "l4_chk_en". + * port="pif_tpo_l4_chk_en_i" + */ + +/* register address for bitfield l4_chk_en */ +#define tpol4chk_en_adr 0x00007800 +/* bitmask for bitfield l4_chk_en */ +#define tpol4chk_en_msk 0x00000001 +/* inverted bitmask for bitfield l4_chk_en */ +#define tpol4chk_en_mskn 0xfffffffe +/* lower bit position of bitfield l4_chk_en */ +#define tpol4chk_en_shift 0 +/* width of bitfield l4_chk_en */ +#define tpol4chk_en_width 1 +/* default value of bitfield l4_chk_en */ +#define tpol4chk_en_default 0x0 + +/* tx pkt_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "pkt_sys_loopback". + * port="pif_tpo_pkt_sys_lbk_i" + */ + +/* register address for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_adr 0x00007000 +/* bitmask for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_msk 0x00000080 +/* inverted bitmask for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_mskn 0xffffff7f +/* lower bit position of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_shift 7 +/* width of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_width 1 +/* default value of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_default 0x0 + +/* tx data_tc_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "data_tc_arb_mode". 
+ * port="pif_tps_data_tc_arb_mode_i" + */ + +/* register address for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_adr 0x00007100 +/* bitmask for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_msk 0x00000001 +/* inverted bitmask for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_mskn 0xfffffffe +/* lower bit position of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_shift 0 +/* width of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_width 1 +/* default value of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_default 0x0 + +/* tx desc_rate_ta_rst bitfield definitions + * preprocessor definitions for the bitfield "desc_rate_ta_rst". + * port="pif_tps_desc_rate_ta_rst_i" + */ + +/* register address for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_adr 0x00007310 +/* bitmask for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_msk 0x80000000 +/* inverted bitmask for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_mskn 0x7fffffff +/* lower bit position of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_shift 31 +/* width of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_width 1 +/* default value of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_default 0x0 + +/* tx desc_rate_limit[a:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_rate_limit[a:0]". + * port="pif_tps_desc_rate_lim_i[10:0]" + */ + +/* register address for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_adr 0x00007310 +/* bitmask for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_msk 0x000007ff +/* inverted bitmask for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_mskn 0xfffff800 +/* lower bit position of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_shift 0 +/* width of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_width 11 +/* default value of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_default 0x0 + +/* tx desc_tc_arb_mode[1:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]". + * port="pif_tps_desc_tc_arb_mode_i[1:0]" + */ + +/* register address for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_adr 0x00007200 +/* bitmask for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_msk 0x00000003 +/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_mskn 0xfffffffc +/* lower bit position of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_shift 0 +/* width of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_width 2 +/* default value of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_default 0x0 + +/* tx desc_tc{t}_credit_max[b:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]". 
+ * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_desc_tc0_credit_max_i[11:0]" + */ + +/* register address for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4) +/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_msk 0x0fff0000 +/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_mskn 0xf000ffff +/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_shift 16 +/* width of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_width 12 +/* default value of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_default 0x0 + +/* tx desc_tc{t}_weight[8:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_desc_tc0_weight_i[8:0]" + */ + +/* register address for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4) +/* bitmask for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_msk 0x000001ff +/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_mskn 0xfffffe00 +/* lower bit position of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_shift 0 +/* width of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_width 9 +/* default value of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_default 0x0 + +/* tx desc_vm_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "desc_vm_arb_mode". + * port="pif_tps_desc_vm_arb_mode_i" + */ + +/* register address for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_adr 0x00007300 +/* bitmask for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_msk 0x00000001 +/* inverted bitmask for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_mskn 0xfffffffe +/* lower bit position of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_shift 0 +/* width of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_width 1 +/* default value of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_default 0x0 + +/* tx data_tc{t}_credit_max[b:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_credit_max_i[11:0]" + */ + +/* register address for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_msk 0x0fff0000 +/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_mskn 0xf000ffff +/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_shift 16 +/* width of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_width 12 +/* default value of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_default 0x0 + +/* tx data_tc{t}_weight[8:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]". 
+ * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_weight_i[8:0]" + */ + +/* register address for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_msk 0x000001ff +/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_mskn 0xfffffe00 +/* lower bit position of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_shift 0 +/* width of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_width 9 +/* default value of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_default 0x0 + +/* tx reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". + * port="pif_tx_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_adr 0x00007000 +/* bitmask for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_default 0x1 + +/* mac_phy register access busy bitfield definitions + * preprocessor definitions for the bitfield "register access busy". + * port="msm_pif_reg_busy_o" + */ + +/* register address for bitfield register access busy */ +#define msm_reg_access_busy_adr 0x00004400 +/* bitmask for bitfield register access busy */ +#define msm_reg_access_busy_msk 0x00001000 +/* inverted bitmask for bitfield register access busy */ +#define msm_reg_access_busy_mskn 0xffffefff +/* lower bit position of bitfield register access busy */ +#define msm_reg_access_busy_shift 12 +/* width of bitfield register access busy */ +#define msm_reg_access_busy_width 1 + +/* mac_phy msm register address[7:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register address[7:0]". + * port="pif_msm_reg_addr_i[7:0]" + */ + +/* register address for bitfield msm register address[7:0] */ +#define msm_reg_addr_adr 0x00004400 +/* bitmask for bitfield msm register address[7:0] */ +#define msm_reg_addr_msk 0x000000ff +/* inverted bitmask for bitfield msm register address[7:0] */ +#define msm_reg_addr_mskn 0xffffff00 +/* lower bit position of bitfield msm register address[7:0] */ +#define msm_reg_addr_shift 0 +/* width of bitfield msm register address[7:0] */ +#define msm_reg_addr_width 8 +/* default value of bitfield msm register address[7:0] */ +#define msm_reg_addr_default 0x0 + +/* mac_phy register read strobe bitfield definitions + * preprocessor definitions for the bitfield "register read strobe". 
+ * port="pif_msm_reg_rden_i" + */ + +/* register address for bitfield register read strobe */ +#define msm_reg_rd_strobe_adr 0x00004400 +/* bitmask for bitfield register read strobe */ +#define msm_reg_rd_strobe_msk 0x00000200 +/* inverted bitmask for bitfield register read strobe */ +#define msm_reg_rd_strobe_mskn 0xfffffdff +/* lower bit position of bitfield register read strobe */ +#define msm_reg_rd_strobe_shift 9 +/* width of bitfield register read strobe */ +#define msm_reg_rd_strobe_width 1 +/* default value of bitfield register read strobe */ +#define msm_reg_rd_strobe_default 0x0 + +/* mac_phy msm register read data[31:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register read data[31:0]". + * port="msm_pif_reg_rd_data_o[31:0]" + */ + +/* register address for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_adr 0x00004408 +/* bitmask for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_msk 0xffffffff +/* inverted bitmask for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_mskn 0x00000000 +/* lower bit position of bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_shift 0 +/* width of bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_width 32 + +/* mac_phy msm register write data[31:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register write data[31:0]". + * port="pif_msm_reg_wr_data_i[31:0]" + */ + +/* register address for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_adr 0x00004404 +/* bitmask for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_msk 0xffffffff +/* inverted bitmask for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_mskn 0x00000000 +/* lower bit position of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_shift 0 +/* width of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_width 32 +/* default value of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_default 0x0 + +/* mac_phy register write strobe bitfield definitions + * preprocessor definitions for the bitfield "register write strobe". + * port="pif_msm_reg_wren_i" + */ + +/* register address for bitfield register write strobe */ +#define msm_reg_wr_strobe_adr 0x00004400 +/* bitmask for bitfield register write strobe */ +#define msm_reg_wr_strobe_msk 0x00000100 +/* inverted bitmask for bitfield register write strobe */ +#define msm_reg_wr_strobe_mskn 0xfffffeff +/* lower bit position of bitfield register write strobe */ +#define msm_reg_wr_strobe_shift 8 +/* width of bitfield register write strobe */ +#define msm_reg_wr_strobe_width 1 +/* default value of bitfield register write strobe */ +#define msm_reg_wr_strobe_default 0x0 + +/* mif soft reset bitfield definitions + * preprocessor definitions for the bitfield "soft reset". + * port="pif_glb_res_i" + */ + +/* register address for bitfield soft reset */ +#define glb_soft_res_adr 0x00000000 +/* bitmask for bitfield soft reset */ +#define glb_soft_res_msk 0x00008000 +/* inverted bitmask for bitfield soft reset */ +#define glb_soft_res_mskn 0xffff7fff +/* lower bit position of bitfield soft reset */ +#define glb_soft_res_shift 15 +/* width of bitfield soft reset */ +#define glb_soft_res_width 1 +/* default value of bitfield soft reset */ +#define glb_soft_res_default 0x0 + +/* mif register reset disable bitfield definitions + * preprocessor definitions for the bitfield "register reset disable". 
+ * port="pif_glb_reg_res_dsbl_i" + */ + +/* register address for bitfield register reset disable */ +#define glb_reg_res_dis_adr 0x00000000 +/* bitmask for bitfield register reset disable */ +#define glb_reg_res_dis_msk 0x00004000 +/* inverted bitmask for bitfield register reset disable */ +#define glb_reg_res_dis_mskn 0xffffbfff +/* lower bit position of bitfield register reset disable */ +#define glb_reg_res_dis_shift 14 +/* width of bitfield register reset disable */ +#define glb_reg_res_dis_width 1 +/* default value of bitfield register reset disable */ +#define glb_reg_res_dis_default 0x1 + +/* tx dma debug control definitions */ +#define tx_dma_debug_ctl_adr 0x00008920u + +/* tx dma descriptor base address msw definitions */ +#define tx_dma_desc_base_addrmsw_adr(descriptor) \ + (0x00007c04u + (descriptor) * 0x40) + +/* tx interrupt moderation control register definitions + * Preprocessor definitions for TX Interrupt Moderation Control Register + * Base Address: 0x00008980 + * Parameter: queue {Q} | stride size 0x4 | range [0, 31] + */ + +#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4) + +/* pcie reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". + * port="pif_pci_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_adr 0x00001000 +/* bitmask for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_default 0x1 + +/* global microprocessor scratch pad definitions */ +#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) + +#endif /* HW_ATL_LLH_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c new file mode 100644 index 000000000000..8d6d8f5804da --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -0,0 +1,570 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware + * abstraction layer. 
+ */ + +#include "../aq_hw.h" +#include "../aq_hw_utils.h" +#include "../aq_pci_func.h" +#include "../aq_ring.h" +#include "../aq_vec.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" + +#include <linux/random.h> + +#define HW_ATL_UCP_0X370_REG 0x0370U + +#define HW_ATL_FW_SM_RAM 0x2U +#define HW_ATL_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_MPI_STATE_ADR 0x036CU + +#define HW_ATL_MPI_STATE_MSK 0x00FFU +#define HW_ATL_MPI_STATE_SHIFT 0U +#define HW_ATL_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_MPI_SPEED_SHIFT 16U + +static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, + u32 *p, u32 cnt) +{ + int err = 0; + + AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self, + HW_ATL_FW_SM_RAM) == 1U, + 1U, 10000U); + + if (err < 0) { + bool is_locked; + + reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + if (!is_locked) { + err = -ETIME; + goto err_exit; + } + } + + aq_hw_write_reg(self, 0x00000208U, a); + + for (++cnt; --cnt;) { + u32 i = 0U; + + aq_hw_write_reg(self, 0x00000200U, 0x00008000U); + + for (i = 1024U; + (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { + } + + *(p++) = aq_hw_read_reg(self, 0x0000020CU); + } + + reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + +err_exit: + return err; +} + +static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, + u32 cnt) +{ + int err = 0; + bool is_locked; + + is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + if (!is_locked) { + err = -ETIME; + goto err_exit; + } + + aq_hw_write_reg(self, 0x00000208U, a); + + for (++cnt; --cnt;) { + u32 i = 0U; + + aq_hw_write_reg(self, 0x0000020CU, *(p++)); + aq_hw_write_reg(self, 0x00000200U, 0xC000U); + + for (i = 1024U; + (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { + } + } + + reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + +err_exit: + return err; +} + +static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual) +{ + int err = 0; + const u32 dw_major_mask = 0xff000000U; + const u32 dw_minor_mask = 0x00ffffffU; + + err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0; + if (err < 0) + goto err_exit; + err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ? 
+ -EOPNOTSUPP : 0; +err_exit: + return err; +} + +static int hw_atl_utils_init_ucp(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + int err = 0; + + if (!aq_hw_read_reg(self, 0x370U)) { + unsigned int rnd = 0U; + unsigned int ucp_0x370 = 0U; + + get_random_bytes(&rnd, sizeof(unsigned int)); + + ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd); + aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); + } + + reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr = + aq_hw_read_reg(self, 0x360U)), 1000U, 10U); + + err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected, + aq_hw_read_reg(self, 0x18U)); + return err; +} + +#define HW_ATL_RPC_CONTROL_ADR 0x0338U +#define HW_ATL_RPC_STATE_ADR 0x033CU + +struct aq_hw_atl_utils_fw_rpc_tid_s { + union { + u32 val; + struct { + u16 tid; + u16 len; + }; + }; +}; + +#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL) + +static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) +{ + int err = 0; + struct aq_hw_atl_utils_fw_rpc_tid_s sw; + + if (!IS_CHIP_FEATURE(MIPS)) { + err = -1; + goto err_exit; + } + err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr, + (u32 *)(void *)&PHAL_ATLANTIC->rpc, + (rpc_size + sizeof(u32) - + sizeof(u8)) / sizeof(u32)); + if (err < 0) + goto err_exit; + + sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid); + sw.len = (u16)rpc_size; + aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val); + +err_exit: + return err; +} + +static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_aq_atl_utils_fw_rpc **rpc) +{ + int err = 0; + struct aq_hw_atl_utils_fw_rpc_tid_s sw; + struct aq_hw_atl_utils_fw_rpc_tid_s fw; + + do { + sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR); + + PHAL_ATLANTIC->rpc_tid = sw.tid; + + AQ_HW_WAIT_FOR(sw.tid == + (fw.val = + aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR), + fw.tid), 1000U, 100U); + if (err < 0) + goto err_exit; + + if (fw.len == 0xFFFFU) { + err = hw_atl_utils_fw_rpc_call(self, sw.len); + if (err < 0) + goto err_exit; + } + } while (sw.tid != fw.tid || 0xFFFFU == fw.len); + if (err < 0) + goto err_exit; + + if (rpc) { + if (fw.len) { + err = + hw_atl_utils_fw_downld_dwords(self, + PHAL_ATLANTIC->rpc_addr, + (u32 *)(void *) + &PHAL_ATLANTIC->rpc, + (fw.len + sizeof(u32) - + sizeof(u8)) / + sizeof(u32)); + if (err < 0) + goto err_exit; + } + + *rpc = &PHAL_ATLANTIC->rpc; + } + +err_exit: + return err; +} + +static int hw_atl_utils_mpi_create(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + int err = 0; + + err = hw_atl_utils_init_ucp(self, aq_hw_caps); + if (err < 0) + goto err_exit; + + err = hw_atl_utils_fw_rpc_init(self); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox *pmbox) +{ + int err = 0; + + err = hw_atl_utils_fw_downld_dwords(self, + PHAL_ATLANTIC->mbox_addr, + (u32 *)(void *)pmbox, + sizeof(*pmbox) / sizeof(u32)); + if (err < 0) + goto err_exit; + + if (pmbox != &PHAL_ATLANTIC->mbox) + memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox)); + + if (IS_CHIP_FEATURE(REVISION_A0)) { + unsigned int mtu = self->aq_nic_cfg ? 
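hw_atl_utils_fw_rpc_call() and hw_atl_utils_fw_rpc_wait() implement a mailbox handshake: the driver uploads the request, packs a 16-bit transaction id and the length into one control dword, and polls the state register until the firmware echoes the id back (a length of 0xFFFF asks for a retransmit). A hedged usage sketch; the message id value is hypothetical, only the call/wait pairing comes from the code above:

```c
/* Usage sketch of the RPC mailbox. The msg_id value is hypothetical;
 * real ids are firmware-defined and not visible in this patch.
 */
static int hw_atl_fw_rpc_ping_sketch(struct aq_hw_s *self)
{
	struct hw_aq_atl_utils_fw_rpc *rpc = NULL;
	int err;

	PHAL_ATLANTIC->rpc.msg_id = 0x1U;	/* hypothetical ping id */

	err = hw_atl_utils_fw_rpc_call(self,
				       sizeof(PHAL_ATLANTIC->rpc.msg_id) +
				       sizeof(PHAL_ATLANTIC->rpc.msg_ping));
	if (err < 0)
		return err;

	/* blocks until the firmware echoes back our transaction id */
	return hw_atl_utils_fw_rpc_wait(self, &rpc);
}
```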
+ self->aq_nic_cfg->mtu : 1514U; + pmbox->stats.ubrc = pmbox->stats.uprc * mtu; + pmbox->stats.ubtc = pmbox->stats.uptc * mtu; + pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc); + } else { + pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self); + } + +err_exit:; +} + +int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, + enum hal_atl_utils_fw_state_e state) +{ + u32 ucp_0x368 = 0; + + ucp_0x368 = (speed << HW_ATL_MPI_SPEED_SHIFT) | state; + aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ucp_0x368); + + return 0; +} + +void hw_atl_utils_mpi_set(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state, u32 speed) +{ + int err = 0; + u32 transaction_id = 0; + + if (state == MPI_RESET) { + hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); + + transaction_id = PHAL_ATLANTIC->mbox.transaction_id; + + AQ_HW_WAIT_FOR(transaction_id != + (hw_atl_utils_mpi_read_stats + (self, &PHAL_ATLANTIC->mbox), + PHAL_ATLANTIC->mbox.transaction_id), + 1000U, 100U); + if (err < 0) + goto err_exit; + } + + err = hw_atl_utils_mpi_set_speed(self, speed, state); + +err_exit:; +} + +int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, + struct aq_hw_link_status_s *link_status) +{ + u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR); + u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT; + + if (!link_speed_mask) { + link_status->mbps = 0U; + } else { + switch (link_speed_mask) { + case HAL_ATLANTIC_RATE_10G: + link_status->mbps = 10000U; + break; + + case HAL_ATLANTIC_RATE_5G: + case HAL_ATLANTIC_RATE_5GSR: + link_status->mbps = 5000U; + break; + + case HAL_ATLANTIC_RATE_2GS: + link_status->mbps = 2500U; + break; + + case HAL_ATLANTIC_RATE_1G: + link_status->mbps = 1000U; + break; + + case HAL_ATLANTIC_RATE_100M: + link_status->mbps = 100U; + break; + + default: + link_status->mbps = 0U; + break; + } + } + + return 0; +} + +int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u8 *mac) +{ + int err = 0; + u32 h = 0U; + u32 l = 0U; + u32 mac_addr[2]; + + self->mmio = aq_pci_func_get_mmio(self->aq_pci_func); + + hw_atl_utils_hw_chip_features_init(self, + &PHAL_ATLANTIC_A0->chip_features); + + err = hw_atl_utils_mpi_create(self, aq_hw_caps); + if (err < 0) + goto err_exit; + + if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) { + unsigned int rnd = 0; + unsigned int ucp_0x370 = 0; + + get_random_bytes(&rnd, sizeof(unsigned int)); + + ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd); + aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); + } + + err = hw_atl_utils_fw_downld_dwords(self, + aq_hw_read_reg(self, 0x00000374U) + + (40U * 4U), + mac_addr, + AQ_DIMOF(mac_addr)); + if (err < 0) { + mac_addr[0] = 0U; + mac_addr[1] = 0U; + err = 0; + } else { + mac_addr[0] = __swab32(mac_addr[0]); + mac_addr[1] = __swab32(mac_addr[1]); + } + + ether_addr_copy(mac, (u8 *)mac_addr); + + if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) { + /* chip revision */ + l = 0xE3000000U + | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) + | (0x00 << 16); + h = 0x8001300EU; + + mac[5] = (u8)(0xFFU & l); + l >>= 8; + mac[4] = (u8)(0xFFU & l); + l >>= 8; + mac[3] = (u8)(0xFFU & l); + l >>= 8; + mac[2] = (u8)(0xFFU & l); + mac[1] = (u8)(0xFFU & h); + h >>= 8; + mac[0] = (u8)(0xFFU & h); + } + +err_exit: + return err; +} + +unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps) +{ + unsigned int ret = 0U; + + switch (mbps) { + case 100U: + ret = 5U; + break; + + case 1000U: + ret = 4U; + break; + + case 2500U: + ret = 3U; + break; + + 
case 5000U: + ret = 1U; + break; + + case 10000U: + ret = 0U; + break; + + default: + break; + } + return ret; +} + +void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p) +{ + u32 chip_features = 0U; + u32 val = reg_glb_mif_id_get(self); + u32 mif_rev = val & 0xFFU; + + if ((3U & mif_rev) == 1U) { + chip_features |= + HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 | + HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | + HAL_ATLANTIC_UTILS_CHIP_MIPS; + } else if ((3U & mif_rev) == 2U) { + chip_features |= + HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | + HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | + HAL_ATLANTIC_UTILS_CHIP_MIPS | + HAL_ATLANTIC_UTILS_CHIP_TPO2 | + HAL_ATLANTIC_UTILS_CHIP_RPF2; + } + + *p = chip_features; +} + +int hw_atl_utils_hw_deinit(struct aq_hw_s *self) +{ + hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U); + return 0; +} + +int hw_atl_utils_hw_set_power(struct aq_hw_s *self, + unsigned int power_state) +{ + hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U); + return 0; +} + +int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, + u64 *data, unsigned int *p_count) +{ + struct hw_atl_stats_s *stats = NULL; + int i = 0; + + hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); + + stats = &PHAL_ATLANTIC->mbox.stats; + + data[i] = stats->uprc + stats->mprc + stats->bprc; + data[++i] = stats->uprc; + data[++i] = stats->mprc; + data[++i] = stats->bprc; + data[++i] = stats->erpt; + data[++i] = stats->uptc + stats->mptc + stats->bptc; + data[++i] = stats->uptc; + data[++i] = stats->mptc; + data[++i] = stats->bptc; + data[++i] = stats->ubrc; + data[++i] = stats->ubtc; + data[++i] = stats->mbrc; + data[++i] = stats->mbtc; + data[++i] = stats->bbrc; + data[++i] = stats->bbtc; + data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; + data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; + data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self); + data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self); + data[++i] = stats_rx_dma_good_octet_counterlsw_get(self); + data[++i] = stats_tx_dma_good_octet_counterlsw_get(self); + data[++i] = stats->dpc; + + if (p_count) + *p_count = ++i; + + return 0; +} + +static const u32 hw_atl_utils_hw_mac_regs[] = { + 0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U, + 0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U, + 0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U, + 0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U, + 0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U, + 0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U, + 0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U, + 0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U, + 0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U, + 0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U, + 0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U, + 0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U, + 0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U, + 0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U, + 0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U, + 0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U, + 0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU, + 0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU, + 0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U, + 0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U, + 0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U, + 0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U, +}; + +int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u32 *regs_buff) +{ + unsigned int i = 0U; + + for (i = 0; i < aq_hw_caps->mac_regs_count; i++) + regs_buff[i] = 
aq_hw_read_reg(self, + hw_atl_utils_hw_mac_regs[i]); + return 0; +} + +int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) +{ + *fw_version = aq_hw_read_reg(self, 0x18U); + return 0; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h new file mode 100644 index 000000000000..b8e3d88f0879 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -0,0 +1,210 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware + * abstraction layer. + */ + +#ifndef HW_ATL_UTILS_H +#define HW_ATL_UTILS_H + +#include "../aq_common.h" + +#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); } + +struct __packed hw_atl_stats_s { + u32 uprc; + u32 mprc; + u32 bprc; + u32 erpt; + u32 uptc; + u32 mptc; + u32 bptc; + u32 erpr; + u32 mbtc; + u32 bbtc; + u32 mbrc; + u32 bbrc; + u32 ubrc; + u32 ubtc; + u32 dpc; +}; + +union __packed ip_addr { + struct { + u8 addr[16]; + } v6; + struct { + u8 padding[12]; + u8 addr[4]; + } v4; +}; + +struct __packed hw_aq_atl_utils_fw_rpc { + u32 msg_id; + + union { + struct { + u32 pong; + } msg_ping; + + struct { + u8 mac_addr[6]; + u32 ip_addr_cnt; + + struct { + union ip_addr addr; + union ip_addr mask; + } ip[1]; + } msg_arp; + + struct { + u32 len; + u8 packet[1514U]; + } msg_inject; + + struct { + u32 priority; + u32 wol_packet_type; + u16 friendly_name_len; + u16 friendly_name[65]; + u32 pattern_id; + u32 next_wol_pattern_offset; + + union { + struct { + u32 flags; + u8 ipv4_source_address[4]; + u8 ipv4_dest_address[4]; + u16 tcp_source_port_number; + u16 tcp_dest_port_number; + } ipv4_tcp_syn_parameters; + + struct { + u32 flags; + u8 ipv6_source_address[16]; + u8 ipv6_dest_address[16]; + u16 tcp_source_port_number; + u16 tcp_dest_port_number; + } ipv6_tcp_syn_parameters; + + struct { + u32 flags; + } eapol_request_id_message_parameters; + + struct { + u32 flags; + u32 mask_offset; + u32 mask_size; + u32 pattern_offset; + u32 pattern_size; + } wol_bit_map_pattern; + } wol_pattern; + } msg_wol; + + struct { + u32 is_wake_on_link_down; + u32 is_wake_on_link_up; + } msg_wolink; + }; +}; + +struct __packed hw_aq_atl_utils_mbox { + u32 version; + u32 transaction_id; + int error; + struct hw_atl_stats_s stats; +}; + +struct __packed hw_atl_s { + struct aq_hw_s base; + struct hw_aq_atl_utils_mbox mbox; + u64 speed; + u32 itr_tx; + u32 itr_rx; + unsigned int chip_features; + u32 fw_ver_actual; + atomic_t dpc; + u32 mbox_addr; + u32 rpc_addr; + u32 rpc_tid; + struct hw_aq_atl_utils_fw_rpc rpc; +}; + +#define SELF ((struct hw_atl_s *)self) + +#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self))) +#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self))) +#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self))) + +#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U +#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U +#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U +#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U +#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U +#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U + +#define IS_CHIP_FEATURE(_F_) 
(HAL_ATLANTIC_UTILS_CHIP_##_F_ & \ + PHAL_ATLANTIC->chip_features) + +enum hal_atl_utils_fw_state_e { + MPI_DEINIT = 0, + MPI_RESET = 1, + MPI_INIT = 2, + MPI_POWER = 4, +}; + +#define HAL_ATLANTIC_RATE_10G BIT(0) +#define HAL_ATLANTIC_RATE_5G BIT(1) +#define HAL_ATLANTIC_RATE_5GSR BIT(2) +#define HAL_ATLANTIC_RATE_2GS BIT(3) +#define HAL_ATLANTIC_RATE_1G BIT(4) +#define HAL_ATLANTIC_RATE_100M BIT(5) +#define HAL_ATLANTIC_RATE_INVALID BIT(6) + +void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); + +void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox *pmbox); + +void hw_atl_utils_mpi_set(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state, + u32 speed); + +int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, + enum hal_atl_utils_fw_state_e state); + +int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, + struct aq_hw_link_status_s *link_status); + +int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u8 *mac); + +unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps); + +int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u32 *regs_buff); + +int hw_atl_utils_hw_get_settings(struct aq_hw_s *self, + struct ethtool_cmd *cmd); + +int hw_atl_utils_hw_set_power(struct aq_hw_s *self, + unsigned int power_state); + +int hw_atl_utils_hw_deinit(struct aq_hw_s *self); + +int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); + +int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, + u64 *data, + unsigned int *p_count); + +#endif /* HW_ATL_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h new file mode 100644 index 000000000000..0de858d215c2 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -0,0 +1,18 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
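The PHAL_ATLANTIC* casts above are safe because struct hw_atl_s places its struct aq_hw_s base as the first member, so a pointer to the base also points at the containing object; IS_CHIP_FEATURE() then token-pastes the feature name against that private state. A small sketch of what an invocation expands to:

```c
/* Sketch: IS_CHIP_FEATURE(MIPS) expands to
 *   HAL_ATLANTIC_UTILS_CHIP_MIPS &
 *   ((struct hw_atl_s *)self)->chip_features
 * valid because the aq_hw_s base sits at offset 0 of hw_atl_s.
 */
static bool hw_atl_has_mips_sketch(struct aq_hw_s *self)
{
	return !!IS_CHIP_FEATURE(MIPS);
}
```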
+ */ + +#ifndef VER_H +#define VER_H + +#define NIC_MAJOR_DRIVER_VERSION 1 +#define NIC_MINOR_DRIVER_VERSION 5 +#define NIC_BUILD_DRIVER_VERSION 345 +#define NIC_REVISION_DRIVER_VERSION 0 + +#endif /* VER_H */ diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index abc9f2a59054..23873395f100 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -275,7 +275,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget) work_done = arc_emac_rx(ndev, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); } diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 4c80e0689db9..6a27c2662675 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -311,7 +311,7 @@ static int alx_poll(struct napi_struct *napi, int budget) if (!tx_complete || work == budget) return budget; - napi_complete(&np->napi); + napi_complete_done(&np->napi, work); /* enable interrupt */ if (alx->flags & ALX_FLAG_USING_MSIX) { @@ -685,8 +685,6 @@ static int alx_alloc_rings(struct alx_priv *alx) return -ENOMEM; } - alx_reinit_rings(alx); - return 0; } @@ -703,7 +701,7 @@ static void alx_free_rings(struct alx_priv *alx) if (alx->qnapi[0] && alx->qnapi[0]->rxq) kfree(alx->qnapi[0]->rxq->bufs); - if (!alx->descmem.virt) + if (alx->descmem.virt) dma_free_coherent(&alx->hw.pdev->dev, alx->descmem.size, alx->descmem.virt, @@ -984,6 +982,7 @@ static int alx_realloc_resources(struct alx_priv *alx) alx_free_rings(alx); alx_free_napis(alx); alx_disable_advanced_intr(alx); + alx_init_intr(alx, false); err = alx_alloc_napis(alx); if (err) @@ -1241,6 +1240,12 @@ static int __alx_open(struct alx_priv *alx, bool resume) if (err) goto out_free_rings; + /* must be called after alx_request_irq because the chip stops working + * if we copy the dma addresses in alx_init_ring_ptrs twice when + * requesting msi-x interrupts failed + */ + alx_reinit_rings(alx); + netif_set_real_num_tx_queues(alx->dev, alx->num_txq); netif_set_real_num_rx_queues(alx->dev, alx->num_rxq); diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 773d3b7d8dd5..7e913d8331c3 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -1892,7 +1892,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget) if (work_done < budget) { quit_polling: - napi_complete(napi); + napi_complete_done(napi, work_done); adapter->hw.intr_mask |= ISR_RX_PKT; AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); } diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index e96091b652a7..ef003c522ba2 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1526,7 +1526,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget) /* If no Tx and not enough Rx work done, exit the polling mode */ if (work_done < budget) { quit_polling: - napi_complete(napi); + napi_complete_done(napi, work_done); imr_data = AT_READ_REG(&adapter->hw, REG_IMR); AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); /* test debug */ diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 7dad8e4b9d2a..022772e1e249 100644 --- 
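The napi_complete() to napi_complete_done() conversions repeated across these Ethernet drivers all follow one shape. A generic sketch (not any single driver): reporting the amount of work done lets the NAPI core decide whether to defer re-arming interrupts, which a bare napi_complete() cannot convey.

```c
/* Generic shape of the conversion: pass work_done so the core can keep
 * interrupts off a little longer when busy polling is in effect.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;	/* = <driver RX processing, up to budget> */

	if (work_done < budget) {
		/* all pending work handled: leave polling mode and report
		 * how much was done to the NAPI core
		 */
		napi_complete_done(napi, work_done);
		/* re-enable device RX/TX interrupts here */
	}

	return work_done;
}
```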
a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2457,7 +2457,7 @@ static int atl1_rings_clean(struct napi_struct *napi, int budget) if (work_done >= budget) return work_done; - napi_complete(napi); + napi_complete_done(napi, work_done); /* re-enable Interrupt */ if (likely(adapter->int_enabled)) atlx_imr_set(adapter, IMR_NORMAL_MASK); diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 7aef70f7d8ef..5b95bb48ce97 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -902,7 +902,7 @@ static int b44_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); b44_enable_ints(bp); } diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 3b14d5144228..0ee6e208aa07 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -511,7 +511,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) /* no more packet in rx/tx queue, remove device from poll * queue */ - napi_complete(napi); + napi_complete_done(napi, rx_work_done); /* restore rx/tx interrupt */ enet_dmac_writel(priv, priv->dma_chan_int_mask, @@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev) priv->old_link = 0; priv->old_duplex = -1; priv->old_pause = -1; + } else { + phydev = NULL; } /* mask all interrupts and request them */ @@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev) enet_dmac_writel(priv, priv->dma_chan_int_mask, ENETDMAC_IRMASK, priv->tx_chan); - if (priv->has_phy) + if (phydev) phy_start(phydev); else bcm_enet_adjust_link(dev); @@ -1126,7 +1128,7 @@ out_freeirq: free_irq(dev->irq, dev); out_phy_disconnect: - if (priv->has_phy) + if (phydev) phy_disconnect(phydev); return ret; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 7e8cf213fd81..a68d4889f5db 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -43,14 +43,43 @@ static inline void name##_writel(struct bcm_sysport_priv *priv, \ BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET); BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET); BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET); +BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET); BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET); -BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET); BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET); BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET); BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET); BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET); BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET); +/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact + * same layout, except it has been moved by 4 bytes up, *sigh* + */ +static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off) +{ + if (priv->is_lite && off >= RDMA_STATUS) + off += 4; + return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off); +} + +static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off) +{ + if (priv->is_lite && off >= RDMA_STATUS) + off += 4; + __raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off); +} + +static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit) +{ + if (!priv->is_lite) { + return BIT(bit); + } else { + if (bit >= ACB_ALGO) + return BIT(bit + 1); + else + 
return BIT(bit); + } +} + /* L2-interrupt masking/unmasking helpers, does automatic saving of the applied * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths. */ @@ -143,9 +172,9 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev, priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); reg = tdma_readl(priv, TDMA_CONTROL); if (priv->tsb_en) - reg |= TSB_EN; + reg |= tdma_control_bit(priv, TSB_EN); else - reg &= ~TSB_EN; + reg &= ~tdma_control_bit(priv, TSB_EN); tdma_writel(priv, reg, TDMA_CONTROL); return 0; @@ -281,11 +310,35 @@ static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable) priv->msg_enable = enable; } +static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type) +{ + switch (type) { + case BCM_SYSPORT_STAT_NETDEV: + case BCM_SYSPORT_STAT_RXCHK: + case BCM_SYSPORT_STAT_RBUF: + case BCM_SYSPORT_STAT_SOFT: + return true; + default: + return false; + } +} + static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) { + struct bcm_sysport_priv *priv = netdev_priv(dev); + const struct bcm_sysport_stats *s; + unsigned int i, j; + switch (string_set) { case ETH_SS_STATS: - return BCM_SYSPORT_STATS_LEN; + for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + s = &bcm_sysport_gstrings_stats[i]; + if (priv->is_lite && + !bcm_sysport_lite_stat_valid(s->type)) + continue; + j++; + } + return j; default: return -EOPNOTSUPP; } @@ -294,14 +347,21 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) static void bcm_sysport_get_strings(struct net_device *dev, u32 stringset, u8 *data) { - int i; + struct bcm_sysport_priv *priv = netdev_priv(dev); + const struct bcm_sysport_stats *s; + int i, j; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { - memcpy(data + i * ETH_GSTRING_LEN, - bcm_sysport_gstrings_stats[i].stat_string, + for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + s = &bcm_sysport_gstrings_stats[i]; + if (priv->is_lite && + !bcm_sysport_lite_stat_valid(s->type)) + continue; + + memcpy(data + j * ETH_GSTRING_LEN, s->stat_string, ETH_GSTRING_LEN); + j++; } break; default: @@ -327,6 +387,9 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) case BCM_SYSPORT_STAT_MIB_RX: case BCM_SYSPORT_STAT_MIB_TX: case BCM_SYSPORT_STAT_RUNT: + if (priv->is_lite) + continue; + if (s->type != BCM_SYSPORT_STAT_MIB_RX) offset = UMAC_MIB_STAT_OFFSET; val = umac_readl(priv, UMAC_MIB_START + j + offset); @@ -355,12 +418,12 @@ static void bcm_sysport_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct bcm_sysport_priv *priv = netdev_priv(dev); - int i; + int i, j; if (netif_running(dev)) bcm_sysport_update_mib_counters(priv); - for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { const struct bcm_sysport_stats *s; char *p; @@ -370,7 +433,8 @@ static void bcm_sysport_get_stats(struct net_device *dev, else p = (char *)priv; p += s->stat_offset; - data[i] = *(unsigned long *)p; + data[j] = *(unsigned long *)p; + j++; } } @@ -573,8 +637,14 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, u16 len, status; struct bcm_rsb *rsb; - /* Determine how much we should process since last call */ - p_index = rdma_readl(priv, RDMA_PROD_INDEX); + /* Determine how much we should process since last call, SYSTEMPORT Lite + * groups the producer and consumer indexes into the same 32-bit + * which we access using RDMA_CONS_INDEX + */ + if 
(!priv->is_lite) + p_index = rdma_readl(priv, RDMA_PROD_INDEX); + else + p_index = rdma_readl(priv, RDMA_CONS_INDEX); p_index &= RDMA_PROD_INDEX_MASK; if (p_index < priv->rx_c_index) @@ -710,11 +780,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; unsigned int pkts_compl = 0, bytes_compl = 0; struct bcm_sysport_cb *cb; - struct netdev_queue *txq; u32 hw_ind; - txq = netdev_get_tx_queue(ndev, ring->index); - /* Compute how many descriptors have been processed since last call */ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; @@ -745,9 +812,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, ring->c_index = c_index; - if (netif_tx_queue_stopped(txq) && pkts_compl) - netif_tx_wake_queue(txq); - netif_dbg(priv, tx_done, ndev, "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", ring->index, ring->c_index, pkts_compl, bytes_compl); @@ -759,16 +823,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, struct bcm_sysport_tx_ring *ring) { + struct netdev_queue *txq; unsigned int released; unsigned long flags; + txq = netdev_get_tx_queue(priv->netdev, ring->index); + spin_lock_irqsave(&ring->lock, flags); released = __bcm_sysport_tx_reclaim(priv, ring); + if (released) + netif_tx_wake_queue(txq); + spin_unlock_irqrestore(&ring->lock, flags); return released; } +/* Locked version of the per-ring TX reclaim, but does not wake the queue */ +static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv, + struct bcm_sysport_tx_ring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + __bcm_sysport_tx_reclaim(priv, ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) { struct bcm_sysport_tx_ring *ring = @@ -780,7 +861,11 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) if (work_done == 0) { napi_complete(napi); /* re-enable TX interrupt */ - intrl2_1_mask_clear(ring->priv, BIT(ring->index)); + if (!ring->priv->is_lite) + intrl2_1_mask_clear(ring->priv, BIT(ring->index)); + else + intrl2_0_mask_clear(ring->priv, BIT(ring->index + + INTRL2_0_TDMA_MBDONE_SHIFT)); return 0; } @@ -806,7 +891,15 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) priv->rx_c_index += work_done; priv->rx_c_index &= RDMA_CONS_INDEX_MASK; - rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); + + /* SYSTEMPORT Lite groups the producer/consumer index, producer is + * maintained by HW, but writes to it will be ignore while RDMA + * is active + */ + if (!priv->is_lite) + rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); + else + rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX); if (work_done < budget) { napi_complete_done(napi, work_done); @@ -837,6 +930,8 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) { struct net_device *dev = dev_id; struct bcm_sysport_priv *priv = netdev_priv(dev); + struct bcm_sysport_tx_ring *txr; + unsigned int ring, ring_bit; priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); @@ -866,6 +961,22 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) bcm_sysport_resume_from_wol(priv); } + if (!priv->is_lite) + goto out; + + for (ring = 0; ring < dev->num_tx_queues; 
ring++) { + ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT); + if (!(priv->irq0_stat & ring_bit)) + continue; + + txr = &priv->tx_rings[ring]; + + if (likely(napi_schedule_prep(&txr->napi))) { + intrl2_0_mask_set(priv, ring_bit); + __napi_schedule(&txr->napi); + } + } +out: return IRQ_HANDLED; } @@ -919,9 +1030,11 @@ static void bcm_sysport_poll_controller(struct net_device *dev) bcm_sysport_rx_isr(priv->irq0, priv); enable_irq(priv->irq0); - disable_irq(priv->irq1); - bcm_sysport_tx_isr(priv->irq1, priv); - enable_irq(priv->irq1); + if (!priv->is_lite) { + disable_irq(priv->irq1); + bcm_sysport_tx_isr(priv->irq1, priv); + enable_irq(priv->irq1); + } } #endif @@ -1118,6 +1231,9 @@ static void bcm_sysport_adj_link(struct net_device *dev) priv->old_duplex = phydev->duplex; } + if (priv->is_lite) + goto out; + switch (phydev->speed) { case SPEED_2500: cmd_bits = CMD_SPEED_2500; @@ -1158,8 +1274,9 @@ static void bcm_sysport_adj_link(struct net_device *dev) reg |= cmd_bits; umac_writel(priv, reg, UMAC_CMD); } - - phy_print_status(phydev); +out: + if (changed) + phy_print_status(phydev); } static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, @@ -1252,7 +1369,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, napi_disable(&ring->napi); netif_napi_del(&ring->napi); - bcm_sysport_tx_reclaim(priv, ring); + bcm_sysport_tx_clean(priv, ring); kfree(ring->cbs); ring->cbs = NULL; @@ -1304,9 +1421,9 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv, reg = tdma_readl(priv, TDMA_CONTROL); if (enable) - reg |= TDMA_EN; + reg |= tdma_control_bit(priv, TDMA_EN); else - reg &= ~TDMA_EN; + reg &= ~tdma_control_bit(priv, TDMA_EN); tdma_writel(priv, reg, TDMA_CONTROL); /* Poll for TMDA disabling completion */ @@ -1331,7 +1448,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) int i; /* Initialize SW view of the RX ring */ - priv->num_rx_bds = NUM_RX_DESC; + priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC; priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; priv->rx_c_index = 0; priv->rx_read_ptr = 0; @@ -1368,7 +1485,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) rdma_writel(priv, 0, RDMA_START_ADDR_HI); rdma_writel(priv, 0, RDMA_START_ADDR_LO); rdma_writel(priv, 0, RDMA_END_ADDR_HI); - rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO); + rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO); rdma_writel(priv, 1, RDMA_MBDONE_INTR); @@ -1410,6 +1527,9 @@ static void bcm_sysport_set_rx_mode(struct net_device *dev) struct bcm_sysport_priv *priv = netdev_priv(dev); u32 reg; + if (priv->is_lite) + return; + reg = umac_readl(priv, UMAC_CMD); if (dev->flags & IFF_PROMISC) reg |= CMD_PROMISC; @@ -1427,12 +1547,21 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv, { u32 reg; - reg = umac_readl(priv, UMAC_CMD); - if (enable) - reg |= mask; - else - reg &= ~mask; - umac_writel(priv, reg, UMAC_CMD); + if (!priv->is_lite) { + reg = umac_readl(priv, UMAC_CMD); + if (enable) + reg |= mask; + else + reg &= ~mask; + umac_writel(priv, reg, UMAC_CMD); + } else { + reg = gib_readl(priv, GIB_CONTROL); + if (enable) + reg |= mask; + else + reg &= ~mask; + gib_writel(priv, reg, GIB_CONTROL); + } /* UniMAC stops on a packet boundary, wait for a full-sized packet * to be processed (1 msec). 
@@ -1445,6 +1574,9 @@ static inline void umac_reset(struct bcm_sysport_priv *priv) { u32 reg; + if (priv->is_lite) + return; + reg = umac_readl(priv, UMAC_CMD); reg |= CMD_SW_RESET; umac_writel(priv, reg, UMAC_CMD); @@ -1457,9 +1589,17 @@ static inline void umac_reset(struct bcm_sysport_priv *priv) static void umac_set_hw_addr(struct bcm_sysport_priv *priv, unsigned char *addr) { - umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | - (addr[2] << 8) | addr[3], UMAC_MAC0); - umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); + u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | + addr[3]; + u32 mac1 = (addr[4] << 8) | addr[5]; + + if (!priv->is_lite) { + umac_writel(priv, mac0, UMAC_MAC0); + umac_writel(priv, mac1, UMAC_MAC1); + } else { + gib_writel(priv, mac0, GIB_MAC0); + gib_writel(priv, mac1, GIB_MAC1); + } } static void topctrl_flush(struct bcm_sysport_priv *priv) @@ -1504,8 +1644,11 @@ static void bcm_sysport_netif_start(struct net_device *dev) phy_start(dev->phydev); - /* Enable TX interrupts for the 32 TXQs */ - intrl2_1_mask_clear(priv, 0xffffffff); + /* Enable TX interrupts for the TXQs */ + if (!priv->is_lite) + intrl2_1_mask_clear(priv, 0xffffffff); + else + intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK); /* Last call before we start the real business */ netif_tx_start_all_queues(dev); @@ -1517,9 +1660,37 @@ static void rbuf_init(struct bcm_sysport_priv *priv) reg = rbuf_readl(priv, RBUF_CONTROL); reg |= RBUF_4B_ALGN | RBUF_RSB_EN; + /* Set a correct RSB format on SYSTEMPORT Lite */ + if (priv->is_lite) { + reg &= ~RBUF_RSB_SWAP1; + reg |= RBUF_RSB_SWAP0; + } rbuf_writel(priv, reg, RBUF_CONTROL); } +static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv) +{ + intrl2_0_mask_set(priv, 0xffffffff); + intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + if (!priv->is_lite) { + intrl2_1_mask_set(priv, 0xffffffff); + intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + } +} + +static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv) +{ + u32 __maybe_unused reg; + + /* Include Broadcom tag in pad extension */ + if (netdev_uses_dsa(priv->netdev)) { + reg = gib_readl(priv, GIB_CONTROL); + reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT); + reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT; + gib_writel(priv, reg, GIB_CONTROL); + } +} + static int bcm_sysport_open(struct net_device *dev) { struct bcm_sysport_priv *priv = netdev_priv(dev); @@ -1540,13 +1711,20 @@ static int bcm_sysport_open(struct net_device *dev) rbuf_init(priv); /* Set maximum frame length */ - umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + if (!priv->is_lite) + umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + else + gib_set_pad_extension(priv); /* Set MAC address */ umac_set_hw_addr(priv, dev->dev_addr); /* Read CRC forward */ - priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); + if (!priv->is_lite) + priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); + else + priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) & + GIB_FCS_STRIP); phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 0, priv->phy_interface); @@ -1561,12 +1739,7 @@ static int bcm_sysport_open(struct net_device *dev) priv->old_pause = -1; /* mask all interrupts and request them */ - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); - intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); - intrl2_1_writel(priv, 
0xffffffff, INTRL2_CPU_CLEAR); - intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + bcm_sysport_mask_all_intrs(priv); ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev); if (ret) { @@ -1574,10 +1747,13 @@ static int bcm_sysport_open(struct net_device *dev) goto out_phy_disconnect; } - ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev); - if (ret) { - netdev_err(dev, "failed to request TX interrupt\n"); - goto out_free_irq0; + if (!priv->is_lite) { + ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, + dev->name, dev); + if (ret) { + netdev_err(dev, "failed to request TX interrupt\n"); + goto out_free_irq0; + } } /* Initialize both hardware and software ring */ @@ -1624,7 +1800,8 @@ out_free_rx_ring: out_free_tx_ring: for (i = 0; i < dev->num_tx_queues; i++) bcm_sysport_fini_tx_ring(priv, i); - free_irq(priv->irq1, dev); + if (!priv->is_lite) + free_irq(priv->irq1, dev); out_free_irq0: free_irq(priv->irq0, dev); out_phy_disconnect: @@ -1642,10 +1819,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev) phy_stop(dev->phydev); /* mask all interrupts */ - intrl2_0_mask_set(priv, 0xffffffff); - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_1_mask_set(priv, 0xffffffff); - intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + bcm_sysport_mask_all_intrs(priv); } static int bcm_sysport_stop(struct net_device *dev) @@ -1683,7 +1857,8 @@ static int bcm_sysport_stop(struct net_device *dev) bcm_sysport_fini_rx_ring(priv); free_irq(priv->irq0, dev); - free_irq(priv->irq1, dev); + if (!priv->is_lite) + free_irq(priv->irq1, dev); /* Disconnect from PHY */ phy_disconnect(dev->phydev); @@ -1722,8 +1897,32 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { #define REV_FMT "v%2x.%02x" +static const struct bcm_sysport_hw_params bcm_sysport_params[] = { + [SYSTEMPORT] = { + .is_lite = false, + .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS, + }, + [SYSTEMPORT_LITE] = { + .is_lite = true, + .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS, + }, +}; + +static const struct of_device_id bcm_sysport_of_match[] = { + { .compatible = "brcm,systemportlite-v1.00", + .data = &bcm_sysport_params[SYSTEMPORT_LITE] }, + { .compatible = "brcm,systemport-v1.00", + .data = &bcm_sysport_params[SYSTEMPORT] }, + { .compatible = "brcm,systemport", + .data = &bcm_sysport_params[SYSTEMPORT] }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); + static int bcm_sysport_probe(struct platform_device *pdev) { + const struct bcm_sysport_hw_params *params; + const struct of_device_id *of_id = NULL; struct bcm_sysport_priv *priv; struct device_node *dn; struct net_device *dev; @@ -1734,6 +1933,12 @@ static int bcm_sysport_probe(struct platform_device *pdev) dn = pdev->dev.of_node; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + of_id = of_match_node(bcm_sysport_of_match, dn); + if (!of_id || !of_id->data) + return -EINVAL; + + /* Fairly quickly we need to know the type of adapter we have */ + params = of_id->data; /* Read the Transmit/Receive Queue properties */ if (of_property_read_u32(dn, "systemport,num-txq", &txq)) @@ -1741,6 +1946,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) if (of_property_read_u32(dn, "systemport,num-rxq", &rxq)) rxq = 1; + /* Sanity check the number of transmit queues */ + if (!txq || txq > TDMA_NUM_RINGS) + return -EINVAL; + dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq); if (!dev) return -ENOMEM; @@ -1748,10 +1957,21 @@ static int bcm_sysport_probe(struct platform_device *pdev) /* 
Initialize private members */ priv = netdev_priv(dev); + /* Allocate number of TX rings */ + priv->tx_rings = devm_kcalloc(&pdev->dev, txq, + sizeof(struct bcm_sysport_tx_ring), + GFP_KERNEL); + if (!priv->tx_rings) + return -ENOMEM; + + priv->is_lite = params->is_lite; + priv->num_rx_desc_words = params->num_rx_desc_words; + priv->irq0 = platform_get_irq(pdev, 0); - priv->irq1 = platform_get_irq(pdev, 1); + if (!priv->is_lite) + priv->irq1 = platform_get_irq(pdev, 1); priv->wol_irq = platform_get_irq(pdev, 2); - if (priv->irq0 <= 0 || priv->irq1 <= 0) { + if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { dev_err(&pdev->dev, "invalid interrupts\n"); ret = -EINVAL; goto err_free_netdev; @@ -1825,8 +2045,9 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; dev_info(&pdev->dev, - "Broadcom SYSTEMPORT" REV_FMT + "Broadcom SYSTEMPORT%s" REV_FMT " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", + priv->is_lite ? " Lite" : "", (priv->rev >> 8) & 0xff, priv->rev & 0xff, priv->base, priv->irq0, priv->irq1, txq, rxq); @@ -2022,7 +2243,10 @@ static int bcm_sysport_resume(struct device *d) rbuf_init(priv); /* Set maximum frame length */ - umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + if (!priv->is_lite) + umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + else + gib_set_pad_extension(priv); /* Set MAC address */ umac_set_hw_addr(priv, dev->dev_addr); @@ -2058,13 +2282,6 @@ out_free_tx_rings: static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, bcm_sysport_suspend, bcm_sysport_resume); -static const struct of_device_id bcm_sysport_of_match[] = { - { .compatible = "brcm,systemport-v1.00" }, - { .compatible = "brcm,systemport" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); - static struct platform_driver bcm_sysport_driver = { .probe = bcm_sysport_probe, .remove = bcm_sysport_remove, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 1c82e3da69a7..863ddd7870b7 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -127,6 +127,10 @@ struct bcm_rsb { #define INTRL2_0_DESC_ALLOC_ERR (1 << 10) #define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11) +/* SYSTEMPORT Lite groups the TX queues interrupts on instance 0 */ +#define INTRL2_0_TDMA_MBDONE_SHIFT 12 +#define INTRL2_0_TDMA_MBDONE_MASK (0xffff << INTRL2_0_TDMA_MBDONE_SHIFT) + /* RXCHK offset and defines */ #define SYS_PORT_RXCHK_OFFSET 0x300 @@ -176,7 +180,9 @@ struct bcm_rsb { #define RBUF_OK_TO_SEND_MASK 0xff #define RBUF_CRC_REPLACE (1 << 20) #define RBUF_OK_TO_SEND_MODE (1 << 21) -#define RBUF_RSB_SWAP (1 << 22) +/* SYSTEMPORT Lite uses two bits here */ +#define RBUF_RSB_SWAP0 (1 << 22) +#define RBUF_RSB_SWAP1 (1 << 23) #define RBUF_ACPI_EN (1 << 23) #define RBUF_PKT_RDY_THRESH 0x04 @@ -247,6 +253,7 @@ struct bcm_rsb { #define MIB_RUNT_CNT_RST (1 << 1) #define MIB_TX_CNT_RST (1 << 2) +/* These offsets are valid for SYSTEMPORT and SYSTEMPORT Lite */ #define UMAC_MPD_CTRL 0x620 #define MPD_EN (1 << 0) #define MSEQ_LEN_SHIFT 16 @@ -258,6 +265,34 @@ struct bcm_rsb { #define UMAC_MDF_CTRL 0x650 #define UMAC_MDF_ADDR 0x654 +/* Only valid on SYSTEMPORT Lite */ +#define SYS_PORT_GIB_OFFSET 0x1000 + +#define GIB_CONTROL 0x00 +#define GIB_TX_EN (1 << 0) +#define GIB_RX_EN (1 << 1) +#define GIB_TX_FLUSH (1 << 2) +#define GIB_RX_FLUSH (1 << 3) +#define GIB_GTX_CLK_SEL_SHIFT 4 +#define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT) +#define 
GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT) +#define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT) +#define GIB_FCS_STRIP (1 << 6) +#define GIB_LCL_LOOP_EN (1 << 7) +#define GIB_LCL_LOOP_TXEN (1 << 8) +#define GIB_RMT_LOOP_EN (1 << 9) +#define GIB_RMT_LOOP_RXEN (1 << 10) +#define GIB_RX_PAUSE_EN (1 << 11) +#define GIB_PREAMBLE_LEN_SHIFT 12 +#define GIB_PREAMBLE_LEN_MASK 0xf +#define GIB_IPG_LEN_SHIFT 16 +#define GIB_IPG_LEN_MASK 0x3f +#define GIB_PAD_EXTENSION_SHIFT 22 +#define GIB_PAD_EXTENSION_MASK 0x3f + +#define GIB_MAC1 0x08 +#define GIB_MAC0 0x0c + /* Receive DMA offset and defines */ #define SYS_PORT_RDMA_OFFSET 0x2000 @@ -409,16 +444,19 @@ struct bcm_rsb { RING_PCP_DEI_VID) #define TDMA_CONTROL 0x600 -#define TDMA_EN (1 << 0) -#define TSB_EN (1 << 1) -#define TSB_SWAP (1 << 2) -#define ACB_ALGO (1 << 3) +#define TDMA_EN 0 +#define TSB_EN 1 +/* Uses 2 bits on SYSTEMPORT Lite and shifts everything by 1 bit, we + * keep the SYSTEMPORT layout here and adjust with tdma_control_bit() + */ +#define TSB_SWAP 2 +#define ACB_ALGO 3 #define BUF_DATA_OFFSET_SHIFT 4 #define BUF_DATA_OFFSET_MASK 0x3ff -#define VLAN_EN (1 << 14) -#define SW_BRCM_TAG (1 << 15) -#define WNC_KPT_SIZE_UPDATE (1 << 16) -#define SYNC_PKT_SIZE (1 << 17) +#define VLAN_EN 14 +#define SW_BRCM_TAG 15 +#define WNC_KPT_SIZE_UPDATE 16 +#define SYNC_PKT_SIZE 17 #define ACH_TXDONE_DELAY_SHIFT 18 #define ACH_TXDONE_DELAY_MASK 0xff @@ -475,12 +513,12 @@ struct dma_desc { }; /* Number of Receive hardware descriptor words */ -#define NUM_HW_RX_DESC_WORDS 1024 -/* Real number of usable descriptors */ -#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC) +#define SP_NUM_HW_RX_DESC_WORDS 1024 +#define SP_LT_NUM_HW_RX_DESC_WORDS 256 -/* Internal linked-list RAM has up to 1536 entries */ -#define NUM_TX_DESC 1536 +/* Internal linked-list RAM size */ +#define SP_NUM_TX_DESC 1536 +#define SP_LT_NUM_TX_DESC 256 #define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32)) @@ -627,6 +665,16 @@ struct bcm_sysport_cb { DEFINE_DMA_UNMAP_LEN(dma_len); }; +enum bcm_sysport_type { + SYSTEMPORT = 0, + SYSTEMPORT_LITE, +}; + +struct bcm_sysport_hw_params { + bool is_lite; + unsigned int num_rx_desc_words; +}; + /* Software view of the TX ring */ struct bcm_sysport_tx_ring { spinlock_t lock; /* Ring lock for tx reclaim/xmit */ @@ -651,6 +699,8 @@ struct bcm_sysport_priv { u32 irq0_mask; u32 irq1_stat; u32 irq1_mask; + bool is_lite; + unsigned int num_rx_desc_words; struct napi_struct napi ____cacheline_aligned; struct net_device *netdev; struct platform_device *pdev; @@ -659,7 +709,7 @@ struct bcm_sysport_priv { int wol_irq; /* Transmit rings */ - struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS]; + struct bcm_sysport_tx_ring *tx_rings; /* Receive queue */ void __iomem *rx_bds; diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c index 7c19c8e2bf91..6ce80cbcb48e 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c @@ -12,11 +12,6 @@ #include <linux/brcmphy.h> #include "bgmac.h" -struct bcma_mdio { - struct bcma_device *core; - u8 phyaddr; -}; - static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value, int timeout) { @@ -37,7 +32,7 @@ static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, * PHY ops **************************************************/ -static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) +static u16 bcma_mdio_phy_read(struct 
bgmac *bgmac, u8 phyaddr, u8 reg) { struct bcma_device *core; u16 phy_access_addr; @@ -56,12 +51,12 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT); BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE); - if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bcma_mdio->core->bus->drv_gmac_cmn.core; + if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bgmac->bcma.core->bus->drv_gmac_cmn.core; phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; } else { - core = bcma_mdio->core; + core = bgmac->bcma.core; phy_access_addr = BGMAC_PHY_ACCESS; phy_ctl_addr = BGMAC_PHY_CNTL; } @@ -87,7 +82,7 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) } /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */ -static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, +static int bcma_mdio_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value) { struct bcma_device *core; @@ -95,12 +90,12 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, u16 phy_ctl_addr; u32 tmp; - if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bcma_mdio->core->bus->drv_gmac_cmn.core; + if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bgmac->bcma.core->bus->drv_gmac_cmn.core; phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; } else { - core = bcma_mdio->core; + core = bgmac->bcma.core; phy_access_addr = BGMAC_PHY_ACCESS; phy_ctl_addr = BGMAC_PHY_CNTL; } @@ -110,8 +105,8 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, tmp |= phyaddr; bcma_write32(core, phy_ctl_addr, tmp); - bcma_write32(bcma_mdio->core, BGMAC_INT_STATUS, BGMAC_IS_MDIO); - if (bcma_read32(bcma_mdio->core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) + bcma_write32(bgmac->bcma.core, BGMAC_INT_STATUS, BGMAC_IS_MDIO); + if (bcma_read32(bgmac->bcma.core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) dev_warn(&core->dev, "Error setting MDIO int\n"); tmp = BGMAC_PA_START; @@ -132,57 +127,67 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, } /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */ -static void bcma_mdio_phy_init(struct bcma_mdio *bcma_mdio) +static void bcma_mdio_phy_init(struct bgmac *bgmac) { - struct bcma_chipinfo *ci = &bcma_mdio->core->bus->chipinfo; + struct bcma_chipinfo *ci = &bgmac->bcma.core->bus->chipinfo; u8 i; + /* For some legacy hardware we do chipset-based PHY initialization here + * without even detecting PHY ID. It's hacky and should be cleaned as + * soon as someone can test it. 
+ */ if (ci->id == BCMA_CHIP_ID_BCM5356) { for (i = 0; i < 5; i++) { - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x008b); - bcma_mdio_phy_write(bcma_mdio, i, 0x15, 0x0100); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); - bcma_mdio_phy_write(bcma_mdio, i, 0x12, 0x2aaa); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x008b); + bcma_mdio_phy_write(bgmac, i, 0x15, 0x0100); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bgmac, i, 0x12, 0x2aaa); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); } + return; } if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) || (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) || (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) { - struct bcma_drv_cc *cc = &bcma_mdio->core->bus->drv_cc; + struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc; bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0); bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0); for (i = 0; i < 5; i++) { - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); - bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5284); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x0010); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); - bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5296); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x1073); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9073); - bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x52b6); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9273); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bgmac, i, 0x16, 0x5284); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x0010); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bgmac, i, 0x16, 0x5296); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x1073); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x9073); + bcma_mdio_phy_write(bgmac, i, 0x16, 0x52b6); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x9273); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); } + return; } + + /* For all other hw do initialization using PHY subsystem. 
*/ + if (bgmac->net_dev && bgmac->net_dev->phydev) + phy_init_hw(bgmac->net_dev->phydev); } /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */ static int bcma_mdio_phy_reset(struct mii_bus *bus) { - struct bcma_mdio *bcma_mdio = bus->priv; - u8 phyaddr = bcma_mdio->phyaddr; + struct bgmac *bgmac = bus->priv; + u8 phyaddr = bgmac->phyaddr; - if (bcma_mdio->phyaddr == BGMAC_PHY_NOREGS) + if (phyaddr == BGMAC_PHY_NOREGS) return 0; - bcma_mdio_phy_write(bcma_mdio, phyaddr, MII_BMCR, BMCR_RESET); + bcma_mdio_phy_write(bgmac, phyaddr, MII_BMCR, BMCR_RESET); udelay(100); - if (bcma_mdio_phy_read(bcma_mdio, phyaddr, MII_BMCR) & BMCR_RESET) - dev_err(&bcma_mdio->core->dev, "PHY reset failed\n"); - bcma_mdio_phy_init(bcma_mdio); + if (bcma_mdio_phy_read(bgmac, phyaddr, MII_BMCR) & BMCR_RESET) + dev_err(bgmac->dev, "PHY reset failed\n"); + bcma_mdio_phy_init(bgmac); return 0; } @@ -202,16 +207,12 @@ static int bcma_mdio_mii_write(struct mii_bus *bus, int mii_id, int regnum, return bcma_mdio_phy_write(bus->priv, mii_id, regnum, value); } -struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr) +struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac) { - struct bcma_mdio *bcma_mdio; + struct bcma_device *core = bgmac->bcma.core; struct mii_bus *mii_bus; int err; - bcma_mdio = kzalloc(sizeof(*bcma_mdio), GFP_KERNEL); - if (!bcma_mdio) - return ERR_PTR(-ENOMEM); - mii_bus = mdiobus_alloc(); if (!mii_bus) { err = -ENOMEM; @@ -221,15 +222,12 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr) mii_bus->name = "bcma_mdio mii bus"; sprintf(mii_bus->id, "%s-%d-%d", "bcma_mdio", core->bus->num, core->core_unit); - mii_bus->priv = bcma_mdio; + mii_bus->priv = bgmac; mii_bus->read = bcma_mdio_mii_read; mii_bus->write = bcma_mdio_mii_write; mii_bus->reset = bcma_mdio_phy_reset; mii_bus->parent = &core->dev; - mii_bus->phy_mask = ~(1 << phyaddr); - - bcma_mdio->core = core; - bcma_mdio->phyaddr = phyaddr; + mii_bus->phy_mask = ~(1 << bgmac->phyaddr); err = mdiobus_register(mii_bus); if (err) { @@ -242,23 +240,17 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr) err_free_bus: mdiobus_free(mii_bus); err: - kfree(bcma_mdio); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(bcma_mdio_mii_register); void bcma_mdio_mii_unregister(struct mii_bus *mii_bus) { - struct bcma_mdio *bcma_mdio; - if (!mii_bus) return; - bcma_mdio = mii_bus->priv; - mdiobus_unregister(mii_bus); mdiobus_free(mii_bus); - kfree(bcma_mdio); } EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister); diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 4a4ffc0c4c65..5ef60d4f12b4 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -117,12 +117,11 @@ static int bgmac_probe(struct bcma_device *core) u8 *mac; int err; - bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL); + bgmac = bgmac_alloc(&core->dev); if (!bgmac) return -ENOMEM; bgmac->bcma.core = core; - bgmac->dev = &core->dev; bgmac->dma_dev = core->dma_dev; bgmac->irq = core->irq; @@ -178,7 +177,7 @@ static int bgmac_probe(struct bcma_device *core) if (!bgmac_is_bcm4707_family(core) && !(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) { - mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr); + mii_bus = bcma_mdio_mii_register(bgmac); if (IS_ERR(mii_bus)) { err = PTR_ERR(mii_bus); goto err; @@ -307,7 +306,6 @@ static int bgmac_probe(struct bcma_device *core) err1: bcma_mdio_mii_unregister(bgmac->mii_bus); err: - 
kfree(bgmac); bcma_set_drvdata(core, NULL); return err; diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 6f736c19872f..805e6ed6c390 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -151,7 +151,7 @@ static int bgmac_probe(struct platform_device *pdev) struct resource *regs; const u8 *mac_addr; - bgmac = devm_kzalloc(&pdev->dev, sizeof(*bgmac), GFP_KERNEL); + bgmac = bgmac_alloc(&pdev->dev); if (!bgmac) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 0e066dc6b8cc..fe88126b1e0c 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1148,7 +1148,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight) return weight; if (handled < weight) { - napi_complete(napi); + napi_complete_done(napi, handled); bgmac_chip_intrs_on(bgmac); } @@ -1446,22 +1446,32 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac) } EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct); -int bgmac_enet_probe(struct bgmac *info) +struct bgmac *bgmac_alloc(struct device *dev) { struct net_device *net_dev; struct bgmac *bgmac; - int err; /* Allocation and references */ - net_dev = alloc_etherdev(sizeof(*bgmac)); + net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac)); if (!net_dev) - return -ENOMEM; + return NULL; net_dev->netdev_ops = &bgmac_netdev_ops; net_dev->ethtool_ops = &bgmac_ethtool_ops; + bgmac = netdev_priv(net_dev); - memcpy(bgmac, info, sizeof(*bgmac)); + bgmac->dev = dev; bgmac->net_dev = net_dev; + + return bgmac; +} +EXPORT_SYMBOL_GPL(bgmac_alloc); + +int bgmac_enet_probe(struct bgmac *bgmac) +{ + struct net_device *net_dev = bgmac->net_dev; + int err; + net_dev->irq = bgmac->irq; SET_NETDEV_DEV(net_dev, bgmac->dev); @@ -1488,7 +1498,7 @@ int bgmac_enet_probe(struct bgmac *info) err = bgmac_dma_alloc(bgmac); if (err) { dev_err(bgmac->dev, "Unable to alloc memory for DMA\n"); - goto err_netdev_free; + goto err_out; } bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK; @@ -1521,8 +1531,7 @@ err_phy_disconnect: phy_disconnect(net_dev->phydev); err_dma_free: bgmac_dma_free(bgmac); -err_netdev_free: - free_netdev(net_dev); +err_out: return err; } diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 71f493f2451f..ab2db76e4fb8 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -517,12 +517,13 @@ struct bgmac { int (*phy_connect)(struct bgmac *bgmac); }; -int bgmac_enet_probe(struct bgmac *info); +struct bgmac *bgmac_alloc(struct device *dev); +int bgmac_enet_probe(struct bgmac *bgmac); void bgmac_enet_remove(struct bgmac *bgmac); void bgmac_adjust_link(struct net_device *net_dev); int bgmac_phy_connect_direct(struct bgmac *bgmac); -struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr); +struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac); void bcma_mdio_mii_unregister(struct mii_bus *mii_bus); static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset) diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index de1d07c08495..e3af1f3cb61f 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3515,7 +3515,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget) rmb(); if (likely(!bnx2_has_fast_work(bnapi))) { - napi_complete(napi); + napi_complete_done(napi, 
work_done); BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx); @@ -3552,7 +3552,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget) rmb(); if (likely(!bnx2_has_work(bnapi))) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 3e199d3e461e..9e8c06130c09 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -549,14 +549,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_alloc_pool *pool = &fp->page_pool; dma_addr_t mapping; - if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) { - - /* put page reference used by the memory pool, since we - * won't be using this page as the mempool anymore. - */ - if (pool->page) - put_page(pool->page); - + if (!pool->page) { pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); if (unlikely(!pool->page)) return -ENOMEM; @@ -571,7 +564,6 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, return -ENOMEM; } - get_page(pool->page); sw_buf->page = pool->page; sw_buf->offset = pool->offset; @@ -581,7 +573,10 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, sge->addr_lo = cpu_to_le32(U64_LO(mapping)); pool->offset += SGE_PAGE_SIZE; - + if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE) + get_page(pool->page); + else + pool->page = NULL; return 0; } @@ -3229,7 +3224,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) * has been updated when NAPI was scheduled. 
*/ if (IS_FCOE_FP(fp)) { - napi_complete(napi); + napi_complete_done(napi, rx_work_done); } else { bnx2x_update_fpsb_idx(fp); /* bnx2x_has_rx_work() reads the status block, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 5f19427c7b27..43423744fdfa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -216,165 +216,184 @@ static int bnx2x_get_port_type(struct bnx2x *bp) return port_type; } -static int bnx2x_get_vf_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int bnx2x_get_vf_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct bnx2x *bp = netdev_priv(dev); + u32 supported, advertising; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); if (bp->state == BNX2X_STATE_OPEN) { if (test_bit(BNX2X_LINK_REPORT_FD, &bp->vf_link_vars.link_report_flags)) - cmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; else - cmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; - ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed); + cmd->base.speed = bp->vf_link_vars.line_speed; } else { - cmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; } - cmd->port = PORT_OTHER; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_DISABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; + cmd->base.port = PORT_OTHER; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_DISABLE; DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" - " duplex %d port %d phy_address %d transceiver %d\n" - " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + " duplex %d port %d phy_address %d\n" + " autoneg %d\n", + cmd->base.cmd, supported, advertising, + cmd->base.speed, + cmd->base.duplex, cmd->base.port, cmd->base.phy_address, + cmd->base.autoneg); return 0; } -static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int bnx2x_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct bnx2x *bp = netdev_priv(dev); int cfg_idx = bnx2x_get_link_cfg_idx(bp); u32 media_type; + u32 supported, advertising, lp_advertising; + + ethtool_convert_link_mode_to_legacy_u32(&lp_advertising, + cmd->link_modes.lp_advertising); /* Dual Media boards present all available port types */ - cmd->supported = bp->port.supported[cfg_idx] | + supported = bp->port.supported[cfg_idx] | (bp->port.supported[cfg_idx ^ 1] & (SUPPORTED_TP | SUPPORTED_FIBRE)); - cmd->advertising = bp->port.advertising[cfg_idx]; + advertising = bp->port.advertising[cfg_idx]; media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type; if (media_type == ETH_PHY_SFP_1G_FIBER) { - cmd->supported &= ~(SUPPORTED_10000baseT_Full); - cmd->advertising &= ~(ADVERTISED_10000baseT_Full); + supported &= ~(SUPPORTED_10000baseT_Full); + advertising &= ~(ADVERTISED_10000baseT_Full); } if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up && !(bp->flags & MF_FUNC_DIS)) { - cmd->duplex = bp->link_vars.duplex; + cmd->base.duplex = bp->link_vars.duplex; if 
(IS_MF(bp) && !BP_NOMCP(bp)) - ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); + cmd->base.speed = bnx2x_get_mf_speed(bp); else - ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed); + cmd->base.speed = bp->link_vars.line_speed; } else { - cmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; } - cmd->port = bnx2x_get_port_type(bp); + cmd->base.port = bnx2x_get_port_type(bp); - cmd->phy_address = bp->mdio.prtad; - cmd->transceiver = XCVR_INTERNAL; + cmd->base.phy_address = bp->mdio.prtad; if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) - cmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; else - cmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; /* Publish LP advertised speeds and FC */ if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { u32 status = bp->link_vars.link_status; - cmd->lp_advertising |= ADVERTISED_Autoneg; + lp_advertising |= ADVERTISED_Autoneg; if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE) - cmd->lp_advertising |= ADVERTISED_Pause; + lp_advertising |= ADVERTISED_Pause; if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) - cmd->lp_advertising |= ADVERTISED_Asym_Pause; + lp_advertising |= ADVERTISED_Asym_Pause; if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_10baseT_Half; + lp_advertising |= ADVERTISED_10baseT_Half; if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_10baseT_Full; + lp_advertising |= ADVERTISED_10baseT_Full; if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_100baseT_Half; + lp_advertising |= ADVERTISED_100baseT_Half; if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_100baseT_Full; + lp_advertising |= ADVERTISED_100baseT_Full; if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_1000baseT_Half; + lp_advertising |= ADVERTISED_1000baseT_Half; if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) { if (media_type == ETH_PHY_KR) { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_1000baseKX_Full; } else { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_1000baseT_Full; } } if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_2500baseX_Full; + lp_advertising |= ADVERTISED_2500baseX_Full; if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) { if (media_type == ETH_PHY_KR) { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_10000baseKR_Full; } else { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_10000baseT_Full; } } if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full; + lp_advertising |= ADVERTISED_20000baseKR2_Full; } - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" - " duplex %d port %d phy_address %d transceiver %d\n" - " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, 
cmd->maxrxpkt); + " duplex %d port %d phy_address %d\n" + " autoneg %d\n", + cmd->base.cmd, supported, advertising, + cmd->base.speed, + cmd->base.duplex, cmd->base.port, cmd->base.phy_address, + cmd->base.autoneg); return 0; } -static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int bnx2x_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct bnx2x *bp = netdev_priv(dev); u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; u32 speed, phy_idx; + u32 supported; + u8 duplex = cmd->base.duplex; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); if (IS_MF_SD(bp)) return 0; DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" - " duplex %d port %d phy_address %d transceiver %d\n" - " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + " duplex %d port %d phy_address %d\n" + " autoneg %d\n", + cmd->base.cmd, supported, advertising, + cmd->base.speed, + cmd->base.duplex, cmd->base.port, cmd->base.phy_address, + cmd->base.autoneg); - speed = ethtool_cmd_speed(cmd); + speed = cmd->base.speed; /* If received a request for an unknown duplex, assume full*/ - if (cmd->duplex == DUPLEX_UNKNOWN) - cmd->duplex = DUPLEX_FULL; + if (duplex == DUPLEX_UNKNOWN) + duplex = DUPLEX_FULL; if (IS_MF_SI(bp)) { u32 part; @@ -410,8 +429,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) cfg_idx = bnx2x_get_link_cfg_idx(bp); old_multi_phy_config = bp->link_params.multi_phy_config; - if (cmd->port != bnx2x_get_port_type(bp)) { - switch (cmd->port) { + if (cmd->base.port != bnx2x_get_port_type(bp)) { + switch (cmd->base.port) { case PORT_TP: if (!(bp->port.supported[0] & SUPPORTED_TP || bp->port.supported[1] & SUPPORTED_TP)) { @@ -461,7 +480,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) bp->link_params.multi_phy_config = old_multi_phy_config; DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx); - if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { u32 an_supported_speed = bp->port.supported[cfg_idx]; if (bp->link_params.phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) @@ -473,51 +492,51 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } /* advertise the requested speed and duplex if supported */ - if (cmd->advertising & ~an_supported_speed) { + if (advertising & ~an_supported_speed) { DP(BNX2X_MSG_ETHTOOL, "Advertisement parameters are not supported\n"); return -EINVAL; } bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; - bp->link_params.req_duplex[cfg_idx] = cmd->duplex; + bp->link_params.req_duplex[cfg_idx] = duplex; bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg | - cmd->advertising); - if (cmd->advertising) { + advertising); + if (advertising) { bp->link_params.speed_cap_mask[cfg_idx] = 0; - if (cmd->advertising & ADVERTISED_10baseT_Half) { + if (advertising & ADVERTISED_10baseT_Half) { bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF; } - if (cmd->advertising & ADVERTISED_10baseT_Full) + if (advertising & ADVERTISED_10baseT_Full) bp->link_params.speed_cap_mask[cfg_idx] |= 
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL; - if (cmd->advertising & ADVERTISED_100baseT_Full) + if (advertising & ADVERTISED_100baseT_Full) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL; - if (cmd->advertising & ADVERTISED_100baseT_Half) { + if (advertising & ADVERTISED_100baseT_Half) { bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF; } - if (cmd->advertising & ADVERTISED_1000baseT_Half) { + if (advertising & ADVERTISED_1000baseT_Half) { bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; } - if (cmd->advertising & (ADVERTISED_1000baseT_Full | + if (advertising & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseKX_Full)) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; - if (cmd->advertising & (ADVERTISED_10000baseT_Full | + if (advertising & (ADVERTISED_10000baseT_Full | ADVERTISED_10000baseKX4_Full | ADVERTISED_10000baseKR_Full)) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; - if (cmd->advertising & ADVERTISED_20000baseKR2_Full) + if (advertising & ADVERTISED_20000baseKR2_Full) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_20G; } @@ -525,7 +544,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) /* advertise the requested speed and duplex if supported */ switch (speed) { case SPEED_10: - if (cmd->duplex == DUPLEX_FULL) { + if (duplex == DUPLEX_FULL) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_10baseT_Full)) { DP(BNX2X_MSG_ETHTOOL, @@ -549,7 +568,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_100: - if (cmd->duplex == DUPLEX_FULL) { + if (duplex == DUPLEX_FULL) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_100baseT_Full)) { DP(BNX2X_MSG_ETHTOOL, @@ -573,7 +592,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_1000: - if (cmd->duplex != DUPLEX_FULL) { + if (duplex != DUPLEX_FULL) { DP(BNX2X_MSG_ETHTOOL, "1G half not supported\n"); return -EINVAL; @@ -596,7 +615,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_2500: - if (cmd->duplex != DUPLEX_FULL) { + if (duplex != DUPLEX_FULL) { DP(BNX2X_MSG_ETHTOOL, "2.5G half not supported\n"); return -EINVAL; @@ -614,7 +633,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_10000: - if (cmd->duplex != DUPLEX_FULL) { + if (duplex != DUPLEX_FULL) { DP(BNX2X_MSG_ETHTOOL, "10G half not supported\n"); return -EINVAL; @@ -644,7 +663,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } bp->link_params.req_line_speed[cfg_idx] = speed; - bp->link_params.req_duplex[cfg_idx] = cmd->duplex; + bp->link_params.req_duplex[cfg_idx] = duplex; bp->port.advertising[cfg_idx] = advertising; } @@ -3605,8 +3624,6 @@ static int bnx2x_get_ts_info(struct net_device *dev, } static const struct ethtool_ops bnx2x_ethtool_ops = { - .get_settings = bnx2x_get_settings, - .set_settings = bnx2x_set_settings, .get_drvinfo = bnx2x_get_drvinfo, .get_regs_len = bnx2x_get_regs_len, .get_regs = bnx2x_get_regs, @@ -3646,10 +3663,11 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .get_eee = bnx2x_get_eee, .set_eee = bnx2x_set_eee, .get_ts_info = bnx2x_get_ts_info, + .get_link_ksettings = bnx2x_get_link_ksettings, + .set_link_ksettings = bnx2x_set_link_ksettings, }; static const struct ethtool_ops bnx2x_vf_ethtool_ops 
= { - .get_settings = bnx2x_get_vf_settings, .get_drvinfo = bnx2x_get_drvinfo, .get_msglevel = bnx2x_get_msglevel, .set_msglevel = bnx2x_set_msglevel, @@ -3667,6 +3685,7 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = { .set_rxfh = bnx2x_set_rxfh, .get_channels = bnx2x_get_channels, .set_channels = bnx2x_set_channels, + .get_link_ksettings = bnx2x_get_vf_link_ksettings, }; void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 7bd2a85694dd..aff3dc114a5b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1096,7 +1096,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, { #ifdef CONFIG_INET struct tcphdr *th; - int len, nw_off, tcp_opt_len; + int len, nw_off, tcp_opt_len = 0; if (tcp_ts) tcp_opt_len = 12; @@ -1759,7 +1759,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) } if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_pkts); BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); } return rx_pkts; @@ -2467,6 +2467,8 @@ static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) static void bnxt_set_tpa_flags(struct bnxt *bp) { bp->flags &= ~BNXT_FLAG_TPA; + if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) + return; if (bp->dev->features & NETIF_F_LRO) bp->flags |= BNXT_FLAG_LRO; if (bp->dev->features & NETIF_F_GRO) @@ -4944,6 +4946,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp) return rc; } +#ifdef CONFIG_RFS_ACCEL static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) { #if defined(CONFIG_BNXT_SRIOV) @@ -4961,6 +4964,7 @@ static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) #endif return bp->pf.max_vnics; } +#endif unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) { @@ -5437,17 +5441,12 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) if ((link_info->support_auto_speeds | diff) != link_info->support_auto_speeds) { /* An advertised speed is no longer supported, so we need to - * update the advertisement settings. See bnxt_reset() for - * comments about the rtnl_lock() sequence below. + * update the advertisement settings. Caller holds RTNL + * so we can modify link settings. 
*/ - clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); - rtnl_lock(); link_info->advertising = link_info->support_auto_speeds; - if (test_bit(BNXT_STATE_OPEN, &bp->state) && - (link_info->autoneg & BNXT_AUTONEG_SPEED)) + if (link_info->autoneg & BNXT_AUTONEG_SPEED) bnxt_hwrm_set_link_setting(bp, true, false); - set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); - rtnl_unlock(); } return 0; } @@ -5617,6 +5616,45 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) +{ + struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_led_qcaps_input req = {0}; + struct bnxt_pf_info *pf = &bp->pf; + int rc; + + if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); + req.port_id = cpu_to_le16(pf->port_id); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; + } + if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { + int i; + + bp->num_leds = resp->num_leds; + memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * + bp->num_leds); + for (i = 0; i < bp->num_leds; i++) { + struct bnxt_led_info *led = &bp->leds[i]; + __le16 caps = led->led_state_caps; + + if (!led->led_group_id || + !BNXT_LED_ALT_BLINK_CAP(caps)) { + bp->num_leds = 0; + break; + } + } + } + mutex_unlock(&bp->hwrm_cmd_lock); + return 0; +} + static bool bnxt_eee_config_ok(struct bnxt *bp) { struct ethtool_eee *eee = &bp->eee; @@ -6324,29 +6362,37 @@ bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); } -/* Only called from bnxt_sp_task() */ -static void bnxt_reset(struct bnxt *bp, bool silent) +static void bnxt_rtnl_lock_sp(struct bnxt *bp) { - /* bnxt_reset_task() calls bnxt_close_nic() which waits - * for BNXT_STATE_IN_SP_TASK to clear. - * If there is a parallel dev_close(), bnxt_close() may be holding + /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK + * set. If the device is being closed, bnxt_close() may be holding * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 
*/ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); rtnl_lock(); - if (test_bit(BNXT_STATE_OPEN, &bp->state)) - bnxt_reset_task(bp, silent); +} + +static void bnxt_rtnl_unlock_sp(struct bnxt *bp) +{ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); rtnl_unlock(); } +/* Only called from bnxt_sp_task() */ +static void bnxt_reset(struct bnxt *bp, bool silent) +{ + bnxt_rtnl_lock_sp(bp); + if (test_bit(BNXT_STATE_OPEN, &bp->state)) + bnxt_reset_task(bp, silent); + bnxt_rtnl_unlock_sp(bp); +} + static void bnxt_cfg_ntp_filters(struct bnxt *); static void bnxt_sp_task(struct work_struct *work) { struct bnxt *bp = container_of(work, struct bnxt, sp_task); - int rc; set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); smp_mb__after_atomic(); @@ -6360,16 +6406,6 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) bnxt_cfg_ntp_filters(bp); - if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { - if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, - &bp->sp_event)) - bnxt_hwrm_phy_qcaps(bp); - - rc = bnxt_update_link(bp, true); - if (rc) - netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", - rc); - } if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) bnxt_hwrm_exec_fwd_req(bp); if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { @@ -6390,18 +6426,39 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); } + if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) + bnxt_hwrm_port_qstats(bp); + + /* These functions below will clear BNXT_STATE_IN_SP_TASK. They + * must be the last functions to be called before exiting. + */ + if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { + int rc = 0; + + if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, + &bp->sp_event)) + bnxt_hwrm_phy_qcaps(bp); + + bnxt_rtnl_lock_sp(bp); + if (test_bit(BNXT_STATE_OPEN, &bp->state)) + rc = bnxt_update_link(bp, true); + bnxt_rtnl_unlock_sp(bp); + if (rc) + netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", + rc); + } + if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { + bnxt_rtnl_lock_sp(bp); + if (test_bit(BNXT_STATE_OPEN, &bp->state)) + bnxt_get_port_module_status(bp); + bnxt_rtnl_unlock_sp(bp); + } if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) bnxt_reset(bp, false); if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) bnxt_reset(bp, true); - if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) - bnxt_get_port_module_status(bp); - - if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) - bnxt_hwrm_port_qstats(bp); - smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); } @@ -7240,6 +7297,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } bnxt_hwrm_func_qcfg(bp); + bnxt_hwrm_port_led_qcaps(bp); bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index f6b9b1c530fe..52a1cc061ba3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -868,6 +868,20 @@ struct bnxt_queue_info { u8 queue_profile; }; +#define BNXT_MAX_LED 4 + +struct bnxt_led_info { + u8 led_id; + u8 led_type; + u8 led_group_id; + u8 unused; + __le16 led_state_caps; +#define BNXT_LED_ALT_BLINK_CAP(x) ((x) & \ + 
cpu_to_le16(PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED)) + + __le16 led_color_caps; +}; + #define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 #define BNXT_CAG_REG_BASE 0x300000 @@ -1123,6 +1137,9 @@ struct bnxt { struct ethtool_eee eee; u32 lpi_tmr_lo; u32 lpi_tmr_hi; + + u8 num_leds; + struct bnxt_led_info leds[BNXT_MAX_LED]; }; #define BNXT_RX_STATS_OFFSET(counter) \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index dd21be4a5fdd..24818e1e59f3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2080,6 +2080,47 @@ static int bnxt_nway_reset(struct net_device *dev) return rc; } +static int bnxt_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct hwrm_port_led_cfg_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_led_cfg *led_cfg; + u8 led_state; + __le16 duration; + int i, rc; + + if (!bp->num_leds || BNXT_VF(bp)) + return -EOPNOTSUPP; + + if (state == ETHTOOL_ID_ACTIVE) { + led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT; + duration = cpu_to_le16(500); + } else if (state == ETHTOOL_ID_INACTIVE) { + led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT; + duration = cpu_to_le16(0); + } else { + return -EINVAL; + } + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1); + req.port_id = cpu_to_le16(pf->port_id); + req.num_leds = bp->num_leds; + led_cfg = (struct bnxt_led_cfg *)&req.led0_id; + for (i = 0; i < bp->num_leds; i++, led_cfg++) { + req.enables |= BNXT_LED_DFLT_ENABLES(i); + led_cfg->led_id = bp->leds[i].led_id; + led_cfg->led_state = led_state; + led_cfg->led_blink_on = duration; + led_cfg->led_blink_off = duration; + led_cfg->led_group_id = bp->leds[i].led_group_id; + } + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + rc = -EIO; + return rc; +} + const struct ethtool_ops bnxt_ethtool_ops = { .get_link_ksettings = bnxt_get_link_ksettings, .set_link_ksettings = bnxt_set_link_ksettings, @@ -2111,5 +2152,6 @@ const struct ethtool_ops bnxt_ethtool_ops = { .set_eee = bnxt_set_eee, .get_module_info = bnxt_get_module_info, .get_module_eeprom = bnxt_get_module_eeprom, - .nway_reset = bnxt_nway_reset + .nway_reset = bnxt_nway_reset, + .set_phys_id = bnxt_set_phys_id, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 3abc03b60dbc..ed1e555292e9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -10,6 +10,29 @@ #ifndef BNXT_ETHTOOL_H #define BNXT_ETHTOOL_H +struct bnxt_led_cfg { + u8 led_id; + u8 led_state; + u8 led_color; + u8 unused; + __le16 led_blink_on; + __le16 led_blink_off; + u8 led_group_id; + u8 rsvd; +}; + +#define BNXT_LED_DFLT_ENA \ + (PORT_LED_CFG_REQ_ENABLES_LED0_ID | \ + PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \ + PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON | \ + PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF | \ + PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID) + +#define BNXT_LED_DFLT_ENA_SHIFT 6 + +#define BNXT_LED_DFLT_ENABLES(x) \ + cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x))) + extern const struct ethtool_ops bnxt_ethtool_ops; u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index d0d49ed71ce4..5df32ab64f0c 100644 --- 
a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016 Broadcom Limited + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -11,12 +11,12 @@ #ifndef BNXT_HSI_H #define BNXT_HSI_H -/* HSI and HWRM Specification 1.6.0 */ +/* HSI and HWRM Specification 1.6.1 */ #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 6 -#define HWRM_VERSION_UPDATE 0 +#define HWRM_VERSION_UPDATE 1 -#define HWRM_VERSION_STR "1.6.0" +#define HWRM_VERSION_STR "1.6.1" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. @@ -549,6 +549,8 @@ struct hwrm_ver_get_output { __le32 dev_caps_cfg; #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL u8 roce_fw_maj; u8 roce_fw_min; u8 roce_fw_bld; @@ -1919,6 +1921,219 @@ struct hwrm_port_phy_i2c_read_output { u8 valid; }; +/* hwrm_port_led_cfg */ +/* Input (64 bytes) */ +struct hwrm_port_led_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL + __le16 port_id; + u8 num_leds; + u8 rsvd; + u8 led0_id; + u8 led0_state; + #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL + u8 led0_color; + #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL + u8 unused_0; + __le16 led0_blink_on; + __le16 led0_blink_off; + u8 led0_group_id; + u8 rsvd0; + u8 led1_id; + u8 
led1_state; + #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL + u8 led1_color; + #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL + u8 unused_1; + __le16 led1_blink_on; + __le16 led1_blink_off; + u8 led1_group_id; + u8 rsvd1; + u8 led2_id; + u8 led2_state; + #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL + u8 led2_color; + #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL + u8 unused_2; + __le16 led2_blink_on; + __le16 led2_blink_off; + u8 led2_group_id; + u8 rsvd2; + u8 led3_id; + u8 led3_state; + #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL + u8 led3_color; + #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL + u8 unused_3; + __le16 led3_blink_on; + __le16 led3_blink_off; + u8 led3_group_id; + u8 rsvd3; +}; + +/* Output (16 bytes) */ +struct hwrm_port_led_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_led_qcaps */ +/* Input (24 bytes) */ +struct hwrm_port_led_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (48 bytes) */ +struct hwrm_port_led_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_leds; + u8 unused_0[3]; + u8 led0_id; + u8 led0_type; + #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL + u8 led0_group_id; + u8 unused_1; + __le16 led0_state_caps; + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led0_color_caps; + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led1_id; + u8 led1_type; + #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL + u8 led1_group_id; + u8 unused_2; + __le16 led1_state_caps; + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL + #define 
PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led1_color_caps; + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led2_id; + u8 led2_type; + #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL + u8 led2_group_id; + u8 unused_3; + __le16 led2_state_caps; + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led2_color_caps; + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led3_id; + u8 led3_type; + #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL + u8 led3_group_id; + u8 unused_4; + __le16 led3_state_caps; + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led3_color_caps; + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 unused_5; + u8 unused_6; + u8 unused_7; + u8 valid; +}; + /* hwrm_queue_qportcfg */ /* Input (24 bytes) */ struct hwrm_queue_qportcfg_input { @@ -4092,9 +4307,7 @@ struct hwrm_fw_set_structured_data_input { __le64 src_data_addr; __le16 data_len; u8 hdr_cnt; - u8 unused_0; - __le16 port_id; - __le16 unused_1; + u8 unused_0[5]; }; /* Output (16 bytes) */ @@ -4111,7 +4324,7 @@ struct hwrm_fw_set_structured_data_output { }; /* hwrm_fw_get_structured_data */ -/* Input (40 bytes) */ +/* Input (32 bytes) */ struct hwrm_fw_get_structured_data_input { __le16 req_type; __le16 cmpl_ring; @@ -4131,8 +4344,6 @@ struct hwrm_fw_get_structured_data_input { #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL u8 count; u8 unused_0; - __le16 port_id; - __le16 unused_1[3]; }; /* Output (16 bytes) */ @@ -4616,7 +4827,8 @@ struct hwrm_nvm_install_update_input { __le32 install_type; #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL - __le32 unused_0; + __le16 flags; + __le16 unused_0; }; /* Output (24 bytes) */ @@ -4973,12 +5185,13 @@ struct ctx_hw_stats { struct hwrm_struct_hdr { __le16 struct_id; #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL - #define STRUCT_HDR_STRUCT_ID_DCBX_ETS_CFG 0x41dUL - #define STRUCT_HDR_STRUCT_ID_DCBX_PFC_CFG 0x41fUL - #define STRUCT_HDR_STRUCT_ID_DCBX_APP_CFG 0x421UL - #define STRUCT_HDR_STRUCT_ID_DCBX_STATE_CFG 0x422UL - #define 
STRUCT_HDR_STRUCT_ID_LLDP_GENERIC_CFG 0x424UL - #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE_CFG 0x426UL + #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL + #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL + #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL + #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL + #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL + #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL + #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL __le16 len; u8 version; u8 count; @@ -4988,14 +5201,14 @@ struct hwrm_struct_hdr { __le16 unused_0[3]; }; -/* DCBX Application configuration structure (8 bytes) */ -struct hwrm_struct_data_dcbx_app_cfg { - __le16 protocol_id; +/* DCBX Application configuration structure (1057) (8 bytes) */ +struct hwrm_struct_data_dcbx_app { + __be16 protocol_id; u8 protocol_selector; - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_PORT 0x2UL - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_UDP_PORT 0x3UL - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL u8 priority; u8 valid; u8 unused_0[3]; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 64ef0e5dad8c..0b8cd7443843 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -15,6 +15,7 @@ #include <linux/etherdevice.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_ulp.h" #include "bnxt_sriov.h" #include "bnxt_ethtool.h" @@ -555,6 +556,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) if (rc) goto err_out2; + bnxt_ulp_sriov_cfg(bp, *num_vfs); + rc = pci_enable_sriov(bp->pdev, *num_vfs); if (rc) goto err_out2; @@ -596,6 +599,8 @@ void bnxt_sriov_disable(struct bnxt *bp) rtnl_lock(); bnxt_restore_pf_fw_resources(bp); rtnl_unlock(); + + bnxt_ulp_sriov_cfg(bp, 0); } int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 435a2e4739d1..89d4feba1a9a 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2537,7 +2537,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget) sbdma_tx_process(sc, &(sc->sbm_txdma), 1); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 73a94113db1f..6e13c937d715 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1881,7 +1881,7 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget) return rcvd; poll_exit: - napi_complete(napi); + napi_complete_done(napi, rcvd); rx_ctrl->rx_complete++; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index c0fb80acc2da..d7d135c95509 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1090,7 +1090,7 @@ static int macb_poll(struct napi_struct *napi, int budget) work_done = bp->macbgem_ops.mog_rx(bp, 
budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* Packets received while interrupts were disabled */ status = macb_readl(bp, RSR); @@ -2085,6 +2085,9 @@ static int macb_open(struct net_device *dev) netif_tx_start_all_queues(dev); + if (bp->ptp_info) + bp->ptp_info->ptp_init(dev); + return 0; } @@ -2106,6 +2109,9 @@ static int macb_close(struct net_device *dev) macb_free_consistent(bp); + if (bp->ptp_info) + bp->ptp_info->ptp_remove(dev); + return 0; } @@ -2379,6 +2385,17 @@ static int macb_set_ringparam(struct net_device *netdev, return 0; } +static int macb_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct macb *bp = netdev_priv(netdev); + + if (bp->ptp_info) + return bp->ptp_info->get_ts_info(netdev, info); + + return ethtool_op_get_ts_info(netdev, info); +} + static const struct ethtool_ops macb_ethtool_ops = { .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, @@ -2396,7 +2413,7 @@ static const struct ethtool_ops gem_ethtool_ops = { .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = macb_get_ts_info, .get_ethtool_stats = gem_get_ethtool_stats, .get_strings = gem_get_ethtool_strings, .get_sset_count = gem_get_sset_count, @@ -2409,6 +2426,7 @@ static const struct ethtool_ops gem_ethtool_ops = { static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct phy_device *phydev = dev->phydev; + struct macb *bp = netdev_priv(dev); if (!netif_running(dev)) return -EINVAL; @@ -2416,7 +2434,17 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!phydev) return -ENODEV; - return phy_mii_ioctl(phydev, rq, cmd); + if (!bp->ptp_info) + return phy_mii_ioctl(phydev, rq, cmd); + + switch (cmd) { + case SIOCSHWTSTAMP: + return bp->ptp_info->set_hwtst(dev, rq, cmd); + case SIOCGHWTSTAMP: + return bp->ptp_info->get_hwtst(dev, rq); + default: + return phy_mii_ioctl(phydev, rq, cmd); + } } static int macb_set_features(struct net_device *netdev, diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index d67adad67be1..94ddedd2d83e 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -131,6 +131,20 @@ #define GEM_RXIPCCNT 0x01a8 /* IP header Checksum Error Counter */ #define GEM_RXTCPCCNT 0x01ac /* TCP Checksum Error Counter */ #define GEM_RXUDPCCNT 0x01b0 /* UDP Checksum Error Counter */ +#define GEM_TISUBN 0x01bc /* 1588 Timer Increment Sub-ns */ +#define GEM_TSH 0x01c0 /* 1588 Timer Seconds High */ +#define GEM_TSL 0x01d0 /* 1588 Timer Seconds Low */ +#define GEM_TN 0x01d4 /* 1588 Timer Nanoseconds */ +#define GEM_TA 0x01d8 /* 1588 Timer Adjust */ +#define GEM_TI 0x01dc /* 1588 Timer Increment */ +#define GEM_EFTSL 0x01e0 /* PTP Event Frame Tx Seconds Low */ +#define GEM_EFTN 0x01e4 /* PTP Event Frame Tx Nanoseconds */ +#define GEM_EFRSL 0x01e8 /* PTP Event Frame Rx Seconds Low */ +#define GEM_EFRN 0x01ec /* PTP Event Frame Rx Nanoseconds */ +#define GEM_PEFTSL 0x01f0 /* PTP Peer Event Frame Tx Secs Low */ +#define GEM_PEFTN 0x01f4 /* PTP Peer Event Frame Tx Ns */ +#define GEM_PEFRSL 0x01f8 /* PTP Peer Event Frame Rx Sec Low */ +#define GEM_PEFRN 0x01fc /* PTP Peer Event Frame Rx Ns */ #define GEM_DCFG1 0x0280 /* Design Config 1 */ #define GEM_DCFG2 0x0284 /* Design Config 2 */ #define GEM_DCFG3 0x0288 /* Design Config 3 */ @@ -174,6 +188,7 @@ #define MACB_NCR_TPF_SIZE 1 #define MACB_TZQ_OFFSET 
12 /* Transmit zero quantum pause frame */ #define MACB_TZQ_SIZE 1 +#define MACB_SRTSM_OFFSET 15 /* Bitfields in NCFGR */ #define MACB_SPD_OFFSET 0 /* Speed */ @@ -319,6 +334,32 @@ #define MACB_PTZ_SIZE 1 #define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */ #define MACB_WOL_SIZE 1 +#define MACB_DRQFR_OFFSET 18 /* PTP Delay Request Frame Received */ +#define MACB_DRQFR_SIZE 1 +#define MACB_SFR_OFFSET 19 /* PTP Sync Frame Received */ +#define MACB_SFR_SIZE 1 +#define MACB_DRQFT_OFFSET 20 /* PTP Delay Request Frame Transmitted */ +#define MACB_DRQFT_SIZE 1 +#define MACB_SFT_OFFSET 21 /* PTP Sync Frame Transmitted */ +#define MACB_SFT_SIZE 1 +#define MACB_PDRQFR_OFFSET 22 /* PDelay Request Frame Received */ +#define MACB_PDRQFR_SIZE 1 +#define MACB_PDRSFR_OFFSET 23 /* PDelay Response Frame Received */ +#define MACB_PDRSFR_SIZE 1 +#define MACB_PDRQFT_OFFSET 24 /* PDelay Request Frame Transmitted */ +#define MACB_PDRQFT_SIZE 1 +#define MACB_PDRSFT_OFFSET 25 /* PDelay Response Frame Transmitted */ +#define MACB_PDRSFT_SIZE 1 +#define MACB_SRI_OFFSET 26 /* TSU Seconds Register Increment */ +#define MACB_SRI_SIZE 1 + +/* Timer increment fields */ +#define MACB_TI_CNS_OFFSET 0 +#define MACB_TI_CNS_SIZE 8 +#define MACB_TI_ACNS_OFFSET 8 +#define MACB_TI_ACNS_SIZE 8 +#define MACB_TI_NIT_OFFSET 16 +#define MACB_TI_NIT_SIZE 8 /* Bitfields in MAN */ #define MACB_DATA_OFFSET 0 /* data */ @@ -386,6 +427,17 @@ #define GEM_PBUF_LSO_OFFSET 27 #define GEM_PBUF_LSO_SIZE 1 +/* Bitfields in TISUBN */ +#define GEM_SUBNSINCR_OFFSET 0 +#define GEM_SUBNSINCR_SIZE 16 + +/* Bitfields in TI */ +#define GEM_NSINCR_OFFSET 0 +#define GEM_NSINCR_SIZE 8 + +/* Bitfields in ADJ */ +#define GEM_ADDSUB_OFFSET 31 +#define GEM_ADDSUB_SIZE 1 /* Constants for CLK */ #define MACB_CLK_DIV8 0 #define MACB_CLK_DIV16 1 @@ -413,6 +465,7 @@ #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 #define MACB_CAPS_USRIO_DISABLED 0x00000010 #define MACB_CAPS_JUMBO 0x00000020 +#define MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 @@ -782,6 +835,20 @@ struct macb_or_gem_ops { int (*mog_rx)(struct macb *bp, int budget); }; +/* MACB-PTP interface: adapt to platform needs. 
*/ +struct macb_ptp_info { + void (*ptp_init)(struct net_device *ndev); + void (*ptp_remove)(struct net_device *ndev); + s32 (*get_ptp_max_adj)(void); + unsigned int (*get_tsu_rate)(struct macb *bp); + int (*get_ts_info)(struct net_device *dev, + struct ethtool_ts_info *info); + int (*get_hwtst)(struct net_device *netdev, + struct ifreq *ifr); + int (*set_hwtst)(struct net_device *netdev, + struct ifreq *ifr, int cmd); +}; + struct macb_config { u32 caps; unsigned int dma_burst_length; @@ -874,6 +941,8 @@ struct macb { unsigned int jumbo_max_len; u32 wol; + + struct macb_ptp_info *ptp_info; /* macb-ptp interface */ }; static inline bool macb_is_gem(struct macb *bp) @@ -881,4 +950,9 @@ static inline bool macb_is_gem(struct macb *bp) return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); } +static inline bool gem_has_ptp(struct macb *bp) +{ + return !!(bp->caps & MACB_CAPS_GEM_HAS_PTP); +} + #endif /* _MACB_H */ diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index b0540658afad..2bd7c638b178 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1247,7 +1247,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget) work_done = xgmac_rx(priv, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); } return work_done; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index b8b579d8043f..c12cfa4113cc 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2223,25 +2223,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief Select queue based on hash - * @param dev Net device - * @param skb sk_buff structure - * @returns selected queue number - */ -static u16 select_q(struct net_device *dev, struct sk_buff *skb, - void *accel_priv __attribute__((unused)), - select_queue_fallback_t fallback __attribute__((unused))) -{ - u32 qindex = 0; - struct lio *lio; - - lio = GET_LIO(dev); - qindex = skb_tx_hash(dev, skb); - - return (u16)(qindex % (lio->linfo.num_txpciq)); -} - /** Routine to push packets arriving on Octeon interface upto network layer. * @param oct_id - octeon device id. * @param skbuff - skbuff struct to be passed to network layer. @@ -2379,7 +2360,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), if (packet_was_received) { droq->stats.rx_bytes_received += len; droq->stats.rx_pkts_received++; - netdev->last_rx = jiffies; } else { droq->stats.rx_dropped++; netif_info(lio, rx_err, lio->netdev, @@ -2465,8 +2445,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) __func__, iq_no); } - if ((work_done < budget) && (tx_done)) { - napi_complete(napi); + /* force enable interrupt if reg cnts are high to avoid wraparound */ + if ((work_done < budget && tx_done) || + (iq->pkt_in_done >= MAX_REG_CNT) || + (droq->pkt_count >= MAX_REG_CNT)) { + tx_done = 1; + napi_complete_done(napi, work_done); octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, POLL_EVENT_ENABLE_INTR, 0); return 0; @@ -2693,13 +2677,7 @@ static int liquidio_stop(struct net_device *netdev) lio->linfo.link.s.link_up = 0; lio->link_changes++; - /* Pause for a moment and wait for Octeon to flush out (to the wire) any - * egress packets that are in-flight. 
- */ - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(msecs_to_jiffies(100)); - - /* Now it should be safe to tell Octeon that nic interface is down. */ + /* Tell Octeon that nic interface is down. */ send_rx_ctrl_cmd(lio, 0); if (OCTEON_CN23XX_PF(oct)) { @@ -3342,11 +3320,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) netif_trans_update(netdev); - if (skb_shinfo(skb)->gso_size) - stats->tx_done += skb_shinfo(skb)->gso_segs; + if (tx_info->s.gso_segs) + stats->tx_done += tx_info->s.gso_segs; else stats->tx_done++; - stats->tx_tot_bytes += skb->len; + stats->tx_tot_bytes += ndata.datasize; return NETDEV_TX_OK; @@ -3761,7 +3739,6 @@ static const struct net_device_ops lionetdevops = { .ndo_set_vf_vlan = liquidio_set_vf_vlan, .ndo_get_vf_config = liquidio_get_vf_config, .ndo_set_vf_link_state = liquidio_set_vf_link_state, - .ndo_select_queue = select_q }; /** \brief Entry point for the liquidio module diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index ad2e72d7d522..631f1c0f9e4d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1455,26 +1455,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief Select queue based on hash - * @param dev Net device - * @param skb sk_buff structure - * @returns selected queue number - */ -static u16 select_q(struct net_device *dev, struct sk_buff *skb, - void *accel_priv __attribute__((unused)), - select_queue_fallback_t fallback __attribute__((unused))) -{ - struct lio *lio; - u32 qindex; - - lio = GET_LIO(dev); - - qindex = skb_tx_hash(dev, skb); - - return (u16)(qindex % (lio->linfo.num_txpciq)); -} - /** Routine to push packets arriving on Octeon interface upto network layer. * @param oct_id - octeon device id. * @param skbuff - skbuff struct to be passed to network layer. 
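[Context for the two liquidio hunks above that delete select_q() and its .ndo_select_queue wiring: with the hook gone, transmit-queue choice falls back to the networking core. A minimal sketch of why nothing is lost -- assuming, as the driver's queue setup implies, that the real number of TX queues already matches num_txpciq, so the core's hash-based fallback lands in the same range without the explicit modulo; select_q_equivalent() is a hypothetical name, not kernel API:]

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: what the removed liquidio select_q() boiled down to.
 * skb_tx_hash() already scales the flow hash into
 * [0, dev->real_num_tx_queues), which is what the core fallback uses
 * too, so the extra "% num_txpciq" added nothing once
 * netif_set_real_num_tx_queues() reflects the txpciq count.
 */
static u16 select_q_equivalent(struct net_device *dev, struct sk_buff *skb)
{
	return skb_tx_hash(dev, skb);	/* same pick the core makes by default */
}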
@@ -1591,7 +1571,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), if (packet_was_received) { droq->stats.rx_bytes_received += len; droq->stats.rx_pkts_received++; - netdev->last_rx = jiffies; } else { droq->stats.rx_dropped++; netif_info(lio, rx_err, lio->netdev, @@ -1651,8 +1630,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) __func__, iq_no); } - if ((work_done < budget) && (tx_done)) { - napi_complete(napi); + /* force enable interrupt if reg cnts are high to avoid wraparound */ + if ((work_done < budget && tx_done) || + (iq->pkt_in_done >= MAX_REG_CNT) || + (droq->pkt_count >= MAX_REG_CNT)) { + tx_done = 1; + napi_complete_done(napi, work_done); octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, POLL_EVENT_ENABLE_INTR, 0); return 0; @@ -2454,11 +2437,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) netif_trans_update(netdev); - if (skb_shinfo(skb)->gso_size) - stats->tx_done += skb_shinfo(skb)->gso_segs; + if (tx_info->s.gso_segs) + stats->tx_done += tx_info->s.gso_segs; else stats->tx_done++; - stats->tx_tot_bytes += skb->len; + stats->tx_tot_bytes += ndata.datasize; return NETDEV_TX_OK; @@ -2717,7 +2700,6 @@ static const struct net_device_ops lionetdevops = { .ndo_set_features = liquidio_set_features, .ndo_udp_tunnel_add = liquidio_add_vxlan_port, .ndo_udp_tunnel_del = liquidio_del_vxlan_port, - .ndo_select_queue = select_q, }; static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index bc0af8af1a0c..294c6f3c6b48 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -99,6 +99,7 @@ enum octeon_tag_type { #define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1) #define BYTES_PER_DHLEN_UNIT 8 +#define MAX_REG_CNT 2000000U static inline u32 incr_index(u32 index, u32 count, u32 max) { diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index a8df493a5012..9675ffbf25e6 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1361,6 +1361,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) spin_lock_bh(&droq->lock); writel(droq->pkt_count, droq->pkts_sent_reg); droq->pkt_count = 0; + /* this write needs to be flushed before we release the lock */ + mmiowb(); spin_unlock_bh(&droq->lock); oct = droq->oct_dev; } @@ -1368,6 +1370,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) spin_lock_bh(&iq->lock); writel(iq->pkt_in_done, iq->inst_cnt_reg); iq->pkt_in_done = 0; + /* this write needs to be flushed before we release the lock */ + mmiowb(); spin_unlock_bh(&iq->lock); oct = iq->oct_dev; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c index 73696b427f06..201b9875f9bb 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c @@ -131,6 +131,7 @@ int octeon_mbox_write(struct octeon_device *oct, { struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no]; u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS; + long timeout = LIO_MBOX_WRITE_WAIT_TIME; unsigned long flags; spin_lock_irqsave(&mbox->lock, flags); @@ -158,7 +159,7 @@ int octeon_mbox_write(struct octeon_device *oct, count 
= 0; while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) { - schedule_timeout_uninterruptible(LIO_MBOX_WRITE_WAIT_TIME); + schedule_timeout_uninterruptible(timeout); if (count++ == LIO_MBOX_WRITE_WAIT_CNT) { ret = OCTEON_MBOX_STATUS_FAILED; break; @@ -171,7 +172,7 @@ int octeon_mbox_write(struct octeon_device *oct, count = 0; while (readq(mbox->mbox_write_reg) != OCTEON_PFVFACK) { - schedule_timeout_uninterruptible(10); + schedule_timeout_uninterruptible(timeout); if (count++ == LIO_MBOX_WRITE_WAIT_CNT) { ret = OCTEON_MBOX_STATUS_FAILED; break; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h index fe60a3e6247b..c9376fe075bc 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h @@ -31,8 +31,8 @@ #define OCTEON_PFVFSIG 0x1122334455667788 #define OCTEON_PFVFERR 0xDEADDEADDEADDEAD -#define LIO_MBOX_WRITE_WAIT_CNT 1000 -#define LIO_MBOX_WRITE_WAIT_TIME 10 +#define LIO_MBOX_WRITE_WAIT_CNT 1000 +#define LIO_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1) enum octeon_mbox_cmd_status { OCTEON_MBOX_STATUS_SUCCESS = 0, diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 21f80f5744ba..a2138686c605 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -501,7 +501,7 @@ static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget) if (work_done < budget) { /* We stopped because no more packets were available. */ - napi_complete(napi); + napi_complete_done(napi, work_done); octeon_mgmt_enable_rx_irq(p); } octeon_mgmt_update_rx_stats(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 2e74bbaa38e1..02a986cdbb39 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -471,12 +471,46 @@ static void nicvf_get_ringparam(struct net_device *netdev, struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; - ring->rx_max_pending = MAX_RCV_BUF_COUNT; - ring->rx_pending = qs->rbdr_len; + ring->rx_max_pending = MAX_CMP_QUEUE_LEN; + ring->rx_pending = qs->cq_len; ring->tx_max_pending = MAX_SND_QUEUE_LEN; ring->tx_pending = qs->sq_len; } +static int nicvf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nicvf *nic = netdev_priv(netdev); + struct queue_set *qs = nic->qs; + u32 rx_count, tx_count; + + /* Due to HW errata this is not supported on T88 pass 1.x silicon */ + if (pass1_silicon(nic->pdev)) + return -EINVAL; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + tx_count = clamp_t(u32, ring->tx_pending, + MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN); + rx_count = clamp_t(u32, ring->rx_pending, + MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN); + + if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len)) + return 0; + + /* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */ + qs->sq_len = rounddown_pow_of_two(tx_count); + qs->cq_len = rounddown_pow_of_two(rx_count); + + if (netif_running(netdev)) { + nicvf_stop(netdev); + nicvf_open(netdev); + } + + return 0; +} + static int nicvf_get_rss_hash_opts(struct nicvf *nic, struct ethtool_rxnfc *info) { @@ -635,7 +669,7 @@ static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey, } static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir, - const u8 
*hkey, u8 hfunc) + const u8 *hkey, const u8 hfunc) { struct nicvf *nic = netdev_priv(dev); struct nicvf_rss_info *rss = &nic->rss_info; @@ -787,6 +821,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = { .get_regs = nicvf_get_regs, .get_coalesce = nicvf_get_coalesce, .get_ringparam = nicvf_get_ringparam, + .set_ringparam = nicvf_set_ringparam, .get_rxnfc = nicvf_get_rxnfc, .set_rxnfc = nicvf_set_rxnfc, .get_rxfh_key_size = nicvf_get_rxfh_key_size, diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 273eafdb1c57..6feaa24bcfd4 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -749,7 +749,7 @@ static int nicvf_poll(struct napi_struct *napi, int budget) if (work_done < budget) { /* Slow packet rate, exit polling */ - napi_complete(napi); + napi_complete_done(napi, work_done); /* Re-enable interrupts */ cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq->cq_idx); @@ -1274,7 +1274,8 @@ int nicvf_open(struct net_device *netdev) /* Configure receive side scaling and MTU */ if (!nic->sqs_mode) { nicvf_rss_init(nic); - if (nicvf_update_hw_max_frs(nic, netdev->mtu)) + err = nicvf_update_hw_max_frs(nic, netdev->mtu); + if (err) goto cleanup; /* Clear percpu stats */ diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d2ac133e36f1..ac0390be3b12 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -603,7 +603,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, cq_cfg.ena = 1; cq_cfg.reset = 0; cq_cfg.caching = 0; - cq_cfg.qsize = CMP_QSIZE; + cq_cfg.qsize = ilog2(qs->cq_len >> 10); cq_cfg.avg_con = 0; nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg); @@ -652,9 +652,12 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, sq_cfg.ena = 1; sq_cfg.reset = 0; sq_cfg.ldwb = 0; - sq_cfg.qsize = SND_QSIZE; + sq_cfg.qsize = ilog2(qs->sq_len >> 10); sq_cfg.tstmp_bgx_intf = 0; - sq_cfg.cq_limit = 0; + /* CQ's level at which HW will stop processing SQEs to avoid + * transmitting a pkt with no space in CQ to post CQE_TX. + */ + sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg); /* Set threshold value for interrupt generation */ @@ -816,11 +819,21 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable) { bool disable = false; struct queue_set *qs = nic->qs; + struct queue_set *pqs = nic->pnicvf->qs; int qidx; if (!qs) return 0; + /* Take primary VF's queue lengths. + * This is needed to take queue lengths set from ethtool + * into consideration. 
+ */ + if (nic->sqs_mode && pqs) { + qs->cq_len = pqs->cq_len; + qs->sq_len = pqs->sq_len; + } + if (enable) { if (nicvf_alloc_resources(nic)) return -ENOMEM; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 9e2104675bc9..5cb84da99a2d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -59,8 +59,9 @@ /* Default queue count per QS, its lengths and threshold values */ #define DEFAULT_RBDR_CNT 1 -#define SND_QSIZE SND_QUEUE_SIZE2 +#define SND_QSIZE SND_QUEUE_SIZE0 #define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) +#define MIN_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE0 + 10)) #define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) #define SND_QUEUE_THRESH 2ULL #define MIN_SQ_DESC_PER_PKT_XMIT 2 @@ -70,11 +71,18 @@ /* Keep CQ and SQ sizes same, if timestamping * is enabled this equation will change. */ -#define CMP_QSIZE CMP_QUEUE_SIZE2 +#define CMP_QSIZE CMP_QUEUE_SIZE0 #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) +#define MIN_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE0 + 10)) +#define MAX_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE6 + 10)) #define CMP_QUEUE_CQE_THRESH (NAPI_POLL_WEIGHT / 2) #define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */ +/* Number of CQEs that may anyway get used by HW due to pipelining + * effects, irrespective of the PASS/DROP/LEVELS configured + */ +#define CMP_QUEUE_PIPELINE_RSVD 544 + #define RBDR_SIZE RBDR_SIZE0 #define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) #define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13)) @@ -93,8 +101,8 @@ * RED accepts pkt if unused CQE < 2304 & >= 2560 * DROPs pkts if unused CQE < 2304 */ -#define RQ_PASS_CQ_LVL 160ULL -#define RQ_DROP_CQ_LVL 144ULL +#define RQ_PASS_CQ_LVL 192ULL +#define RQ_DROP_CQ_LVL 184ULL /* RED and Backpressure levels of RBDR for pkt reception * For RBDR, level is a measure of fullness i.e 0x0 means empty diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 9211c750e064..dfb2bad7ced5 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -47,8 +47,9 @@ struct lmac { struct bgx { u8 bgx_id; struct lmac lmac[MAX_LMAC_PER_BGX]; - int lmac_count; + u8 lmac_count; u8 max_lmac; + u8 acpi_lmac_idx; void __iomem *reg_base; struct pci_dev *pdev; bool is_dlm; @@ -893,17 +894,15 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid) struct device *dev = &bgx->pdev->dev; struct lmac *lmac; char str[20]; - u8 dlm; - if (lmacid > bgx->max_lmac) + if (!bgx->is_dlm && lmacid) return; lmac = &bgx->lmac[lmacid]; - dlm = (lmacid / 2) + (bgx->bgx_id * 2); if (!bgx->is_dlm) sprintf(str, "BGX%d QLM mode", bgx->bgx_id); else - sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm); + sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid); switch (lmac->lmac_type) { case BGX_MODE_SGMII: @@ -989,7 +988,6 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid) static void bgx_set_lmac_config(struct bgx *bgx, u8 idx) { struct lmac *lmac; - struct lmac *olmac; u64 cmr_cfg; u8 lmac_type; u8 lane_to_sds; @@ -1009,62 +1007,26 @@ return; } - /* On 81xx BGX can be split across 2 DLMs * firmware programs lmac_type of LMAC0 and LMAC2 + /* For DLMs or SLMs on 80/81/83xx, many lane configurations + * are possible and they vary across boards.
Also, the kernel has + * no way to identify board type/info; since firmware does, just + * take the lmac type and serdes lane config as is. */ - if ((idx == 0) || (idx == 2)) { - cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG); - lmac_type = (u8)((cmr_cfg >> 8) & 0x07); - lane_to_sds = (u8)(cmr_cfg & 0xFF); - /* Check if config is not reset value */ - if ((lmac_type == 0) && (lane_to_sds == 0xE4)) - lmac->lmac_type = BGX_MODE_INVALID; - else - lmac->lmac_type = lmac_type; - lmac_set_training(bgx, lmac, lmac->lmacid); - lmac_set_lane2sds(bgx, lmac); - - olmac = &bgx->lmac[idx + 1]; - /* Check if other LMAC on the same DLM is already configured by - * firmware, if so use the same config or else set as same, as - * that of LMAC 0/2. - * This check is needed as on 80xx only one lane of each of the - * DLM of BGX0 is used, so have to rely on firmware for - * distingushing 80xx from 81xx. - */ - cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG); - lmac_type = (u8)((cmr_cfg >> 8) & 0x07); - lane_to_sds = (u8)(cmr_cfg & 0xFF); - if ((lmac_type == 0) && (lane_to_sds == 0xE4)) { - olmac->lmac_type = lmac->lmac_type; - lmac_set_lane2sds(bgx, olmac); - } else { - olmac->lmac_type = lmac_type; - olmac->lane_to_sds = lane_to_sds; - } - lmac_set_training(bgx, olmac, olmac->lmacid); - } -} - -static bool is_dlm0_in_bgx_mode(struct bgx *bgx) -{ - struct lmac *lmac; - - if (!bgx->is_dlm) - return true; - - lmac = &bgx->lmac[0]; - if (lmac->lmac_type == BGX_MODE_INVALID) - return false; - - return true; + cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG); + lmac_type = (u8)((cmr_cfg >> 8) & 0x07); + lane_to_sds = (u8)(cmr_cfg & 0xFF); + /* Check if config is reset value */ + if ((lmac_type == 0) && (lane_to_sds == 0xE4)) + lmac->lmac_type = BGX_MODE_INVALID; + else + lmac->lmac_type = lmac_type; + lmac->lane_to_sds = lane_to_sds; + lmac_set_training(bgx, lmac, lmac->lmacid); } static void bgx_get_qlm_mode(struct bgx *bgx) { struct lmac *lmac; - struct lmac *lmac01; - struct lmac *lmac23; u8 idx; /* Init all LMAC's type to invalid */ @@ -1080,29 +1042,9 @@ if (bgx->lmac_count > bgx->max_lmac) bgx->lmac_count = bgx->max_lmac; - for (idx = 0; idx < bgx->max_lmac; idx++) - bgx_set_lmac_config(bgx, idx); - - if (!bgx->is_dlm || bgx->is_rgx) { - bgx_print_qlm_mode(bgx, 0); - return; - } - - if (bgx->lmac_count) { - bgx_print_qlm_mode(bgx, 0); - bgx_print_qlm_mode(bgx, 2); - } - - /* If DLM0 is not in BGX mode then LMAC0/1 have - * to be configured with serdes lanes of DLM1 - */ - if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2)) - return; for (idx = 0; idx < bgx->lmac_count; idx++) { - lmac01 = &bgx->lmac[idx]; - lmac23 = &bgx->lmac[idx + 2]; - lmac01->lmac_type = lmac23->lmac_type; - lmac01->lane_to_sds = lmac23->lane_to_sds; + bgx_set_lmac_config(bgx, idx); + bgx_print_qlm_mode(bgx, idx); } } @@ -1143,13 +1085,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle, if (acpi_bus_get_device(handle, &adev)) goto out; - acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac); + acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac); - SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev); + SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev); - bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; + bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx; + bgx->acpi_lmac_idx++; /* move to next LMAC */ out: - bgx->lmac_count++; return AE_OK; } diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c
b/drivers/net/ethernet/chelsio/cxgb/sge.c index 86f467a2c485..d56142b98534 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1605,7 +1605,7 @@ int t1_poll(struct napi_struct *napi, int budget) int work_done = process_responses(adapter, budget); if (likely(work_done < budget)) { - napi_complete(napi); + napi_complete_done(napi, work_done); writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); } diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index e4b5b057f417..1b9d154f1149 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -1843,7 +1843,7 @@ static int ofld_poll(struct napi_struct *napi, int budget) __skb_queue_head_init(&queue); skb_queue_splice_init(&q->rx_queue, &queue); if (skb_queue_empty(&queue)) { - napi_complete(napi); + napi_complete_done(napi, work_done); spin_unlock_irq(&q->lock); return work_done; } @@ -2414,7 +2414,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) int work_done = process_responses(adap, qs, budget); if (likely(work_done < budget)) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* * Because we don't atomically flush the following diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ad0096e74813..ccb455f14d08 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1501,6 +1501,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, const u8 *fw_data, unsigned int fw_size, struct fw_hdr *card_fw, enum dev_state state, int *reset); int t4_prep_adapter(struct adapter *adapter); +int t4_shutdown_adapter(struct adapter *adapter); enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; int t4_bar2_sge_qregs(struct adapter *adapter, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 3349e1f376c3..49e000ebd2b9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2583,6 +2583,19 @@ static int cxgb_get_vf_config(struct net_device *dev, ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr); return 0; } + +static int cxgb_get_phys_port_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct port_info *pi = netdev_priv(dev); + unsigned int phy_port_id; + + phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id; + ppid->id_len = sizeof(phy_port_id); + memcpy(ppid->id, &phy_port_id, ppid->id_len); + return 0; +} + #endif static int cxgb_set_mac_addr(struct net_device *dev, void *p) @@ -2762,6 +2775,7 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = { .ndo_open = dummy_open, .ndo_set_vf_mac = cxgb_set_vf_mac, .ndo_get_vf_config = cxgb_get_vf_config, + .ndo_get_phys_port_id = cxgb_get_phys_port_id, }; #endif @@ -2782,8 +2796,24 @@ static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = { void t4_fatal_err(struct adapter *adap) { - t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0); - t4_intr_disable(adap); + int port; + + /* Disable the SGE since ULDs are going to free resources that + * could be exposed to the adapter. RDMA MWs for example... + */ + t4_shutdown_adapter(adap); + for_each_port(adap, port) { + struct net_device *dev = adap->port[port]; + + /* If we get here in very early initialization the network + * devices may not have been set up yet. 
+ */ + if (!dev) + continue; + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + } dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); } @@ -4516,12 +4546,14 @@ static int config_mgmt_dev(struct pci_dev *pdev) int err; snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf); - netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, dummy_setup); + netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN, + dummy_setup); if (!netdev) return -ENOMEM; pi = netdev_priv(netdev); pi->adapter = adap; + pi->port_id = adap->pf % adap->params.nports; SET_NETDEV_DEV(netdev, &pdev->dev); adap->port[0] = netdev; @@ -4611,6 +4643,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u32 whoami, pl_rev; enum chip_type chip; static int adap_idx = 1; +#ifdef CONFIG_PCI_IOV + u32 v, port_vec; +#endif printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); @@ -4990,6 +5025,19 @@ sriov: err = -ENOMEM; goto free_adapter; } + spin_lock_init(&adapter->mbox_lock); + INIT_LIST_HEAD(&adapter->mlist.list); + + v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); + err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1, + &v, &port_vec); + if (err < 0) { + dev_err(adapter->pdev_dev, "Could not fetch port params\n"); + goto free_adapter; + } + + adapter->params.nports = hweight32(port_vec); pci_set_drvdata(pdev, adapter); return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index cbd68a8fe2e4..c9026352a842 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -397,9 +397,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, struct ch_sched_params info; struct ch_sched_params tp; - memset(&info, 0, sizeof(info)); - memset(&tp, 0, sizeof(tp)); - memcpy(&tp, p, sizeof(tp)); /* Don't try to match class parameter */ tp.u.params.class = SCHED_CLS_NONE; @@ -409,7 +406,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, if (e->state == SCHED_STATE_UNUSED) continue; - memset(&info, 0, sizeof(info)); memcpy(&info, &e->info, sizeof(info)); /* Don't try to match class parameter */ info.u.params.class = SCHED_CLS_NONE; @@ -458,7 +454,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi, if (!e) goto out; - memset(&np, 0, sizeof(np)); memcpy(&np, p, sizeof(np)); np.u.params.class = e->idx; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index f113015074b1..87000cd39737 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -330,11 +330,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, * mailbox access list but this is a start. We very rearely * contend on access to the mailbox ... */ - if (i > FW_CMD_MAX_TIMEOUT) { + pcie_fw = t4_read_reg(adap, PCIE_FW_A); + if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) { spin_lock(&adap->mbox_lock); list_del(&entry.list); spin_unlock(&adap->mbox_lock); - ret = -EBUSY; + ret = (pcie_fw & PCIE_FW_ERR_F) ? 
-ENXIO : -EBUSY; t4_record_mbox(adap, cmd, size, access, ret); return ret; } @@ -432,6 +433,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, spin_lock(&adap->mbox_lock); list_del(&entry.list); spin_unlock(&adap->mbox_lock); + t4_fatal_err(adap); return ret; } @@ -5501,6 +5503,7 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx, void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) { u32 bgmap = t4_get_mps_bg_map(adap, idx); + u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A); #define GET_STAT(name) \ t4_read_reg64(adap, \ @@ -5532,6 +5535,14 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); + if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { + if (stat_ctl & COUNTPAUSESTATTX_F) { + p->tx_frames -= p->tx_pause; + p->tx_octets -= p->tx_pause * 64; + } + if (stat_ctl & COUNTPAUSEMCTX_F) + p->tx_mcast_frames -= p->tx_pause; + } p->rx_octets = GET_STAT(RX_PORT_BYTES); p->rx_frames = GET_STAT(RX_PORT_FRAMES); p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); @@ -5560,6 +5571,15 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); + if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { + if (stat_ctl & COUNTPAUSESTATRX_F) { + p->rx_frames -= p->rx_pause; + p->rx_octets -= p->rx_pause * 64; + } + if (stat_ctl & COUNTPAUSEMCRX_F) + p->rx_mcast_frames -= p->rx_pause; + } + p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; @@ -7540,6 +7560,39 @@ int t4_prep_adapter(struct adapter *adapter) } /** + * t4_shutdown_adapter - shut down adapter, host & wire + * @adapter: the adapter + * + * Perform an emergency shutdown of the adapter and stop it from + * continuing any further communication on the ports or DMA to the + * host. This is typically used when the adapter and/or firmware + * have crashed and we want to prevent any further accidental + * communication with the rest of the world. This will also force + * the port Link Status to go down -- if register writes work -- + * which should help our peers figure out that we're down. + */ +int t4_shutdown_adapter(struct adapter *adapter) +{ + int port; + + t4_intr_disable(adapter); + t4_write_reg(adapter, DBG_GPIO_EN_A, 0); + for_each_port(adapter, port) { + u32 a_port_cfg = PORT_REG(port, + is_t4(adapter->params.chip) + ? 
XGMAC_PORT_CFG_A + : MAC_PORT_CFG_A); + + t4_write_reg(adapter, a_port_cfg, + t4_read_reg(adapter, a_port_cfg) + & ~SIGNAL_DET_V(1)); + } + t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0); + + return 0; +} + +/** * t4_bar2_sge_qregs - return BAR2 SGE Queue register information * @adapter: the adapter * @qid: the Queue ID diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index e685163b1357..3348d33c36fa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -855,6 +855,14 @@ #define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S) #define PERR_INT_CAUSE_F PERR_INT_CAUSE_V(1U) +#define DBG_GPIO_EN_A 0x6010 +#define XGMAC_PORT_CFG_A 0x1000 +#define MAC_PORT_CFG_A 0x800 + +#define SIGNAL_DET_S 14 +#define SIGNAL_DET_V(x) ((x) << SIGNAL_DET_S) +#define SIGNAL_DET_F SIGNAL_DET_V(1U) + #define MC_ECC_STATUS_A 0x751c #define MC_P_ECC_STATUS_A 0x4131c @@ -1798,12 +1806,29 @@ #define MPS_CMN_CTL_A 0x9000 +#define COUNTPAUSEMCRX_S 5 +#define COUNTPAUSEMCRX_V(x) ((x) << COUNTPAUSEMCRX_S) +#define COUNTPAUSEMCRX_F COUNTPAUSEMCRX_V(1U) + +#define COUNTPAUSESTATRX_S 4 +#define COUNTPAUSESTATRX_V(x) ((x) << COUNTPAUSESTATRX_S) +#define COUNTPAUSESTATRX_F COUNTPAUSESTATRX_V(1U) + +#define COUNTPAUSEMCTX_S 3 +#define COUNTPAUSEMCTX_V(x) ((x) << COUNTPAUSEMCTX_S) +#define COUNTPAUSEMCTX_F COUNTPAUSEMCTX_V(1U) + +#define COUNTPAUSESTATTX_S 2 +#define COUNTPAUSESTATTX_V(x) ((x) << COUNTPAUSESTATTX_S) +#define COUNTPAUSESTATTX_F COUNTPAUSESTATTX_V(1U) + #define NUMPORTS_S 0 #define NUMPORTS_M 0x3U #define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M) #define MPS_INT_CAUSE_A 0x9008 #define MPS_TX_INT_CAUSE_A 0x9408 +#define MPS_STAT_CTL_A 0x9600 #define FRMERR_S 15 #define FRMERR_V(x) ((x) << FRMERR_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index 2accab386323..5fdaa16426c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -36,8 +36,8 @@ #define __T4FW_VERSION_H__ #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x0F -#define T4FW_VERSION_MICRO 0x25 +#define T4FW_VERSION_MINOR 0x10 +#define T4FW_VERSION_MICRO 0x1A #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -45,8 +45,8 @@ #define T4FW_MIN_VERSION_MICRO 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x0F -#define T5FW_VERSION_MICRO 0x25 +#define T5FW_VERSION_MINOR 0x10 +#define T5FW_VERSION_MICRO 0x1A #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -54,8 +54,8 @@ #define T5FW_MIN_VERSION_MICRO 0x00 #define T6FW_VERSION_MAJOR 0x01 -#define T6FW_VERSION_MINOR 0x0F -#define T6FW_VERSION_MICRO 0x25 +#define T6FW_VERSION_MINOR 0x10 +#define T6FW_VERSION_MICRO 0x1A #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index f3ed9ce99e5e..e37dde2ba97f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1889,7 +1889,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) u32 val; if (likely(work_done < budget)) { - napi_complete(napi); + napi_complete_done(napi, work_done); intr_params = rspq->next_intr_params; rspq->next_intr_params = rspq->intr_params; } else diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c 
b/drivers/net/ethernet/cisco/enic/enic_main.c index c5842c525eed..91e42bea151c 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1343,7 +1343,7 @@ static int enic_poll(struct napi_struct *napi, int budget) * exit polling */ - napi_complete(napi); + napi_complete_done(napi, rq_work_done); if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) enic_set_int_moderation(enic, &enic->rq[0]); vnic_intr_unmask(&enic->intr[intr]); @@ -1500,7 +1500,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) * exit polling */ - napi_complete(napi); + napi_complete_done(napi, work_done); if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) enic_set_int_moderation(enic, &enic->rq[rq]); vnic_intr_unmask(&enic->intr[intr]); diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c index 92306b320840..ba6ae24acf62 100644 --- a/drivers/net/ethernet/dec/tulip/interrupt.c +++ b/drivers/net/ethernet/dec/tulip/interrupt.c @@ -319,8 +319,8 @@ int tulip_poll(struct napi_struct *napi, int budget) /* Remove us from polling list and enable RX intr. */ - napi_complete(napi); - iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); + napi_complete_done(napi, work_done); + iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); /* The last op happens after poll completion. Which means the following: * 1. it can race with disabling irqs in irq handler @@ -355,7 +355,7 @@ int tulip_poll(struct napi_struct *napi, int budget) * before we did napi_complete(). See? We would lose it. */ /* remove ourselves from the polling list */ - napi_complete(napi); + napi_complete_done(napi, work_done); return work_done; } diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 2a17c59f69f9..3e77dd863175 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -415,7 +415,7 @@ static int dnet_poll(struct napi_struct *napi, int budget) /* We processed all packets available. Tell NAPI it can * stop polling then re-enable rx interrupts. 
*/ - napi_complete(napi); + napi_complete_done(napi, npackets); int_enable = dnet_readl(bp, INTR_ENB); int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; dnet_writel(bp, int_enable, INTR_ENB); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 0e74529a4209..30e855004c57 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, err: mutex_unlock(&adapter->mcc_lock); - if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) + if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST) status = -EPERM; return status; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index b75744fe9eb0..b3a054026e71 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -318,6 +318,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) if (ether_addr_equal(addr->sa_data, adapter->dev_mac)) return 0; + /* BE3 VFs without FILTMGMT privilege are not allowed to set their MAC + * address + */ + if (BEx_chip(adapter) && be_virtfn(adapter) && + !check_privilege(adapter, BE_PRIV_FILTMGMT)) + return -EPERM; + /* if device is not running, copy MAC to netdev->dev_addr */ if (!netif_running(netdev)) goto done; @@ -3317,7 +3324,7 @@ int be_poll(struct napi_struct *napi, int budget) be_process_mcc(adapter); if (max_work < budget) { - napi_complete(napi); + napi_complete_done(napi, max_work); /* Skyhawk EQ_DB has a provision to set the rearm to interrupt * delay via a delay multiplier encoding value @@ -3608,7 +3615,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) static void be_disable_if_filters(struct be_adapter *adapter) { - be_dev_mac_del(adapter, adapter->pmac_id[0]); + /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */ + if (!BEx_chip(adapter) || !be_virtfn(adapter) || + check_privilege(adapter, BE_PRIV_FILTMGMT)) + be_dev_mac_del(adapter, adapter->pmac_id[0]); + be_clear_uc_list(adapter); be_clear_mc_list(adapter); @@ -3761,8 +3772,9 @@ static int be_enable_if_filters(struct be_adapter *adapter) if (status) return status; - /* For BE3 VFs, the PF programs the initial MAC address */ - if (!(BEx_chip(adapter) && be_virtfn(adapter))) { + /* Don't add MAC on BE3 VFs without FILTMGMT privilege */ + if (!BEx_chip(adapter) || !be_virtfn(adapter) || + check_privilege(adapter, BE_PRIV_FILTMGMT)) { status = be_dev_mac_add(adapter, adapter->netdev->dev_addr); if (status) return status; diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 63e5e14174ee..f18aba05f1c2 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -614,7 +614,7 @@ static int ethoc_poll(struct napi_struct *napi, int budget) tx_work_done = ethoc_tx(priv->netdev, budget); if (rx_work_done < budget && tx_work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_work_done); ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX); } @@ -995,7 +995,7 @@ static int ethoc_set_ringparam(struct net_device *dev, return 0; } -const struct ethtool_ops ethoc_ethtool_ops = { +static const struct ethtool_ops ethoc_ethtool_ops = { .get_regs_len = ethoc_get_regs_len, .get_regs = ethoc_get_regs, .nway_reset = phy_ethtool_nway_reset, diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 223f35cc034c..992ebe973d25 100644 ---
a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -192,7 +192,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) if (work_done < budget) { u32 buf_int_enable_value = 0; - napi_complete(napi); + napi_complete_done(napi, work_done); /* set tx_done and rx_rdy bits */ buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index b7cbc26a0911..25a14a3fe784 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2001,7 +2001,7 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget) int cleaned = qman_p_poll_dqrr(np->p, budget); if (cleaned < budget) { - napi_complete(napi); + napi_complete_done(napi, cleaned); qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); } else if (np->down) { diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 38160c2bebcb..2cc552ddd8f0 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1615,7 +1615,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget) fec_enet_tx(ndev); if (pkts < budget) { - napi_complete(napi); + napi_complete_done(napi, pkts); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } return pkts; diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 1f98838f32b7..54e3ce9bd94c 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -301,7 +301,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) if (received < budget && tx_left) { /* done */ - napi_complete(napi); + napi_complete_done(napi, received); (*fep->ops->napi_enable)(dev); return received; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index a6e7afa878be..d0ebab705225 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2948,7 +2948,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, } /* try reuse page */ - if (unlikely(page_count(page) != 1)) + if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page))) return false; /* change offset to the other half */ @@ -3183,7 +3183,7 @@ static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) if (work_done < budget) { u32 imask; - napi_complete(napi); + napi_complete_done(napi, work_done); /* Clear the halt bit in RSTAT */ gfar_write(®s->rstat, gfargrp->rstat); @@ -3272,7 +3272,7 @@ static int gfar_poll_rx(struct napi_struct *napi, int budget) if (!num_act_queues) { u32 imask; - napi_complete(napi); + napi_complete_done(napi, work_done); /* Clear the halt bit in RSTAT */ gfar_write(®s->rstat, gfargrp->rstat); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 9d660888510f..3f7ae9f64cd8 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3303,7 +3303,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget) howmany += ucc_geth_rx(ugeth, i, budget - howmany); if (howmany < budget) { - napi_complete(napi); + napi_complete_done(napi, howmany); setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS); } diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c 
b/drivers/net/ethernet/hisilicon/hip04_eth.c index 97b184774784..6e50ec82b3d8 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -555,7 +555,7 @@ refill: priv->reg_inten |= RCV_INT; writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN); } - napi_complete(napi); + napi_complete_done(napi, rx); done: /* clean up tx descriptors and start a new timer if necessary */ tx_remaining = hip04_tx_reclaim(ndev, false); diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 979852d56f31..2c2808830e95 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -330,7 +330,7 @@ static int hisi_femac_poll(struct napi_struct *napi, int budget) } while (ints & DEF_INT_MASK); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); hisi_femac_irq_enable(priv, DEF_INT_MASK & (~IRQ_INT_TX_PER_PACKET)); } diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 418ca1f3774a..25a6c8722eca 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -662,7 +662,7 @@ static int hix5hd2_poll(struct napi_struct *napi, int budget) } while (ints & DEF_INT_MASK); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); hix5hd2_irq_enable(priv); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index b7cb61385ad8..f7b75e96c1c3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -797,7 +797,6 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data, skb->protocol = eth_type_trans(skb, ndev); (void)napi_gro_receive(&ring_data->napi, skb); - ndev->last_rx = jiffies; } static int hns_desc_unused(struct hnae_ring *ring) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index c6ba75c595e0..72ab7b6bf20b 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1326,7 +1326,7 @@ restart_poll: ibmveth_replenish_task(adapter); if (frames_processed < budget) { - napi_complete(napi); + napi_complete_done(napi, frames_processed); /* We think we are done - reenable interrupts, * then check once more to make sure we are done. 
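[Most hunks in this stretch are one mechanical conversion: napi_complete(napi) becomes napi_complete_done(napi, work_done), handing the core the amount of work the poll actually did so it can drive busy-poll and GRO-flush heuristics. A minimal sketch of the resulting canonical poll shape; the foo_* names are placeholders for any one driver's rx/irq helpers, not real kernel symbols:]

#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
	/* device-specific state elided */
};

/* Driver-specific helpers, assumed to exist elsewhere in the driver */
static int foo_rx(struct foo_priv *priv, int budget);
static void foo_enable_rx_irq(struct foo_priv *priv);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = foo_rx(priv, budget);	/* packets processed */

	if (work_done < budget) {
		/* Report the true count instead of plain napi_complete();
		 * only after completing may the device IRQ be re-enabled.
		 */
		napi_complete_done(napi, work_done);
		foo_enable_rx_irq(priv);
	}
	return work_done;
}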
@@ -1607,8 +1607,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) netdev->netdev_ops = &ibmveth_netdev_ops; netdev->ethtool_ops = &netdev_ethtool_ops; SET_NETDEV_DEV(netdev, &dev->dev); - netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->hw_features = NETIF_F_SG; + if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) { + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM; + } netdev->features |= netdev->hw_features; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 3c2526bde7a4..c46935d4a3fe 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -987,7 +987,7 @@ restart_poll: if (frames_processed < budget) { enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); - napi_complete(napi); + napi_complete_done(napi, frames_processed); if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) && napi_reschedule(napi)) { disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 04e939226e08..2b7323d392dc 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2253,7 +2253,7 @@ static int e100_poll(struct napi_struct *napi, int budget) /* If budget not fully consumed, exit the polling mode */ if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); e100_enable_irq(nic); } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 79651eb608ff..2175cced402f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -240,9 +240,9 @@ static void e1000e_dump(struct e1000_adapter *adapter) /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, dev_trans_start(netdev), netdev->last_rx); + pr_info("Device Name state trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, + netdev->state, dev_trans_start(netdev)); } /* Print Registers */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7546109d4980..be456bae8169 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -383,9 +383,9 @@ static void igb_dump(struct igb_adapter *adapter) /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, dev_trans_start(netdev), netdev->last_rx); + pr_info("Device Name state trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, + netdev->state, dev_trans_start(netdev)); } /* Print Registers */ @@ -3964,8 +3964,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); - __page_frag_drain(buffer_info->page, 0, - buffer_info->pagecnt_bias); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); buffer_info->page = NULL; } @@ -6991,7 +6991,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); - __page_frag_drain(page, 0, 
rx_buffer->pagecnt_bias); + __page_frag_cache_drain(page, rx_buffer->pagecnt_bias); } /* clear contents of rx_buffer */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 5826b1ddedcf..fbd220d137b3 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1817,7 +1817,7 @@ ixgb_clean(struct napi_struct *napi, int budget) /* If budget not fully consumed, exit the polling mode */ if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (!test_bit(__IXGB_DOWN, &adapter->flags)) ixgb_irq_enable(adapter); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 094e1d63309a..c38d50c1fcf7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -350,7 +350,7 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) } IXGBE_WRITE_FLUSH(hw); -#ifndef CONFIG_SPARC +#ifndef CONFIG_ARCH_WANT_RELAX_ORDER /* Disable relaxed ordering */ for (i = 0; i < hw->mac.max_tx_queues; i++) { u32 regval; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ffe7d940d9ff..3b3b52b62a5f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -611,12 +611,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); pr_info("Device Name state " - "trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", + "trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, netdev->state, - dev_trans_start(netdev), - netdev->last_rx); + dev_trans_start(netdev)); } /* Print Registers */ diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index f9fcab54783c..f580b49e6b67 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1879,7 +1879,7 @@ jme_open(struct net_device *netdev) jme_phy_on(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) - jme_set_settings(netdev, &jme->old_ecmd); + jme_set_link_ksettings(netdev, &jme->old_cmd); else jme_reset_phy_processor(jme); jme_phy_calibration(jme); @@ -2374,7 +2374,7 @@ jme_tx_timeout(struct net_device *netdev) jme->phylink = 0; jme_reset_phy_processor(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) - jme_set_settings(netdev, &jme->old_ecmd); + jme_set_link_ksettings(netdev, &jme->old_cmd); /* * Force to Reset the link again @@ -2648,27 +2648,27 @@ jme_set_wol(struct net_device *netdev, } static int -jme_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +jme_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct jme_adapter *jme = netdev_priv(netdev); int rc; spin_lock_bh(&jme->phy_lock); - rc = mii_ethtool_gset(&(jme->mii_if), ecmd); + rc = mii_ethtool_get_link_ksettings(&jme->mii_if, cmd); spin_unlock_bh(&jme->phy_lock); return rc; } static int -jme_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +jme_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct jme_adapter *jme = netdev_priv(netdev); int rc, fdc = 0; - if (ethtool_cmd_speed(ecmd) == SPEED_1000 - && ecmd->autoneg != AUTONEG_ENABLE) + if (cmd->base.speed == SPEED_1000 && + cmd->base.autoneg != AUTONEG_ENABLE) return -EINVAL; /* @@ -2676,18 +2676,18 @@ jme_set_settings(struct net_device *netdev, * Hardware would not generate 
link change interrupt. */ if (jme->mii_if.force_media && - ecmd->autoneg != AUTONEG_ENABLE && - (jme->mii_if.full_duplex != ecmd->duplex)) + cmd->base.autoneg != AUTONEG_ENABLE && + (jme->mii_if.full_duplex != cmd->base.duplex)) fdc = 1; spin_lock_bh(&jme->phy_lock); - rc = mii_ethtool_sset(&(jme->mii_if), ecmd); + rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd); spin_unlock_bh(&jme->phy_lock); if (!rc) { if (fdc) jme_reset_link(jme); - jme->old_ecmd = *ecmd; + jme->old_cmd = *cmd; set_bit(JME_FLAG_SSET, &jme->flags); } @@ -2716,7 +2716,7 @@ jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) if (!rc && (cmd == SIOCSMIIREG)) { if (duplex_chg) jme_reset_link(jme); - jme_get_settings(netdev, &jme->old_ecmd); + jme_get_link_ksettings(netdev, &jme->old_cmd); set_bit(JME_FLAG_SSET, &jme->flags); } @@ -2915,8 +2915,6 @@ static const struct ethtool_ops jme_ethtool_ops = { .set_pauseparam = jme_set_pauseparam, .get_wol = jme_get_wol, .set_wol = jme_set_wol, - .get_settings = jme_get_settings, - .set_settings = jme_set_settings, .get_link = jme_get_link, .get_msglevel = jme_get_msglevel, .set_msglevel = jme_set_msglevel, @@ -2924,6 +2922,8 @@ static const struct ethtool_ops jme_ethtool_ops = { .get_eeprom_len = jme_get_eeprom_len, .get_eeprom = jme_get_eeprom, .set_eeprom = jme_set_eeprom, + .get_link_ksettings = jme_get_link_ksettings, + .set_link_ksettings = jme_set_link_ksettings, }; static int @@ -3306,7 +3306,7 @@ jme_resume(struct device *dev) jme_clear_pm_disable_wol(jme); jme_phy_on(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) - jme_set_settings(netdev, &jme->old_ecmd); + jme_set_link_ksettings(netdev, &jme->old_cmd); else jme_reset_phy_processor(jme); jme_phy_calibration(jme); diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h index 58cd67c0c8e4..89535c019f04 100644 --- a/drivers/net/ethernet/jme.h +++ b/drivers/net/ethernet/jme.h @@ -447,7 +447,7 @@ struct jme_adapter { u8 chip_sub_rev; u8 pcirev; u32 msg_enable; - struct ethtool_cmd old_ecmd; + struct ethtool_link_ksettings old_cmd; unsigned int old_mtu; struct dynpcc_info dpi; atomic_t intr_sem; @@ -1270,8 +1270,8 @@ static inline int new_phy_power_ctrl(u8 chip_main_rev) /* * Function prototypes */ -static int jme_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd); +static int jme_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd); static void jme_set_unicastaddr(struct net_device *netdev); static void jme_set_multi(struct net_device *netdev); diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 8037426ec50f..9fae98caf83a 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -464,7 +464,7 @@ static int korina_poll(struct napi_struct *napi, int budget) work_done = korina_rx(dev, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); writel(readl(&lp->rx_dma_regs->dmasm) & ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR), @@ -695,25 +695,27 @@ static void netdev_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct korina_private *lp = netdev_priv(dev); int rc; spin_lock_irq(&lp->lock); - rc = mii_ethtool_gset(&lp->mii_if, cmd); + rc = mii_ethtool_get_link_ksettings(&lp->mii_if, cmd); spin_unlock_irq(&lp->lock); 
return rc; } -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct korina_private *lp = netdev_priv(dev); int rc; spin_lock_irq(&lp->lock); - rc = mii_ethtool_sset(&lp->mii_if, cmd); + rc = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd); spin_unlock_irq(&lp->lock); korina_set_carrier(&lp->mii_if); @@ -729,9 +731,9 @@ static u32 netdev_get_link(struct net_device *dev) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .get_link = netdev_get_link, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int korina_alloc_ring(struct net_device *dev) diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index faea52da8dae..afc810069440 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -156,24 +156,21 @@ ltq_etop_poll_rx(struct napi_struct *napi, int budget) { struct ltq_etop_chan *ch = container_of(napi, struct ltq_etop_chan, napi); - int rx = 0; - int complete = 0; + int work_done = 0; - while ((rx < budget) && !complete) { + while (work_done < budget) { struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; - if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { - ltq_etop_hw_receive(ch); - rx++; - } else { - complete = 1; - } + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C) + break; + ltq_etop_hw_receive(ch); + work_done++; } - if (complete || !rx) { - napi_complete(&ch->napi); + if (work_done < budget) { + napi_complete_done(&ch->napi, work_done); ltq_dma_ack_irq(&ch->dma); } - return rx; + return work_done; } static int diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 1fa7c03edec2..20cb7f0de601 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2319,7 +2319,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) if (work_done < budget) { if (mp->oom) mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); - napi_complete(napi); + napi_complete_done(napi, work_done); wrlp(mp, INT_MASK, mp->int_mask); } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 3607d8febbcf..de6c47744b8e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -224,6 +224,7 @@ #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16) #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2)) #define MVNETA_TXQ_DEC_SENT_SHIFT 16 +#define MVNETA_TXQ_DEC_SENT_MASK 0xff #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2)) #define MVNETA_TXQ_SENT_DESC_SHIFT 16 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000 @@ -525,6 +526,7 @@ struct mvneta_tx_queue { * descriptor ring */ int count; + int pending; int tx_stop_threshold; int tx_wake_threshold; @@ -818,8 +820,9 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, /* Only 255 descriptors can be added at once ; Assume caller * process TX desriptors in quanta less than 256 */ - val = pend_desc; + val = pend_desc + txq->pending; mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + txq->pending = 0; } /* Get pointer to next TX descriptor to be processed (send) by HW */ @@ -1756,8 +1759,10 @@ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct 
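/* [Editor's note] The poll-handler hunks in this section (ixgb, korina,
 * lantiq_etop, mv643xx_eth, mvneta, mvpp2, pxa168_eth) all make the
 * same change: report how much work was actually done via
 * napi_complete_done(napi, work_done) instead of the bare
 * napi_complete(napi), so the core gets accurate numbers for interrupt
 * moderation and busy polling.  The canonical shape, sketched with
 * hypothetical foo_* names:
 */
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	int work_done = foo_process_rx(fp, budget);	/* never exceeds budget */

	if (work_done < budget) {	/* ring drained: leave polling mode */
		napi_complete_done(napi, work_done);
		foo_enable_irqs(fp);	/* re-arm the device interrupt */
	}
	return work_done;
}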
mvneta_port *pp, /* Free tx queue skbuffs */ static void mvneta_txq_bufs_free(struct mvneta_port *pp, - struct mvneta_tx_queue *txq, int num) + struct mvneta_tx_queue *txq, int num, + struct netdev_queue *nq) { + unsigned int bytes_compl = 0, pkts_compl = 0; int i; for (i = 0; i < num; i++) { @@ -1765,6 +1770,11 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, txq->txq_get_index; struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; + if (skb) { + bytes_compl += skb->len; + pkts_compl++; + } + mvneta_txq_inc_get(txq); if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) @@ -1775,6 +1785,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, continue; dev_kfree_skb_any(skb); } + + netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); } /* Handle end of transmission */ @@ -1788,7 +1800,7 @@ static void mvneta_txq_done(struct mvneta_port *pp, if (!tx_done) return; - mvneta_txq_bufs_free(pp, txq, tx_done); + mvneta_txq_bufs_free(pp, txq, tx_done, nq); txq->count -= tx_done; @@ -2398,12 +2410,18 @@ out: struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); - txq->count += frags; - mvneta_txq_pend_desc_add(pp, txq, frags); + netdev_tx_sent_queue(nq, len); + txq->count += frags; if (txq->count >= txq->tx_stop_threshold) netif_tx_stop_queue(nq); + if (!skb->xmit_more || netif_xmit_stopped(nq) || + txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) + mvneta_txq_pend_desc_add(pp, txq, frags); + else + txq->pending += frags; + u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += len; @@ -2422,9 +2440,10 @@ static void mvneta_txq_done_force(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { + struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); int tx_done = txq->count; - mvneta_txq_bufs_free(pp, txq, tx_done); + mvneta_txq_bufs_free(pp, txq, tx_done, nq); /* reset txq */ txq->count = 0; @@ -2748,11 +2767,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget) rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]); } - budget -= rx_done; - - if (budget > 0) { + if (rx_done < budget) { cause_rx_tx = 0; - napi_complete(napi); + napi_complete_done(napi, rx_done); if (pp->neta_armada3700) { unsigned long flags; @@ -2950,6 +2967,8 @@ static int mvneta_txq_init(struct mvneta_port *pp, static void mvneta_txq_deinit(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { + struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); + kfree(txq->tx_skb); if (txq->tso_hdrs) @@ -2961,6 +2980,8 @@ static void mvneta_txq_deinit(struct mvneta_port *pp, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); + netdev_tx_reset_queue(nq); + txq->descs = NULL; txq->last_desc = 0; txq->next_desc_to_proc = 0; diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 69db40e1a4e1..c2fd7c36f927 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -5405,7 +5405,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) if (budget > 0) { cause_rx = 0; - napi_complete(napi); + napi_complete_done(napi, rx_done); mvpp2_interrupts_enable(port); } diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 3af2814ada23..3376a19f1e19 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1261,7 +1261,7 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget) } work_done = rxq_process(dev, 
budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); wrl(pep, INT_MASK, ALL_INTS); } diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 9146a514fb33..81106b73e468 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -300,65 +300,76 @@ static u32 skge_supported_modes(const struct skge_hw *hw) return supported; } -static int skge_get_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) +static int skge_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; + u32 supported, advertising; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = skge_supported_modes(hw); + supported = skge_supported_modes(hw); if (hw->copper) { - ecmd->port = PORT_TP; - ecmd->phy_address = hw->phy_addr; + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; } else - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; + + advertising = skge->advertising; + cmd->base.autoneg = skge->autoneg; + cmd->base.speed = skge->speed; + cmd->base.duplex = skge->duplex; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); - ecmd->advertising = skge->advertising; - ecmd->autoneg = skge->autoneg; - ethtool_cmd_speed_set(ecmd, skge->speed); - ecmd->duplex = skge->duplex; return 0; } -static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int skge_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct skge_port *skge = netdev_priv(dev); const struct skge_hw *hw = skge->hw; u32 supported = skge_supported_modes(hw); int err = 0; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); - if (ecmd->autoneg == AUTONEG_ENABLE) { - ecmd->advertising = supported; + if (cmd->base.autoneg == AUTONEG_ENABLE) { + advertising = supported; skge->duplex = -1; skge->speed = -1; } else { u32 setting; - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; switch (speed) { case SPEED_1000: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_1000baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_1000baseT_Half; else return -EINVAL; break; case SPEED_100: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_100baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_100baseT_Half; else return -EINVAL; break; case SPEED_10: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_10baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_10baseT_Half; else return -EINVAL; @@ -371,11 +382,11 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return -EINVAL; skge->speed = speed; - skge->duplex = ecmd->duplex; + skge->duplex = cmd->base.duplex; } - skge->autoneg = ecmd->autoneg; - skge->advertising = ecmd->advertising; + skge->autoneg = cmd->base.autoneg; + skge->advertising = advertising; if (netif_running(dev)) { skge_down(dev); @@ -875,8 +886,6 @@ static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom 
*eeprom } static const struct ethtool_ops skge_ethtool_ops = { - .get_settings = skge_get_settings, - .set_settings = skge_set_settings, .get_drvinfo = skge_get_drvinfo, .get_regs_len = skge_get_regs_len, .get_regs = skge_get_regs, @@ -899,6 +908,8 @@ static const struct ethtool_ops skge_ethtool_ops = { .set_phys_id = skge_set_phys_id, .get_sset_count = skge_get_sset_count, .get_ethtool_stats = skge_get_ethtool_stats, + .get_link_ksettings = skge_get_link_ksettings, + .set_link_ksettings = skge_set_link_ksettings, }; /* diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 18d6336fa162..2b2cc3f3ca10 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2666,7 +2666,7 @@ static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port, sky2->rx_stats.bytes += bytes; u64_stats_update_end(&sky2->rx_stats.syncp); - dev->last_rx = jiffies; + sky2->last_rx = jiffies; sky2_rx_update(netdev_priv(dev), rxqaddr[port]); } @@ -2953,7 +2953,7 @@ static int sky2_rx_hung(struct net_device *dev) u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL)); /* If idle and MAC or PCI is stuck */ - if (sky2->check.last == dev->last_rx && + if (sky2->check.last == sky2->last_rx && ((mac_rp == sky2->check.mac_rp && mac_lev != 0 && mac_lev >= sky2->check.mac_lev) || /* Check if the PCI RX hang */ @@ -2965,7 +2965,7 @@ static int sky2_rx_hung(struct net_device *dev) fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP))); return 1; } else { - sky2->check.last = dev->last_rx; + sky2->check.last = sky2->last_rx; sky2->check.mac_rp = mac_rp; sky2->check.mac_lev = mac_lev; sky2->check.fifo_rp = fifo_rp; @@ -3589,47 +3589,59 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw) | SUPPORTED_1000baseT_Full; } -static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int sky2_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; + u32 supported, advertising; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = sky2_supported_modes(hw); - ecmd->phy_address = PHY_ADDR_MARV; + supported = sky2_supported_modes(hw); + cmd->base.phy_address = PHY_ADDR_MARV; if (sky2_is_copper(hw)) { - ecmd->port = PORT_TP; - ethtool_cmd_speed_set(ecmd, sky2->speed); - ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP; + cmd->base.port = PORT_TP; + cmd->base.speed = sky2->speed; + supported |= SUPPORTED_Autoneg | SUPPORTED_TP; } else { - ethtool_cmd_speed_set(ecmd, SPEED_1000); - ecmd->port = PORT_FIBRE; - ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; + cmd->base.speed = SPEED_1000; + cmd->base.port = PORT_FIBRE; + supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; } - ecmd->advertising = sky2->advertising; - ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) + advertising = sky2->advertising; + cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; - ecmd->duplex = sky2->duplex; + cmd->base.duplex = sky2->duplex; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } -static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int sky2_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct sky2_port *sky2 = netdev_priv(dev); const struct sky2_hw *hw = sky2->hw; u32 supported = sky2_supported_modes(hw); + u32 new_advertising; + + ethtool_convert_link_mode_to_legacy_u32(&new_advertising, + cmd->link_modes.advertising); - if (ecmd->autoneg == AUTONEG_ENABLE) { - if (ecmd->advertising & ~supported) + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if (new_advertising & ~supported) return -EINVAL; if (sky2_is_copper(hw)) - sky2->advertising = ecmd->advertising | + sky2->advertising = new_advertising | ADVERTISED_TP | ADVERTISED_Autoneg; else - sky2->advertising = ecmd->advertising | + sky2->advertising = new_advertising | ADVERTISED_FIBRE | ADVERTISED_Autoneg; @@ -3638,30 +3650,30 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) sky2->speed = -1; } else { u32 setting; - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; switch (speed) { case SPEED_1000: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_1000baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_1000baseT_Half; else return -EINVAL; break; case SPEED_100: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_100baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_100baseT_Half; else return -EINVAL; break; case SPEED_10: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_10baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_10baseT_Half; else return -EINVAL; @@ -3674,7 +3686,7 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return -EINVAL; sky2->speed = speed; - sky2->duplex = ecmd->duplex; + sky2->duplex = cmd->base.duplex; sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; } @@ -4405,8 +4417,6 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features) } static const struct ethtool_ops sky2_ethtool_ops = { - .get_settings = sky2_get_settings, - .set_settings = sky2_set_settings, .get_drvinfo = sky2_get_drvinfo, .get_wol = sky2_get_wol, .set_wol = sky2_set_wol, @@ -4429,6 +4439,8 @@ static const struct ethtool_ops sky2_ethtool_ops = { .set_phys_id = sky2_set_phys_id, .get_sset_count = sky2_get_sset_count, .get_ethtool_stats = sky2_get_ethtool_stats, + .get_link_ksettings = sky2_get_link_ksettings, + .set_link_ksettings = sky2_set_link_ksettings, }; #ifdef CONFIG_SKY2_DEBUG diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index ec6dcd80152b..0fe160796842 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2247,6 +2247,7 @@ struct sky2_port { u16 rx_data_size; u16 rx_nfrags; + unsigned long last_rx; struct { unsigned long last; u32 mac_rp; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 
25ae0c5bce3a..9e757684816d 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2515,7 +2515,7 @@ static int mtk_remove(struct platform_device *pdev) } const struct of_device_id of_mtk_match[] = { - { .compatible = "mediatek,mt7623-eth" }, + { .compatible = "mediatek,mt2701-eth" }, {}, }; MODULE_DEVICE_TABLE(of, of_mtk_match); diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index a849da92f857..6b8635378f1f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) { struct mlx4_cq *cq; + rcu_read_lock(); cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, cqn & (dev->caps.num_cqs - 1)); + rcu_read_unlock(); + if (!cq) { mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn); return; } + /* Accessing the CQ outside of rcu_read_lock is safe, because + * the CQ is freed only after interrupt handling is completed. + */ ++cq->arm_sn; cq->comp(cq); @@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; struct mlx4_cq *cq; - spin_lock(&cq_table->lock); - + rcu_read_lock(); cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); - if (cq) - atomic_inc(&cq->refcount); - - spin_unlock(&cq_table->lock); + rcu_read_unlock(); if (!cq) { - mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn); + mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn); return; } + /* Accessing the CQ outside of rcu_read_lock is safe, because + * the CQ is freed only after interrupt handling is completed. + */ cq->event(cq, event_type); - - if (atomic_dec_and_test(&cq->refcount)) - complete(&cq->free); } static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, @@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, if (err) return err; - spin_lock_irq(&cq_table->lock); + spin_lock(&cq_table->lock); err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); - spin_unlock_irq(&cq_table->lock); + spin_unlock(&cq_table->lock); if (err) goto err_icm; @@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, return 0; err_radix: - spin_lock_irq(&cq_table->lock); + spin_lock(&cq_table->lock); radix_tree_delete(&cq_table->tree, cq->cqn); - spin_unlock_irq(&cq_table->lock); + spin_unlock(&cq_table->lock); err_icm: mlx4_cq_free_icm(dev, cq->cqn); @@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) if (err) mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); + spin_lock(&cq_table->lock); + radix_tree_delete(&cq_table->tree, cq->cqn); + spin_unlock(&cq_table->lock); + synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq); if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq != priv->eq_table.eq[MLX4_EQ_ASYNC].irq) synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq); - spin_lock_irq(&cq_table->lock); - radix_tree_delete(&cq_table->tree, cq->cqn); - spin_unlock_irq(&cq_table->lock); - if (atomic_dec_and_test(&cq->refcount)) complete(&cq->free); wait_for_completion(&cq->free); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index d9c9f86a30df..ca730d4abbb4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -902,6 +902,7 @@ mlx4_en_set_link_ksettings(struct
net_device *dev, struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_ptys_reg ptys_reg; __be32 proto_admin; + u8 cur_autoneg; int ret; u32 ptys_adv = ethtool2ptys_link_modes( @@ -931,10 +932,21 @@ mlx4_en_set_link_ksettings(struct net_device *dev, return 0; } - proto_admin = link_ksettings->base.autoneg == AUTONEG_ENABLE ? - cpu_to_be32(ptys_adv) : - speed_set_ptys_admin(priv, speed, - ptys_reg.eth_proto_cap); + cur_autoneg = ptys_reg.flags & MLX4_PTYS_AN_DISABLE_ADMIN ? + AUTONEG_DISABLE : AUTONEG_ENABLE; + + if (link_ksettings->base.autoneg == AUTONEG_DISABLE) { + proto_admin = speed_set_ptys_admin(priv, speed, + ptys_reg.eth_proto_cap); + if ((be32_to_cpu(proto_admin) & + (MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII) | + MLX4_PROT_MASK(MLX4_1000BASE_KX))) && + (ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP)) + ptys_reg.flags |= MLX4_PTYS_AN_DISABLE_ADMIN; + } else { + proto_admin = cpu_to_be32(ptys_adv); + ptys_reg.flags &= ~MLX4_PTYS_AN_DISABLE_ADMIN; + } proto_admin &= ptys_reg.eth_proto_cap; if (!proto_admin) { @@ -942,7 +954,9 @@ mlx4_en_set_link_ksettings(struct net_device *dev, return -EINVAL; /* nothing to change due to bad input */ } - if (proto_admin == ptys_reg.eth_proto_admin) + if ((proto_admin == ptys_reg.eth_proto_admin) && + ((ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP) && + (link_ksettings->base.autoneg == cur_autoneg))) return 0; /* Nothing to change */ en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", @@ -1732,8 +1746,6 @@ static void mlx4_en_get_channels(struct net_device *dev, { struct mlx4_en_priv *priv = netdev_priv(dev); - memset(channel, 0, sizeof(*channel)); - channel->max_rx = MAX_RX_RINGS; channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP; @@ -1752,10 +1764,7 @@ static int mlx4_en_set_channels(struct net_device *dev, int xdp_count; int err = 0; - if (channel->other_count || channel->combined_count || - channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP || - channel->rx_count > MAX_RX_RINGS || - !channel->tx_count || !channel->rx_count) + if (!channel->tx_count || !channel->rx_count) return -EINVAL; tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); @@ -1793,7 +1802,7 @@ static int mlx4_en_set_channels(struct net_device *dev, netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); netif_set_real_num_rx_queues(dev, priv->rx_ring_num); - if (dev->num_tc) + if (netdev_get_num_tc(dev)) mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP); en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]); @@ -1985,7 +1994,7 @@ static int mlx4_en_get_module_info(struct net_device *dev, modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; break; default: - return -ENOSYS; + return -EINVAL; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 06ef23f040a4..60a021c34881 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1695,6 +1695,14 @@ int mlx4_en_start_port(struct net_device *dev) priv->port, err); goto tx_err; } + + err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu); + if (err) { + en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n", + dev->mtu, priv->port, err); + goto tx_err; + } + /* Set default qp number */ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); if (err) { @@ -1746,8 +1754,11 @@ int mlx4_en_start_port(struct net_device *dev) /* Process all completions if exist to prevent * the queues freezing if they are full */ - for (i = 0; i < priv->rx_ring_num; i++) + for (i = 0; i < 
priv->rx_ring_num; i++) { + local_bh_disable(); napi_schedule(&priv->rx_cq[i]->napi); + local_bh_enable(); + } netif_tx_start_all_queues(dev); netif_device_attach(dev); @@ -2275,7 +2286,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) if (priv->tx_ring_num[TX_XDP] && !mlx4_en_check_xdp_mtu(dev, new_mtu)) - return -ENOTSUPP; + return -EOPNOTSUPP; dev->mtu = new_mtu; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h index 040da4b16b1c..930f961fee42 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.h +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h @@ -35,7 +35,6 @@ #define _MLX4_EN_PORT_H_ -#define SET_PORT_GEN_ALL_VALID 0x7 #define SET_PORT_PROMISC_SHIFT 31 #define SET_PORT_MC_PROMISC_SHIFT 30 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index eac527e25ec9..f15ddba3659a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -33,6 +33,7 @@ #include <net/busy_poll.h> #include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <linux/mlx4/cq.h> #include <linux/slab.h> #include <linux/mlx4/qp.h> @@ -706,7 +707,8 @@ static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, do { if (mlx4_en_prepare_rx_desc(priv, ring, ring->prod & ring->size_mask, - GFP_ATOMIC | __GFP_COLD)) + GFP_ATOMIC | __GFP_COLD | + __GFP_MEMALLOC)) break; ring->prod++; } while (--missing); @@ -925,10 +927,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud length, cq->ring, &doorbell_pending))) goto consumed; + trace_xdp_exception(dev, xdp_prog, act); goto xdp_drop_no_cnt; /* Drop on xmit failure */ default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(dev, xdp_prog, act); case XDP_DROP: ring->xdp_drop++; xdp_drop_no_cnt: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 5886ad78058f..3ed42199d3f1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -710,7 +710,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, u16 rings_p_up = priv->num_tx_rings_p_up; u8 up = 0; - if (dev->num_tc) + if (netdev_get_num_tc(dev)) return skb_tx_hash(dev, skb); if (skb_vlan_tag_present(skb)) diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index cd3638e6fe25..0509996957d9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) break; case MLX4_EVENT_TYPE_SRQ_LIMIT: - mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", - __func__); + mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. 
srq_no=0x%x, eq 0x%x\n", + __func__, be32_to_cpu(eqe->event.srq.srqn), + eq->eqn); case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: if (mlx4_is_master(dev)) { /* forward only to slave owning the SRQ */ @@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eq->eqn, eq->cons_index, ret); break; } - mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", - __func__, slave, - be32_to_cpu(eqe->event.srq.srqn), - eqe->type, eqe->subtype); + if (eqe->type == + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) + mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", + __func__, slave, + be32_to_cpu(eqe->event.srq.srqn), + eqe->type, eqe->subtype); if (!ret && slave != dev->caps.function) { - mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", - __func__, eqe->type, - eqe->subtype, slave); + if (eqe->type == + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) + mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", + __func__, eqe->type, + eqe->subtype, slave); mlx4_slave_event(dev, slave, eqe); break; } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 84bab9f0732e..3fe885ce1902 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -672,7 +672,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); func_cap->physical_port = field; if (func_cap->physical_port != gen_or_port) { - err = -ENOSYS; + err = -EINVAL; goto out; } @@ -1875,7 +1875,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = - (ilog2(cache_line_size()) - 4) << 5; + ((ilog2(cache_line_size()) - 4) << 5) | (1 << 4); #if defined(__LITTLE_ENDIAN) *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); @@ -2983,7 +2983,7 @@ static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit) return PTR_ERR(mailbox); context = mailbox->buf; - context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID; + context->flags2 |= SET_PORT_GEN_PHV_VALID; if (phv_bit) context->phv_en |= SET_PORT_GEN_PHV_EN; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index bffa6f345f2f..15ef787e71ba 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -838,7 +838,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) */ if (hca_param.global_caps) { mlx4_err(dev, "Unknown hca global capabilities\n"); - return -ENOSYS; + return -EINVAL; } mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; @@ -896,7 +896,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) PF_CONTEXT_BEHAVIOUR_MASK) { mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); - return -ENOSYS; + return -EINVAL; } dev->caps.num_ports = func_cap.num_ports; @@ -3492,7 +3492,7 @@ slave_start: mlx4_enable_msi_x(dev); if ((mlx4_is_mfunc(dev)) && !(dev->flags & MLX4_FLAG_MSI_X)) { - err = -ENOSYS; + err = -EOPNOTSUPP; mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); goto err_free_eq; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 88ee7d8a5923..7a495090b0bc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -487,6 +487,7 @@ struct mlx4_slave_state { bool 
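/* [Editor's note] The mlx4 hunks below add a per-function user_mtu next
 * to the existing per-function mtu, and keep the port programmed with
 * the maximum value across all functions.  The update rule used by
 * mlx4_en_set_port_mtu()/mlx4_en_set_port_user_mtu() is: record the
 * slave's new value; a larger value becomes the new maximum; if the
 * previous maximum-holder lowered its value, rescan every slave.  A
 * standalone sketch of that aggregation (all names hypothetical):
 */
static u16 foo_update_port_max(u16 *per_slave, int num_slaves, int slave,
			       u16 new_val, u16 cur_max)
{
	u16 prev = per_slave[slave];
	int i;

	per_slave[slave] = new_val;
	if (new_val > cur_max)
		return new_val;		/* simple case: new maximum */
	if (new_val < prev && prev == cur_max) {
		cur_max = 0;		/* old max may be stale: rescan */
		for (i = 0; i < num_slaves; i++)
			if (per_slave[i] > cur_max)
				cur_max = per_slave[i];
	}
	return cur_max;
}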
vst_qinq_supported; u8 function; dma_addr_t vhcr_dma; + u16 user_mtu[MLX4_MAX_PORTS + 1]; u16 mtu[MLX4_MAX_PORTS + 1]; __be32 ib_cap_mask[MLX4_MAX_PORTS + 1]; struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES]; @@ -590,6 +591,7 @@ struct mlx4_mfunc_master_ctx { struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; int init_port_ref[MLX4_MAX_PORTS + 1]; u16 max_mtu[MLX4_MAX_PORTS + 1]; + u16 max_user_mtu[MLX4_MAX_PORTS + 1]; u8 pptx; u8 pprx; int disable_mcast_ref[MLX4_MAX_PORTS + 1]; @@ -774,7 +776,9 @@ struct mlx4_vlan_table { int max; }; -#define SET_PORT_GEN_ALL_VALID 0x7 +#define SET_PORT_GEN_ALL_VALID (MLX4_FLAG_V_MTU_MASK | \ + MLX4_FLAG_V_PPRX_MASK | \ + MLX4_FLAG_V_PPTX_MASK) #define SET_PORT_PROMISC_SHIFT 31 #define SET_PORT_MC_PROMISC_SHIFT 30 @@ -787,7 +791,7 @@ enum { struct mlx4_set_port_general_context { u16 reserved1; - u8 v_ignore_fcs; + u8 flags2; u8 flags; union { u8 ignore_fcs; @@ -803,7 +807,8 @@ struct mlx4_set_port_general_context { u16 reserved4; u32 reserved5; u8 phv_en; - u8 reserved6[3]; + u8 reserved6[5]; + __be16 user_mtu; }; struct mlx4_set_port_rqp_calc_context { diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index b656dd5772e5..5053c949148f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -50,7 +50,11 @@ #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL -#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 +#define MLX4_FLAG2_V_IGNORE_FCS_MASK BIT(1) +#define MLX4_FLAG2_V_USER_MTU_MASK BIT(5) +#define MLX4_FLAG_V_MTU_MASK BIT(0) +#define MLX4_FLAG_V_PPRX_MASK BIT(1) +#define MLX4_FLAG_V_PPTX_MASK BIT(2) #define MLX4_IGNORE_FCS_MASK 0x1 #define MLX4_TC_MAX_NUMBER 8 @@ -1239,13 +1243,96 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) return; } +static void +mlx4_en_set_port_mtu(struct mlx4_dev *dev, int slave, int port, + struct mlx4_set_port_general_context *gen_context) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + struct mlx4_slave_state *slave_st = &master->slave_state[slave]; + u16 mtu, prev_mtu; + + /* Mtu is configured as the max MTU among all + * the functions on the port. + */ + mtu = be16_to_cpu(gen_context->mtu); + mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] + + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + prev_mtu = slave_st->mtu[port]; + slave_st->mtu[port] = mtu; + if (mtu > master->max_mtu[port]) + master->max_mtu[port] = mtu; + if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) { + int i; + + slave_st->mtu[port] = mtu; + master->max_mtu[port] = mtu; + for (i = 0; i < dev->num_slaves; i++) + master->max_mtu[port] = + max_t(u16, master->max_mtu[port], + master->slave_state[i].mtu[port]); + } + gen_context->mtu = cpu_to_be16(master->max_mtu[port]); +} + +static void +mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port, + struct mlx4_set_port_general_context *gen_context) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + struct mlx4_slave_state *slave_st = &master->slave_state[slave]; + u16 user_mtu, prev_user_mtu; + + /* User Mtu is configured as the max USER_MTU among all + * the functions on the port. 
+ */ + user_mtu = be16_to_cpu(gen_context->user_mtu); + user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]); + prev_user_mtu = slave_st->user_mtu[port]; + slave_st->user_mtu[port] = user_mtu; + if (user_mtu > master->max_user_mtu[port]) + master->max_user_mtu[port] = user_mtu; + if (user_mtu < prev_user_mtu && + prev_user_mtu == master->max_user_mtu[port]) { + int i; + + slave_st->user_mtu[port] = user_mtu; + master->max_user_mtu[port] = user_mtu; + for (i = 0; i < dev->num_slaves; i++) + master->max_user_mtu[port] = + max_t(u16, master->max_user_mtu[port], + master->slave_state[i].user_mtu[port]); + } + gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]); +} + +static void +mlx4_en_set_port_global_pause(struct mlx4_dev *dev, int slave, + struct mlx4_set_port_general_context *gen_context) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + + /* Slave cannot change Global Pause configuration */ + if (slave != mlx4_master_func_num(dev) && + (gen_context->pptx != master->pptx || + gen_context->pprx != master->pprx)) { + gen_context->pptx = master->pptx; + gen_context->pprx = master->pprx; + mlx4_warn(dev, "denying Global Pause change for slave:%d\n", + slave); + } else { + master->pptx = gen_context->pptx; + master->pprx = gen_context->pprx; + } +} + static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, u8 op_mod, struct mlx4_cmd_mailbox *inbox) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_port_info *port_info; - struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; - struct mlx4_slave_state *slave_st = &master->slave_state[slave]; struct mlx4_set_port_rqp_calc_context *qpn_context; struct mlx4_set_port_general_context *gen_context; struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1; @@ -1256,7 +1343,6 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, int base; u32 in_modifier; u32 promisc; - u16 mtu, prev_mtu; int err; int i, j; int offset; @@ -1269,7 +1355,9 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, is_eth = op_mod; port_info = &priv->port[port]; - /* Slaves cannot perform SET_PORT operations except changing MTU */ + /* Slaves cannot perform SET_PORT operations, + * except for changing MTU and USER_MTU. + */ if (is_eth) { if (slave != dev->caps.function && in_modifier != MLX4_SET_PORT_GENERAL && @@ -1297,40 +1385,20 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, break; case MLX4_SET_PORT_GENERAL: gen_context = inbox->buf; - /* Mtu is configured as the max MTU among all the - * the functions on the port. 
*/ - mtu = be16_to_cpu(gen_context->mtu); - mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] + - ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); - prev_mtu = slave_st->mtu[port]; - slave_st->mtu[port] = mtu; - if (mtu > master->max_mtu[port]) - master->max_mtu[port] = mtu; - if (mtu < prev_mtu && prev_mtu == - master->max_mtu[port]) { - slave_st->mtu[port] = mtu; - master->max_mtu[port] = mtu; - for (i = 0; i < dev->num_slaves; i++) { - master->max_mtu[port] = - max(master->max_mtu[port], - master->slave_state[i].mtu[port]); - } - } - gen_context->mtu = cpu_to_be16(master->max_mtu[port]); - /* Slave cannot change Global Pause configuration */ - if (slave != mlx4_master_func_num(dev) && - ((gen_context->pptx != master->pptx) || - (gen_context->pprx != master->pprx))) { - gen_context->pptx = master->pptx; - gen_context->pprx = master->pprx; - mlx4_warn(dev, - "denying Global Pause change for slave:%d\n", - slave); - } else { - master->pptx = gen_context->pptx; - master->pprx = gen_context->pprx; - } + if (gen_context->flags & MLX4_FLAG_V_MTU_MASK) + mlx4_en_set_port_mtu(dev, slave, port, + gen_context); + + if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK) + mlx4_en_set_port_user_mtu(dev, slave, port, + gen_context); + + if (gen_context->flags & + (MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK)) + mlx4_en_set_port_global_pause(dev, slave, + gen_context); + break; case MLX4_SET_PORT_GID_TABLE: /* change to MULTIPLE entries: number of guest's gids @@ -1608,6 +1676,30 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, } EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); +int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_general_context *context; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + context->flags2 |= MLX4_FLAG2_V_USER_MTU_MASK; + context->user_mtu = cpu_to_be16(user_mtu); + + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu); + int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value) { struct mlx4_cmd_mailbox *mailbox; @@ -1619,7 +1711,7 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value) if (IS_ERR(mailbox)) return PTR_ERR(mailbox); context = mailbox->buf; - context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK; + context->flags2 |= MLX4_FLAG2_V_IGNORE_FCS_MASK; if (ignore_fcs_value) context->ignore_fcs |= MLX4_IGNORE_FCS_MASK; else diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 56185a0b827d..6fe9f76ae656 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -77,6 +77,7 @@ struct res_common { int from_state; int to_state; int removing; + const char *func_name; }; enum { @@ -236,8 +237,8 @@ static void *res_tracker_lookup(struct rb_root *root, u64 res_id) struct rb_node *node = root->rb_node; while (node) { - struct res_common *res = container_of(node, struct res_common, - node); + struct res_common *res = rb_entry(node, struct res_common, + node); if (res_id < res->res_id) node = node->rb_left; @@ -255,8 +256,8 @@ static int res_tracker_insert(struct rb_root *root, 
struct res_common *res) /* Figure out where to put new node */ while (*new) { - struct res_common *this = container_of(*new, struct res_common, - node); + struct res_common *this = rb_entry(*new, struct res_common, + node); parent = *new; if (res->res_id < this->res_id) @@ -837,6 +838,36 @@ static int mpt_mask(struct mlx4_dev *dev) return dev->caps.num_mpts - 1; } +static const char *mlx4_resource_type_to_str(enum mlx4_resource t) +{ + switch (t) { + case RES_QP: + return "QP"; + case RES_CQ: + return "CQ"; + case RES_SRQ: + return "SRQ"; + case RES_XRCD: + return "XRCD"; + case RES_MPT: + return "MPT"; + case RES_MTT: + return "MTT"; + case RES_MAC: + return "MAC"; + case RES_VLAN: + return "VLAN"; + case RES_COUNTER: + return "COUNTER"; + case RES_FS_RULE: + return "FS_RULE"; + case RES_EQ: + return "EQ"; + default: + return "INVALID RESOURCE"; + } +} + static void *find_res(struct mlx4_dev *dev, u64 res_id, enum mlx4_resource type) { @@ -846,9 +877,9 @@ static void *find_res(struct mlx4_dev *dev, u64 res_id, res_id); } -static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, - enum mlx4_resource type, - void *res) +static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id, + enum mlx4_resource type, + void *res, const char *func_name) { struct res_common *r; int err = 0; @@ -861,6 +892,10 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, } if (r->state == RES_ANY_BUSY) { + mlx4_warn(dev, + "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n", + func_name, slave, res_id, mlx4_resource_type_to_str(type), + r->func_name); err = -EBUSY; goto exit; } @@ -872,6 +907,7 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, r->from_state = r->state; r->state = RES_ANY_BUSY; + r->func_name = func_name; if (res) *((struct res_common **)res) = r; @@ -881,6 +917,9 @@ exit: return err; } +#define get_res(dev, slave, res_id, type, res) \ + _get_res((dev), (slave), (res_id), (type), (res), __func__) + int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, enum mlx4_resource type, u64 res_id, int *slave) @@ -911,8 +950,10 @@ static void put_res(struct mlx4_dev *dev, int slave, u64 res_id, spin_lock_irq(mlx4_tlock(dev)); r = find_res(dev, res_id, type); - if (r) + if (r) { r->state = r->from_state; + r->func_name = ""; + } spin_unlock_irq(mlx4_tlock(dev)); } @@ -1396,7 +1437,7 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra) case RES_MTT: return remove_mtt_ok((struct res_mtt *)res, extra); case RES_MAC: - return -ENOSYS; + return -EOPNOTSUPP; case RES_EQ: return remove_eq_ok((struct res_eq *)res); case RES_COUNTER: @@ -2980,6 +3021,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, put_res(dev, slave, srqn, RES_SRQ); qp->srq = srq; } + + /* Save param3 for dynamic changes from VST back to VGT */ + qp->param3 = qpc->param3; put_res(dev, slave, rcqn, RES_CQ); put_res(dev, slave, mtt_base, RES_MTT); res_end_move(dev, slave, RES_QP, qpn); @@ -3772,7 +3816,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, int qpn = vhcr->in_modifier & 0x7fffff; struct res_qp *qp; u8 orig_sched_queue; - __be32 orig_param3 = qpc->param3; u8 orig_vlan_control = qpc->pri_path.vlan_control; u8 orig_fvl_rx = qpc->pri_path.fvl_rx; u8 orig_pri_path_fl = qpc->pri_path.fl; @@ -3814,7 +3857,6 @@ out: */ if (!err) { qp->sched_queue = orig_sched_queue; - qp->param3 = orig_param3; qp->vlan_control = orig_vlan_control; qp->fvl_rx = orig_fvl_rx; qp->pri_path_fl = orig_pri_path_fl; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index a473cea10c16..46f728de9e76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -51,6 +51,9 @@ #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) +#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) +#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) + #define MLX5E_MAX_NUM_TC 8 #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 @@ -98,6 +101,7 @@ #define MLX5E_LOG_INDIR_RQT_SIZE 0x7 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) +#define MLX5E_MIN_NUM_CHANNELS 0x1 #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 @@ -259,6 +263,7 @@ struct mlx5e_tstamp { struct mlx5_core_dev *mdev; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; + u8 *pps_pin_caps; }; enum { @@ -369,6 +374,7 @@ struct mlx5e_rq { unsigned long state; int ix; + u16 rx_headroom; struct mlx5e_rx_am am; /* Adaptive Moderation */ struct bpf_prog *xdp_prog; @@ -567,8 +573,9 @@ struct mlx5e_vlan_table { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID]; struct mlx5_flow_handle *untagged_rule; - struct mlx5_flow_handle *any_vlan_rule; - bool filter_disabled; + struct mlx5_flow_handle *any_cvlan_rule; + struct mlx5_flow_handle *any_svlan_rule; + bool filter_disabled; }; struct mlx5e_l2_table { @@ -776,9 +783,11 @@ void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp, struct skb_shared_hwtstamps *hwts); void mlx5e_timestamp_init(struct mlx5e_priv *priv); void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv); +void mlx5e_pps_event_handler(struct mlx5e_priv *priv, + struct ptp_clock_event *event); int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr); int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr); -void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val); +void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val); int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); @@ -839,12 +848,6 @@ static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); } -static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) -{ - return min_t(int, mdev->priv.eq_table.num_comp_vectors, - MLX5E_MAX_NUM_CHANNELS); -} - extern const struct ethtool_ops mlx5e_ethtool_ops; #ifdef CONFIG_MLX5_CORE_EN_DCB extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index 746a92c13644..37e66eef6fb5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -37,6 +37,22 @@ enum { MLX5E_CYCLES_SHIFT = 23 }; +enum { + MLX5E_PIN_MODE_IN = 0x0, + MLX5E_PIN_MODE_OUT = 0x1, +}; + +enum { + MLX5E_OUT_PATTERN_PULSE = 0x0, + MLX5E_OUT_PATTERN_PERIODIC = 0x1, +}; + +enum { + MLX5E_EVENT_MODE_DISABLE = 0x0, + MLX5E_EVENT_MODE_REPETETIVE = 0x1, + MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2, +}; + void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, struct skb_shared_hwtstamps *hwts) { @@ -90,11 +106,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) return -ERANGE; } + 
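/* [Editor's note] The MLX5E_HW2SW_MTU/MLX5E_SW2HW_MTU macros added to
 * en.h above convert between the stack's L3 MTU and the MTU programmed
 * into the device, which also counts the Ethernet header (ETH_HLEN,
 * 14B), one VLAN tag (VLAN_HLEN, 4B) and the FCS (ETH_FCS_LEN, 4B).
 * A software MTU of 1500 therefore corresponds to a hardware MTU of
 * 1500 + 14 + 4 + 4 = 1522.  Self-contained restatement with
 * hypothetical FOO_* names:
 */
#define FOO_WIRE_OVERHEAD	(14 + 4 + 4)	/* ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN */
#define FOO_SW2HW_MTU(swmtu)	((swmtu) + FOO_WIRE_OVERHEAD)
#define FOO_HW2SW_MTU(hwmtu)	((hwmtu) - FOO_WIRE_OVERHEAD)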
mutex_lock(&priv->state_lock); /* RX HW timestamp */ switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: /* Reset CQE compression to Admin default */ - mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_def); + mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def); break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: @@ -112,14 +129,16 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: /* Disable CQE compression */ netdev_warn(dev, "Disabling cqe compression"); - mlx5e_modify_rx_cqe_compression(priv, false); + mlx5e_modify_rx_cqe_compression_locked(priv, false); config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: + mutex_unlock(&priv->state_lock); return -ERANGE; } memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config)); + mutex_unlock(&priv->state_lock); return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; @@ -189,6 +208,18 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) int neg_adj = 0; struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = + container_of(tstamp, struct mlx5e_priv, tstamp); + + if (MLX5_CAP_GEN(priv->mdev, pps_modify)) { + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + /* For future use need to add a loop for finding all 1PPS out pins */ + MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); + MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF); + + mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + } if (delta < 0) { neg_adj = 1; @@ -208,6 +239,124 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) return 0; } +static int mlx5e_extts_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct mlx5e_tstamp *tstamp = + container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = + container_of(tstamp, struct mlx5e_priv, tstamp); + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + u8 pattern = 0; + int pin = -1; + int err = 0; + + if (!MLX5_CAP_GEN(priv->mdev, pps) || + !MLX5_CAP_GEN(priv->mdev, pps_modify)) + return -EOPNOTSUPP; + + if (rq->extts.index >= tstamp->ptp_info.n_pins) + return -EINVAL; + + if (on) { + pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index); + if (pin < 0) + return -EBUSY; + } + + if (rq->extts.flags & PTP_FALLING_EDGE) + pattern = 1; + + MLX5_SET(mtpps_reg, in, pin, pin); + MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN); + MLX5_SET(mtpps_reg, in, pattern, pattern); + MLX5_SET(mtpps_reg, in, enable, on); + + err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + if (err) + return err; + + return mlx5_set_mtppse(priv->mdev, pin, 0, + MLX5E_EVENT_MODE_REPETETIVE & on); +} + +static int mlx5e_perout_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct mlx5e_tstamp *tstamp = + container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = + container_of(tstamp, struct mlx5e_priv, tstamp); + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + u64 nsec_now, nsec_delta, time_stamp; + u64 cycles_now, cycles_delta; + struct timespec64 ts; + unsigned long flags; + int pin = -1; + s64 ns; + + if (!MLX5_CAP_GEN(priv->mdev, pps_modify)) + return -EOPNOTSUPP; + + if (rq->perout.index >= tstamp->ptp_info.n_pins) + return -EINVAL; + + if (on) { + pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + 
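/* [Editor's note] The period check just below, "(ns >> 1) !=
 * 500000000LL", only passes when half the requested period equals 0.5s
 * worth of nanoseconds, i.e. ns == 1000000000 (or 1000000001, since the
 * shift drops the low bit).  In effect the 1PPS periodic output must be
 * programmed with a period of exactly one second.  Equivalent explicit
 * form, as a sketch with a hypothetical name:
 */
static inline bool foo_period_is_one_sec(s64 ns)
{
	/* true for ns = 1e9 and 1e9 + 1; false for everything else */
	return (ns >> 1) == 500000000LL;
}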
ns = timespec64_to_ns(&ts); + if (on) + if ((ns >> 1) != 500000000LL) + return -EINVAL; + ts.tv_sec = rq->perout.start.sec; + ts.tv_nsec = rq->perout.start.nsec; + ns = timespec64_to_ns(&ts); + cycles_now = mlx5_read_internal_timer(tstamp->mdev); + write_lock_irqsave(&tstamp->lock, flags); + nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); + nsec_delta = ns - nsec_now; + cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, + tstamp->cycles.mult); + write_unlock_irqrestore(&tstamp->lock, flags); + time_stamp = cycles_now + cycles_delta; + MLX5_SET(mtpps_reg, in, pin, pin); + MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); + MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC); + MLX5_SET(mtpps_reg, in, enable, on); + MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); + + return mlx5_set_mtpps(priv->mdev, in, sizeof(in)); +} + +static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + return mlx5e_extts_configure(ptp, rq, on); + case PTP_CLK_REQ_PEROUT: + return mlx5e_perout_configure(ptp, rq, on); + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0; +} + static const struct ptp_clock_info mlx5e_ptp_clock_info = { .owner = THIS_MODULE, .max_adj = 100000000, @@ -221,6 +370,7 @@ static const struct ptp_clock_info mlx5e_ptp_clock_info = { .gettime64 = mlx5e_ptp_gettime, .settime64 = mlx5e_ptp_settime, .enable = NULL, + .verify = NULL, }; static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp) @@ -229,6 +379,62 @@ static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp) tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; } +static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp) +{ + int i; + + tstamp->ptp_info.pin_config = + kzalloc(sizeof(*tstamp->ptp_info.pin_config) * + tstamp->ptp_info.n_pins, GFP_KERNEL); + if (!tstamp->ptp_info.pin_config) + return -ENOMEM; + tstamp->ptp_info.enable = mlx5e_ptp_enable; + tstamp->ptp_info.verify = mlx5e_ptp_verify; + + for (i = 0; i < tstamp->ptp_info.n_pins; i++) { + snprintf(tstamp->ptp_info.pin_config[i].name, + sizeof(tstamp->ptp_info.pin_config[i].name), + "mlx5_pps%d", i); + tstamp->ptp_info.pin_config[i].index = i; + tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE; + tstamp->ptp_info.pin_config[i].chan = i; + } + + return 0; +} + +static void mlx5e_get_pps_caps(struct mlx5e_priv *priv, + struct mlx5e_tstamp *tstamp) +{ + u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + mlx5_query_mtpps(priv->mdev, out, sizeof(out)); + + tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out, + cap_number_of_pps_pins); + tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out, + cap_max_num_of_pps_in_pins); + tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, + cap_max_num_of_pps_out_pins); + + tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); + tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); + tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); + tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); + tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); + tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); + tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); + tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, 
cap_pin_7_mode); +} + +void mlx5e_pps_event_handler(struct mlx5e_priv *priv, + struct ptp_clock_event *event) +{ + struct mlx5e_tstamp *tstamp = &priv->tstamp; + + ptp_clock_event(tstamp->ptp, event); +} + void mlx5e_timestamp_init(struct mlx5e_priv *priv) { struct mlx5e_tstamp *tstamp = &priv->tstamp; @@ -272,6 +478,18 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) tstamp->ptp_info = mlx5e_ptp_clock_info; snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); + /* Initialize 1PPS data structures */ +#define MAX_PIN_NUM 8 + tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL); + if (tstamp->pps_pin_caps) { + if (MLX5_CAP_GEN(priv->mdev, pps)) + mlx5e_get_pps_caps(priv, tstamp); + if (tstamp->ptp_info.n_pins) + mlx5e_init_pin_config(tstamp); + } else { + mlx5_core_warn(priv->mdev, "1PPS initialization failed\n"); + } + tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, &priv->mdev->pdev->dev); if (IS_ERR(tstamp->ptp)) { @@ -293,5 +511,8 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv) priv->tstamp.ptp = NULL; } + kfree(tstamp->pps_pin_caps); + kfree(tstamp->ptp_info.pin_config); + cancel_delayed_work_sync(&tstamp->overflow_work); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 33a399a8b5d5..6236ce95b8e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -170,7 +170,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) case ETH_SS_STATS: return NUM_SW_COUNTERS + MLX5E_NUM_Q_CNTRS(priv) + - NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + + NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) + + NUM_PCIE_COUNTERS(priv) + MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + MLX5E_NUM_PFC_COUNTERS(priv) + @@ -218,6 +219,14 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format); + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_phy_statistical_stats_desc[i].format); + + for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pcie_perf_stats_desc[i].format); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) sprintf(data + (idx++) * ETH_GSTRING_LEN, @@ -330,6 +339,14 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, pport_2819_stats_desc, i); + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, + pport_phy_statistical_stats_desc, i); + + for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, + pcie_perf_stats_desc, i); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], @@ -535,7 +552,7 @@ static void mlx5e_get_channels(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); - ch->max_combined = mlx5e_get_max_num_channels(priv->mdev); + ch->max_combined = priv->profile->max_nch(priv->mdev); ch->combined_count = priv->params.num_channels; } @@ -543,7 +560,6 @@ static int mlx5e_set_channels(struct net_device *dev, struct ethtool_channels *ch) { struct mlx5e_priv *priv 
= netdev_priv(dev); - int ncv = mlx5e_get_max_num_channels(priv->mdev); unsigned int count = ch->combined_count; bool arfs_enabled; bool was_opened; @@ -554,16 +570,6 @@ static int mlx5e_set_channels(struct net_device *dev, __func__); return -EINVAL; } - if (ch->rx_count || ch->tx_count) { - netdev_info(dev, "%s: separate rx/tx count not supported\n", - __func__); - return -EINVAL; - } - if (count > ncv) { - netdev_info(dev, "%s: count (%d) > max (%d)\n", - __func__, count, ncv); - return -EINVAL; - } if (priv->params.num_channels == count) return 0; @@ -1459,8 +1465,6 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - int err = 0; - bool reset; if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -ENOTSUPP; @@ -1470,17 +1474,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, return -EINVAL; } - reset = test_bit(MLX5E_STATE_OPENED, &priv->state); - - if (reset) - mlx5e_close_locked(netdev); - - MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, enable); + mlx5e_modify_rx_cqe_compression_locked(priv, enable); priv->params.rx_cqe_compress_def = enable; - if (reset) - err = mlx5e_open_locked(netdev); - return err; + return 0; } static int mlx5e_handle_pflag(struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 1fe80de5d68f..92d8364e98f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -150,7 +150,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) enum mlx5e_vlan_rule_type { MLX5E_VLAN_RULE_TYPE_UNTAGGED, - MLX5E_VLAN_RULE_TYPE_ANY_VID, + MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, + MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, MLX5E_VLAN_RULE_TYPE_MATCH_VID, }; @@ -172,19 +173,31 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, dest.ft = priv->fs.l2.ft.t; spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); + switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: rule_p = &priv->fs.vlan.untagged_rule; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.cvlan_tag); break; - case MLX5E_VLAN_RULE_TYPE_ANY_VID: - rule_p = &priv->fs.vlan.any_vlan_rule; - MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1); + case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: + rule_p = &priv->fs.vlan.any_cvlan_rule; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.cvlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); + break; + case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: + rule_p = &priv->fs.vlan.any_svlan_rule; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.svlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); break; default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ rule_p = &priv->fs.vlan.active_vlans_rule[vid]; - MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.cvlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, @@ -235,10 +248,16 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, 
priv->fs.vlan.untagged_rule = NULL; } break; - case MLX5E_VLAN_RULE_TYPE_ANY_VID: - if (priv->fs.vlan.any_vlan_rule) { - mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule); - priv->fs.vlan.any_vlan_rule = NULL; + case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: + if (priv->fs.vlan.any_cvlan_rule) { + mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule); + priv->fs.vlan.any_cvlan_rule = NULL; + } + break; + case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: + if (priv->fs.vlan.any_svlan_rule) { + mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule); + priv->fs.vlan.any_svlan_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_MATCH_VID: @@ -252,6 +271,23 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, } } +static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv) +{ + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); +} + +static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv) +{ + int err; + + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); + if (err) + return err; + + return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); +} + void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) { if (!priv->fs.vlan.filter_disabled) @@ -260,7 +296,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) priv->fs.vlan.filter_disabled = false; if (priv->netdev->flags & IFF_PROMISC) return; - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_del_any_vid_rules(priv); } void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) @@ -271,7 +307,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) priv->fs.vlan.filter_disabled = true; if (priv->netdev->flags & IFF_PROMISC) return; - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_add_any_vid_rules(priv); } int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, @@ -308,7 +344,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) if (priv->fs.vlan.filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_add_any_vid_rules(priv); } static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) @@ -323,7 +359,7 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) if (priv->fs.vlan.filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_del_any_vid_rules(priv); } #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ @@ -503,8 +539,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) if (enable_promisc) { mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC); if (!priv->fs.vlan.filter_disabled) - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); + mlx5e_add_any_vid_rules(priv); } if (enable_allmulti) mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); @@ -519,8 +554,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) mlx5e_del_l2_flow_rule(priv, &ea->allmulti); if (disable_promisc) { if (!priv->fs.vlan.filter_disabled) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); + mlx5e_del_any_vid_rules(priv); mlx5e_del_l2_flow_rule(priv, &ea->promisc); } @@ -976,11 +1010,13 @@ err_destroy_flow_table: return err; } -#define MLX5E_NUM_VLAN_GROUPS 2 +#define MLX5E_NUM_VLAN_GROUPS 3 #define MLX5E_VLAN_GROUP0_SIZE BIT(12) #define MLX5E_VLAN_GROUP1_SIZE BIT(1) +#define MLX5E_VLAN_GROUP2_SIZE BIT(0) #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ - MLX5E_VLAN_GROUP1_SIZE) + MLX5E_VLAN_GROUP1_SIZE 
+\ + MLX5E_VLAN_GROUP2_SIZE) static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in, int inlen) @@ -991,7 +1027,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in memset(in, 0, inlen); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_VLAN_GROUP0_SIZE; @@ -1003,7 +1039,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in memset(in, 0, inlen); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_VLAN_GROUP1_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); @@ -1012,6 +1048,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in goto err_destroy_groups; ft->num_groups++; + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_VLAN_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + return 0; err_destroy_groups: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index d088effd7160..4b4323f3c158 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -237,9 +237,9 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v, if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) { MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, - vlan_tag, 1); + cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, - vlan_tag, 1); + cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, first_vid, 0xfff); MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2cc774260903..e829143efc14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -31,6 +31,7 @@ */ #include <net/tc_act/tc_gact.h> +#include <linux/crash_dump.h> #include <net/pkt_cls.h> #include <linux/mlx5/fs.h> #include <net/vxlan.h> @@ -83,7 +84,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) priv->params.rq_wq_type = rq_type; switch (priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; + priv->params.log_rq_size = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW : + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; priv->params.mpwqe_log_stride_sz = MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ? 
MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS : @@ -92,7 +95,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) priv->params.mpwqe_log_stride_sz; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ - priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; + priv->params.log_rq_size = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; } priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, BIT(priv->params.log_rq_size)); @@ -268,6 +273,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) { + out = pstats->phy_statistical_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + } + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { out = pstats->per_prio_counters[prio]; @@ -291,11 +302,34 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv) &qcnt->rx_out_of_buffer); } +static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv) +{ + struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie; + struct mlx5_core_dev *mdev = priv->mdev; + int sz = MLX5_ST_SZ_BYTES(mpcnt_reg); + void *out; + u32 *in; + + if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group)) + return; + + in = mlx5_vzalloc(sz); + if (!in) + return; + + out = pcie_stats->pcie_perf_counters; + MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); + + kvfree(in); +} + void mlx5e_update_stats(struct mlx5e_priv *priv) { - mlx5e_update_q_counter(priv); - mlx5e_update_vport_counters(priv); + mlx5e_update_pcie_counters(priv); mlx5e_update_pport_counters(priv); + mlx5e_update_vport_counters(priv); + mlx5e_update_q_counter(priv); mlx5e_update_sw_counters(priv); } @@ -317,6 +351,8 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, enum mlx5_dev_event event, unsigned long param) { struct mlx5e_priv *priv = vpriv; + struct ptp_clock_event ptp_event; + struct mlx5_eqe *eqe = NULL; if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state)) return; @@ -326,7 +362,15 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, case MLX5_DEV_EVENT_PORT_DOWN: queue_work(priv->wq, &priv->update_carrier_work); break; - + case MLX5_DEV_EVENT_PPS: + eqe = (struct mlx5_eqe *)param; + ptp_event.type = PTP_CLOCK_EXTTS; + ptp_event.index = eqe->data.pps.pin; + ptp_event.timestamp = + timecounter_cyc2time(&priv->tstamp.clock, + be64_to_cpu(eqe->data.pps.time_stamp)); + mlx5e_pps_event_handler(vpriv, &ptp_event); + break; default: break; } @@ -343,9 +387,6 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv) synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC)); } -#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) -#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) - static inline int mlx5e_get_wqe_mtt_sz(void) { /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes. 
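The MLX5_DEV_EVENT_PPS hunk above converts the raw EQE timestamp to nanoseconds with timecounter_cyc2time() before handing the event to the PTP core. As a rough illustration of the arithmetic behind that call, here is a minimal userspace sketch; it is not the kernel's timecounter implementation (which also carries a fractional-nanosecond remainder), and every name and value in it (toy_timecounter, toy_cyc2time, the mask/mult/shift parameters) is made up for the example:

/*
 * Toy model of timecounter_cyc2time():
 *   ns = nsec_base + ((cycles - cycle_last) & mask) * mult >> shift
 * Simplified: no fractional remainder, and the multiply is assumed
 * not to overflow 64 bits for the deltas used here.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_timecounter {
	uint64_t cycle_last;	/* raw counter value at the last sync point */
	uint64_t nsec;		/* nanoseconds corresponding to cycle_last */
	uint64_t mask;		/* counter width mask, e.g. 2^49 - 1 */
	uint32_t mult;		/* fixed-point scale factor */
	uint32_t shift;		/* fixed-point shift */
};

static uint64_t toy_cyc2time(const struct toy_timecounter *tc, uint64_t cycles)
{
	/* The mask makes the subtraction wrap correctly at counter width. */
	uint64_t delta = (cycles - tc->cycle_last) & tc->mask;

	return tc->nsec + ((delta * tc->mult) >> tc->shift);
}

int main(void)
{
	/* Illustrative parameters: mult/shift chosen so 1 cycle ~= 1 ns. */
	struct toy_timecounter tc = {
		.cycle_last = 1000,
		.nsec = 0,
		.mask = (1ULL << 49) - 1,
		.mult = 1 << 16,
		.shift = 16,
	};

	/* A PPS EQE carrying a raw timestamp 5000 cycles later: */
	printf("event time: %llu ns\n",
	       (unsigned long long)toy_cyc2time(&tc, 6000));
	return 0;
}

The mult/shift fixed-point form exists so the conversion costs only a multiply and a shift, with no 64-bit division, which matters because this runs from the async event (interrupt) path.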
@@ -534,9 +575,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; } - rq->buff.map_dir = DMA_FROM_DEVICE; - if (rq->xdp_prog) + if (rq->xdp_prog) { rq->buff.map_dir = DMA_BIDIRECTIONAL; + rq->rx_headroom = XDP_PACKET_HEADROOM; + } else { + rq->buff.map_dir = DMA_FROM_DEVICE; + rq->rx_headroom = MLX5_RX_HEADROOM; + } switch (priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: @@ -586,7 +631,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, byte_count = rq->buff.wqe_sz; /* calc the required page order */ - frag_sz = MLX5_RX_HEADROOM + + frag_sz = rq->rx_headroom + byte_count /* packet data */ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); frag_sz = SKB_DATA_ALIGN(frag_sz); @@ -1468,6 +1513,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } +static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) +{ + return is_kdump_kernel() ? + MLX5E_MIN_NUM_CHANNELS : + min_t(int, mdev->priv.eq_table.num_comp_vectors, + MLX5E_MAX_NUM_CHANNELS); +} + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) @@ -2981,11 +3034,8 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; - if (min_tx_rate) - return -EOPNOTSUPP; - return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1, - max_tx_rate); + max_tx_rate, min_tx_rate); } static int mlx5_vport_link2ifla(u8 esw_link) @@ -3153,11 +3203,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) bool reset, was_opened; int i; - if (prog && prog->xdp_adjust_head) { - netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n"); - return -EOPNOTSUPP; - } - mutex_lock(&priv->state_lock); if ((netdev->features & NETIF_F_LRO) && prog) { @@ -3426,22 +3471,6 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; } -static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev, - u8 *min_inline_mode) -{ - switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) { - case MLX5_CAP_INLINE_MODE_L2: - *min_inline_mode = MLX5_INLINE_MODE_L2; - break; - case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: - mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode); - break; - case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: - *min_inline_mode = MLX5_INLINE_MODE_NONE; - break; - } -} - u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) { int i; @@ -3475,7 +3504,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->params.lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); - priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; + priv->params.log_sq_size = is_kdump_kernel() ? 
+ MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : + MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; /* set CQE compression */ priv->params.rx_cqe_compress_def = false; @@ -3501,7 +3532,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->params.tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); - mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode); + mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode); priv->params.num_tc = 1; priv->params.rss_hfunc = ETH_RSS_HASH_XOR; @@ -3669,14 +3700,8 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev, static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_eswitch *esw = mdev->priv.eswitch; - mlx5e_vxlan_cleanup(priv); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) - mlx5_eswitch_unregister_vport_rep(esw, 0); - if (priv->xdp_prog) bpf_prog_put(priv->xdp_prog); } @@ -3801,9 +3826,14 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) static void mlx5e_nic_disable(struct mlx5e_priv *priv) { + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + queue_work(priv->wq, &priv->set_rx_mode_work); + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + mlx5_eswitch_unregister_vport_rep(esw, 0); mlx5e_disable_async_events(priv); - mlx5_lag_remove(priv->mdev); + mlx5_lag_remove(mdev); } static const struct mlx5e_profile mlx5e_nic_profile = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 0e2fb3ed1790..fd8dff6acc12 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -33,6 +33,7 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/tcp.h> +#include <linux/bpf_trace.h> #include <net/busy_poll.h> #include "en.h" #include "en_tc.h" @@ -155,17 +156,15 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1; } -void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val) +void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val) { bool was_opened; if (!MLX5_CAP_GEN(priv->mdev, cqe_compression)) return; - mutex_lock(&priv->state_lock); - if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val) - goto unlock; + return; was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); if (was_opened) @@ -176,8 +175,6 @@ void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val) if (was_opened) mlx5e_open_locked(priv->netdev); -unlock: - mutex_unlock(&priv->state_lock); } #define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT) @@ -193,6 +190,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, return false; } + if (unlikely(page_is_pfmemalloc(dma_info->page))) + return false; + cache->page_cache[cache->tail] = *dma_info; cache->tail = tail_next; return true; @@ -264,7 +264,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) if (unlikely(mlx5e_page_alloc_mapped(rq, di))) return -ENOMEM; - wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM); + wqe->data.addr = cpu_to_be64(di->addr + rq->rx_headroom); return 0; } @@ -644,10 +644,9 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq) mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0); } -static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, +static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, 
struct mlx5e_dma_info *di, - unsigned int data_offset, - int len) + const struct xdp_buff *xdp) { struct mlx5e_sq *sq = &rq->channel->xdp_sq; struct mlx5_wq_cyc *wq = &sq->wq; @@ -659,9 +658,16 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5_wqe_eth_seg *eseg = &wqe->eth; struct mlx5_wqe_data_seg *dseg; + ptrdiff_t data_offset = xdp->data - xdp->data_hard_start; dma_addr_t dma_addr = di->addr + data_offset + MLX5E_XDP_MIN_INLINE; - unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE; - void *data = page_address(di->page) + data_offset; + unsigned int dma_len = xdp->data_end - xdp->data; + + if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || + MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) { + rq->stats.xdp_drop++; + mlx5e_page_release(rq, di, true); + return false; + } if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) { if (sq->db.xdp.doorbell) { @@ -671,16 +677,17 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, } rq->stats.xdp_tx_full++; mlx5e_page_release(rq, di, true); - return; + return false; } + dma_len -= MLX5E_XDP_MIN_INLINE; dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); memset(wqe, 0, sizeof(*wqe)); /* copy the inline part */ - memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE); + memcpy(eseg->inline_hdr_start, xdp->data, MLX5E_XDP_MIN_INLINE); eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1); @@ -700,32 +707,39 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, sq->db.xdp.doorbell = true; rq->stats.xdp_tx++; + return true; } /* returns true if packet was consumed by xdp */ -static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq, - const struct bpf_prog *prog, - struct mlx5e_dma_info *di, - void *data, u16 len) +static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, + struct mlx5e_dma_info *di, + void *va, u16 *rx_headroom, u32 *len) { + const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); struct xdp_buff xdp; u32 act; if (!prog) return false; - xdp.data = data; - xdp.data_end = xdp.data + len; + xdp.data = va + *rx_headroom; + xdp.data_end = xdp.data + *len; + xdp.data_hard_start = va; + act = bpf_prog_run_xdp(prog, &xdp); switch (act) { case XDP_PASS: + *rx_headroom = xdp.data - xdp.data_hard_start; + *len = xdp.data_end - xdp.data; return false; case XDP_TX: - mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len); + if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp))) + trace_xdp_exception(rq->netdev, prog, act); return true; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(rq->netdev, prog, act); case XDP_DROP: rq->stats.xdp_drop++; mlx5e_page_release(rq, di, true); @@ -740,15 +754,16 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct mlx5e_dma_info *di; struct sk_buff *skb; void *va, *data; + u16 rx_headroom = rq->rx_headroom; bool consumed; di = &rq->dma_info[wqe_counter]; va = page_address(di->page); - data = va + MLX5_RX_HEADROOM; + data = va + rx_headroom; dma_sync_single_range_for_cpu(rq->pdev, di->addr, - MLX5_RX_HEADROOM, + rx_headroom, rq->buff.wqe_sz, DMA_FROM_DEVICE); prefetch(data); @@ -760,8 +775,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, } rcu_read_lock(); - consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data, - cqe_bcnt); + consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt); rcu_read_unlock(); if (consumed) return NULL; /* page/packet was consumed by XDP */ @@ 
-777,7 +791,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, page_ref_inc(di->page); mlx5e_page_release(rq, di, true); - skb_reserve(skb, MLX5_RX_HEADROOM); + skb_reserve(skb, rx_headroom); skb_put(skb, cqe_bcnt); return skb; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index 1fffe48a93cc..cbfac06b7ffd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -109,7 +109,6 @@ static bool mlx5e_am_on_top(struct mlx5e_rx_am *am) switch (am->tune_state) { case MLX5E_AM_PARKING_ON_TOP: case MLX5E_AM_PARKING_TIRED: - WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n"); return true; case MLX5E_AM_GOING_RIGHT: return (am->steps_left > 1) && (am->steps_right == 1); @@ -123,7 +122,6 @@ static void mlx5e_am_turn(struct mlx5e_rx_am *am) switch (am->tune_state) { case MLX5E_AM_PARKING_ON_TOP: case MLX5E_AM_PARKING_TIRED: - WARN_ONCE(true, "mlx5e_am_turn: PARKING\n"); break; case MLX5E_AM_GOING_RIGHT: am->tune_state = MLX5E_AM_GOING_LEFT; @@ -144,7 +142,6 @@ static int mlx5e_am_step(struct mlx5e_rx_am *am) switch (am->tune_state) { case MLX5E_AM_PARKING_ON_TOP: case MLX5E_AM_PARKING_TIRED: - WARN_ONCE(true, "mlx5e_am_step: PARKING\n"); break; case MLX5E_AM_GOING_RIGHT: if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1)) @@ -282,10 +279,8 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, u32 delta_us = ktime_us_delta(end->time, start->time); unsigned int npkts = end->pkt_ctr - start->pkt_ctr; - if (!delta_us) { - WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n"); + if (!delta_us) return; - } curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index ba5db1dd23a9..53e4992d6511 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -39,7 +39,7 @@ #define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \ (*(u32 *)((char *)ptr + dsc[i].offset)) #define MLX5E_READ_CTR32_BE(ptr, dsc, i) \ - be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) + be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) @@ -201,6 +201,12 @@ static const struct counter_desc vport_stats_desc[] = { #define PPORT_2819_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \ counter_set.eth_2819_cntrs_grp_data_layout.c##_high) +#define PPORT_PHY_STATISTICAL_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.phys_layer_statistical_cntrs.c##_high) +#define PPORT_PHY_STATISTICAL_GET(pstats, c) \ + MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \ + counter_set.phys_layer_statistical_cntrs.c##_high) #define PPORT_PER_PRIO_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_per_prio_grp_data_layout.c##_high) @@ -215,6 +221,7 @@ struct mlx5e_pport_stats { __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; }; static const struct counter_desc pport_802_3_stats_desc[] = { @@ -260,6 +267,11 @@ static const struct counter_desc pport_2819_stats_desc[] = { { "rx_8192_to_10239_bytes_phy", 
PPORT_2819_OFF(ether_stats_pkts8192to10239octets) }, }; +static const struct counter_desc pport_phy_statistical_stats_desc[] = { + { "rx_symbol_errors_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) }, + { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) }, +}; + static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) }, { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) }, @@ -276,6 +288,21 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; +#define PCIE_PERF_OFF(c) \ + MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c) +#define PCIE_PERF_GET(pcie_stats, c) \ + MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \ + counter_set.pcie_perf_cntrs_grp_data_layout.c) + +struct mlx5e_pcie_stats { + __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)]; +}; + +static const struct counter_desc pcie_perf_stats_desc[] = { + { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) }, + { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) }, +}; + struct mlx5e_rq_stats { u64 packets; u64 bytes; @@ -360,15 +387,23 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) +#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \ + (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \ + MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) +#define NUM_PCIE_PERF_COUNTERS(priv) \ + (ARRAY_SIZE(pcie_perf_stats_desc) * \ + MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ ARRAY_SIZE(pport_per_prio_traffic_stats_desc) #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ ARRAY_SIZE(pport_per_prio_pfc_stats_desc) -#define NUM_PPORT_COUNTERS (NUM_PPORT_802_3_COUNTERS + \ +#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \ NUM_PPORT_2863_COUNTERS + \ NUM_PPORT_2819_COUNTERS + \ + NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ NUM_PPORT_PRIO) +#define NUM_PCIE_COUNTERS(priv) NUM_PCIE_PERF_COUNTERS(priv) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) @@ -378,6 +413,7 @@ struct mlx5e_stats { struct mlx5e_vport_stats vport; struct mlx5e_pport_stats pport; struct rtnl_link_stats64 vf_vport; + struct mlx5e_pcie_stats pcie; }; static const struct counter_desc mlx5e_pme_status_desc[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index f8829b517156..640f10f2e994 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -161,15 +161,21 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, } } +/* we get here also when setting rule to the FW failed, etc. It means that the + * flow rule itself might not exist, but some offloading related to the actions + * should be cleaned. 
+ */ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_fc *counter = NULL; - counter = mlx5_flow_rule_counter(flow->rule); - - mlx5_del_flow_rules(flow->rule); + if (!IS_ERR(flow->rule)) { + counter = mlx5_flow_rule_counter(flow->rule); + mlx5_del_flow_rules(flow->rule); + mlx5_fc_destroy(priv->mdev, counter); + } if (esw && esw->mode == SRIOV_OFFLOADS) { mlx5_eswitch_del_vlan_action(esw, flow->attr); @@ -177,8 +183,6 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, mlx5e_detach_encap(priv, flow); } - mlx5_fc_destroy(priv->mdev, counter); - if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { mlx5_destroy_flow_table(priv->fs.tc.t); priv->fs.tc.t = NULL; @@ -225,6 +229,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); + struct flow_dissector_key_control *enc_control = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_CONTROL, + f->key); + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { struct flow_dissector_key_ports *key = skb_flow_dissector_target(f->dissector, @@ -237,28 +246,34 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, /* Full udp dst port must be given */ if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) - return -EOPNOTSUPP; - - /* udp src port isn't supported */ - if (memchr_inv(&mask->src, 0, sizeof(mask->src))) - return -EOPNOTSUPP; + goto vxlan_match_offload_err; if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) parse_vxlan_attr(spec, f); - else + else { + netdev_warn(priv->netdev, + "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst)); return -EOPNOTSUPP; + } MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst)); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + udp_sport, ntohs(mask->src)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + udp_sport, ntohs(key->src)); } else { /* udp dst port must be given */ - return -EOPNOTSUPP; +vxlan_match_offload_err: + netdev_warn(priv->netdev, + "IP tunnel decap offload supported only for vxlan, must set UDP dport\n"); + return -EOPNOTSUPP; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { + if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_dissector_key_ipv4_addrs *key = skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, @@ -280,10 +295,36 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_lyr_2_4, headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4, ntohl(key->dst)); - } - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); + } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_dissector_key_ipv6_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, + f->key); + struct flow_dissector_key_ipv6_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, + f->mask); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + 
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6); + } /* Enforce DMAC when offloading incoming tunneled flows. * Flow counters require a match on the DMAC. @@ -343,6 +384,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, f->key); switch (key->addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: if (parse_tunnel_attr(priv, spec, f)) return -EOPNOTSUPP; break; @@ -375,6 +417,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, key->flags & FLOW_DIS_IS_FRAGMENT); + + /* the HW doesn't need L3 inline to match on frag=no */ + if (key->flags & FLOW_DIS_IS_FRAGMENT) + *min_inline = MLX5_INLINE_MODE_IP; } } @@ -438,8 +484,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, FLOW_DISSECTOR_KEY_VLAN, f->mask); if (mask->vlan_id || mask->vlan_priority) { - MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); @@ -622,15 +668,15 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return 0; } -static inline int cmp_encap_info(struct mlx5_encap_info *a, - struct mlx5_encap_info *b) +static inline int cmp_encap_info(struct ip_tunnel_key *a, + struct ip_tunnel_key *b) { return memcmp(a, b, sizeof(*a)); } -static inline int hash_encap_info(struct mlx5_encap_info *info) +static inline int hash_encap_info(struct ip_tunnel_key *key) { - return jhash(info, sizeof(*info), 0); + return jhash(key, sizeof(*key), 0); } static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, @@ -638,44 +684,81 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, struct net_device **out_dev, struct flowi4 *fl4, struct neighbour **out_n, - __be32 *saddr, int *out_ttl) { struct rtable *rt; struct neighbour *n = NULL; - int ttl; #if IS_ENABLED(CONFIG_INET) + int ret; + rt = ip_route_output_key(dev_net(mirred_dev), fl4); - if (IS_ERR(rt)) { - pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr); - return -EOPNOTSUPP; - } + ret = PTR_ERR_OR_ZERO(rt); + if (ret) + return ret; #else return -EOPNOTSUPP; #endif if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) { - pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n", - __func__); + pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__); ip_rt_put(rt); return -EOPNOTSUPP; } - ttl = ip4_dst_hoplimit(&rt->dst); + *out_ttl = ip4_dst_hoplimit(&rt->dst); n = dst_neigh_lookup(&rt->dst, &fl4->daddr); ip_rt_put(rt); if (!n) return -ENOMEM; *out_n = n; - *saddr = fl4->saddr; - *out_ttl = ttl; *out_dev = rt->dst.dev; return 0; } +static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, + struct 
net_device *mirred_dev, + struct net_device **out_dev, + struct flowi6 *fl6, + struct neighbour **out_n, + int *out_ttl) +{ + struct neighbour *n = NULL; + struct dst_entry *dst; + +#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + int ret; + + dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6); + if (dst->error) { + ret = dst->error; + dst_release(dst); + return ret; + } + + *out_ttl = ip6_dst_hoplimit(dst); + + /* if the egress device isn't on the same HW e-switch, we use the uplink */ + if (!switchdev_port_same_parent_id(priv->netdev, dst->dev)) + *out_dev = mlx5_eswitch_get_uplink_netdev(esw); + else + *out_dev = dst->dev; +#else + return -EOPNOTSUPP; +#endif + + n = dst_neigh_lookup(dst, &fl6->daddr); + dst_release(dst); + if (!n) + return -ENOMEM; + + *out_n = n; + return 0; +} + static int gen_vxlan_header_ipv4(struct net_device *out_dev, char buf[], unsigned char h_dest[ETH_ALEN], @@ -712,19 +795,52 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev, return encap_size; } +static int gen_vxlan_header_ipv6(struct net_device *out_dev, + char buf[], + unsigned char h_dest[ETH_ALEN], + int ttl, + struct in6_addr *daddr, + struct in6_addr *saddr, + __be16 udp_dst_port, + __be32 vx_vni) +{ + int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN; + struct ethhdr *eth = (struct ethhdr *)buf; + struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr)); + struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr)); + struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); + + memset(buf, 0, encap_size); + + ether_addr_copy(eth->h_dest, h_dest); + ether_addr_copy(eth->h_source, out_dev->dev_addr); + eth->h_proto = htons(ETH_P_IPV6); + + ip6_flow_hdr(ip6h, 0, 0); + /* the HW fills up ipv6 payload len */ + ip6h->nexthdr = IPPROTO_UDP; + ip6h->hop_limit = ttl; + ip6h->daddr = *daddr; + ip6h->saddr = *saddr; + + udp->dest = udp_dst_port; + vxh->vx_flags = VXLAN_HF_VNI; + vxh->vx_vni = vxlan_vni_field(vx_vni); + + return encap_size; +} + static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, struct net_device *mirred_dev, struct mlx5_encap_entry *e, struct net_device **out_dev) { int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + struct ip_tunnel_key *tun_key = &e->tun_info.key; + int encap_size, ttl, err; + struct neighbour *n = NULL; struct flowi4 fl4 = {}; - struct neighbour *n; char *encap_header; - int encap_size; - __be32 saddr; - int ttl; - int err; encap_header = kzalloc(max_encap_size, GFP_KERNEL); if (!encap_header) @@ -733,36 +849,108 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, switch (e->tunnel_type) { case MLX5_HEADER_TYPE_VXLAN: fl4.flowi4_proto = IPPROTO_UDP; - fl4.fl4_dport = e->tun_info.tp_dst; + fl4.fl4_dport = tun_key->tp_dst; break; default: err = -EOPNOTSUPP; goto out; } - fl4.daddr = e->tun_info.daddr; + fl4.flowi4_tos = tun_key->tos; + fl4.daddr = tun_key->u.ipv4.dst; + fl4.saddr = tun_key->u.ipv4.src; err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev, - &fl4, &n, &saddr, &ttl); + &fl4, &n, &ttl); if (err) goto out; + if (!(n->nud_state & NUD_VALID)) { + pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr); + err = -EOPNOTSUPP; + goto out; + } + e->n = n; e->out_dev = *out_dev; + neigh_ha_snapshot(e->h_dest, n, *out_dev); + + switch (e->tunnel_type) { + case MLX5_HEADER_TYPE_VXLAN: + encap_size = gen_vxlan_header_ipv4(*out_dev, 
encap_header, + e->h_dest, ttl, + fl4.daddr, + fl4.saddr, tun_key->tp_dst, + tunnel_id_to_key32(tun_key->tun_id)); + break; + default: + err = -EOPNOTSUPP; + goto out; + } + + err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, + encap_size, encap_header, &e->encap_id); +out: + if (err && n) + neigh_release(n); + kfree(encap_header); + return err; +} + +static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5_encap_entry *e, + struct net_device **out_dev) + +{ + int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + struct ip_tunnel_key *tun_key = &e->tun_info.key; + int encap_size, err, ttl = 0; + struct neighbour *n = NULL; + struct flowi6 fl6 = {}; + char *encap_header; + + encap_header = kzalloc(max_encap_size, GFP_KERNEL); + if (!encap_header) + return -ENOMEM; + + switch (e->tunnel_type) { + case MLX5_HEADER_TYPE_VXLAN: + fl6.flowi6_proto = IPPROTO_UDP; + fl6.fl6_dport = tun_key->tp_dst; + break; + default: + err = -EOPNOTSUPP; + goto out; + } + + fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); + fl6.daddr = tun_key->u.ipv6.dst; + fl6.saddr = tun_key->u.ipv6.src; + + err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev, + &fl6, &n, &ttl); + if (err) + goto out; + if (!(n->nud_state & NUD_VALID)) { - err = -ENOTSUPP; + pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr); + err = -EOPNOTSUPP; goto out; } + e->n = n; + e->out_dev = *out_dev; + neigh_ha_snapshot(e->h_dest, n, *out_dev); switch (e->tunnel_type) { case MLX5_HEADER_TYPE_VXLAN: - encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header, + encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header, e->h_dest, ttl, - e->tun_info.daddr, - saddr, e->tun_info.tp_dst, - e->tun_info.tun_id); + &fl6.daddr, + &fl6.saddr, tun_key->tp_dst, + tunnel_id_to_key32(tun_key->tun_id)); break; default: err = -EOPNOTSUPP; @@ -772,6 +960,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, encap_size, encap_header, &e->encap_id); out: + if (err && n) + neigh_release(n); kfree(encap_header); return err; } @@ -784,40 +974,38 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; unsigned short family = ip_tunnel_info_af(tun_info); struct ip_tunnel_key *key = &tun_info->key; - struct mlx5_encap_info info; struct mlx5_encap_entry *e; struct net_device *out_dev; + int tunnel_type, err = -EOPNOTSUPP; uintptr_t hash_key; bool found = false; - int tunnel_type; - int err; - /* udp dst port must be given */ + /* udp dst port must be set */ if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst))) + goto vxlan_encap_offload_err; + + /* setting udp src port isn't supported */ + if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) { +vxlan_encap_offload_err: + netdev_warn(priv->netdev, + "must set udp dst port and not set udp src port\n"); return -EOPNOTSUPP; + } if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { - info.tp_dst = key->tp_dst; - info.tun_id = tunnel_id_to_key32(key->tun_id); tunnel_type = MLX5_HEADER_TYPE_VXLAN; } else { + netdev_warn(priv->netdev, + "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst)); return -EOPNOTSUPP; } - switch (family) { - case AF_INET: - info.daddr = key->u.ipv4.dst; - break; - default: - return -EOPNOTSUPP; - } - - hash_key = hash_encap_info(&info); + hash_key = hash_encap_info(key); 
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, encap_hlist, hash_key) { - if (!cmp_encap_info(&e->tun_info, &info)) { + if (!cmp_encap_info(&e->tun_info.key, key)) { found = true; break; } @@ -832,11 +1020,15 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, if (!e) return -ENOMEM; - e->tun_info = info; + e->tun_info = *tun_info; e->tunnel_type = tunnel_type; INIT_LIST_HEAD(&e->flows); - err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev); + if (family == AF_INET) + err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev); + else if (family == AF_INET6) + err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev); + if (err) goto out_err; @@ -986,7 +1178,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, if (IS_ERR(flow->rule)) { err = PTR_ERR(flow->rule); - goto err_free; + goto err_del_rule; } err = rhashtable_insert_fast(&tc->ht, &flow->node, @@ -997,7 +1189,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, goto out; err_del_rule: - mlx5_del_flow_rules(flow->rule); + mlx5e_tc_del_flow(priv, flow); err_free: kfree(flow); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 5130d65dd41a..ea5d8d37a75c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -154,6 +154,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_PAGE_REQUEST"; case MLX5_EVENT_TYPE_PAGE_FAULT: return "MLX5_EVENT_TYPE_PAGE_FAULT"; + case MLX5_EVENT_TYPE_PPS_EVENT: + return "MLX5_EVENT_TYPE_PPS_EVENT"; default: return "Unrecognized event"; } @@ -470,6 +472,10 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) mlx5_port_module_event(dev, eqe); break; + case MLX5_EVENT_TYPE_PPS_EVENT: + if (dev->event) + dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe); + break; default: mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); @@ -684,6 +690,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) else mlx5_core_dbg(dev, "port_module_event is not set\n"); + if (MLX5_CAP_GEN(dev, pps)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f14d9c9ba773..efa1a7a76d8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -979,7 +979,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); @@ -1098,7 +1098,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, 
match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16); MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); @@ -1115,7 +1115,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, memset(flow_group_in, 0, inlen); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); @@ -1254,7 +1254,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, } if (vport->info.vlan || vport->info.qos) - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); if (vport->info.spoofchk) { MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16); @@ -1335,8 +1335,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, } /* Allowed vlan rule */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); - MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan); @@ -1415,7 +1415,7 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw) } static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, - u32 initial_max_rate) + u32 initial_max_rate, u32 initial_bw_share) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; struct mlx5_vport *vport = &esw->vports[vport_num]; @@ -1439,6 +1439,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, esw->qos.root_tsar_id); MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, initial_max_rate); + MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share); err = mlx5_create_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -1473,7 +1474,7 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num) } static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, - u32 max_rate) + u32 max_rate, u32 bw_share) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; struct mlx5_vport *vport = &esw->vports[vport_num]; @@ -1497,7 +1498,9 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, esw->qos.root_tsar_id); MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, max_rate); + MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share); bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; err = mlx5_modify_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -1563,7 +1566,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, esw_apply_vport_conf(esw, vport); /* Attach vport to the eswitch rate limiter */ - if (esw_vport_enable_qos(esw, vport_num, 
vport->info.max_rate)) + if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate, + vport->qos.bw_share)) esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num); /* Sync with current vport context */ @@ -1952,6 +1956,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, ivi->qos = evport->info.qos; ivi->spoofchk = evport->info.spoofchk; ivi->trusted = evport->info.trusted; + ivi->min_tx_rate = evport->info.min_rate; ivi->max_tx_rate = evport->info.max_rate; mutex_unlock(&esw->state_lock); @@ -2046,23 +2051,103 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, return 0; } -int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, - int vport, u32 max_rate) +static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) { + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); struct mlx5_vport *evport; + u32 max_guarantee = 0; + int i; + + for (i = 0; i <= esw->total_vports; i++) { + evport = &esw->vports[i]; + if (!evport->enabled || evport->info.min_rate < max_guarantee) + continue; + max_guarantee = evport->info.min_rate; + } + + return max_t(u32, max_guarantee / fw_max_bw_share, 1); +} + +static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + struct mlx5_vport *evport; + u32 vport_max_rate; + u32 vport_min_rate; + u32 bw_share; + int err; + int i; + + for (i = 0; i <= esw->total_vports; i++) { + evport = &esw->vports[i]; + if (!evport->enabled) + continue; + vport_min_rate = evport->info.min_rate; + vport_max_rate = evport->info.max_rate; + bw_share = MLX5_MIN_BW_SHARE; + + if (vport_min_rate) + bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate, + divider, + fw_max_bw_share); + + if (bw_share == evport->qos.bw_share) + continue; + + err = esw_vport_qos_config(esw, i, vport_max_rate, + bw_share); + if (!err) + evport->qos.bw_share = bw_share; + else + return err; + } + + return 0; +} + +int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, + u32 max_rate, u32 min_rate) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && + fw_max_bw_share >= MLX5_MIN_BW_SHARE; + bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); + struct mlx5_vport *evport; + u32 previous_min_rate; + u32 divider; int err = 0; if (!ESW_ALLOWED(esw)) return -EPERM; if (!LEGAL_VPORT(esw, vport)) return -EINVAL; + if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported)) + return -EOPNOTSUPP; mutex_lock(&esw->state_lock); evport = &esw->vports[vport]; - err = esw_vport_qos_config(esw, vport, max_rate); + + if (min_rate == evport->info.min_rate) + goto set_max_rate; + + previous_min_rate = evport->info.min_rate; + evport->info.min_rate = min_rate; + divider = calculate_vports_min_rate_divider(esw); + err = normalize_vports_min_rate(esw, divider); + if (err) { + evport->info.min_rate = previous_min_rate; + goto unlock; + } + +set_max_rate: + if (max_rate == evport->info.max_rate) + goto unlock; + + err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share); if (!err) evport->info.max_rate = max_rate; +unlock: mutex_unlock(&esw->state_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 8661dd3f542c..5b78883d5654 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ 
-36,6 +36,7 @@ #include <linux/if_ether.h> #include <linux/if_link.h> #include <net/devlink.h> +#include <net/ip_tunnels.h> #include <linux/mlx5/device.h> #define MLX5_MAX_UC_PER_VPORT(dev) \ @@ -49,6 +50,11 @@ #define FDB_UPLINK_VPORT 0xffff +#define MLX5_MIN_BW_SHARE 1 + +#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ + min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit) + /* L2 -mac address based- hash helpers */ struct l2addr_node { struct hlist_node hlist; @@ -115,6 +121,7 @@ struct mlx5_vport_info { u8 qos; u64 node_guid; int link_state; + u32 min_rate; u32 max_rate; bool spoofchk; bool trusted; @@ -137,6 +144,7 @@ struct mlx5_vport { struct { bool enabled; u32 esw_tsar_ix; + u32 bw_share; } qos; bool enabled; @@ -248,8 +256,8 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, int vport, bool spoofchk); int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, int vport_num, bool setting); -int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, - int vport, u32 max_rate); +int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, + u32 max_rate, u32 min_rate); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int vport, struct ifla_vf_info *ivi); int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, @@ -274,18 +282,12 @@ enum { #define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40 #define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80 -struct mlx5_encap_info { - __be32 daddr; - __be32 tun_id; - __be16 tp_dst; -}; - struct mlx5_encap_entry { struct hlist_node encap_hlist; struct list_head flows; u32 encap_id; struct neighbour *n; - struct mlx5_encap_info tun_info; + struct ip_tunnel_info tun_info; unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ struct net_device *out_dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 03293ed1cc22..348182599294 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -402,19 +402,18 @@ out: } #define MAX_PF_SQ 256 -#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */ #define ESW_OFFLOADS_NUM_GROUPS 4 static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int table_size, ix, esw_size, err = 0; struct mlx5_core_dev *dev = esw->dev; struct mlx5_flow_namespace *root_ns; struct mlx5_flow_table *fdb = NULL; struct mlx5_flow_group *g; u32 *flow_group_in; void *match_criteria; - int table_size, ix, err = 0; u32 flags = 0; flow_group_in = mlx5_vzalloc(inlen); @@ -427,15 +426,19 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) goto ns_err; } - esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n", - MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n", + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), + MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS); + + esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS, + 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) flags |= MLX5_FLOW_TABLE_TUNNEL_EN; fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, - ESW_OFFLOADS_NUM_ENTRIES, + esw_size, ESW_OFFLOADS_NUM_GROUPS, 0, flags); if (IS_ERR(fdb)) { diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index c4478ecd8056..b5253b59e8fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -473,10 +473,13 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev, int err; u32 *in; - if (size > MLX5_CAP_ESW(dev, max_encap_header_size)) + if (size > max_encap_size) { + mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n", + size, max_encap_size); return -EINVAL; + } - in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size, + in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size, GFP_KERNEL); if (!in) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0ac7a2fc916c..85ff4b843b4b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1665,7 +1665,7 @@ static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio, #define FLOW_TABLE_BIT_SZ 1 #define GET_FLOW_TABLE_CAP(dev, offset) \ - ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \ + ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \ offset / 32)) >> \ (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ) static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 5718aada6605..d0bbefa08af7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -91,6 +91,20 @@ out: } EXPORT_SYMBOL(mlx5_core_query_vendor_id); +static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev) +{ + return mlx5_query_pcam_reg(dev, dev->caps.pcam, + MLX5_PCAM_FEATURE_ENHANCED_FEATURES, + MLX5_PCAM_REGS_5000_TO_507F); +} + +static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev) +{ + return mlx5_query_mcam_reg(dev, dev->caps.mcam, + MLX5_MCAM_FEATURE_ENHANCED_FEATURES, + MLX5_MCAM_REGS_FIRST_128); +} + int mlx5_query_hca_caps(struct mlx5_core_dev *dev) { int err; @@ -154,6 +168,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } + if (MLX5_CAP_GEN(dev, pcam_reg)) + mlx5_get_pcam_reg(dev); + + if (MLX5_CAP_GEN(dev, mcam_reg)) + mlx5_get_mcam_reg(dev); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 5bcf93422ee0..d0515391d33b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -231,21 +231,6 @@ static const char *hsynd_str(u8 synd) } } -static u16 get_maj(u32 fw) -{ - return fw >> 28; -} - -static u16 get_min(u32 fw) -{ - return fw >> 16 & 0xfff; -} - -static u16 get_sub(u32 fw) -{ - return fw & 0xffff; -} - static void print_health_info(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; @@ -263,13 +248,14 @@ static void print_health_info(struct mlx5_core_dev *dev) dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr)); dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra)); - fw = ioread32be(&h->fw_ver); - sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw)); + sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str); dev_err(&dev->pdev->dev, "hw_id 
0x%08x\n", ioread32be(&h->hw_id)); dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index)); dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd))); dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); + fw = ioread32be(&h->fw_ver); + dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw); } static unsigned long get_next_poll_jiffies(void) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index cb7708b45bb1..84f7970c5080 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -418,11 +418,11 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, switch (cap_mode) { case HCA_CAP_OPMOD_GET_MAX: - memcpy(dev->hca_caps_max[cap_type], hca_caps, + memcpy(dev->caps.hca_max[cap_type], hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; case HCA_CAP_OPMOD_GET_CUR: - memcpy(dev->hca_caps_cur[cap_type], hca_caps, + memcpy(dev->caps.hca_cur[cap_type], hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; default: @@ -513,7 +513,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); - memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL], + memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL], MLX5_ST_SZ_BYTES(cmd_hca_cap)); mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", @@ -1217,7 +1217,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, { int err = 0; - mlx5_drain_health_wq(dev); + if (cleanup) + mlx5_drain_health_wq(dev); mutex_lock(&dev->intf_state_mutex); if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) { @@ -1339,9 +1340,7 @@ static int init_one(struct pci_dev *pdev, goto clean_health; } - err = request_module_nowait(MLX5_IB_MOD); - if (err) - pr_info("failed request module on %s\n", MLX5_IB_MOD); + request_module_nowait(MLX5_IB_MOD); err = devlink_register(devlink, &pdev->dev); if (err) @@ -1402,9 +1401,10 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, mlx5_enter_error_state(dev); mlx5_unload_one(dev, priv, false); - /* In case of kernel call save the pci state */ + /* In case of kernel call save the pci state and drain the health wq */ if (state) { pci_save_state(pdev); + mlx5_drain_health_wq(dev); mlx5_pci_disable_device(dev); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 74241e82de63..b3dabe6e8836 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -113,6 +113,11 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx); struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); void mlx5_cq_tasklet_cb(unsigned long data); +int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, + u8 access_reg_group); +int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group, + u8 access_reg_group); + void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_remove(struct mlx5_core_dev *dev); @@ -138,6 +143,11 @@ void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id); bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv); +int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size); +int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size); +int 
mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); +int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); + void mlx5e_init(void); void mlx5e_cleanup(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index d2ec9d232a70..969e352435ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -74,6 +74,30 @@ out: } EXPORT_SYMBOL_GPL(mlx5_core_access_reg); +int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, + u8 access_reg_group) +{ + u32 in[MLX5_ST_SZ_DW(pcam_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(pcam_reg); + + MLX5_SET(pcam_reg, in, feature_group, feature_group); + MLX5_SET(pcam_reg, in, access_reg_group, access_reg_group); + + return mlx5_core_access_reg(dev, in, sz, pcam, sz, MLX5_REG_PCAM, 0, 0); +} + +int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group, + u8 access_reg_group) +{ + u32 in[MLX5_ST_SZ_DW(mcam_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(mcam_reg); + + MLX5_SET(mcam_reg, in, feature_group, feature_group); + MLX5_SET(mcam_reg, in, access_reg_group, access_reg_group); + + return mlx5_core_access_reg(dev, in, sz, mcam, sz, MLX5_REG_MCAM, 0, 0); +} + struct mlx5_reg_pcap { u8 rsvd0; u8 port_num; @@ -866,3 +890,51 @@ void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) module_num, mlx5_pme_status[module_status - 1], mlx5_pme_error[error_type]); } + +int mlx5_query_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size) +{ + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps, + mtpps_size, MLX5_REG_MTPPS, 0, 0); +} + +int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size) +{ + u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + return mlx5_core_access_reg(mdev, mtpps, mtpps_size, out, + sizeof(out), MLX5_REG_MTPPS, 0, 1); +} + +int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode) +{ + u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + int err = 0; + + MLX5_SET(mtppse_reg, in, pin, pin); + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MTPPSE, 0, 0); + if (err) + return err; + + *arm = MLX5_GET(mtppse_reg, in, event_arm); + *mode = MLX5_GET(mtppse_reg, in, event_generation_mode); + + return err; +} + +int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode) +{ + u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + + MLX5_SET(mtppse_reg, in, pin, pin); + MLX5_SET(mtppse_reg, in, event_arm, arm); + MLX5_SET(mtppse_reg, in, event_generation_mode, mode); + + return mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MTPPSE, 0, 1); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 269e4401c342..b49cfc895c10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -127,6 +127,23 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline); +void mlx5_query_min_inline(struct mlx5_core_dev *mdev, + u8 *min_inline_mode) +{ + switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) { + case MLX5_CAP_INLINE_MODE_L2: + *min_inline_mode = MLX5_INLINE_MODE_L2; + break; + case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: + mlx5_query_nic_vport_min_inline(mdev, 0, 
min_inline_mode); + break; + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: + *min_inline_mode = MLX5_INLINE_MODE_NONE; + break; + } +} +EXPORT_SYMBOL_GPL(mlx5_query_min_inline); + int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index d147ddd97997..0af3338bfcb4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -209,21 +209,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1); /* pci_eqe_cmd_token * Command completion event - token */ -MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16); +MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16); /* pci_eqe_cmd_status * Command completion event - status */ -MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8); +MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8); /* pci_eqe_cmd_out_param_h * Command completion event - output parameter - higher part */ -MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32); +MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32); /* pci_eqe_cmd_out_param_l * Command completion event - output parameter - lower part */ -MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32); +MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1357fe04391b..9fb0316b3a30 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4965,6 +4965,46 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port, MLXSW_REG_MLCR_DURATION_MAX : 0); } +/* MPSC - Monitoring Packet Sampling Configuration Register + * -------------------------------------------------------- + * MPSC Register is used to configure the Packet Sampling mechanism. + */ +#define MLXSW_REG_MPSC_ID 0x9080 +#define MLXSW_REG_MPSC_LEN 0x1C + +MLXSW_REG_DEFINE(mpsc, MLXSW_REG_MPSC_ID, MLXSW_REG_MPSC_LEN); + +/* reg_mpsc_local_port + * Local port number + * Not supported for CPU port + * Access: Index + */ +MLXSW_ITEM32(reg, mpsc, local_port, 0x00, 16, 8); + +/* reg_mpsc_e + * Enable sampling on port local_port + * Access: RW + */ +MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1); + +#define MLXSW_REG_MPSC_RATE_MAX 3500000000UL + +/* reg_mpsc_rate + * Sampling rate = 1 out of rate packets (with randomization around + * the point). Valid values are: 1 to MLXSW_REG_MPSC_RATE_MAX + * Access: RW + */ +MLXSW_ITEM32(reg, mpsc, rate, 0x08, 0, 32); + +static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e, + u32 rate) +{ + MLXSW_REG_ZERO(mpsc, payload); + mlxsw_reg_mpsc_local_port_set(payload, local_port); + mlxsw_reg_mpsc_e_set(payload, e); + mlxsw_reg_mpsc_rate_set(payload, rate); +} + /* SBPR - Shared Buffer Pools Register * ----------------------------------- * The SBPR configures and retrieves the shared buffer pools and configuration. 
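
Each MLXSW_ITEM32() above describes a field by the byte offset of a big-endian 32-bit word in the register payload, a bit shift, and a width, and mlxsw_reg_mpsc_pack() merely zeroes the 0x1C-byte payload and sets the three MPSC fields. A simplified stand-in for the generated setter, reusing the MPSC offsets from above (the helper is illustrative, not the macro's actual expansion):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Write "val" into the (byte_off, shift, width) slot of a big-endian
 * payload, like an MLXSW_ITEM32()-generated setter would. */
static void item32_set(unsigned char *payload, unsigned int byte_off,
                       unsigned int shift, unsigned int width, uint32_t val)
{
        uint32_t mask = width == 32 ? 0xffffffffu : (1u << width) - 1;
        uint32_t word;

        memcpy(&word, payload + byte_off, 4);
        word = ntohl(word);
        word = (word & ~(mask << shift)) | ((val & mask) << shift);
        word = htonl(word);
        memcpy(payload + byte_off, &word, 4);
}

int main(void)
{
        unsigned char mpsc[0x1C] = { 0 };       /* like MLXSW_REG_ZERO */
        uint32_t w;
        int i;

        item32_set(mpsc, 0x00, 16, 8, 5);       /* local_port = 5 */
        item32_set(mpsc, 0x04, 30, 1, 1);       /* e = enable sampling */
        item32_set(mpsc, 0x08, 0, 32, 1024);    /* rate = 1 of 1024 packets */

        for (i = 0; i < 3; i++) {               /* 00050000 40000000 00000400 */
                memcpy(&w, mpsc + 4 * i, 4);
                printf("%08x\n", ntohl(w));
        }
        return 0;
}
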
@@ -5429,6 +5469,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mpat), MLXSW_REG(mpar), MLXSW_REG(mlcr), + MLXSW_REG(mpsc), MLXSW_REG(sbpr), MLXSW_REG(sbcm), MLXSW_REG(sbpm), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index d0e803f4f775..467aa5288935 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -57,6 +57,7 @@ #include <net/pkt_cls.h> #include <net/tc_act/tc_mirred.h> #include <net/netevent.h> +#include <net/tc_act/tc_sample.h> #include "spectrum.h" #include "pci.h" @@ -469,6 +470,16 @@ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from, mlxsw_sp_span_inspected_port_unbind(from, span_entry, type); } +static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool enable, u32 rate) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char mpsc_pl[MLXSW_REG_MPSC_LEN]; + + mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); +} + static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, bool is_up) { @@ -684,6 +695,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, dev_kfree_skb_any(skb_orig); return NETDEV_TX_OK; } + dev_consume_skb_any(skb_orig); } if (eth_skb_pad(skb)) { @@ -1217,6 +1229,51 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); } +static int +mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_matchall_offload *cls, + const struct tc_action *a, + bool ingress) +{ + int err; + + if (!mlxsw_sp_port->sample) + return -EOPNOTSUPP; + if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) { + netdev_err(mlxsw_sp_port->dev, "sample already active\n"); + return -EEXIST; + } + if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) { + netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n"); + return -EOPNOTSUPP; + } + + rcu_assign_pointer(mlxsw_sp_port->sample->psample_group, + tcf_sample_psample_group(a)); + mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a); + mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a); + mlxsw_sp_port->sample->rate = tcf_sample_rate(a); + + err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a)); + if (err) + goto err_port_sample_set; + return 0; + +err_port_sample_set: + RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); + return err; +} + +static void +mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) +{ + if (!mlxsw_sp_port->sample) + return; + + mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1); + RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); +} + static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, __be16 protocol, struct tc_cls_matchall_offload *cls, @@ -1247,6 +1304,10 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, mirror = &mall_tc_entry->mirror; err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, mirror, a, ingress); + } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) { + mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE; + err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls, + a, ingress); } else { err = -EOPNOTSUPP; } @@ -1280,6 +1341,9 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 
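
The matchall sample path above publishes the psample group pointer with rcu_assign_pointer(), and the rx listener further below snapshots it with rcu_dereference() under rcu_read_lock(), so sampling can be torn down while packets are still in flight. A self-contained sketch of that publish/snapshot discipline, using C11 atomics in place of kernel RCU; the driver's control-path exclusion actually comes from RTNL, so the compare-and-swap below is only a stand-in:

#include <stdatomic.h>
#include <stdio.h>

struct group { int num; };              /* stand-in for struct psample_group */

static _Atomic(struct group *) psample_group;

/* Control path: publish the group, refusing a second enable,
 * like the "sample already active" check above. */
static int sample_enable(struct group *g)
{
        struct group *expected = NULL;

        if (!atomic_compare_exchange_strong(&psample_group, &expected, g))
                return -1;
        return 0;
}

static void sample_disable(void)
{
        atomic_store(&psample_group, NULL);
}

/* Data path: one pointer snapshot, then use it, as the rx listener
 * does between rcu_read_lock() and rcu_read_unlock(). */
static void rx_listener(void)
{
        struct group *g = atomic_load_explicit(&psample_group,
                                               memory_order_acquire);

        if (!g)
                return;                 /* sampling was just turned off */
        printf("sample packet to group %d\n", g->num);
}

int main(void)
{
        struct group g = { .num = 1 };

        sample_enable(&g);
        rx_listener();
        sample_disable();
        rx_listener();                  /* quietly does nothing */
        return 0;
}
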
mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port, &mall_tc_entry->mirror); break; + case MLXSW_SP_PORT_MALL_SAMPLE: + mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port); + break; default: WARN_ON(1); } @@ -2258,6 +2322,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_alloc_stats; } + mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), + GFP_KERNEL); + if (!mlxsw_sp_port->sample) { + err = -ENOMEM; + goto err_alloc_sample; + } + mlxsw_sp_port->hw_stats.cache = kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL); @@ -2386,6 +2457,8 @@ err_dev_addr_init: err_port_swid_set: kfree(mlxsw_sp_port->hw_stats.cache); err_alloc_hw_stats: + kfree(mlxsw_sp_port->sample); +err_alloc_sample: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: kfree(mlxsw_sp_port->untagged_vlans); @@ -2432,6 +2505,7 @@ static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); kfree(mlxsw_sp_port->hw_stats.cache); + kfree(mlxsw_sp_port->sample); free_percpu(mlxsw_sp_port->pcpu_stats); kfree(mlxsw_sp_port->untagged_vlans); kfree(mlxsw_sp_port->active_vlans); @@ -2733,6 +2807,41 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); } +static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, + void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + struct psample_group *psample_group; + u32 size; + + if (unlikely(!mlxsw_sp_port)) { + dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", + local_port); + goto out; + } + if (unlikely(!mlxsw_sp_port->sample)) { + dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", + local_port); + goto out; + } + + size = mlxsw_sp_port->sample->truncate ? 
+ mlxsw_sp_port->sample->trunc_size : skb->len; + + rcu_read_lock(); + psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); + if (!psample_group) + goto out_unlock; + psample_sample_packet(psample_group, skb, size, + mlxsw_sp_port->dev->ifindex, 0, + mlxsw_sp_port->sample->rate); +out_unlock: + rcu_read_unlock(); +out: + consume_skb(skb); +} + #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ _is_ctrl, SP_##_trap_group, DISCARD) @@ -2768,6 +2877,9 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), + /* PKT Sample trap */ + MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, + false, SP_IP2ME, DISCARD) }; static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index cc1af19d699a..bc3efe1629c4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -46,6 +46,7 @@ #include <linux/dcbnl.h> #include <linux/in6.h> #include <linux/notifier.h> +#include <net/psample.h> #include "port.h" #include "core.h" @@ -229,6 +230,7 @@ struct mlxsw_sp_span_entry { enum mlxsw_sp_port_mall_action_type { MLXSW_SP_PORT_MALL_MIRROR, + MLXSW_SP_PORT_MALL_SAMPLE, }; struct mlxsw_sp_port_mall_mirror_tc_entry { @@ -315,6 +317,13 @@ struct mlxsw_sp_port_pcpu_stats { u32 tx_dropped; }; +struct mlxsw_sp_port_sample { + struct psample_group __rcu *psample_group; + u32 trunc_size; + u32 rate; + bool truncate; +}; + struct mlxsw_sp_port { struct net_device *dev; struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; @@ -361,6 +370,7 @@ struct mlxsw_sp_port { struct rtnl_link_stats64 *cache; struct delayed_work update_dw; } hw_stats; + struct mlxsw_sp_port_sample *sample; }; struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 01d0efa9c5c7..9e494a446b7e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1172,7 +1172,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, static int mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp) + struct mlxsw_sp_nexthop_group *nh_grp, + bool reallocate) { u32 adj_index = nh_grp->adj_index; /* base */ struct mlxsw_sp_nexthop *nh; @@ -1187,7 +1188,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, continue; } - if (nh->update) { + if (nh->update || reallocate) { err = mlxsw_sp_nexthop_mac_update(mlxsw_sp, adj_index, nh); if (err) @@ -1248,7 +1249,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, /* Nothing was added or removed, so no need to reallocate. Just * update MAC on existing adjacency indexes. 
*/ - err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); + err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, + false); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); goto set_trap; @@ -1276,7 +1278,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, nh_grp->adj_index_valid = 1; nh_grp->adj_index = adj_index; nh_grp->ecmp_size = ecmp_size; - err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); + err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); goto set_trap; diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 696d40612d28..169193ee3d4b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -345,6 +345,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, dev_kfree_skb_any(skb_orig); return NETDEV_TX_OK; } + dev_consume_skb_any(skb_orig); } mlxsw_sx_txhdr_construct(skb, &tx_info); /* TX header is consumed by HW on the way so we shouldn't count its diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 7ab275deacac..02ea48b15eb5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -54,6 +54,7 @@ enum { MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32, MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33, MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, + MLXSW_TRAP_ID_PKT_SAMPLE = 0x38, MLXSW_TRAP_ID_ARPBC = 0x50, MLXSW_TRAP_ID_ARPUC = 0x51, MLXSW_TRAP_ID_MTUERROR = 0x52, diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index e7e1aff40bd9..955d69a8e8d3 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -84,7 +84,6 @@ union ks8851_tx_hdr { * @rc_ier: Cached copy of KS_IER. * @rc_ccr: Cached copy of KS_CCR. * @rc_rxqcr: Cached copy of KS_RXQCR. - * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. * @vdd_reg: Optional regulator supplying the chip * @vdd_io: Optional digital power supply for IO @@ -120,7 +119,6 @@ struct ks8851_net { u16 rc_ier; u16 rc_rxqcr; u16 rc_ccr; - u16 eeprom_size; struct mii_if_info mii; struct ks8851_rxctrl rxctrl; @@ -1533,11 +1531,6 @@ static int ks8851_probe(struct spi_device *spi) /* cache the contents of the CCR register for EEPROM, etc. 
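
The dev_consume_skb_any() calls added to the mlxsw xmit paths above make the accounting explicit: the original skb was successfully replaced, not dropped, so drop-tracing tools keyed on kfree_skb do not report a false drop. A minimal xmit skeleton showing the split; mydrv_xmit_prepare() is a hypothetical helper that may reallocate the skb (headroom for a TX header, padding, and so on):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *mydrv_xmit_prepare(struct sk_buff *skb); /* hypothetical */

static netdev_tx_t mydrv_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct sk_buff *skb_orig = skb;

        skb = mydrv_xmit_prepare(skb);
        if (!skb) {
                dev_kfree_skb_any(skb_orig);    /* failure: a real drop */
                return NETDEV_TX_OK;
        }
        if (skb != skb_orig)
                dev_consume_skb_any(skb_orig);  /* replaced, not dropped */

        /* ... hand skb to the hardware queue ... */
        return NETDEV_TX_OK;
}
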
*/ ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR); - if (ks->rc_ccr & CCR_EEPROM) - ks->eeprom_size = 128; - else - ks->eeprom_size = 0; - ks8851_read_selftest(ks); ks8851_init_mac(ks); diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 9774b50cff6e..06c9f4100cb9 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -269,7 +269,7 @@ rx_next: } if (rx < budget) { - napi_complete(napi); + napi_complete_done(napi, rx); } priv->reg_imr |= RPKT_FINISH_M; @@ -436,7 +436,7 @@ static void moxart_mac_set_rx_mode(struct net_device *ndev) spin_unlock_irq(&priv->txlock); } -static struct net_device_ops moxart_netdev_ops = { +static const struct net_device_ops moxart_netdev_ops = { .ndo_open = moxart_mac_open, .ndo_stop = moxart_mac_stop, .ndo_start_xmit = moxart_mac_start_xmit, diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index db297cfce6f4..3d881176a353 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1678,7 +1678,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget) myri10ge_ss_unlock_napi(ss); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); put_be32(htonl(3), ss->irq_claim); } return work_done; diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 90eac63f9606..8e72679c015f 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -2265,7 +2265,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget) np->intr_status = readl(ioaddr + IntrStatus); } while (np->intr_status); - napi_complete(napi); + napi_complete_done(napi, work_done); /* Reenable interrupts providing nothing is trying to shut * the chip down. 
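
The napi_complete() to napi_complete_done() conversions running through the drivers above and below all have the same shape: when polling finishes under budget, report how much work was actually done. napi_complete(n) is simply napi_complete_done(n, 0), so passing the real count lets the core's GRO flushing and busy-poll/interrupt-deferral logic see how busy the queue was. A minimal poll skeleton, with hypothetical mydrv_* helpers standing in for a driver's rx-clean and irq-enable routines:

#include <linux/netdevice.h>

struct mydrv_priv {                     /* hypothetical driver private data */
        struct napi_struct napi;
        /* ... */
};

static int mydrv_rx_clean(struct mydrv_priv *priv, int budget);  /* hypothetical */
static void mydrv_irq_enable(struct mydrv_priv *priv);           /* hypothetical */

static int mydrv_poll(struct napi_struct *napi, int budget)
{
        struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
        int work_done = mydrv_rx_clean(priv, budget);

        if (work_done < budget) {
                /* Done for now: report the real count, then re-arm irqs */
                napi_complete_done(napi, work_done);
                mydrv_irq_enable(priv);
        }
        return work_done;
}
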
*/ diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 564f682fa4dc..203abcb0c252 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -2783,7 +2783,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget) s2io_chk_rx_buffers(nic, ring); if (pkts_processed < budget_org) { - napi_complete(napi); + napi_complete_done(napi, pkts_processed); /*Re Enable MSI-Rx Vector*/ addr = (u8 __iomem *)&bar0->xmsi_mask_reg; addr += 7 - ring->ring_no; @@ -2817,7 +2817,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget) break; } if (pkts_processed < budget_org) { - napi_complete(napi); + napi_complete_done(napi, pkts_processed); /* Re enable the Rx interrupts for the ring */ writeq(0, &bar0->rx_traffic_mask); readl(&bar0->rx_traffic_mask); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index f364502229db..6a4310af5d97 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -1823,8 +1823,8 @@ static int vxge_poll_msix(struct napi_struct *napi, int budget) vxge_hw_vpath_poll_rx(ring->handle); pkts_processed = ring->pkts_processed; - if (ring->pkts_processed < budget_org) { - napi_complete(napi); + if (pkts_processed < budget_org) { + napi_complete_done(napi, pkts_processed); /* Re enable the Rx interrupts for the vpath */ vxge_hw_channel_msix_unmask( @@ -1863,7 +1863,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget) VXGE_COMPLETE_ALL_TX(vdev); if (pkts_processed < budget_org) { - napi_complete(napi); + napi_complete_done(napi, pkts_processed); /* Re enable the Rx interrupts for the ring */ vxge_hw_device_unmask_all(hldev); vxge_hw_device_flush_io(hldev); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 67afd95ffb93..6ac43abf561b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -42,6 +42,7 @@ */ #include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> @@ -1459,7 +1460,7 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring, dev_kfree_skb_any(skb); } -static void +static bool nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, struct nfp_net_tx_ring *tx_ring, struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off, @@ -1473,13 +1474,13 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, if (unlikely(nfp_net_tx_full(tx_ring, 1))) { nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); - return; + return false; } new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr); if (unlikely(!new_frag)) { nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); - return; + return false; } nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr); @@ -1509,6 +1510,7 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, tx_ring->wr_p++; tx_ring->wr_ptr_add++; + return true; } static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len) @@ -1613,12 +1615,15 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) case XDP_PASS: break; case XDP_TX: - nfp_net_tx_xdp_buf(nn, rx_ring, tx_ring, rxbuf, - pkt_off, pkt_len); + if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring, + tx_ring, rxbuf, + pkt_off, pkt_len))) + 
trace_xdp_exception(nn->netdev, xdp_prog, act); continue; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(nn->netdev, xdp_prog, act); case XDP_DROP: nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index dfc2c8149d22..58ba5d3f9e5f 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -3749,7 +3749,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget) if (rx_work < budget) { /* re-enable interrupts (msix not enabled in napi) */ - napi_complete(napi); + napi_complete_done(napi, rx_work); writel(np->irqmask, base + NvRegIrqMask); } diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index dd6b0d0f7fa5..9c7ffd649e9a 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -999,7 +999,7 @@ static int lpc_eth_poll(struct napi_struct *napi, int budget) rx_done = __lpc_handle_recv(ndev, budget); if (rx_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_done); lpc_eth_enable_int(pldat->net_base); } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index d461f419948e..f9e4e8eca665 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2385,7 +2385,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) poll_end_flag = true; if (poll_end_flag) { - napi_complete(napi); + napi_complete_done(napi, work_done); pch_gbe_irq_enable(adapter); } diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index badfa1d562a4..49591d9c2e1b 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1575,7 +1575,7 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget) pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); if (pkts < budget) { /* all done, no more packets present */ - napi_complete(napi); + napi_complete_done(napi, pkts); pasemi_mac_restart_rx_intr(mac); pasemi_mac_restart_tx_intr(mac); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 86fb9d3df700..0cf8a3703275 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2396,7 +2396,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__NX_DEV_UP, &adapter->state)) netxen_nic_enable_int(sds_ring); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index c92a8506c1e1..7520eb34ad00 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1846,7 +1846,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, qed_fill_dev_info(cdev, &info->common); if (IS_VF(cdev)) - memset(info->common.hw_mac, 0, ETH_ALEN); + eth_zero_addr(info->common.hw_mac); return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 05e32f4322eb..02c5d47cfc6d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -320,7 +320,7 @@ static void 
qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) list_del(&p_pkt->list_entry); b_last_packet = list_empty(&p_tx->active_descq); list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); - if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { + if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) { struct qed_ooo_buffer *p_buffer; p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; @@ -332,7 +332,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; tx_frag = p_pkt->bds_set[0].tx_frag; - if (p_ll2_conn->gsi_enable) + if (p_ll2_conn->conn.gsi_enable) qed_ll2b_release_tx_gsi_packet(p_hwfn, p_ll2_conn-> my_id, @@ -401,7 +401,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) spin_unlock_irqrestore(&p_tx->lock, flags); tx_frag = p_pkt->bds_set[0].tx_frag; - if (p_ll2_conn->gsi_enable) + if (p_ll2_conn->conn.gsi_enable) qed_ll2b_complete_tx_gsi_packet(p_hwfn, p_ll2_conn->my_id, p_pkt->cookie, @@ -573,7 +573,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); - if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { + if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) { struct qed_ooo_buffer *p_buffer; p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; @@ -761,7 +761,7 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1, p_buffer->vlan, bd_flags, l4_hdr_offset_w, - p_ll2_conn->tx_dest, 0, + p_ll2_conn->conn.tx_dest, 0, first_frag, p_buffer->packet_length, p_buffer, true); @@ -881,7 +881,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, u16 buf_idx; int rc = 0; - if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO) return rc; if (!rx_num_ooo_buffers) @@ -924,7 +924,7 @@ static void qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { - if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO) return; qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); @@ -936,7 +936,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, { struct qed_ooo_buffer *p_buffer; - if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO) return; qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); @@ -968,23 +968,19 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev, { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; - struct qed_ll2_info *ll2_info; + struct qed_ll2_conn ll2_info; int rc; - ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL); - if (!ll2_info) - return -ENOMEM; - ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO; - ll2_info->mtu = params->mtu; - ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets; - ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping; - ll2_info->tx_tc = OOO_LB_TC; - ll2_info->tx_dest = CORE_TX_DEST_LB; - - rc = qed_ll2_acquire_connection(hwfn, ll2_info, + ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO; + ll2_info.mtu = params->mtu; + ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets; + ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping; + ll2_info.tx_tc = OOO_LB_TC; + ll2_info.tx_dest = CORE_TX_DEST_LB; + + rc = qed_ll2_acquire_connection(hwfn, &ll2_info, QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, 
handle); - kfree(ll2_info); if (rc) { DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n"); goto out; @@ -1029,7 +1025,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn, u8 action_on_error) { - enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type; + enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type; struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; struct core_rx_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -1055,7 +1051,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->sb_index = p_rx->rx_sb_index; p_ramrod->complete_event_flg = 1; - p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu); + p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu); DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr); cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain); @@ -1063,8 +1059,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, qed_chain_get_pbl_phys(&p_rx->rcq_chain)); - p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg; - p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en; + p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg; + p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en; p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0 : 1; @@ -1079,14 +1075,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, } p_ramrod->action_on_error.error_type = action_on_error; - p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable; + p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable; return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { - enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type; + enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type; struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct core_tx_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -1098,7 +1094,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) return 0; - if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) p_ll2_conn->tx_stats_en = 0; else p_ll2_conn->tx_stats_en = 1; @@ -1119,7 +1115,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); p_ramrod->sb_index = p_tx->tx_sb_index; - p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu); + p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu); p_ramrod->stats_en = p_ll2_conn->tx_stats_en; p_ramrod->stats_id = p_ll2_conn->tx_stats_id; @@ -1129,7 +1125,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->pbl_size = cpu_to_le16(pbl_size); memset(&pq_params, 0, sizeof(pq_params)); - pq_params.core.tc = p_ll2_conn->tx_tc; + pq_params.core.tc = p_ll2_conn->conn.tx_tc; pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); p_ramrod->qm_pq_id = cpu_to_le16(pq_id); @@ -1146,7 +1142,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); } - p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable; + p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable; return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -1247,7 +1243,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, 
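
The qed refactor running through these hunks splits the connection parameters out of qed_ll2_info into a plain qed_ll2_conn that is copied with a single struct assignment (p_ll2_info->conn = *p_params, just below), so a caller such as qed_ll2_start_ooo() above can fill a small stack variable instead of heap-allocating an entire qed_ll2_info only to pass configuration. A toy version of the shape, with illustrative stand-in types; field-by-field copies tend to drift as parameters are added, while the embedded struct keeps the copy correct by construction:

#include <stdio.h>
#include <string.h>

struct ll2_conn {                       /* like qed_ll2_conn: parameters only */
        int conn_type;
        unsigned short mtu;
        unsigned char gsi_enable;
};

struct ll2_info {                       /* like qed_ll2_info: runtime state */
        struct ll2_conn conn;           /* embedded parameter block */
        unsigned int cid;
        unsigned char queue_id;
};

static void acquire(struct ll2_info *info, const struct ll2_conn *params)
{
        info->conn = *params;           /* one assignment, no field list */
}

int main(void)
{
        struct ll2_conn params = { .conn_type = 2, .mtu = 1500 };
        struct ll2_info info;

        memset(&info, 0, sizeof(info));
        acquire(&info, &params);        /* params lived on the stack */
        printf("type %d mtu %u\n", info.conn.conn_type, info.conn.mtu);
        return 0;
}
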
DP_VERBOSE(p_hwfn, QED_MSG_LL2, "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n", - p_ll2_info->conn_type, rx_num_desc); + p_ll2_info->conn.conn_type, rx_num_desc); out: return rc; @@ -1285,7 +1281,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_LL2, "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", - p_ll2_info->conn_type, tx_num_desc); + p_ll2_info->conn.conn_type, tx_num_desc); out: if (rc) @@ -1296,7 +1292,7 @@ out: } int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_params, + struct qed_ll2_conn *p_params, u16 rx_num_desc, u16 tx_num_desc, u8 *p_connection_handle) @@ -1325,15 +1321,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, if (!p_ll2_info) return -EBUSY; - p_ll2_info->conn_type = p_params->conn_type; - p_ll2_info->mtu = p_params->mtu; - p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg; - p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en; - p_ll2_info->tx_tc = p_params->tx_tc; - p_ll2_info->tx_dest = p_params->tx_dest; - p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big; - p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf; - p_ll2_info->gsi_enable = p_params->gsi_enable; + p_ll2_info->conn = *p_params; rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc); if (rc) @@ -1394,9 +1382,9 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn, SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, - p_ll2_conn->ai_err_packet_too_big); + p_ll2_conn->conn.ai_err_packet_too_big); SET_FIELD(action_on_error, - CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf); + CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf); return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); } @@ -1623,7 +1611,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", p_ll2->queue_id, p_ll2->cid, - p_ll2->conn_type, + p_ll2->conn.conn_type, prod_idx, first_frag_len, num_of_bds, @@ -1699,7 +1687,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", p_ll2_conn->queue_id, - p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod); + p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod); } int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, @@ -1840,7 +1828,7 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) qed_ll2_rxq_flush(p_hwfn, connection_handle); } - if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); return rc; @@ -2016,7 +2004,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev, static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) { - struct qed_ll2_info ll2_info; + struct qed_ll2_conn ll2_info; struct qed_ll2_buffer *buffer, *tmp_buffer; enum qed_ll2_conn_type conn_type; struct qed_ptt *p_ptt; @@ -2064,6 +2052,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) /* Prepare the temporary ll2 information */ memset(&ll2_info, 0, sizeof(ll2_info)); + ll2_info.conn_type = conn_type; ll2_info.mtu = params->mtu; ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets; @@ -2143,7 +2132,6 @@ static int qed_ll2_start(struct 
qed_dev *cdev, struct qed_ll2_params *params) } ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address); - return 0; release_terminate_all: diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index c7f2975590ee..db3e4fc78e09 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -135,15 +135,8 @@ struct qed_ll2_tx_queue { bool b_completing_packet; }; -struct qed_ll2_info { - /* Lock protecting the state of LL2 */ - struct mutex mutex; +struct qed_ll2_conn { enum qed_ll2_conn_type conn_type; - u32 cid; - u8 my_id; - u8 queue_id; - u8 tx_stats_id; - bool b_active; u16 mtu; u8 rx_drop_ttl0_flg; u8 rx_vlan_removal_en; @@ -151,10 +144,21 @@ struct qed_ll2_info { enum core_tx_dest tx_dest; enum core_error_handle ai_err_packet_too_big; enum core_error_handle ai_err_no_buf; + u8 gsi_enable; +}; + +struct qed_ll2_info { + /* Lock protecting the state of LL2 */ + struct mutex mutex; + struct qed_ll2_conn conn; + u32 cid; + u8 my_id; + u8 queue_id; + u8 tx_stats_id; + bool b_active; u8 tx_stats_en; struct qed_ll2_rx_queue rx_queue; struct qed_ll2_tx_queue tx_queue; - u8 gsi_enable; }; /** @@ -172,7 +176,7 @@ struct qed_ll2_info { * @return 0 on success, failure otherwise */ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_params, + struct qed_ll2_conn *p_params, u16 rx_num_desc, u16 tx_num_desc, u8 *p_connection_handle); diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index bd4cad2b343b..c3c8c5018e93 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -2632,7 +2632,7 @@ static int qed_roce_ll2_start(struct qed_dev *cdev, { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_roce_ll2_info *roce_ll2; - struct qed_ll2_info ll2_params; + struct qed_ll2_conn ll2_params; int rc; if (!params) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index b1213643bbfd..3f4bf31f45e0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1224,7 +1224,7 @@ static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) return; /* Clear the VF mac */ - memset(vf_info->mac, 0, ETH_ALEN); + eth_zero_addr(vf_info->mac); vf_info->rx_accept_mode = 0; vf_info->tx_accept_mode = 0; @@ -2626,8 +2626,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { if (ether_addr_equal(p_vf->shadow_config.macs[i], p_params->mac)) { - memset(p_vf->shadow_config.macs[i], 0, - ETH_ALEN); + eth_zero_addr(p_vf->shadow_config.macs[i]); break; } } @@ -2640,7 +2639,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, } else if (p_params->opcode == QED_FILTER_REPLACE || p_params->opcode == QED_FILTER_FLUSH) { for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) - memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN); + eth_zero_addr(p_vf->shadow_config.macs[i]); } /* List the new MAC address */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 1a6ca4884fad..26848eed3bc1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -32,6 +32,7 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> +#include <linux/bpf_trace.h> #include <net/udp_tunnel.h> #include <linux/ip.h> #include 
<net/ipv6.h> @@ -1016,6 +1017,7 @@ static bool qede_rx_xdp(struct qede_dev *edev, /* We need the replacement buffer before transmit. */ if (qede_alloc_rx_buffer(rxq, true)) { qede_recycle_rx_bd_ring(rxq, 1); + trace_xdp_exception(edev->ndev, prog, act); return false; } @@ -1026,6 +1028,7 @@ static bool qede_rx_xdp(struct qede_dev *edev, dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(bd->data); + trace_xdp_exception(edev->ndev, prog, act); } /* Regardless, we've consumed an Rx BD */ @@ -1035,6 +1038,7 @@ static bool qede_rx_xdp(struct qede_dev *edev, default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(edev->ndev, prog, act); case XDP_DROP: qede_recycle_rx_bd_ring(rxq, cqe->bd_num); } @@ -1368,7 +1372,7 @@ int qede_poll(struct napi_struct *napi, int budget) qede_rx_int(fp, budget) : 0; if (rx_work_done < budget) { if (!qede_poll_is_more_work(fp)) { - napi_complete(napi); + napi_complete_done(napi, rx_work_done); /* Update and reenable interrupts */ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index fedd7366713c..84dd83031a1b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -975,7 +975,7 @@ static int qlcnic_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { qlcnic_enable_sds_intr(adapter, sds_ring); qlcnic_enable_tx_intr(adapter, tx_ring); @@ -1019,7 +1019,7 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget) work_done = qlcnic_process_rcv_ring(sds_ring, budget); if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -1966,7 +1966,7 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -1994,7 +1994,7 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -2032,7 +2032,7 @@ static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget) adapter = sds_ring->adapter; work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_enable_sds_intr(adapter, sds_ring); } diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 1409412ab39d..e9e647072596 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2334,7 +2334,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); ql_enable_completion_interrupt(qdev, rx_ring->irq); } return work_done; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 
0b4deb31e742..b991219862b1 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -103,14 +103,6 @@ #define RXEN 0x00000002 #define TXEN 0x00000001 - -/* EMAC_WOL_CTRL0 */ -#define LK_CHG_PME 0x20 -#define LK_CHG_EN 0x10 -#define MG_FRAME_PME 0x8 -#define MG_FRAME_EN 0x4 -#define WK_FRAME_EN 0x1 - /* EMAC_DESC_CTRL_3 */ #define RFD_RING_SIZE_BMSK 0xfff @@ -314,8 +306,6 @@ struct emac_skb_cb { RX_PKT_INT2 |\ RX_PKT_INT3) -#define EMAC_MAC_IRQ_RES "core0" - void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr) { u32 crc32, bit, reg, mta; @@ -558,7 +548,7 @@ void emac_mac_reset(struct emac_adapter *adpt) emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN); } -void emac_mac_start(struct emac_adapter *adpt) +static void emac_mac_start(struct emac_adapter *adpt) { struct phy_device *phydev = adpt->phydev; u32 mac, csr1; @@ -621,8 +611,6 @@ void emac_mac_start(struct emac_adapter *adpt) emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL, (HEADER_ENABLE | HEADER_CNT_EN), 0); - - emac_reg_update32(adpt->csr + EMAC_EMAC_WRAPPER_CSR2, 0, WOL_EN); } void emac_mac_stop(struct emac_adapter *adpt) @@ -963,12 +951,16 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt, static void emac_adjust_link(struct net_device *netdev) { struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_sgmii *sgmii = &adpt->phy; struct phy_device *phydev = netdev->phydev; - if (phydev->link) + if (phydev->link) { emac_mac_start(adpt); - else + sgmii->link_up(adpt); + } else { + sgmii->link_down(adpt); emac_mac_stop(adpt); + } phy_print_status(phydev); } @@ -977,40 +969,26 @@ static void emac_adjust_link(struct net_device *netdev) int emac_mac_up(struct emac_adapter *adpt) { struct net_device *netdev = adpt->netdev; - struct emac_irq *irq = &adpt->irq; int ret; emac_mac_rx_tx_ring_reset_all(adpt); emac_mac_config(adpt); - - ret = request_irq(irq->irq, emac_isr, 0, EMAC_MAC_IRQ_RES, irq); - if (ret) { - netdev_err(adpt->netdev, "could not request %s irq\n", - EMAC_MAC_IRQ_RES); - return ret; - } - emac_mac_rx_descs_refill(adpt, &adpt->rx_q); + adpt->phydev->irq = PHY_IGNORE_INTERRUPT; ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, PHY_INTERFACE_MODE_SGMII); if (ret) { netdev_err(adpt->netdev, "could not connect phy\n"); - free_irq(irq->irq, irq); return ret; } + phy_attached_print(adpt->phydev, NULL); + /* enable mac irq */ writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); - /* Enable pause frames. Without this feature, the EMAC has been shown - * to receive (and drop) frames with FCS errors at gigabit connections. 
- */ - adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - - adpt->phydev->irq = PHY_IGNORE_INTERRUPT; phy_start(adpt->phydev); napi_enable(&adpt->rx_q.napi); @@ -1036,7 +1014,6 @@ void emac_mac_down(struct emac_adapter *adpt) writel(DIS_INT, adpt->base + EMAC_INT_STATUS); writel(0, adpt->base + EMAC_INT_MASK); synchronize_irq(adpt->irq.irq); - free_irq(adpt->irq.irq, &adpt->irq); phy_disconnect(adpt->phydev); @@ -1213,7 +1190,6 @@ void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q, emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd), (bool)RRD_CVTAG(&rrd)); - netdev->last_rx = jiffies; (*num_pkts)++; } while (*num_pkts < max_pkts); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h index f3aa24dc4a29..5028fb4bec2b 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h @@ -230,7 +230,6 @@ struct emac_adapter; int emac_mac_up(struct emac_adapter *adpt); void emac_mac_down(struct emac_adapter *adpt); void emac_mac_reset(struct emac_adapter *adpt); -void emac_mac_start(struct emac_adapter *adpt); void emac_mac_stop(struct emac_adapter *adpt); void emac_mac_mode_config(struct emac_adapter *adpt); void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q, diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c index 99a14df28b96..441c19366489 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c @@ -22,8 +22,6 @@ #include <linux/acpi.h> #include "emac.h" #include "emac-mac.h" -#include "emac-phy.h" -#include "emac-sgmii.h" /* EMAC base register offsets */ #define EMAC_MDIO_CTRL 0x001414 @@ -201,6 +199,13 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt) else adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr); + /* of_phy_find_device() claims a reference to the phydev, + * so we do that here manually as well. When the driver + * later unloads, it can unilaterally drop the reference + * without worrying about ACPI vs DT. 
+ */ + if (adpt->phydev) + get_device(&adpt->phydev->mdio.dev); } else { struct device_node *phy_np; @@ -221,8 +226,5 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt) return -ENODEV; } - if (adpt->phydev->drv) - phy_attached_print(adpt->phydev, NULL); - return 0; } diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.h b/drivers/net/ethernet/qualcomm/emac/emac-phy.h index 49f3701a6dd7..c0c301c72129 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.h @@ -13,19 +13,6 @@ #ifndef _EMAC_PHY_H_ #define _EMAC_PHY_H_ -typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt); - -/** emac_phy - internal emac phy - * @base base address - * @digital per-lane digital block - * @initialize initialization function - */ -struct emac_phy { - void __iomem *base; - void __iomem *digital; - emac_sgmii_initialize initialize; -}; - struct emac_adapter; int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c index af690e1a6e7b..10de8d0d9a56 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c @@ -214,7 +214,7 @@ static const struct emac_reg_write tx_rx_setting[] = { int emac_sgmii_init_fsm9900(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; unsigned int i; emac_reg_write_all(phy->base, physical_coding_sublayer_programming, diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c index 5b8419498ef1..f62c215be779 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c @@ -174,7 +174,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = { int emac_sgmii_init_qdf2400(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; void __iomem *phy_regs = phy->base; void __iomem *laned = phy->digital; unsigned int i; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c index 6170200d7479..b9c0df7bdd15 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c @@ -167,7 +167,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = { int emac_sgmii_init_qdf2432(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; void __iomem *phy_regs = phy->base; void __iomem *laned = phy->digital; unsigned int i; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index bf722a9bb09d..040b28977ee7 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -25,7 +25,9 @@ #define EMAC_SGMII_PHY_SPEED_CFG1 0x0074 #define EMAC_SGMII_PHY_IRQ_CMD 0x00ac #define EMAC_SGMII_PHY_INTERRUPT_CLEAR 0x00b0 +#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x00b4 #define EMAC_SGMII_PHY_INTERRUPT_STATUS 0x00b8 +#define EMAC_SGMII_PHY_RX_CHK_STATUS 0x00d4 #define FORCE_AN_TX_CFG BIT(5) #define FORCE_AN_RX_CFG BIT(4) @@ -36,6 +38,8 @@ #define SPDMODE_100 BIT(0) #define SPDMODE_10 0 +#define CDR_ALIGN_DET BIT(6) + #define IRQ_GLOBAL_CLEAR 
BIT(0) #define DECODE_CODE_ERR BIT(7) @@ -44,52 +48,28 @@ #define SGMII_PHY_IRQ_CLR_WAIT_TIME 10 #define SGMII_PHY_INTERRUPT_ERR (DECODE_CODE_ERR | DECODE_DISP_ERR) +#define SGMII_ISR_MASK (SGMII_PHY_INTERRUPT_ERR) #define SERDES_START_WAIT_TIMES 100 -static int emac_sgmii_link_init(struct emac_adapter *adpt) +/* Initialize the SGMII link between the internal and external PHYs. */ +static void emac_sgmii_link_init(struct emac_adapter *adpt) { - struct phy_device *phydev = adpt->phydev; - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; u32 val; + /* Always use autonegotiation. It works no matter how the external + * PHY is configured. + */ val = readl(phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); - - if (phydev->autoneg == AUTONEG_ENABLE) { - val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG); - val |= AN_ENABLE; - writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); - } else { - u32 speed_cfg; - - switch (phydev->speed) { - case SPEED_10: - speed_cfg = SPDMODE_10; - break; - case SPEED_100: - speed_cfg = SPDMODE_100; - break; - case SPEED_1000: - speed_cfg = SPDMODE_1000; - break; - default: - return -EINVAL; - } - - if (phydev->duplex == DUPLEX_FULL) - speed_cfg |= DUPLEX_MODE; - - val &= ~AN_ENABLE; - writel(speed_cfg, phy->base + EMAC_SGMII_PHY_SPEED_CFG1); - writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); - } - - return 0; + val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG); + val |= AN_ENABLE; + writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); } static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; u32 status; writel_relaxed(irq_bits, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR); @@ -121,9 +101,54 @@ static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) return 0; } +/* The number of decode errors that triggers a reset */ +#define DECODE_ERROR_LIMIT 2 + +static irqreturn_t emac_sgmii_interrupt(int irq, void *data) +{ + struct emac_adapter *adpt = data; + struct emac_sgmii *phy = &adpt->phy; + u32 status; + + status = readl(phy->base + EMAC_SGMII_PHY_INTERRUPT_STATUS); + status &= SGMII_ISR_MASK; + if (!status) + return IRQ_HANDLED; + + /* If we get a decoding error and CDR is not locked, then try + * resetting the internal PHY. The internal PHY uses an embedded + * clock with Clock and Data Recovery (CDR) to recover the + * clock and data. + */ + if (status & SGMII_PHY_INTERRUPT_ERR) { + int count; + + /* The SGMII is capable of recovering from some decode + * errors automatically. However, if we get multiple + * decode errors in a row, then assume that something + * is wrong and reset the interface. + */ + count = atomic_inc_return(&phy->decode_error_count); + if (count == DECODE_ERROR_LIMIT) { + schedule_work(&adpt->work_thread); + atomic_set(&phy->decode_error_count, 0); + } + } else { + /* We only care about consecutive decode errors. 
*/ + atomic_set(&phy->decode_error_count, 0); + } + + if (emac_sgmii_irq_clear(adpt, status)) { + netdev_warn(adpt->netdev, "failed to clear SGMII interrupt\n"); + schedule_work(&adpt->work_thread); + } + + return IRQ_HANDLED; +} + static void emac_sgmii_reset_prepare(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; u32 val; /* Reset PHY */ @@ -145,12 +170,7 @@ void emac_sgmii_reset(struct emac_adapter *adpt) int ret; emac_sgmii_reset_prepare(adpt); - - ret = emac_sgmii_link_init(adpt); - if (ret) { - netdev_err(adpt->netdev, "unsupported link speed\n"); - return; - } + emac_sgmii_link_init(adpt); ret = adpt->phy.initialize(adpt); if (ret) @@ -159,6 +179,68 @@ void emac_sgmii_reset(struct emac_adapter *adpt) ret); } +static int emac_sgmii_open(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + int ret; + + if (sgmii->irq) { + /* Make sure interrupts are cleared and disabled first */ + ret = emac_sgmii_irq_clear(adpt, 0xff); + if (ret) + return ret; + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + + ret = request_irq(sgmii->irq, emac_sgmii_interrupt, 0, + "emac-sgmii", adpt); + if (ret) { + netdev_err(adpt->netdev, + "could not register handler for internal PHY\n"); + return ret; + } + } + + return 0; +} + +static int emac_sgmii_close(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + + /* Make sure interrupts are disabled */ + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + free_irq(sgmii->irq, adpt); + + return 0; +} + +/* The error interrupts are only valid after the link is up */ +static int emac_sgmii_link_up(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + int ret; + + /* Clear and enable interrupts */ + ret = emac_sgmii_irq_clear(adpt, 0xff); + if (ret) + return ret; + + writel(SGMII_ISR_MASK, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + + return 0; +} + +static int emac_sgmii_link_down(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + + /* Disable interrupts */ + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + synchronize_irq(sgmii->irq); + + return 0; +} + static int emac_sgmii_acpi_match(struct device *dev, void *data) { #ifdef CONFIG_ACPI @@ -169,7 +251,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data) {} }; const struct acpi_device_id *id = acpi_match_device(match_table, dev); - emac_sgmii_initialize *initialize = data; + emac_sgmii_function *initialize = data; if (id) { acpi_handle handle = ACPI_HANDLE(dev); @@ -217,7 +299,7 @@ static const struct of_device_id emac_sgmii_dt_match[] = { int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) { struct platform_device *sgmii_pdev = NULL; - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; struct resource *res; int ret; @@ -256,9 +338,14 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) goto error_put_device; } - phy->initialize = (emac_sgmii_initialize)match->data; + phy->initialize = (emac_sgmii_function)match->data; } + phy->open = emac_sgmii_open; + phy->close = emac_sgmii_close; + phy->link_up = emac_sgmii_link_up; + phy->link_down = emac_sgmii_link_down; + /* Base address is the first address */ res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0); if (!res) { @@ -286,7 +373,11 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) if (ret) goto error; - emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR); + 
emac_sgmii_link_init(adpt); + + ret = platform_get_irq(sgmii_pdev, 0); + if (ret > 0) + phy->irq = ret; /* We've remapped the addresses, so we don't need the device any * more. of_find_device_by_node() says we should release it. diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h index 80ed3dc3157a..e7c0c3b2baa4 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h @@ -16,6 +16,31 @@ struct emac_adapter; struct platform_device; +typedef int (*emac_sgmii_function)(struct emac_adapter *adpt); + +/** emac_sgmii - internal emac phy + * @base base address + * @digital per-lane digital block + * @irq the interrupt number + * @decode_error_count reference count of consecutive decode errors + * @initialize initialization function + * @open called when the driver is opened + * @close called when the driver is closed + * @link_up called when the link comes up + * @link_down called when the link comes down + */ +struct emac_sgmii { + void __iomem *base; + void __iomem *digital; + unsigned int irq; + atomic_t decode_error_count; + emac_sgmii_function initialize; + emac_sgmii_function open; + emac_sgmii_function close; + emac_sgmii_function link_up; + emac_sgmii_function link_down; +}; + int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt); void emac_sgmii_reset(struct emac_adapter *adpt); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 6ffe1920b6dd..3387c0a88746 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -129,7 +129,7 @@ static int emac_napi_rtx(struct napi_struct *napi, int budget) emac_mac_rx_process(adpt, rx_q, &work_done, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); irq->mask |= rx_q->intr; writel(irq->mask, adpt->base + EMAC_INT_MASK); @@ -256,22 +256,37 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu) static int emac_open(struct net_device *netdev) { struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_irq *irq = &adpt->irq; int ret; + ret = request_irq(irq->irq, emac_isr, 0, "emac-core0", irq); + if (ret) { + netdev_err(adpt->netdev, "could not request emac-core0 irq\n"); + return ret; + } + /* allocate rx/tx dma buffer & descriptors */ ret = emac_mac_rx_tx_rings_alloc_all(adpt); if (ret) { netdev_err(adpt->netdev, "error allocating rx/tx rings\n"); + free_irq(irq->irq, irq); return ret; } ret = emac_mac_up(adpt); if (ret) { emac_mac_rx_tx_rings_free_all(adpt); + free_irq(irq->irq, irq); return ret; } - emac_mac_start(adpt); + ret = adpt->phy.open(adpt); + if (ret) { + emac_mac_down(adpt); + emac_mac_rx_tx_rings_free_all(adpt); + free_irq(irq->irq, irq); + return ret; + } return 0; } @@ -283,9 +298,12 @@ static int emac_close(struct net_device *netdev) mutex_lock(&adpt->reset_lock); + adpt->phy.close(adpt); emac_mac_down(adpt); emac_mac_rx_tx_rings_free_all(adpt); + free_irq(adpt->irq.irq, &adpt->irq); + mutex_unlock(&adpt->reset_lock); return 0; @@ -602,7 +620,7 @@ static int emac_probe(struct platform_device *pdev) { struct net_device *netdev; struct emac_adapter *adpt; - struct emac_phy *phy; + struct emac_sgmii *phy; u16 devid, revid; u32 reg; int ret; @@ -636,6 +654,7 @@ static int emac_probe(struct platform_device *pdev) adpt->msg_enable = EMAC_MSG_DEFAULT; phy = &adpt->phy; + atomic_set(&phy->decode_error_count, 0); 
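A note on the interrupt handler added above: it deliberately tolerates isolated decode errors and only schedules recovery once DECODE_ERROR_LIMIT errors arrive back to back, clearing the run whenever a clean interrupt shows up. Below is a minimal userspace model of that counting pattern, using C11 atomics; the struct and function names (error_limiter, note_event) are illustrative, not part of the driver.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DECODE_ERROR_LIMIT 2	/* same threshold as the driver */

struct error_limiter {
	atomic_int consecutive;	/* run length of back-to-back errors */
};

/* Returns true when the caller should schedule recovery work: only
 * after DECODE_ERROR_LIMIT errors in a row, never for a lone glitch.
 */
static bool note_event(struct error_limiter *lim, bool is_error)
{
	if (!is_error) {
		/* A clean event breaks the run; start counting afresh. */
		atomic_store(&lim->consecutive, 0);
		return false;
	}
	/* fetch_add returns the old value, so +1 is the new run length;
	 * this mirrors the atomic_inc_return() == DECODE_ERROR_LIMIT
	 * comparison in the handler above.
	 */
	if (atomic_fetch_add(&lim->consecutive, 1) + 1 == DECODE_ERROR_LIMIT) {
		atomic_store(&lim->consecutive, 0);
		return true;
	}
	return false;
}

int main(void)
{
	struct error_limiter lim;
	bool events[] = { true, false, true, true, true };

	atomic_init(&lim.consecutive, 0);
	for (unsigned int i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		if (note_event(&lim, events[i]))
			printf("event %u: would schedule a reset\n", i);
	return 0;	/* prints only for event 3, the second error in a row */
}

Keying the reset on the run length rather than on every error is what lets the link ride out the occasional bit error that the CDR recovers from on its own.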
mutex_init(&adpt->reset_lock); spin_lock_init(&adpt->stats.lock); @@ -729,8 +748,7 @@ static int emac_probe(struct platform_device *pdev) err_undo_napi: netif_napi_del(&adpt->rx_q.napi); err_undo_mdiobus: - if (!has_acpi_companion(&pdev->dev)) - put_device(&adpt->phydev->mdio.dev); + put_device(&adpt->phydev->mdio.dev); mdiobus_unregister(adpt->mii_bus); err_undo_clocks: emac_clks_teardown(adpt); @@ -750,8 +768,7 @@ static int emac_remove(struct platform_device *pdev) emac_clks_teardown(adpt); - if (!has_acpi_companion(&pdev->dev)) - put_device(&adpt->phydev->mdio.dev); + put_device(&adpt->phydev->mdio.dev); mdiobus_unregister(adpt->mii_bus); free_netdev(netdev); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h index 4b8483cc2c7f..ef91dcc7f646 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac.h @@ -19,6 +19,7 @@ #include <linux/platform_device.h> #include "emac-mac.h" #include "emac-phy.h" +#include "emac-sgmii.h" /* EMAC base register offsets */ #define EMAC_DMA_MAS_CTRL 0x001400 @@ -166,10 +167,6 @@ enum emac_clk_id { #define EMAC_MAX_SETUP_LNK_CYCLE 100 -/* Wake On Lan */ -#define EMAC_WOL_PHY 0x00000001 /* PHY Status Change */ -#define EMAC_WOL_MAGIC 0x00000002 /* Magic Packet */ - struct emac_stats { /* rx */ u64 rx_ok; /* good packets */ @@ -291,7 +288,7 @@ struct emac_adapter { void __iomem *base; void __iomem *csr; - struct emac_phy phy; + struct emac_sgmii phy; struct emac_stats stats; struct emac_irq irq; @@ -330,7 +327,6 @@ struct emac_adapter { int emac_reinit_locked(struct emac_adapter *adpt); void emac_reg_update32(void __iomem *addr, u32 mask, u32 val); -irqreturn_t emac_isr(int irq, void *data); void emac_set_ethtool_ops(struct net_device *netdev); void emac_update_hw_stats(struct emac_adapter *adpt); diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index 570ed3bd3cbf..9bcd4aefc9c5 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -170,7 +170,7 @@ struct net_local { spinlock_t lock; struct net_device *next_module; struct timer_list timer; /* Media selection timer. */ - long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ + unsigned long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ int saved_tx_size; unsigned int tx_unit_busy:1; unsigned char re_tx, /* Number of packet retransmissions. */ @@ -668,11 +668,11 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) } num_tx_since_rx++; } else if (num_tx_since_rx > 8 && - time_after(jiffies, dev->last_rx + HZ)) { + time_after(jiffies, lp->last_rx_time + HZ)) { if (net_debug > 2) printk(KERN_DEBUG "%s: Missed packet? 
No Rx after %d Tx and " "%ld jiffies status %02x CMR1 %02x.\n", dev->name, - num_tx_since_rx, jiffies - dev->last_rx, status, + num_tx_since_rx, jiffies - lp->last_rx_time, status, (read_nibble(ioaddr, CMR1) >> 3) & 15); dev->stats.rx_missed_errors++; hardware_init(dev); @@ -789,7 +789,6 @@ static void net_rx(struct net_device *dev) read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - dev->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 6073f466f6e8..81f18a833527 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7583,7 +7583,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); rtl_irq_enable(tp, enable_mask); mmiowb(); diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index f1109661a533..0525bd696d5d 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -76,6 +76,7 @@ enum ravb_reg { CDAR20 = 0x0060, CDAR21 = 0x0064, ESR = 0x0088, + APSR = 0x008C, /* R-Car Gen3 only */ RCR = 0x0090, RQC0 = 0x0094, RQC1 = 0x0098, @@ -248,6 +249,15 @@ enum ESR_BIT { ESR_EIL = 0x00001000, }; +/* APSR */ +enum APSR_BIT { + APSR_MEMS = 0x00000002, + APSR_CMSW = 0x00000010, + APSR_DM = 0x00006000, /* Undocumented? */ + APSR_DM_RDM = 0x00002000, + APSR_DM_TDM = 0x00004000, +}; + /* RCR */ enum RCR_BIT { RCR_EFFS = 0x00000001, diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 92d7692c840d..8cfc4a54f2dc 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -31,6 +31,7 @@ #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/sys_soc.h> #include <asm/div64.h> @@ -179,6 +180,49 @@ static struct mdiobb_ops bb_ops = { .get_mdio_data = ravb_get_mdio_data, }; +/* Free TX skb function for AVB-IP */ +static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct net_device_stats *stats = &priv->stats[q]; + struct ravb_tx_desc *desc; + int free_num = 0; + int entry; + u32 size; + + for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { + bool txed; + + entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * + NUM_TX_DESC); + desc = &priv->tx_ring[q][entry]; + txed = desc->die_dt == DT_FEMPTY; + if (free_txed_only && !txed) + break; + /* Descriptor type must be checked before all other reads */ + dma_rmb(); + size = le16_to_cpu(desc->ds_tagl) & TX_DS; + /* Free the original skb. */ + if (priv->tx_skb[q][entry / NUM_TX_DESC]) { + dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), + size, DMA_TO_DEVICE); + /* Last packet descriptor? 
*/ + if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { + entry /= NUM_TX_DESC; + dev_kfree_skb_any(priv->tx_skb[q][entry]); + priv->tx_skb[q][entry] = NULL; + if (txed) + stats->tx_packets++; + } + free_num++; + } + if (txed) + stats->tx_bytes += size; + desc->die_dt = DT_EEMPTY; + } + return free_num; +} + /* Free skb's and DMA buffers for Ethernet AVB */ static void ravb_ring_free(struct net_device *ndev, int q) { @@ -194,19 +238,21 @@ static void ravb_ring_free(struct net_device *ndev, int q) kfree(priv->rx_skb[q]); priv->rx_skb[q] = NULL; - /* Free TX skb ringbuffer */ - if (priv->tx_skb[q]) { - for (i = 0; i < priv->num_tx_ring[q]; i++) - dev_kfree_skb(priv->tx_skb[q][i]); - } - kfree(priv->tx_skb[q]); - priv->tx_skb[q] = NULL; - /* Free aligned TX buffers */ kfree(priv->tx_align[q]); priv->tx_align[q] = NULL; if (priv->rx_ring[q]) { + for (i = 0; i < priv->num_rx_ring[q]; i++) { + struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; + + if (!dma_mapping_error(ndev->dev.parent, + le32_to_cpu(desc->dptr))) + dma_unmap_single(ndev->dev.parent, + le32_to_cpu(desc->dptr), + PKT_BUF_SZ, + DMA_FROM_DEVICE); + } ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], @@ -215,12 +261,20 @@ static void ravb_ring_free(struct net_device *ndev, int q) } if (priv->tx_ring[q]) { + ravb_tx_free(ndev, q, false); + ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] * NUM_TX_DESC + 1); dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], priv->tx_desc_dma[q]); priv->tx_ring[q] = NULL; } + + /* Free TX skb ringbuffer. + * SKBs are freed by ravb_tx_free() call above. + */ + kfree(priv->tx_skb[q]); + priv->tx_skb[q] = NULL; } /* Format skb and descriptor buffer for Ethernet AVB */ @@ -431,44 +485,6 @@ static int ravb_dmac_init(struct net_device *ndev) return 0; } -/* Free TX skb function for AVB-IP */ -static int ravb_tx_free(struct net_device *ndev, int q) -{ - struct ravb_private *priv = netdev_priv(ndev); - struct net_device_stats *stats = &priv->stats[q]; - struct ravb_tx_desc *desc; - int free_num = 0; - int entry; - u32 size; - - for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { - entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * - NUM_TX_DESC); - desc = &priv->tx_ring[q][entry]; - if (desc->die_dt != DT_FEMPTY) - break; - /* Descriptor type must be checked before all other reads */ - dma_rmb(); - size = le16_to_cpu(desc->ds_tagl) & TX_DS; - /* Free the original skb. */ - if (priv->tx_skb[q][entry / NUM_TX_DESC]) { - dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), - size, DMA_TO_DEVICE); - /* Last packet descriptor? 
*/ - if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { - entry /= NUM_TX_DESC; - dev_kfree_skb_any(priv->tx_skb[q][entry]); - priv->tx_skb[q][entry] = NULL; - stats->tx_packets++; - } - free_num++; - } - stats->tx_bytes += size; - desc->die_dt = DT_EEMPTY; - } - return free_num; -} - static void ravb_get_tx_tstamp(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); @@ -902,7 +918,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) spin_lock_irqsave(&priv->lock, flags); /* Clear TX interrupt */ ravb_write(ndev, ~mask, TIS); - ravb_tx_free(ndev, q); + ravb_tx_free(ndev, q, true); netif_wake_subqueue(ndev, q); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -926,14 +942,10 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Receive error message handling */ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; - if (priv->rx_over_errors != ndev->stats.rx_over_errors) { + if (priv->rx_over_errors != ndev->stats.rx_over_errors) ndev->stats.rx_over_errors = priv->rx_over_errors; - netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n"); - } - if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) { + if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; - netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n"); - } out: return budget - quota; } @@ -977,6 +989,11 @@ static void ravb_adjust_link(struct net_device *ndev) phy_print_status(phydev); } +static const struct soc_device_attribute r8a7795es10[] = { + { .soc_id = "r8a7795", .revision = "ES1.0", }, + { /* sentinel */ } +}; + /* PHY init function */ static int ravb_phy_init(struct net_device *ndev) { @@ -1012,10 +1029,10 @@ static int ravb_phy_init(struct net_device *ndev) goto err_deregister_fixed_link; } - /* This driver only support 10/100Mbit speeds on Gen3 + /* This driver only supports 10/100Mbit speeds on R-Car H3 ES1.0 * at this time. */ - if (priv->chip_id == RCAR_GEN3) { + if (soc_device_match(r8a7795es10)) { err = phy_set_max_speed(phydev, SPEED_100); if (err) { netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n"); @@ -1508,6 +1525,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + entry / NUM_TX_DESC * DPTR_ALIGN; len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; + /* Zero length DMA descriptors are problematic as they seem to + * terminate DMA transfers. Avoid them by simply using a length of + * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN. + * + * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of + * data by the call to skb_put_padto() above, this is safe with + * respect to both the length of the first DMA descriptor (len) + * overflowing the available data and the length of the second DMA + * descriptor (skb->len - len) being negative.
+ */ + if (len == 0) + len = DPTR_ALIGN; + memcpy(buffer, skb->data, len); dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); if (dma_mapping_error(ndev->dev.parent, dma_addr)) @@ -1558,7 +1588,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) priv->cur_tx[q] += NUM_TX_DESC; if (priv->cur_tx[q] - priv->dirty_tx[q] > - (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q)) + (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && + !ravb_tx_free(ndev, q, true)) netif_stop_subqueue(ndev, q); exit: @@ -1895,6 +1926,23 @@ static void ravb_set_config_mode(struct net_device *ndev) } } +/* Set tx and rx clock internal delay modes */ +static void ravb_set_delay_mode(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + int set = 0; + + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) + set |= APSR_DM_RDM; + + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) + set |= APSR_DM_TDM; + + ravb_modify(ndev, APSR, APSR_DM, set); +} + static int ravb_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -2007,6 +2055,9 @@ static int ravb_probe(struct platform_device *pdev) /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + if (priv->chip_id != RCAR_GEN2) + ravb_set_delay_mode(ndev); + /* Allocate descriptor base address table */ priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, @@ -2143,6 +2194,9 @@ static int __maybe_unused ravb_resume(struct device *dev) /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + if (priv->chip_id != RCAR_GEN2) + ravb_set_delay_mode(ndev); + /* Restore descriptor base address table */ ravb_write(ndev, priv->desc_bat_dma, DBAT); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 45a7a6ba7644..2f08d2735fe2 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -518,7 +518,14 @@ static struct sh_eth_cpu_data r7s72100_data = { .ecsr_value = ECSR_ICD, .ecsipr_value = ECSIPR_ICDIP, - .eesipr_value = 0xe77f009f, + .eesipr_value = EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP | + EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP | + EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -556,7 +563,14 @@ static struct sh_eth_cpu_data r8a7740_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -603,7 +617,12 @@ static struct sh_eth_cpu_data r8a777x_data = { .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = 
ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, - .eesipr_value = 0x01ff009f, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | @@ -626,7 +645,12 @@ static struct sh_eth_cpu_data r8a779x_data = { .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = 0x01ff009f, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | @@ -667,7 +691,12 @@ static struct sh_eth_cpu_data sh7724_data = { .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, - .eesipr_value = 0x01ff009f, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | @@ -702,7 +731,14 @@ static struct sh_eth_cpu_data sh7757_data = { .register_type = SH_ETH_REG_FAST_SH4, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | @@ -769,7 +805,14 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -800,7 +843,13 @@ static struct sh_eth_cpu_data sh7734_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | 
EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -830,7 +879,13 @@ static struct sh_eth_cpu_data sh7763_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -851,7 +906,14 @@ static struct sh_eth_cpu_data sh7763_data = { static struct sh_eth_cpu_data sh7619_data = { .register_type = SH_ETH_REG_FAST_SH3_SH2, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .apr = 1, .mpr = 1, @@ -862,7 +924,14 @@ static struct sh_eth_cpu_data sh7619_data = { static struct sh_eth_cpu_data sh771x_data = { .register_type = SH_ETH_REG_FAST_SH3_SH2, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tsu = 1, }; @@ -1547,10 +1616,10 @@ static void sh_eth_emac_interrupt(struct net_device *ndev) sh_eth_rcv_snd_disable(ndev); } else { /* Link Up */ - sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0); + sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0); /* clear int */ sh_eth_modify(ndev, ECSR, 0, 0); - sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, DMAC_M_ECI); + sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP); /* enable tx and rx */ sh_eth_rcv_snd_enable(ndev); } @@ -1652,7 +1721,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) * bit... 
*/ intr_enable = sh_eth_read(ndev, EESIPR); - intr_status &= intr_enable | DMAC_M_ECI; + intr_status &= intr_enable | EESIPR_ECIIP; if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI | cd->eesr_err_check)) ret = IRQ_HANDLED; @@ -3199,7 +3268,7 @@ static int sh_eth_wol_setup(struct net_device *ndev) /* Only allow ECI interrupts */ synchronize_irq(ndev->irq); napi_disable(&mdp->napi); - sh_eth_write(ndev, DMAC_M_ECI, EESIPR); + sh_eth_write(ndev, EESIPR_ECIIP, EESIPR); /* Enable MagicPacket */ sh_eth_modify(ndev, ECMR, 0, ECMR_MPDE); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a1bb8cc413dc..a6753ccba711 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -268,19 +268,35 @@ enum EESR_BIT { EESR_TFE | EESR_TDE) /* EESIPR */ -enum DMAC_IM_BIT { - DMAC_M_TWB = 0x40000000, DMAC_M_TABT = 0x04000000, - DMAC_M_RABT = 0x02000000, - DMAC_M_RFRMER = 0x01000000, DMAC_M_ADF = 0x00800000, - DMAC_M_ECI = 0x00400000, DMAC_M_FTC = 0x00200000, - DMAC_M_TDE = 0x00100000, DMAC_M_TFE = 0x00080000, - DMAC_M_FRC = 0x00040000, DMAC_M_RDE = 0x00020000, - DMAC_M_RFE = 0x00010000, DMAC_M_TINT4 = 0x00000800, - DMAC_M_TINT3 = 0x00000400, DMAC_M_TINT2 = 0x00000200, - DMAC_M_TINT1 = 0x00000100, DMAC_M_RINT8 = 0x00000080, - DMAC_M_RINT5 = 0x00000010, DMAC_M_RINT4 = 0x00000008, - DMAC_M_RINT3 = 0x00000004, DMAC_M_RINT2 = 0x00000002, - DMAC_M_RINT1 = 0x00000001, +enum EESIPR_BIT { + EESIPR_TWB1IP = 0x80000000, + EESIPR_TWBIP = 0x40000000, /* same as TWB0IP */ + EESIPR_TC1IP = 0x20000000, + EESIPR_TUCIP = 0x10000000, + EESIPR_ROCIP = 0x08000000, + EESIPR_TABTIP = 0x04000000, + EESIPR_RABTIP = 0x02000000, + EESIPR_RFCOFIP = 0x01000000, + EESIPR_ADEIP = 0x00800000, + EESIPR_ECIIP = 0x00400000, + EESIPR_FTCIP = 0x00200000, /* same as TC0IP */ + EESIPR_TDEIP = 0x00100000, + EESIPR_TFUFIP = 0x00080000, + EESIPR_FRIP = 0x00040000, + EESIPR_RDEIP = 0x00020000, + EESIPR_RFOFIP = 0x00010000, + EESIPR_CNDIP = 0x00000800, + EESIPR_DLCIP = 0x00000400, + EESIPR_CDIP = 0x00000200, + EESIPR_TROIP = 0x00000100, + EESIPR_RMAFIP = 0x00000080, + EESIPR_CEEFIP = 0x00000040, + EESIPR_CELFIP = 0x00000020, + EESIPR_RRFIP = 0x00000010, + EESIPR_RTLFIP = 0x00000008, + EESIPR_RTSFIP = 0x00000004, + EESIPR_PREIP = 0x00000002, + EESIPR_CERFIP = 0x00000001, }; /* Receive descriptor 0 bits */ diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 7c450b5a1138..0f63a44a955d 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2517,7 +2517,7 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget) } if (credits < budget) - napi_complete(napi); + napi_complete_done(napi, credits); rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 07074d9bc45d..d54490d3f7ad 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -1563,7 +1563,7 @@ static int sxgbe_poll(struct napi_struct *napi, int budget) work_done = sxgbe_rx(priv, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum); } diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 208e0048709b..0475f1831b92 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ 
b/drivers/net/ethernet/sfc/ef10.c @@ -60,15 +60,33 @@ struct efx_ef10_vlan { u16 vid; }; +enum efx_ef10_default_filters { + EFX_EF10_BCAST, + EFX_EF10_UCDEF, + EFX_EF10_MCDEF, + EFX_EF10_VXLAN4_UCDEF, + EFX_EF10_VXLAN4_MCDEF, + EFX_EF10_VXLAN6_UCDEF, + EFX_EF10_VXLAN6_MCDEF, + EFX_EF10_NVGRE4_UCDEF, + EFX_EF10_NVGRE4_MCDEF, + EFX_EF10_NVGRE6_UCDEF, + EFX_EF10_NVGRE6_MCDEF, + EFX_EF10_GENEVE4_UCDEF, + EFX_EF10_GENEVE4_MCDEF, + EFX_EF10_GENEVE6_UCDEF, + EFX_EF10_GENEVE6_MCDEF, + + EFX_EF10_NUM_DEFAULT_FILTERS +}; + /* Per-VLAN filters information */ struct efx_ef10_filter_vlan { struct list_head list; u16 vid; u16 uc[EFX_EF10_FILTER_DEV_UC_MAX]; u16 mc[EFX_EF10_FILTER_DEV_MC_MAX]; - u16 ucdef; - u16 bcast; - u16 mcdef; + u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS]; }; struct efx_ef10_dev_addr { @@ -78,7 +96,7 @@ struct efx_ef10_dev_addr { struct efx_ef10_filter_table { /* The MCDI match masks supported by this fw & hw, in order of priority */ u32 rx_match_mcdi_flags[ - MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM]; + MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2]; unsigned int rx_match_count; struct { @@ -197,11 +215,15 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx) nic_data->datapath_caps = MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); - if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) + if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) { nic_data->datapath_caps2 = MCDI_DWORD(outbuf, GET_CAPABILITIES_V2_OUT_FLAGS2); - else + nic_data->piobuf_size = MCDI_WORD(outbuf, + GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF); + } else { nic_data->datapath_caps2 = 0; + nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE; + } /* record the DPCPU firmware IDs to determine VEB vswitching support. */ @@ -823,8 +845,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx) offset = ((efx->tx_channel_offset + efx->n_tx_channels - tx_queue->channel->channel - 1) * efx_piobuf_size); - index = offset / ER_DZ_TX_PIOBUF_SIZE; - offset = offset % ER_DZ_TX_PIOBUF_SIZE; + index = offset / nic_data->piobuf_size; + offset = offset % nic_data->piobuf_size; /* When the host page size is 4K, the first * host page in the WC mapping may be within @@ -1159,14 +1181,20 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) * functions of the controller. */ if (efx_piobuf_size != 0 && - ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= + nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= efx->n_tx_channels) { unsigned int n_piobufs = DIV_ROUND_UP(efx->n_tx_channels, - ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size); + nic_data->piobuf_size / efx_piobuf_size); rc = efx_ef10_alloc_piobufs(efx, n_piobufs); - if (rc) + if (rc == -ENOSPC) + netif_dbg(efx, probe, efx->net_dev, + "out of PIO buffers; cannot allocate more\n"); + else if (rc == -EPERM) + netif_dbg(efx, probe, efx->net_dev, + "not permitted to allocate PIO buffers\n"); + else if (rc) netif_err(efx, probe, efx->net_dev, "failed to allocate PIO buffers (%d)\n", rc); else @@ -1313,15 +1341,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx) efx_ef10_free_piobufs(efx); } - /* Log an error on failure, but this is non-fatal */ - if (rc) + /* Log an error on failure, but this is non-fatal. + * Permission errors are less important - we've presumably + * had the PIO buffer licence removed. 
+ */ + if (rc == -EPERM) + netif_dbg(efx, drv, efx->net_dev, + "not permitted to restore PIO buffers\n"); + else if (rc) netif_err(efx, drv, efx->net_dev, "failed to restore PIO buffers (%d)\n", rc); nic_data->must_restore_piobufs = false; } /* don't fail init if RSS setup doesn't work */ - rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table); + rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL); efx->rss_active = (rc == 0); return 0; @@ -2358,7 +2392,11 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) /* Create TX descriptor ring entry */ if (buffer->flags & EFX_TX_BUF_OPTION) { *txd = buffer->option; + if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1) + /* PIO descriptor */ + tx_queue->packet_write_count = tx_queue->write_count; } else { + tx_queue->packet_write_count = tx_queue->write_count; BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); EFX_POPULATE_QWORD_3( *txd, @@ -2527,7 +2565,7 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) } static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, - const u32 *rx_indir_table) + const u32 *rx_indir_table, const u8 *key) { MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); @@ -2538,6 +2576,11 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); + /* This iterates over the length of efx->rx_indir_table, but copies + * bytes from rx_indir_table. That's because the latter is a pointer + * rather than an array, but should have the same length. + * The efx->rx_hash_key loop below is similar. + */ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) MCDI_PTR(tablebuf, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = @@ -2553,8 +2596,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) - MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = - efx->rx_hash_key[i]; + MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i]; return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, sizeof(keybuf), NULL, 0, NULL); @@ -2587,7 +2629,8 @@ static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, } static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, - const u32 *rx_indir_table) + const u32 *rx_indir_table, + const u8 *key) { struct efx_ef10_nic_data *nic_data = efx->nic_data; int rc; @@ -2606,7 +2649,7 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, } rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context, - rx_indir_table); + rx_indir_table, key); if (rc != 0) goto fail2; @@ -2617,6 +2660,9 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, if (rx_indir_table != efx->rx_indir_table) memcpy(efx->rx_indir_table, rx_indir_table, sizeof(efx->rx_indir_table)); + if (key != efx->rx_hash_key) + memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size); + return 0; fail2: @@ -2627,15 +2673,69 @@ fail1: return rc; } +static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN); + MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN); + MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN); 
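Alongside the push path gaining a key argument, the new efx_ef10_rx_pull_rss_config() reads the live indirection table and Toeplitz key back from the MC, so the driver's cached copy cannot silently go stale after a reset or a firmware-side change. For readers unfamiliar with what that state does, the sketch below shows the generic mechanism it configures: standard Toeplitz-hash-plus-indirection-table receive-queue selection. This is not Solarflare's firmware code; the 40-byte key and 128-entry table are merely typical sizes, and all names here are illustrative (the efx arrays define the real lengths).

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define KEY_LEN		40	/* bytes of RSS hash key (typical) */
#define INDIR_LEN	128	/* indirection table entries (typical) */

/* Bit i of the key, most-significant bit first, wrapping for safety. */
static uint32_t key_bit(const uint8_t *key, size_t bitpos)
{
	bitpos %= KEY_LEN * 8;
	return (key[bitpos / 8] >> (7 - bitpos % 8)) & 1;
}

/* Standard Toeplitz hash: for every set bit i of the input, XOR in the
 * 32-bit window of key bits starting at bit position i.
 */
static uint32_t toeplitz_hash(const uint8_t *key, const uint8_t *data,
			      size_t len)
{
	uint32_t hash = 0;

	for (size_t i = 0; i < len * 8; i++) {
		if ((data[i / 8] >> (7 - i % 8)) & 1) {
			uint32_t window = 0;

			for (size_t b = 0; b < 32; b++)
				window = (window << 1) | key_bit(key, i + b);
			hash ^= window;
		}
	}
	return hash;
}

int main(void)
{
	uint8_t key[KEY_LEN] = { 0x6d, 0x5a, 0x56, 0xda };	/* demo key */
	uint32_t indir[INDIR_LEN];
	/* IPv4 4-tuple in network byte order: src, dst, sport, dport */
	const uint8_t tuple[12] = { 192, 0, 2, 1, 198, 51, 100, 7,
				    0x04, 0xd2, 0x00, 0x50 };
	uint32_t hash;

	/* Spread flows round-robin over four RX queues, in the spirit of
	 * rss_spread in the driver above.
	 */
	for (unsigned int i = 0; i < INDIR_LEN; i++)
		indir[i] = i % 4;

	hash = toeplitz_hash(key, tuple, sizeof(tuple));
	printf("hash %08x -> RX queue %u\n", (unsigned int)hash,
	       (unsigned int)indir[hash % INDIR_LEN]);
	return 0;
}

Because the hash depends on the key and the queue on the table, both halves of this state must round-trip: pushing without being able to pull back would leave software reporting a configuration the NIC may no longer be using.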
+ size_t outlen; + int rc, i; + + BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != + MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); + + if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) + return -ENOENT; + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, + nic_data->rx_rss_context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), + tablebuf, sizeof(tablebuf), &outlen); + if (rc != 0) + return rc; + + if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN)) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) + efx->rx_indir_table[i] = MCDI_PTR(tablebuf, + RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, + nic_data->rx_rss_context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != + MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), + keybuf, sizeof(keybuf), &outlen); + if (rc != 0) + return rc; + + if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN)) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) + efx->rx_hash_key[i] = MCDI_PTR( + keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i]; + + return 0; +} + static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, - const u32 *rx_indir_table) + const u32 *rx_indir_table, + const u8 *key) { int rc; if (efx->rss_spread == 1) return 0; - rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table); + if (!key) + key = efx->rx_hash_key; + + rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key); if (rc == -ENOBUFS && !user) { unsigned context_size; @@ -2673,6 +2773,8 @@ static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, const u32 *rx_indir_table + __attribute__ ((unused)), + const u8 *key __attribute__ ((unused))) { struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -3508,6 +3610,104 @@ efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, table->entry[filter_idx].spec = (unsigned long)spec | flags; } +static void +efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx, + const struct efx_filter_spec *spec, + efx_dword_t *inbuf) +{ + enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); + u32 match_fields = 0, uc_match, mc_match; + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + efx_ef10_filter_is_exclusive(spec) ? + MC_CMD_FILTER_OP_IN_OP_INSERT : + MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); + + /* Convert match flags and values. Unlike almost + * everything else in MCDI, these fields are in + * network byte order. + */ +#define COPY_VALUE(value, mcdi_field) \ + do { \ + match_fields |= \ + 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ + mcdi_field ## _LBN; \ + BUILD_BUG_ON( \ + MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ + sizeof(value)); \ + memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ + &value, sizeof(value)); \ + } while (0) +#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ + if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ + COPY_VALUE(spec->gen_field, mcdi_field); \ + } + /* Handle encap filters first. 
They will always be mismatch + * (unknown UC or MC) filters + */ + if (encap_type) { + /* ether_type and outer_ip_proto need to be variables + * because COPY_VALUE wants to memcpy them + */ + __be16 ether_type = + htons(encap_type & EFX_ENCAP_FLAG_IPV6 ? + ETH_P_IPV6 : ETH_P_IP); + u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE; + u8 outer_ip_proto; + + switch (encap_type & EFX_ENCAP_TYPES_MASK) { + case EFX_ENCAP_TYPE_VXLAN: + vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN; + /* fallthrough */ + case EFX_ENCAP_TYPE_GENEVE: + COPY_VALUE(ether_type, ETHER_TYPE); + outer_ip_proto = IPPROTO_UDP; + COPY_VALUE(outer_ip_proto, IP_PROTO); + /* We always need to set the type field, even + * though we're not matching on the TNI. + */ + MCDI_POPULATE_DWORD_1(inbuf, + FILTER_OP_EXT_IN_VNI_OR_VSID, + FILTER_OP_EXT_IN_VNI_TYPE, + vni_type); + break; + case EFX_ENCAP_TYPE_NVGRE: + COPY_VALUE(ether_type, ETHER_TYPE); + outer_ip_proto = IPPROTO_GRE; + COPY_VALUE(outer_ip_proto, IP_PROTO); + break; + default: + WARN_ON(1); + } + + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; + } else { + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; + } + + if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) + match_fields |= + is_multicast_ether_addr(spec->loc_mac) ? + 1 << mc_match : + 1 << uc_match; + COPY_FIELD(REM_HOST, rem_host, SRC_IP); + COPY_FIELD(LOC_HOST, loc_host, DST_IP); + COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); + COPY_FIELD(REM_PORT, rem_port, SRC_PORT); + COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); + COPY_FIELD(LOC_PORT, loc_port, DST_PORT); + COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); + COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); + COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); + COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); +#undef COPY_FIELD +#undef COPY_VALUE + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, + match_fields); +} + static void efx_ef10_filter_push_prep(struct efx_nic *efx, const struct efx_filter_spec *spec, efx_dword_t *inbuf, u64 handle, @@ -3516,7 +3716,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, struct efx_ef10_nic_data *nic_data = efx->nic_data; u32 flags = spec->flags; - memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); + memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN); /* Remove RSS flag if we don't have an RSS context. */ if (flags & EFX_FILTER_FLAG_RX_RSS && @@ -3529,46 +3729,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, MC_CMD_FILTER_OP_IN_OP_REPLACE); MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); } else { - u32 match_fields = 0; - - MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, - efx_ef10_filter_is_exclusive(spec) ? - MC_CMD_FILTER_OP_IN_OP_INSERT : - MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); - - /* Convert match flags and values. Unlike almost - * everything else in MCDI, these fields are in - * network byte order. - */ - if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) - match_fields |= - is_multicast_ether_addr(spec->loc_mac) ? 
- 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN : - 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN; -#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ - if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ - match_fields |= \ - 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ - mcdi_field ## _LBN; \ - BUILD_BUG_ON( \ - MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ - sizeof(spec->gen_field)); \ - memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ - &spec->gen_field, sizeof(spec->gen_field)); \ - } - COPY_FIELD(REM_HOST, rem_host, SRC_IP); - COPY_FIELD(LOC_HOST, loc_host, DST_IP); - COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); - COPY_FIELD(REM_PORT, rem_port, SRC_PORT); - COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); - COPY_FIELD(LOC_PORT, loc_port, DST_PORT); - COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); - COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); - COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); - COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); -#undef COPY_FIELD - MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, - match_fields); + efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf); } MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id); @@ -3597,8 +3758,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx, const struct efx_filter_spec *spec, u64 *handle, bool replacing) { - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); int rc; efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); @@ -3613,37 +3774,58 @@ static int efx_ef10_filter_push(struct efx_nic *efx, static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec) { + enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); unsigned int match_flags = spec->match_flags; + unsigned int uc_match, mc_match; u32 mcdi_flags = 0; - if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { - match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; - mcdi_flags |= - is_multicast_ether_addr(spec->loc_mac) ? - (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) : - (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN); - } - -#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) { \ - unsigned int old_match_flags = match_flags; \ +#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \ + unsigned int old_match_flags = match_flags; \ match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ if (match_flags != old_match_flags) \ mcdi_flags |= \ - (1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ - mcdi_field ## _LBN); \ + (1 << ((encap) ? 
\ + MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \ + mcdi_field ## _LBN : \ + MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\ + mcdi_field ## _LBN)); \ } - MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP); - MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP); - MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC); - MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT); - MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC); - MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT); - MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE); - MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN); - MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN); - MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO); + /* inner or outer based on encap type */ + MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type); + MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type); + MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type); + MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type); + MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type); + /* always outer */ + MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false); + MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false); #undef MAP_FILTER_TO_MCDI_FLAG + /* special handling for encap type, and mismatch */ + if (encap_type) { + match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE; + mcdi_flags |= + (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); + mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); + + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; + } else { + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; + } + + if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { + match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; + mcdi_flags |= + is_multicast_ether_addr(spec->loc_mac) ? + 1 << mc_match : + 1 << uc_match; + } + /* Did we map them all? 
*/ WARN_ON_ONCE(match_flags); @@ -4303,29 +4485,54 @@ efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, #endif /* CONFIG_RFS_ACCEL */ -static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags) +static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags) { int match_flags = 0; -#define MAP_FLAG(gen_flag, mcdi_field) { \ +#define MAP_FLAG(gen_flag, mcdi_field) do { \ u32 old_mcdi_flags = mcdi_flags; \ - mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ - mcdi_field ## _LBN); \ + mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \ + mcdi_field ## _LBN); \ if (mcdi_flags != old_mcdi_flags) \ match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ + } while (0) + + if (encap) { + /* encap filters must specify encap type */ + match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + /* and imply ethertype and ip proto */ + mcdi_flags &= + ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); + mcdi_flags &= + ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); + /* VLAN tags refer to the outer packet */ + MAP_FLAG(INNER_VID, INNER_VLAN); + MAP_FLAG(OUTER_VID, OUTER_VLAN); + /* everything else refers to the inner packet */ + MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST); + MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST); + MAP_FLAG(REM_HOST, IFRM_SRC_IP); + MAP_FLAG(LOC_HOST, IFRM_DST_IP); + MAP_FLAG(REM_MAC, IFRM_SRC_MAC); + MAP_FLAG(REM_PORT, IFRM_SRC_PORT); + MAP_FLAG(LOC_MAC, IFRM_DST_MAC); + MAP_FLAG(LOC_PORT, IFRM_DST_PORT); + MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE); + MAP_FLAG(IP_PROTO, IFRM_IP_PROTO); + } else { + MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); + MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); + MAP_FLAG(REM_HOST, SRC_IP); + MAP_FLAG(LOC_HOST, DST_IP); + MAP_FLAG(REM_MAC, SRC_MAC); + MAP_FLAG(REM_PORT, SRC_PORT); + MAP_FLAG(LOC_MAC, DST_MAC); + MAP_FLAG(LOC_PORT, DST_PORT); + MAP_FLAG(ETHER_TYPE, ETHER_TYPE); + MAP_FLAG(INNER_VID, INNER_VLAN); + MAP_FLAG(OUTER_VID, OUTER_VLAN); + MAP_FLAG(IP_PROTO, IP_PROTO); } - MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); - MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); - MAP_FLAG(REM_HOST, SRC_IP); - MAP_FLAG(LOC_HOST, DST_IP); - MAP_FLAG(REM_MAC, SRC_MAC); - MAP_FLAG(REM_PORT, SRC_PORT); - MAP_FLAG(LOC_MAC, DST_MAC); - MAP_FLAG(LOC_PORT, DST_PORT); - MAP_FLAG(ETHER_TYPE, ETHER_TYPE); - MAP_FLAG(INNER_VID, INNER_VLAN); - MAP_FLAG(OUTER_VID, OUTER_VLAN); - MAP_FLAG(IP_PROTO, IP_PROTO); #undef MAP_FLAG /* Did we map them all? 
*/ @@ -4352,6 +4559,7 @@ static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx) } static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, + bool encap, enum efx_filter_match_flags match_flags) { unsigned int match_pri; @@ -4360,7 +4568,7 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, for (match_pri = 0; match_pri < table->rx_match_count; match_pri++) { - mf = efx_ef10_filter_match_flags_from_mcdi( + mf = efx_ef10_filter_match_flags_from_mcdi(encap, table->rx_match_mcdi_flags[match_pri]); if (mf == match_flags) return true; @@ -4369,39 +4577,30 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, return false; } -static int efx_ef10_filter_table_probe(struct efx_nic *efx) +static int +efx_ef10_filter_table_probe_matches(struct efx_nic *efx, + struct efx_ef10_filter_table *table, + bool encap) { MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); - struct efx_ef10_nic_data *nic_data = efx->nic_data; - struct net_device *net_dev = efx->net_dev; unsigned int pd_match_pri, pd_match_count; - struct efx_ef10_filter_table *table; - struct efx_ef10_vlan *vlan; size_t outlen; int rc; - if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) - return -EINVAL; - - if (efx->filter_state) /* already probed */ - return 0; - - table = kzalloc(sizeof(*table), GFP_KERNEL); - if (!table) - return -ENOMEM; - /* Find out which RX filter types are supported, and their priorities */ MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, + encap ? + MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES : MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) - goto fail; + return rc; + pd_match_count = MCDI_VAR_ARRAY_LEN( outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); - table->rx_match_count = 0; for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { u32 mcdi_flags = @@ -4409,7 +4608,7 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) outbuf, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, pd_match_pri); - rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags); + rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags); if (rc < 0) { netif_dbg(efx, probe, efx->net_dev, "%s: fw flags %#x pri %u not supported in driver\n", @@ -4424,10 +4623,40 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) } } + return 0; +} + +static int efx_ef10_filter_table_probe(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct net_device *net_dev = efx->net_dev; + struct efx_ef10_filter_table *table; + struct efx_ef10_vlan *vlan; + int rc; + + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return -EINVAL; + + if (efx->filter_state) /* already probed */ + return 0; + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + table->rx_match_count = 0; + rc = efx_ef10_filter_table_probe_matches(efx, table, false); + if (rc) + goto fail; + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) + rc = efx_ef10_filter_table_probe_matches(efx, table, true); + if (rc) + goto fail; if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) && - !(efx_ef10_filter_match_supported(table, + !(efx_ef10_filter_match_supported(table, false, (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) && - 
efx_ef10_filter_match_supported(table, + efx_ef10_filter_match_supported(table, false, (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) { netif_info(efx, probe, net_dev, "VLAN filters are not supported in this firmware variant\n"); @@ -4473,10 +4702,13 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int invalid_filters = 0, failed = 0; + struct efx_ef10_filter_vlan *vlan; struct efx_filter_spec *spec; unsigned int filter_idx; - bool failed = false; - int rc; + u32 mcdi_flags; + int match_pri; + int rc, i; WARN_ON(!rwsem_is_locked(&efx->filter_sem)); @@ -4493,6 +4725,20 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) if (!spec) continue; + mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); + match_pri = 0; + while (match_pri < table->rx_match_count && + table->rx_match_mcdi_flags[match_pri] != mcdi_flags) + ++match_pri; + if (match_pri >= table->rx_match_count) { + invalid_filters++; + goto not_restored; + } + if (spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT && + spec->rss_context != nic_data->rx_rss_context) + netif_warn(efx, drv, efx->net_dev, + "Warning: unable to restore a filter with specific RSS context.\n"); + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; spin_unlock_bh(&efx->filter_lock); @@ -4500,10 +4746,17 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) &table->entry[filter_idx].handle, false); if (rc) - failed = true; - + failed++; spin_lock_bh(&efx->filter_lock); + if (rc) { +not_restored: + list_for_each_entry(vlan, &table->vlan_list, list) + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i) + if (vlan->default_filters[i] == filter_idx) + vlan->default_filters[i] = + EFX_EF10_FILTER_ID_INVALID; + kfree(spec); efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); } else { @@ -4514,9 +4767,17 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) spin_unlock_bh(&efx->filter_lock); + /* This can happen validly if the MC's capabilities have changed, so + * is not an error. + */ + if (invalid_filters) + netif_dbg(efx, drv, efx->net_dev, + "Did not restore %u filters that are now unsupported.\n", + invalid_filters); + if (failed) netif_err(efx, hw, efx->net_dev, - "unable to restore all filters\n"); + "unable to restore %u filters\n", failed); else nic_data->must_restore_filters = false; } @@ -4594,9 +4855,8 @@ static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx, efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]); for (i = 0; i < table->dev_mc_count; i++) efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]); - efx_ef10_filter_mark_one_old(efx, &vlan->ucdef); - efx_ef10_filter_mark_one_old(efx, &vlan->bcast); - efx_ef10_filter_mark_one_old(efx, &vlan->mcdef); + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) + efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]); } /* Mark old filters that may need to be removed. 
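 * (Editor's note summarising the flow: everything marked old here stays
 * installed until the sync path either re-inserts it or drops it via
 * efx_ef10_filter_remove_old().)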
@@ -4714,6 +4974,8 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, if (multicast && rollback) { /* Also need an Ethernet broadcast filter */ + EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] != + EFX_EF10_FILTER_ID_INVALID); efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, vlan->vid, baddr); @@ -4730,9 +4992,8 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, } return rc; } else { - EFX_WARN_ON_PARANOID(vlan->bcast != - EFX_EF10_FILTER_ID_INVALID); - vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc); + vlan->default_filters[EFX_EF10_BCAST] = + efx_ef10_filter_get_unsafe_id(efx, rc); } } @@ -4741,6 +5002,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, static int efx_ef10_filter_insert_def(struct efx_nic *efx, struct efx_ef10_filter_vlan *vlan, + enum efx_encap_type encap_type, bool multicast, bool rollback) { struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -4748,6 +5010,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, struct efx_filter_spec spec; u8 baddr[ETH_ALEN]; int rc; + u16 *id; filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; @@ -4758,19 +5021,75 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, else efx_filter_set_uc_def(&spec); + if (encap_type) { + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) + efx_filter_set_encap_type(&spec, encap_type); + else + /* don't insert encap filters on non-supporting + * platforms. ID will be left as INVALID. + */ + return 0; + } + if (vlan->vid != EFX_FILTER_VID_UNSPEC) efx_filter_set_eth_local(&spec, vlan->vid, NULL); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING, - efx->net_dev, - "%scast mismatch filter insert failed rc=%d\n", - multicast ? "Multi" : "Uni", rc); + const char *um = multicast ? "Multicast" : "Unicast"; + const char *encap_name = ""; + const char *encap_ipv = ""; + + if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_VXLAN) + encap_name = "VXLAN "; + else if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_NVGRE) + encap_name = "NVGRE "; + else if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_GENEVE) + encap_name = "GENEVE "; + if (encap_type & EFX_ENCAP_FLAG_IPV6) + encap_ipv = "IPv6 "; + else if (encap_type) + encap_ipv = "IPv4 "; + + /* unprivileged functions can't insert mismatch filters + * for encapsulated or unicast traffic, so downgrade + * those warnings to debug. 
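+ * (Illustrative example: a VF requesting a VXLAN unknown-unicast default
+ * filter will typically get -EPERM back from the MC; that is expected and
+ * is logged at debug level only, while other failures still warn.)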
+ */ + netif_cond_dbg(efx, drv, efx->net_dev, + rc == -EPERM && (encap_type || !multicast), warn, + "%s%s%s mismatch filter insert failed rc=%d\n", + encap_name, encap_ipv, um, rc); } else if (multicast) { - EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID); - vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc); - if (!nic_data->workaround_26807) { + /* mapping from encap types to default filter IDs (multicast) */ + static enum efx_ef10_default_filters map[] = { + [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF, + [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF, + [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF, + [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF, + [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_VXLAN6_MCDEF, + [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_NVGRE6_MCDEF, + [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_GENEVE6_MCDEF, + }; + + /* quick bounds check (BCAST result impossible) */ + BUILD_BUG_ON(EFX_EF10_BCAST != 0); + if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { + WARN_ON(1); + return -EINVAL; + } + /* then follow map */ + id = &vlan->default_filters[map[encap_type]]; + + EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); + *id = efx_ef10_filter_get_unsafe_id(efx, rc); + if (!nic_data->workaround_26807 && !encap_type) { /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); @@ -4785,20 +5104,44 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, /* Roll back the mc_def filter */ efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - vlan->mcdef); - vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; + *id); + *id = EFX_EF10_FILTER_ID_INVALID; return rc; } } else { - EFX_WARN_ON_PARANOID(vlan->bcast != - EFX_EF10_FILTER_ID_INVALID); - vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc); + EFX_WARN_ON_PARANOID( + vlan->default_filters[EFX_EF10_BCAST] != + EFX_EF10_FILTER_ID_INVALID); + vlan->default_filters[EFX_EF10_BCAST] = + efx_ef10_filter_get_unsafe_id(efx, rc); } } rc = 0; } else { + /* mapping from encap types to default filter IDs (unicast) */ + static enum efx_ef10_default_filters map[] = { + [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF, + [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF, + [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF, + [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF, + [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_VXLAN6_UCDEF, + [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_NVGRE6_UCDEF, + [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_GENEVE6_UCDEF, + }; + + /* quick bounds check (BCAST result impossible) */ + BUILD_BUG_ON(EFX_EF10_BCAST != 0); + if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { + WARN_ON(1); + return -EINVAL; + } + /* then follow map */ + id = &vlan->default_filters[map[encap_type]]; + EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); + *id = rc; rc = 0; } return rc; @@ -4921,7 +5264,8 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* Insert/renew unicast filters */ if (table->uc_promisc) { - efx_ef10_filter_insert_def(efx, vlan, false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE, + false, false); efx_ef10_filter_insert_addr_list(efx, vlan, false, false); } else { /* If any of the filters failed to insert, fall back to * our
individual unicast filters. */ if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false)) - efx_ef10_filter_insert_def(efx, vlan, false, false); + efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + false, false); } + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | + EFX_ENCAP_FLAG_IPV6, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | + EFX_ENCAP_FLAG_IPV6, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | + EFX_ENCAP_FLAG_IPV6, + false, false); /* Insert/renew multicast filters */ /* If changing promiscuous state with cascaded multicast filters, remove @@ -4944,7 +5305,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* If we failed to insert promiscuous filters, rollback * and fall back to individual multicast filters */ - if (efx_ef10_filter_insert_def(efx, vlan, true, true)) { + if (efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, true)) { /* Changing promisc state, so remove old filters */ efx_ef10_filter_remove_old(efx); efx_ef10_filter_insert_addr_list(efx, vlan, @@ -4954,7 +5317,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* If we failed to insert promiscuous filters, don't * rollback. Regardless, also insert the mc_list */ - efx_ef10_filter_insert_def(efx, vlan, true, false); + efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, false); efx_ef10_filter_insert_addr_list(efx, vlan, true, false); } } else { @@ -4967,11 +5332,28 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* Changing promisc state, so remove old filters */ if (nic_data->workaround_26807) efx_ef10_filter_remove_old(efx); - if (efx_ef10_filter_insert_def(efx, vlan, true, true)) + if (efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, true)) efx_ef10_filter_insert_addr_list(efx, vlan, true, false); } } + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | + EFX_ENCAP_FLAG_IPV6, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | + EFX_ENCAP_FLAG_IPV6, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | + EFX_ENCAP_FLAG_IPV6, + true, false); } /* Caller must hold efx->filter_sem for read if race against @@ -5058,9 +5440,8 @@ static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid) vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID; for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID; - vlan->ucdef = EFX_EF10_FILTER_ID_INVALID; - vlan->bcast = EFX_EF10_FILTER_ID_INVALID; - vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) + vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID; list_add_tail(&vlan->list, &table->vlan_list); @@ -5087,9 +5468,10 @@ static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mc[i]); - efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef); - 
efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast); - efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef); + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) + if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID) + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, + vlan->default_filters[i]); kfree(vlan); } @@ -5621,6 +6003,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .tx_write = efx_ef10_tx_write, .tx_limit_len = efx_ef10_tx_limit_len, .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config, + .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, .rx_probe = efx_ef10_rx_probe, .rx_init = efx_ef10_rx_init, .rx_remove = efx_ef10_rx_remove, @@ -5678,6 +6061,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_ALL, + .rx_hash_key_size = 40, }; const struct efx_nic_type efx_hunt_a0_nic_type = { @@ -5728,6 +6112,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .tx_write = efx_ef10_tx_write, .tx_limit_len = efx_ef10_tx_limit_len, .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, + .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, .rx_probe = efx_ef10_rx_probe, .rx_init = efx_ef10_rx_init, .rx_remove = efx_ef10_rx_remove, @@ -5796,6 +6181,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, .can_rx_scatter = true, .always_rx_scatter = true, + .option_descriptors = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, .offload_features = EF10_OFFLOAD_FEATURES, @@ -5803,4 +6189,5 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_ALL, + .rx_hash_key_size = 40, }; diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index a55c53d6f559..ed4b14283461 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -6,6 +6,7 @@ * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ +#include <linux/etherdevice.h> #include <linux/pci.h> #include <linux/module.h> #include "net_driver.h" @@ -554,7 +555,7 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) return 0; fail: - memset(vf->mac, 0, ETH_ALEN); + eth_zero_addr(vf->mac); return rc; } diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 543fa488d049..466c02805f72 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -331,7 +331,7 @@ static int efx_poll(struct napi_struct *napi, int budget) * since efx_nic_eventq_read_ack() will have no effect if * interrupts have already been disabled. 
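 * (napi_complete_done(napi, spent), used below in place of plain
 * napi_complete(), also reports how much of the NAPI budget was consumed,
 * letting the core make better busy-poll and interrupt-deferral decisions;
 * the completion semantics are otherwise unchanged.)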
*/ - napi_complete(napi); + napi_complete_done(napi, spent); efx_nic_eventq_read_ack(channel); } @@ -2334,8 +2334,8 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data) return 0; } -int efx_get_phys_port_id(struct net_device *net_dev, - struct netdev_phys_item_id *ppid) +static int efx_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid) { struct efx_nic *efx = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 18ebaea44e82..adddf70780ad 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -1278,15 +1278,29 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) return (efx->n_rx_channels == 1) ? 0 : ARRAY_SIZE(efx->rx_indir_table); } +static u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + return efx->type->rx_hash_key_size; +} + static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, u8 *hfunc) { struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx->type->rx_pull_rss_config(efx); + if (rc) + return rc; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (indir) memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); + if (key) + memcpy(key, efx->rx_hash_key, efx->type->rx_hash_key_size); return 0; } @@ -1295,14 +1309,18 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, { struct efx_nic *efx = netdev_priv(net_dev); - /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + /* Hash function is Toeplitz, cannot be changed */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (!indir) + if (!indir && !key) return 0; - return efx->type->rx_push_rss_config(efx, true, indir); + if (!key) + key = efx->rx_hash_key; + if (!indir) + indir = efx->rx_indir_table; + + return efx->type->rx_push_rss_config(efx, true, indir, key); } static int efx_ethtool_get_ts_info(struct net_device *net_dev, @@ -1377,6 +1395,7 @@ const struct ethtool_ops efx_ethtool_ops = { .get_rxnfc = efx_ethtool_get_rxnfc, .set_rxnfc = efx_ethtool_set_rxnfc, .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, + .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, .get_ts_info = efx_ethtool_get_ts_info, diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 8cfbe01e1ddf..c4ff3bb1a1ef 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -327,7 +327,7 @@ static int ef4_poll(struct napi_struct *napi, int budget) * since ef4_nic_eventq_read_ack() will have no effect if * interrupts have already been disabled. 
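 * (Same conversion as in sfc's efx_poll() above; the falcon driver keeps
 * its own copy of this NAPI logic, so the change is repeated here.)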
*/ - napi_complete(napi); + napi_complete_done(napi, spent); ef4_nic_eventq_read_ack(channel); } diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index e4ca2161af70..ba45150f53c7 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1649,6 +1649,22 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx) } } +void efx_farch_rx_pull_indir_table(struct efx_nic *efx) +{ + size_t i = 0; + efx_dword_t dword; + + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + FR_BZ_RX_INDIRECTION_TBL_ROWS); + + for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { + efx_readd(efx, &dword, + FR_BZ_RX_INDIRECTION_TBL + + FR_BZ_RX_INDIRECTION_TBL_STEP * i); + efx->rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE); + } +} + /* Looks at available SRAM resources and works out how many queues we * can support, and where things like descriptor caches should live. * diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index d0ed7f71ea7e..8189a1cd973f 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -27,6 +27,7 @@ * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit. + * @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type. * Used for RX default unicast and multicast/broadcast filters. * * Only some combinations are supported, depending on NIC type: @@ -54,6 +55,7 @@ enum efx_filter_match_flags { EFX_FILTER_MATCH_OUTER_VID = 0x0100, EFX_FILTER_MATCH_IP_PROTO = 0x0200, EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400, + EFX_FILTER_MATCH_ENCAP_TYPE = 0x0800, }; /** @@ -98,6 +100,26 @@ enum efx_filter_flags { EFX_FILTER_FLAG_TX = 0x10, }; +/** enum efx_encap_type - types of encapsulation + * @EFX_ENCAP_TYPE_NONE: no encapsulation + * @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation + * @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation + * @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation + * @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame + * + * Contains both enumerated types and flags. + * To get just the type, AND with @EFX_ENCAP_TYPES_MASK. + */ +enum efx_encap_type { + EFX_ENCAP_TYPE_NONE = 0, + EFX_ENCAP_TYPE_VXLAN = 1, + EFX_ENCAP_TYPE_NVGRE = 2, + EFX_ENCAP_TYPE_GENEVE = 3, + + EFX_ENCAP_TYPES_MASK = 7, + EFX_ENCAP_FLAG_IPV6 = 8, +}; + /** * struct efx_filter_spec - specification for a hardware filter * @match_flags: Match type flags, from &enum efx_filter_match_flags @@ -118,6 +140,8 @@ enum efx_filter_flags { * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set + * @encap_type: Encapsulation type to match (from &enum efx_encap_type), if + * %EFX_FILTER_MATCH_ENCAP_TYPE is set * * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be * used to initialise the structure.
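 * (Editor's sketch of intended usage, assembled from the helpers added
 * in this patch, not itself part of the patch:
 *	struct efx_filter_spec spec;
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 0, 0);
 *	efx_filter_set_encap_type(&spec, EFX_ENCAP_TYPE_VXLAN);
 * after which spec.match_flags includes EFX_FILTER_MATCH_ENCAP_TYPE and
 * efx_filter_get_encap_type(&spec) returns EFX_ENCAP_TYPE_VXLAN.)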
The efx_filter_set_*() functions @@ -144,7 +168,8 @@ struct efx_filter_spec { __be32 rem_host[4]; __be16 loc_port; __be16 rem_port; - /* total 64 bytes */ + u32 encap_type:4; + /* total 65 bytes */ }; enum { @@ -269,4 +294,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec) return 0; } +static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec, + enum efx_encap_type encap_type) +{ + spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + spec->encap_type = encap_type; +} + +static inline enum efx_encap_type efx_filter_get_encap_type( + const struct efx_filter_spec *spec) +{ + if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE) + return spec->encap_type; + return EFX_ENCAP_TYPE_NONE; +} #endif /* EFX_FILTER_H */ diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 995651341b94..24b271b9c260 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -837,11 +837,9 @@ static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, outbuf, outlen, outlen_actual, quiet, NULL, raw_rc); } else { - netif_printk(efx, hw, - rc == -EPERM ? KERN_DEBUG : KERN_ERR, - efx->net_dev, - "MC command 0x%x failed after proxy auth rc=%d\n", - cmd, rc); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err, + "MC command 0x%x failed after proxy auth rc=%d\n", + cmd, rc); if (rc == -EINTR || rc == -EIO) efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); @@ -1084,10 +1082,9 @@ void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, code = MCDI_DWORD(outbuf, ERR_CODE); if (outlen >= MC_CMD_ERR_ARG_OFST + 4) err_arg = MCDI_DWORD(outbuf, ERR_ARG); - netif_printk(efx, hw, rc == -EPERM ? KERN_DEBUG : KERN_ERR, - efx->net_dev, - "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n", - cmd, inlen, rc, code, err_arg); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err, + "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n", + cmd, inlen, rc, code, err_arg); } /* Switch to polled MCDI completions. This can be called in various @@ -2057,8 +2054,8 @@ fail: /* Older firmware lacks GET_WORKAROUNDS and this isn't especially * terrifying. The call site will have to deal with it though. */ - netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR, - efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err, + "%s: failed rc=%d\n", __func__, rc); return rc; } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 49db9e833c96..5927c20ba43f 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -208,6 +208,12 @@ struct efx_tx_buffer { * @write_count: Current write pointer * This is the number of buffers that have been added to the * hardware ring. + * @packet_write_count: Completable write pointer + * This is the write pointer of the last packet written. + * Normally this will equal @write_count, but as option descriptors + * don't produce completion events, they won't update this. + * Filled in iff @efx->type->option_descriptors; only used for PIO. + * Thus, this is written and used on EF10, and neither on farch. * @old_read_count: The value of read_count when last checked. * This is here for performance reasons. 
The xmit path will * only get the up-to-date value of read_count if this @@ -255,6 +261,7 @@ struct efx_tx_queue { /* Members used only on the xmit path */ unsigned int insert_count ____cacheline_aligned_in_smp; unsigned int write_count; + unsigned int packet_write_count; unsigned int old_read_count; unsigned int tso_bursts; unsigned int tso_long_headers; @@ -1174,6 +1181,7 @@ struct efx_mtd_partition { * @tx_remove: Free resources for TX queue * @tx_write: Write TX descriptors and doorbell * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC + * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC * @rx_probe: Allocate resources for RX queue * @rx_init: Initialise RX queue on the NIC * @rx_remove: Free resources for RX queue @@ -1237,6 +1245,7 @@ struct efx_mtd_partition { * @rx_buffer_padding: Size of padding at end of RX packet * @can_rx_scatter: NIC is able to scatter packets to multiple buffers * @always_rx_scatter: NIC will always scatter packets to multiple buffers + * @option_descriptors: NIC supports TX option descriptors * @max_interrupt_mode: Highest capability interrupt mode supported * from &enum efx_init_mode. * @timer_period_max: Maximum period of interrupt timer (in ticks) @@ -1303,7 +1312,8 @@ struct efx_nic_type { unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned int len); int (*rx_push_rss_config)(struct efx_nic *efx, bool user, - const u32 *rx_indir_table); + const u32 *rx_indir_table, const u8 *key); + int (*rx_pull_rss_config)(struct efx_nic *efx); int (*rx_probe)(struct efx_rx_queue *rx_queue); void (*rx_init)(struct efx_rx_queue *rx_queue); void (*rx_remove)(struct efx_rx_queue *rx_queue); @@ -1395,12 +1405,14 @@ struct efx_nic_type { unsigned int rx_buffer_padding; bool can_rx_scatter; bool always_rx_scatter; + bool option_descriptors; unsigned int max_interrupt_mode; unsigned int timer_period_max; netdev_features_t offload_features; int mcdi_max_ver; unsigned int max_rx_ip_filters; u32 hwtstamp_filters; + unsigned int rx_hash_key_size; }; /************************************************************************** diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 223774635cba..85cf131288b7 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -85,6 +85,17 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; } +/* Report whether the NIC considers this TX queue empty, using + * packet_write_count (the write count recorded for the last completable + * doorbell push). May return false negative. EF10 only, which is OK + * because only EF10 supports PIO. + */ +static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) +{ + EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors); + return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count); +} + /* Decide whether we can use TX PIO, ie. write packet data directly into * a buffer on the device. 
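 * (The emptiness tests below deliberately go through efx_nic_tx_is_empty(),
 * i.e. @packet_write_count rather than @insert_count: option descriptors
 * never produce completion events, so counting them would make a drained
 * queue look permanently non-empty and PIO would never be chosen.)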
This can reduce latency at the expense of * throughput, so we only do this if both hardware and software TX rings @@ -94,9 +105,9 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue) { struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue); - return tx_queue->piobuf && - __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) && - __efx_nic_tx_is_empty(partner, partner->insert_count); + + return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) && + efx_nic_tx_is_empty(partner); } /* Decide whether to push a TX descriptor to the NIC vs merely writing @@ -332,6 +343,7 @@ enum { * @pio_write_base: Base address for writing PIO buffers * @pio_write_vi_base: Relative VI number for @pio_write_base * @piobuf_handle: Handle of each PIO buffer allocated + * @piobuf_size: size of a single PIO buffer * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC * reboot * @rx_rss_context: Firmware handle for our RSS context @@ -369,6 +381,7 @@ struct efx_ef10_nic_data { void __iomem *wc_membase, *pio_write_base; unsigned int pio_write_vi_base; unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT]; + u16 piobuf_size; bool must_restore_piobufs; u32 rx_rss_context; bool rx_rss_context_exclusive; @@ -613,6 +626,7 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); void efx_farch_init_common(struct efx_nic *efx); void efx_ef10_handle_drain_event(struct efx_nic *efx); void efx_farch_rx_push_indir_table(struct efx_nic *efx); +void efx_farch_rx_pull_indir_table(struct efx_nic *efx); int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, unsigned int len, gfp_t gfp_flags); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 4e54e5dc9fcb..af7cd8565a41 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -332,12 +332,33 @@ fail1: return rc; } +static int siena_rx_pull_rss_config(struct efx_nic *efx) +{ + efx_oword_t temp; + + /* Read from IPv6 RSS key as that's longer (the IPv4 key is just the + * first 128 bits of the same key, assuming it's been set by + * siena_rx_push_rss_config, below) + */ + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); + memcpy(efx->rx_hash_key, &temp, sizeof(temp)); + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); + memcpy(efx->rx_hash_key + sizeof(temp), &temp, sizeof(temp)); + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); + memcpy(efx->rx_hash_key + 2 * sizeof(temp), &temp, + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); + efx_farch_rx_pull_indir_table(efx); + return 0; +} + static int siena_rx_push_rss_config(struct efx_nic *efx, bool user, - const u32 *rx_indir_table) + const u32 *rx_indir_table, const u8 *key) { efx_oword_t temp; /* Set hash key for IPv4 */ + if (key) + memcpy(efx->rx_hash_key, key, sizeof(temp)); memcpy(&temp, efx->rx_hash_key, sizeof(temp)); efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); @@ -402,7 +423,7 @@ static int siena_init_nic(struct efx_nic *efx) EFX_RX_USR_BUF_SIZE >> 5); efx_writeo(efx, &temp, FR_AZ_RX_CFG); - siena_rx_push_rss_config(efx, false, efx->rx_indir_table); + siena_rx_push_rss_config(efx, false, efx->rx_indir_table, NULL); efx->rss_active = true; /* Enable event logging */ @@ -979,6 +1000,7 @@ const struct efx_nic_type siena_a0_nic_type = { .tx_write = efx_farch_tx_write, .tx_limit_len = efx_farch_tx_limit_len, .rx_push_rss_config = siena_rx_push_rss_config, + .rx_pull_rss_config = siena_rx_pull_rss_config, .rx_probe = 
efx_farch_rx_probe, .rx_init = efx_farch_rx_init, .rx_remove = efx_farch_rx_remove, @@ -1044,6 +1066,7 @@ const struct efx_nic_type siena_a0_nic_type = { .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST, .rx_buffer_padding = 0, .can_rx_scatter = true, + .option_descriptors = false, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -1053,4 +1076,5 @@ const struct efx_nic_type siena_a0_nic_type = { .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT), + .rx_hash_key_size = 16, }; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 3c0151424d12..ff88d60aa6d5 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -28,7 +28,6 @@ #ifdef EFX_USE_PIO -#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE #define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES) unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF; @@ -817,6 +816,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->insert_count = 0; tx_queue->write_count = 0; + tx_queue->packet_write_count = 0; tx_queue->old_write_count = 0; tx_queue->read_count = 0; tx_queue->old_read_count = 0; diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 67154621abcf..97280daba27f 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -113,6 +113,7 @@ struct smc_private { struct mii_if_info mii_if; int duplex; int rx_ovrn; + unsigned long last_rx; }; /* Special definitions for Megahertz multifunction cards */ @@ -1491,6 +1492,7 @@ static void smc_rx(struct net_device *dev) if (!(rx_status & RS_ERRORS)) { /* do stuff to make a new packet */ struct sk_buff *skb; + struct smc_private *smc = netdev_priv(dev); /* Note: packet_length adds 5 or 6 extra bytes here! */ skb = netdev_alloc_skb(dev, packet_length+2); @@ -1509,7 +1511,7 @@ static void smc_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - dev->last_rx = jiffies; + smc->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += packet_length; if (rx_status & RS_MULTICAST) @@ -1790,7 +1792,7 @@ static void media_check(u_long arg) } /* Ignore collisions unless we've had no rx's recently */ - if (time_after(jiffies, dev->last_rx + HZ)) { + if (time_after(jiffies, smc->last_rx + HZ)) { if (smc->tx_err || (smc->media_status & EPH_16COL)) media |= EPH_16COL; } diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 3174aebb322f..2fa3c1d03abc 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -861,7 +861,7 @@ static int smsc9420_rx_poll(struct napi_struct *napi, int budget) smsc9420_pci_flush_write(pd); if (work_done < budget) { - napi_complete(&pd->napi); + napi_complete_done(&pd->napi, work_done); /* re-enable RX DMA interrupts */ dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); diff --git a/drivers/net/ethernet/stmicro/Kconfig b/drivers/net/ethernet/stmicro/Kconfig index 1c1157d2bd40..ecd7a5edef5d 100644 --- a/drivers/net/ethernet/stmicro/Kconfig +++ b/drivers/net/ethernet/stmicro/Kconfig @@ -7,7 +7,8 @@ config NET_VENDOR_STMICRO default y depends on HAS_IOMEM ---help--- - If you have a network (Ethernet) card belonging to this class, say Y. + If you have a network (Ethernet) card based on Synopsys Ethernet IP + Cores, say Y. 
Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 99594e308db9..cfbe3634dfa1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -1,5 +1,5 @@ config STMMAC_ETH - tristate "STMicroelectronics 10/100/1000 Ethernet driver" + tristate "STMicroelectronics 10/100/1000/EQOS Ethernet driver" depends on HAS_IOMEM && HAS_DMA select MII select PHYLIB @@ -7,9 +7,8 @@ config STMMAC_ETH imply PTP_1588_CLOCK select RESET_CONTROLLER ---help--- - This is the driver for the Ethernet IPs are built around a - Synopsys IP Core and only tested on the STMicroelectronics - platforms. + This is the driver for the Ethernet IPs built around a + Synopsys IP Core. if STMMAC_ETH @@ -152,11 +151,11 @@ config STMMAC_PCI tristate "STMMAC PCI bus support" depends on STMMAC_ETH && PCI ---help--- - This is to select the Synopsys DWMAC available on PCI devices, - if you have a controller with this interface, say Y or M here. + This selects the platform specific bus support for the stmmac driver. + This driver was tested on XLINX XC2V3000 FF1152AMT0221 + D1215994A VIRTEX FPGA board and SNPS QoS IPK Prototyping Kit. - This PCI support is tested on XLINX XC2V3000 FF1152AMT0221 - D1215994A VIRTEX FPGA board. + If you have a controller with this interface, say Y or M here. If unsure, say N. endif diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index ffaed1f35efe..8840a360a0b7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -35,10 +35,6 @@ #define PRG_ETH0_TXDLY_SHIFT 5 #define PRG_ETH0_TXDLY_MASK GENMASK(6, 5) -#define PRG_ETH0_TXDLY_OFF (0x0 << PRG_ETH0_TXDLY_SHIFT) -#define PRG_ETH0_TXDLY_QUARTER (0x1 << PRG_ETH0_TXDLY_SHIFT) -#define PRG_ETH0_TXDLY_HALF (0x2 << PRG_ETH0_TXDLY_SHIFT) -#define PRG_ETH0_TXDLY_THREE_QUARTERS (0x3 << PRG_ETH0_TXDLY_SHIFT) /* divider for the result of m250_sel */ #define PRG_ETH0_CLK_M250_DIV_SHIFT 7 @@ -69,6 +65,8 @@ struct meson8b_dwmac { struct clk_divider m25_div; struct clk *m25_div_clk; + + u32 tx_delay_ns; }; static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg, @@ -179,6 +177,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) { int ret; unsigned long clk_rate; + u8 tx_dly_val; switch (dwmac->phy_mode) { case PHY_INTERFACE_MODE_RGMII: @@ -196,9 +195,13 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_INVERTED_RMII_CLK, 0); - /* TX clock delay - all known boards use a 1/4 cycle delay */ + /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where + * 8ns are exactly one cycle of the 125MHz RGMII TX clock): + * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3 + */ + tx_dly_val = dwmac->tx_delay_ns >> 1; meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK, - PRG_ETH0_TXDLY_QUARTER); + tx_dly_val << PRG_ETH0_TXDLY_SHIFT); break; case PHY_INTERFACE_MODE_RMII: @@ -284,6 +287,11 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } + /* use 2ns as fallback since this value was previously hardcoded */ + if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns", + &dwmac->tx_delay_ns)) + dwmac->tx_delay_ns = 2; + ret = meson8b_init_clk(dwmac); if (ret) goto 
err_remove_config_dt; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 834f40f08208..202216cd6789 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -184,7 +184,7 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link) static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) { void __iomem *ioaddr = hw->pcsr; - int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16); + int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16); /* Program the timers in the LPI timer control register: * LS: minimum time (ms) for which the link diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d481c5f731fd..26a2185fc8a9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2684,7 +2684,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget) work_done = stmmac_rx(priv, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); stmmac_enable_dma_irq(priv); } return work_done; @@ -3305,9 +3305,9 @@ int stmmac_dvr_probe(struct device *device, (priv->plat->maxmtu >= ndev->min_mtu)) ndev->max_mtu = priv->plat->maxmtu; else if (priv->plat->maxmtu < ndev->min_mtu) - netdev_warn(priv->dev, - "%s: warning: maxmtu having invalid value (%d)\n", - __func__, priv->plat->maxmtu); + dev_warn(priv->device, + "%s: warning: maxmtu having invalid value (%d)\n", + __func__, priv->plat->maxmtu); if (flow_ctrl) priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ @@ -3319,7 +3319,8 @@ int stmmac_dvr_probe(struct device *device, */ if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) { priv->use_riwt = 1; - netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n"); + dev_info(priv->device, + "Enable RX Mitigation via HW Watchdog Timer\n"); } netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); @@ -3345,17 +3346,17 @@ int stmmac_dvr_probe(struct device *device, /* MDIO bus Registration */ ret = stmmac_mdio_register(ndev); if (ret < 0) { - netdev_err(priv->dev, - "%s: MDIO bus (id: %d) registration failed", - __func__, priv->plat->bus_id); + dev_err(priv->device, + "%s: MDIO bus (id: %d) registration failed", + __func__, priv->plat->bus_id); goto error_mdio_register; } } ret = register_netdev(ndev); if (ret) { - netdev_err(priv->dev, "%s: ERROR %i registering the device\n", - __func__, ret); + dev_err(priv->device, "%s: ERROR %i registering the device\n", + __func__, ret); goto error_netdev_register; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index ac32f9ef7bed..460f94f5053a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -191,7 +191,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, for_each_child_of_node(np, plat->mdio_node) { if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio")) - break; + break; } } @@ -409,6 +409,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev, if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(plat->phy_node); + of_node_put(plat->mdio_node); } #else struct plat_stmmacenet_data * diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index e557a3290a25..57978056b336 100644 --- 
a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3786,7 +3786,7 @@ static int niu_poll(struct napi_struct *napi, int budget) work_done = niu_poll_core(np, lp, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); niu_ldg_rearm(np, lp, 1); } return work_done; diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index d277e4107976..5c5952e782cd 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -922,7 +922,7 @@ static int gem_poll(struct napi_struct *napi, int budget) gp->status = readl(gp->regs + GREG_STAT); } while (gp->status & GREG_STAT_NAPI); - napi_complete(napi); + napi_complete_done(napi, work_done); gem_enable_ints(gp); return work_done; diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 8878b75d68b4..191c8ade6155 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -860,7 +860,7 @@ int sunvnet_poll_common(struct napi_struct *napi, int budget) int processed = vnet_event_napi(port, budget); if (processed < budget) { - napi_complete(napi); + napi_complete_done(napi, processed); port->rx_event &= ~LDC_EVENT_DATA_READY; vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); } diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig deleted file mode 100644 index 8276ee5a7d54..000000000000 --- a/drivers/net/ethernet/synopsys/Kconfig +++ /dev/null @@ -1,27 +0,0 @@ -# -# Synopsys network device configuration -# - -config NET_VENDOR_SYNOPSYS - bool "Synopsys devices" - default y - ---help--- - If you have a network (Ethernet) device belonging to this class, say Y. - - Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about Synopsys devices. If you say Y, you will be asked - for your specific device in the following questions. - -if NET_VENDOR_SYNOPSYS - -config SYNOPSYS_DWC_ETH_QOS - tristate "Sypnopsys DWC Ethernet QOS v4.10a support" - select PHYLIB - select CRC32 - select MII - depends on OF && HAS_DMA - ---help--- - This driver supports the DWC Ethernet QoS from Synopsys - -endif # NET_VENDOR_SYNOPSYS diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile deleted file mode 100644 index 7a375723fc18..000000000000 --- a/drivers/net/ethernet/synopsys/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the Synopsys network device drivers. -# - -obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c deleted file mode 100644 index 467dcc53f5e1..000000000000 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ /dev/null @@ -1,2996 +0,0 @@ -/* Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver - * - * This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC). - * This version introduced a lot of changes which breaks backwards - * compatibility the non-QoS IP from Synopsys (used in the ST Micro drivers). - * Some fields differ between version 4.00a and 4.10a, mainly the interrupt - * bit fields. The driver could be made compatible with 4.00, if all relevant - * HW erratas are handled. - * - * The GMAC is highly configurable at synthesis time. This driver has been - * developed for a subset of the total available feature set. 
Currently - * it supports: - * - TSO - * - Checksum offload for RX and TX. - * - Energy efficient ethernet. - * - GMII phy interface. - * - The statistics module. - * - Single RX and TX queue. - * - * Copyright (C) 2015 Axis Communications AB. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - */ - -#include <linux/clk.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/io.h> -#include <linux/ethtool.h> -#include <linux/stat.h> -#include <linux/types.h> - -#include <linux/slab.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/platform_device.h> - -#include <linux/phy.h> -#include <linux/mii.h> -#include <linux/dma-mapping.h> -#include <linux/vmalloc.h> - -#include <linux/device.h> -#include <linux/bitrev.h> -#include <linux/crc32.h> - -#include <linux/of.h> -#include <linux/interrupt.h> -#include <linux/clocksource.h> -#include <linux/net_tstamp.h> -#include <linux/pm_runtime.h> -#include <linux/of_net.h> -#include <linux/of_address.h> -#include <linux/of_mdio.h> -#include <linux/timer.h> -#include <linux/tcp.h> - -#define DRIVER_NAME "dwceqos" -#define DRIVER_DESCRIPTION "Synopsys DWC Ethernet QoS driver" -#define DRIVER_VERSION "0.9" - -#define DWCEQOS_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ - NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) - -#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */ - -#define DWCEQOS_LPI_TIMER_MIN 8 -#define DWCEQOS_LPI_TIMER_MAX ((1 << 20) - 1) - -#define DWCEQOS_RX_BUF_SIZE 2048 - -#define DWCEQOS_RX_DCNT 256 -#define DWCEQOS_TX_DCNT 256 - -#define DWCEQOS_HASH_TABLE_SIZE 64 - -/* The size field in the DMA descriptor is 14 bits */ -#define BYTES_PER_DMA_DESC 16376 - -/* Hardware registers */ -#define START_MAC_REG_OFFSET 0x0000 -#define MAX_MAC_REG_OFFSET 0x0bd0 -#define START_MTL_REG_OFFSET 0x0c00 -#define MAX_MTL_REG_OFFSET 0x0d7c -#define START_DMA_REG_OFFSET 0x1000 -#define MAX_DMA_REG_OFFSET 0x117C - -#define REG_SPACE_SIZE 0x1800 - -/* DMA */ -#define REG_DWCEQOS_DMA_MODE 0x1000 -#define REG_DWCEQOS_DMA_SYSBUS_MODE 0x1004 -#define REG_DWCEQOS_DMA_IS 0x1008 -#define REG_DWCEQOS_DMA_DEBUG_ST0 0x100c - -/* DMA channel registers */ -#define REG_DWCEQOS_DMA_CH0_CTRL 0x1100 -#define REG_DWCEQOS_DMA_CH0_TX_CTRL 0x1104 -#define REG_DWCEQOS_DMA_CH0_RX_CTRL 0x1108 -#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST 0x1114 -#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST 0x111c -#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL 0x1120 -#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL 0x1128 -#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN 0x112c -#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN 0x1130 -#define REG_DWCEQOS_DMA_CH0_IE 0x1134 -#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC 0x1144 -#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC 0x114c -#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF 0x1154 -#define REG_DWCEQOS_DMA_CH0_CUR_RXBUG 0x115c -#define REG_DWCEQOS_DMA_CH0_STA 0x1160 - -#define DWCEQOS_DMA_MODE_TXPR BIT(11) -#define DWCEQOS_DMA_MODE_DA BIT(1) - -#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI BIT(31) -#define DWCEQOS_DMA_SYSBUS_MODE_FB BIT(0) -#define DWCEQOS_DMA_SYSBUS_MODE_AAL BIT(12) - -#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \ - (((x) << 16) & 0x000F0000) -#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT 3 -#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK GENMASK(19, 16) - -#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \ - 
(((x) << 24) & 0x0F000000) -#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT 3 -#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK GENMASK(27, 24) - -#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1) -#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \ - (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK) -#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT GENMASK(3, 1) - -#define DWCEQOS_DMA_CH_CTRL_PBLX8 BIT(16) -#define DWCEQOS_DMA_CH_CTRL_DSL(x) ((x) << 18) - -#define DWCEQOS_DMA_CH_CTRL_PBL(x) ((x) << 16) -#define DWCEQOS_DMA_CH_CTRL_START BIT(0) -#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x) ((x) << 1) -#define DWCEQOS_DMA_CH_TX_OSP BIT(4) -#define DWCEQOS_DMA_CH_TX_TSE BIT(12) - -#define DWCEQOS_DMA_CH0_IE_NIE BIT(15) -#define DWCEQOS_DMA_CH0_IE_AIE BIT(14) -#define DWCEQOS_DMA_CH0_IE_RIE BIT(6) -#define DWCEQOS_DMA_CH0_IE_TIE BIT(0) -#define DWCEQOS_DMA_CH0_IE_FBEE BIT(12) -#define DWCEQOS_DMA_CH0_IE_RBUE BIT(7) - -#define DWCEQOS_DMA_IS_DC0IS BIT(0) -#define DWCEQOS_DMA_IS_MTLIS BIT(16) -#define DWCEQOS_DMA_IS_MACIS BIT(17) - -#define DWCEQOS_DMA_CH0_IS_TI BIT(0) -#define DWCEQOS_DMA_CH0_IS_RI BIT(6) -#define DWCEQOS_DMA_CH0_IS_RBU BIT(7) -#define DWCEQOS_DMA_CH0_IS_FBE BIT(12) -#define DWCEQOS_DMA_CH0_IS_CDE BIT(13) -#define DWCEQOS_DMA_CH0_IS_AIS BIT(14) - -#define DWCEQOS_DMA_CH0_IS_TEB GENMASK(18, 16) -#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ BIT(16) -#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR BIT(17) - -#define DWCEQOS_DMA_CH0_IS_REB GENMASK(21, 19) -#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ BIT(19) -#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR BIT(20) - -/* DMA descriptor bits for RX normal descriptor (read format) */ -#define DWCEQOS_DMA_RDES3_OWN BIT(31) -#define DWCEQOS_DMA_RDES3_INTE BIT(30) -#define DWCEQOS_DMA_RDES3_BUF2V BIT(25) -#define DWCEQOS_DMA_RDES3_BUF1V BIT(24) - -/* DMA descriptor bits for RX normal descriptor (write back format) */ -#define DWCEQOS_DMA_RDES1_IPCE BIT(7) -#define DWCEQOS_DMA_RDES3_ES BIT(15) -#define DWCEQOS_DMA_RDES3_E_JT BIT(14) -#define DWCEQOS_DMA_RDES3_PL(x) ((x) & 0x7fff) -#define DWCEQOS_DMA_RDES1_PT 0x00000007 -#define DWCEQOS_DMA_RDES1_PT_UDP BIT(0) -#define DWCEQOS_DMA_RDES1_PT_TCP BIT(1) -#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003 - -/* DMA descriptor bits for TX normal descriptor (read format) */ -#define DWCEQOS_DMA_TDES2_IOC BIT(31) -#define DWCEQOS_DMA_TDES3_OWN BIT(31) -#define DWCEQOS_DMA_TDES3_CTXT BIT(30) -#define DWCEQOS_DMA_TDES3_FD BIT(29) -#define DWCEQOS_DMA_TDES3_LD BIT(28) -#define DWCEQOS_DMA_TDES3_CIPH BIT(16) -#define DWCEQOS_DMA_TDES3_CIPP BIT(17) -#define DWCEQOS_DMA_TDES3_CA 0x00030000 -#define DWCEQOS_DMA_TDES3_TSE BIT(18) -#define DWCEQOS_DMA_DES3_THL(x) ((x) << 19) -#define DWCEQOS_DMA_DES2_B2L(x) ((x) << 16) - -#define DWCEQOS_DMA_TDES3_TCMSSV BIT(26) - -/* DMA channel states */ -#define DMA_TX_CH_STOPPED 0 -#define DMA_TX_CH_SUSPENDED 6 - -#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12) - -/* MTL */ -#define REG_DWCEQOS_MTL_OPER 0x0c00 -#define REG_DWCEQOS_MTL_DEBUG_ST 0x0c0c -#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST 0x0d08 -#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST 0x0d38 - -#define REG_DWCEQOS_MTL_IS 0x0c20 -#define REG_DWCEQOS_MTL_TXQ0_OPER 0x0d00 -#define REG_DWCEQOS_MTL_RXQ0_OPER 0x0d30 -#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT 0x0d34 -#define REG_DWCEQOS_MTL_RXQ0_CTRL 0x0d3c - -#define REG_DWCEQOS_MTL_Q0_ISCTRL 0x0d2c - -#define DWCEQOS_MTL_SCHALG_STRICT 0x00000060 - -#define DWCEQOS_MTL_TXQ_TXQEN BIT(3) -#define DWCEQOS_MTL_TXQ_TSF BIT(1) -#define DWCEQOS_MTL_TXQ_FTQ BIT(0) -#define DWCEQOS_MTL_TXQ_TTC512 
0x00000070 - -#define DWCEQOS_MTL_TXQ_SIZE(x) ((((x) - 256) & 0xff00) << 8) - -#define DWCEQOS_MTL_RXQ_SIZE(x) ((((x) - 256) & 0xff00) << 12) -#define DWCEQOS_MTL_RXQ_EHFC BIT(7) -#define DWCEQOS_MTL_RXQ_DIS_TCP_EF BIT(6) -#define DWCEQOS_MTL_RXQ_FEP BIT(4) -#define DWCEQOS_MTL_RXQ_FUP BIT(3) -#define DWCEQOS_MTL_RXQ_RSF BIT(5) -#define DWCEQOS_MTL_RXQ_RTC32 BIT(0) - -/* MAC */ -#define REG_DWCEQOS_MAC_CFG 0x0000 -#define REG_DWCEQOS_MAC_EXT_CFG 0x0004 -#define REG_DWCEQOS_MAC_PKT_FILT 0x0008 -#define REG_DWCEQOS_MAC_WD_TO 0x000c -#define REG_DWCEQOS_HASTABLE_LO 0x0010 -#define REG_DWCEQOS_HASTABLE_HI 0x0014 -#define REG_DWCEQOS_MAC_IS 0x00b0 -#define REG_DWCEQOS_MAC_IE 0x00b4 -#define REG_DWCEQOS_MAC_STAT 0x00b8 -#define REG_DWCEQOS_MAC_MDIO_ADDR 0x0200 -#define REG_DWCEQOS_MAC_MDIO_DATA 0x0204 -#define REG_DWCEQOS_MAC_MAC_ADDR0_HI 0x0300 -#define REG_DWCEQOS_MAC_MAC_ADDR0_LO 0x0304 -#define REG_DWCEQOS_MAC_RXQ0_CTRL0 0x00a0 -#define REG_DWCEQOS_MAC_HW_FEATURE0 0x011c -#define REG_DWCEQOS_MAC_HW_FEATURE1 0x0120 -#define REG_DWCEQOS_MAC_HW_FEATURE2 0x0124 -#define REG_DWCEQOS_MAC_HASHTABLE_LO 0x0010 -#define REG_DWCEQOS_MAC_HASHTABLE_HI 0x0014 -#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS 0x00d0 -#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL 0x00d4 -#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER 0x00d8 -#define REG_DWCEQOS_MAC_1US_TIC_COUNTER 0x00dc -#define REG_DWCEQOS_MAC_RX_FLOW_CTRL 0x0090 -#define REG_DWCEQOS_MAC_Q0_TX_FLOW 0x0070 - -#define DWCEQOS_MAC_CFG_ACS BIT(20) -#define DWCEQOS_MAC_CFG_JD BIT(17) -#define DWCEQOS_MAC_CFG_JE BIT(16) -#define DWCEQOS_MAC_CFG_PS BIT(15) -#define DWCEQOS_MAC_CFG_FES BIT(14) -#define DWCEQOS_MAC_CFG_DM BIT(13) -#define DWCEQOS_MAC_CFG_DO BIT(10) -#define DWCEQOS_MAC_CFG_TE BIT(1) -#define DWCEQOS_MAC_CFG_IPC BIT(27) -#define DWCEQOS_MAC_CFG_RE BIT(0) - -#define DWCEQOS_ADDR_HIGH(reg) (0x00000300 + (reg * 8)) -#define DWCEQOS_ADDR_LOW(reg) (0x00000304 + (reg * 8)) - -#define DWCEQOS_MAC_IS_LPI_INT BIT(5) -#define DWCEQOS_MAC_IS_MMC_INT BIT(8) - -#define DWCEQOS_MAC_RXQ_EN BIT(1) -#define DWCEQOS_MAC_MAC_ADDR_HI_EN BIT(31) -#define DWCEQOS_MAC_PKT_FILT_RA BIT(31) -#define DWCEQOS_MAC_PKT_FILT_HPF BIT(10) -#define DWCEQOS_MAC_PKT_FILT_SAF BIT(9) -#define DWCEQOS_MAC_PKT_FILT_SAIF BIT(8) -#define DWCEQOS_MAC_PKT_FILT_DBF BIT(5) -#define DWCEQOS_MAC_PKT_FILT_PM BIT(4) -#define DWCEQOS_MAC_PKT_FILT_DAIF BIT(3) -#define DWCEQOS_MAC_PKT_FILT_HMC BIT(2) -#define DWCEQOS_MAC_PKT_FILT_HUC BIT(1) -#define DWCEQOS_MAC_PKT_FILT_PR BIT(0) - -#define DWCEQOS_MAC_MDIO_ADDR_CR(x) (((x & 15)) << 8) -#define DWCEQOS_MAC_MDIO_ADDR_CR_20 2 -#define DWCEQOS_MAC_MDIO_ADDR_CR_35 3 -#define DWCEQOS_MAC_MDIO_ADDR_CR_60 0 -#define DWCEQOS_MAC_MDIO_ADDR_CR_100 1 -#define DWCEQOS_MAC_MDIO_ADDR_CR_150 4 -#define DWCEQOS_MAC_MDIO_ADDR_CR_250 5 -#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ 0x0000000c -#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE BIT(2) -#define DWCEQOS_MAC_MDIO_ADDR_GB BIT(0) - -#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN BIT(0) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX BIT(1) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN BIT(2) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX BIT(3) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST BIT(8) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST BIT(9) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN BIT(16) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS BIT(17) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN BIT(18) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA BIT(19) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE BIT(20) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21) - -#define 
DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x) ((x) & GENMASK(11, 0))
-
-#define DWCEQOS_LPI_CTRL_ENABLE_EEE (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
-	DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
-	DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)
-
-#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
-
-#define DWCEQOS_MAC_Q0_TX_FLOW_TFE BIT(1)
-#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time) ((time) << 16)
-#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
-
-/* Features */
-#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
-#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
-#define DWCEQOS_MAC_HW_FEATURE0_HDSEL BIT(2)
-#define DWCEQOS_MAC_HW_FEATURE0_EEESEL BIT(13)
-#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL BIT(1)
-#define DWCEQOS_MAC_HW_FEATURE0_MIISEL BIT(0)
-
-#define DWCEQOS_MAC_HW_FEATURE1_TSOEN BIT(18)
-#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) ((128 << ((x) & 0x7c0)) >> 6)
-#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x) (128 << ((x) & 0x1f))
-
-#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
-	(1 + (((feature1) & 0x1fc0000) >> 18))
-
-#define DWCEQOS_MDIO_PHYADDR(x) (((x) & 0x1f) << 21)
-#define DWCEQOS_MDIO_PHYREG(x) (((x) & 0x1f) << 16)
-
-#define DWCEQOS_DMA_MODE_SWR BIT(0)
-
-#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048
-
-/* Mac Management Counters */
-#define REG_DWCEQOS_MMC_CTRL 0x0700
-#define REG_DWCEQOS_MMC_RXIRQ 0x0704
-#define REG_DWCEQOS_MMC_TXIRQ 0x0708
-#define REG_DWCEQOS_MMC_RXIRQMASK 0x070c
-#define REG_DWCEQOS_MMC_TXIRQMASK 0x0710
-
-#define DWCEQOS_MMC_CTRL_CNTRST BIT(0)
-#define DWCEQOS_MMC_CTRL_RSTONRD BIT(2)
-
-#define DWC_MMC_TXLPITRANSCNTR 0x07F0
-#define DWC_MMC_TXLPIUSCNTR 0x07EC
-#define DWC_MMC_TXOVERSIZE_G 0x0778
-#define DWC_MMC_TXVLANPACKETS_G 0x0774
-#define DWC_MMC_TXPAUSEPACKETS 0x0770
-#define DWC_MMC_TXEXCESSDEF 0x076C
-#define DWC_MMC_TXPACKETCOUNT_G 0x0768
-#define DWC_MMC_TXOCTETCOUNT_G 0x0764
-#define DWC_MMC_TXCARRIERERROR 0x0760
-#define DWC_MMC_TXEXCESSCOL 0x075C
-#define DWC_MMC_TXLATECOL 0x0758
-#define DWC_MMC_TXDEFERRED 0x0754
-#define DWC_MMC_TXMULTICOL_G 0x0750
-#define DWC_MMC_TXSINGLECOL_G 0x074C
-#define DWC_MMC_TXUNDERFLOWERROR 0x0748
-#define DWC_MMC_TXBROADCASTPACKETS_GB 0x0744
-#define DWC_MMC_TXMULTICASTPACKETS_GB 0x0740
-#define DWC_MMC_TXUNICASTPACKETS_GB 0x073C
-#define DWC_MMC_TX1024TOMAXOCTETS_GB 0x0738
-#define DWC_MMC_TX512TO1023OCTETS_GB 0x0734
-#define DWC_MMC_TX256TO511OCTETS_GB 0x0730
-#define DWC_MMC_TX128TO255OCTETS_GB 0x072C
-#define DWC_MMC_TX65TO127OCTETS_GB 0x0728
-#define DWC_MMC_TX64OCTETS_GB 0x0724
-#define DWC_MMC_TXMULTICASTPACKETS_G 0x0720
-#define DWC_MMC_TXBROADCASTPACKETS_G 0x071C
-#define DWC_MMC_TXPACKETCOUNT_GB 0x0718
-#define DWC_MMC_TXOCTETCOUNT_GB 0x0714
-
-#define DWC_MMC_RXLPITRANSCNTR 0x07F8
-#define DWC_MMC_RXLPIUSCNTR 0x07F4
-#define DWC_MMC_RXCTRLPACKETS_G 0x07E4
-#define DWC_MMC_RXRCVERROR 0x07E0
-#define DWC_MMC_RXWATCHDOG 0x07DC
-#define DWC_MMC_RXVLANPACKETS_GB 0x07D8
-#define DWC_MMC_RXFIFOOVERFLOW 0x07D4
-#define DWC_MMC_RXPAUSEPACKETS 0x07D0
-#define DWC_MMC_RXOUTOFRANGETYPE 0x07CC
-#define DWC_MMC_RXLENGTHERROR 0x07C8
-#define DWC_MMC_RXUNICASTPACKETS_G 0x07C4
-#define DWC_MMC_RX1024TOMAXOCTETS_GB 0x07C0
-#define DWC_MMC_RX512TO1023OCTETS_GB 0x07BC
-#define DWC_MMC_RX256TO511OCTETS_GB 0x07B8
-#define DWC_MMC_RX128TO255OCTETS_GB 0x07B4
-#define DWC_MMC_RX65TO127OCTETS_GB 0x07B0
-#define DWC_MMC_RX64OCTETS_GB 0x07AC
-#define DWC_MMC_RXOVERSIZE_G 0x07A8
-#define DWC_MMC_RXUNDERSIZE_G 0x07A4
-#define DWC_MMC_RXJABBERERROR 0x07A0
-#define DWC_MMC_RXRUNTERROR 0x079C
-#define DWC_MMC_RXALIGNMENTERROR 0x0798
-#define DWC_MMC_RXCRCERROR 0x0794
-#define DWC_MMC_RXMULTICASTPACKETS_G 0x0790
-#define DWC_MMC_RXBROADCASTPACKETS_G 0x078C
-#define DWC_MMC_RXOCTETCOUNT_G 0x0788
-#define DWC_MMC_RXOCTETCOUNT_GB 0x0784
-#define DWC_MMC_RXPACKETCOUNT_GB 0x0780
-
-static int debug = -1;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
-
-/* DMA ring descriptor. These are used as support descriptors for the HW DMA */
-struct ring_desc {
-	struct sk_buff *skb;
-	dma_addr_t mapping;
-	size_t len;
-};
-
-/* DMA hardware descriptor */
-struct dwceqos_dma_desc {
-	u32 des0;
-	u32 des1;
-	u32 des2;
-	u32 des3;
-} ____cacheline_aligned;
-
-struct dwceqos_mmc_counters {
-	__u64 txlpitranscntr;
-	__u64 txpiuscntr;
-	__u64 txoversize_g;
-	__u64 txvlanpackets_g;
-	__u64 txpausepackets;
-	__u64 txexcessdef;
-	__u64 txpacketcount_g;
-	__u64 txoctetcount_g;
-	__u64 txcarriererror;
-	__u64 txexcesscol;
-	__u64 txlatecol;
-	__u64 txdeferred;
-	__u64 txmulticol_g;
-	__u64 txsinglecol_g;
-	__u64 txunderflowerror;
-	__u64 txbroadcastpackets_gb;
-	__u64 txmulticastpackets_gb;
-	__u64 txunicastpackets_gb;
-	__u64 tx1024tomaxoctets_gb;
-	__u64 tx512to1023octets_gb;
-	__u64 tx256to511octets_gb;
-	__u64 tx128to255octets_gb;
-	__u64 tx65to127octets_gb;
-	__u64 tx64octets_gb;
-	__u64 txmulticastpackets_g;
-	__u64 txbroadcastpackets_g;
-	__u64 txpacketcount_gb;
-	__u64 txoctetcount_gb;
-
-	__u64 rxlpitranscntr;
-	__u64 rxlpiuscntr;
-	__u64 rxctrlpackets_g;
-	__u64 rxrcverror;
-	__u64 rxwatchdog;
-	__u64 rxvlanpackets_gb;
-	__u64 rxfifooverflow;
-	__u64 rxpausepackets;
-	__u64 rxoutofrangetype;
-	__u64 rxlengtherror;
-	__u64 rxunicastpackets_g;
-	__u64 rx1024tomaxoctets_gb;
-	__u64 rx512to1023octets_gb;
-	__u64 rx256to511octets_gb;
-	__u64 rx128to255octets_gb;
-	__u64 rx65to127octets_gb;
-	__u64 rx64octets_gb;
-	__u64 rxoversize_g;
-	__u64 rxundersize_g;
-	__u64 rxjabbererror;
-	__u64 rxrunterror;
-	__u64 rxalignmenterror;
-	__u64 rxcrcerror;
-	__u64 rxmulticastpackets_g;
-	__u64 rxbroadcastpackets_g;
-	__u64 rxoctetcount_g;
-	__u64 rxoctetcount_gb;
-	__u64 rxpacketcount_gb;
-};
-
-/* Ethtool statistics */
-
-struct dwceqos_stat {
-	const char stat_name[ETH_GSTRING_LEN];
-	int offset;
-};
-
-#define STAT_ITEM(name, var) \
-	{\
-		name,\
-		offsetof(struct dwceqos_mmc_counters, var),\
-	}
-
-static const struct dwceqos_stat dwceqos_ethtool_stats[] = {
-	STAT_ITEM("tx_bytes", txoctetcount_gb),
-	STAT_ITEM("tx_packets", txpacketcount_gb),
-	STAT_ITEM("tx_unicst_packets", txunicastpackets_gb),
-	STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb),
-	STAT_ITEM("tx_multicast_packets", txmulticastpackets_gb),
-	STAT_ITEM("tx_pause_packets", txpausepackets),
-	STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb),
-	STAT_ITEM("tx_65_to_127_byte_packets", tx65to127octets_gb),
-	STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb),
-	STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb),
-	STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
-	STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb),
-	STAT_ITEM("tx_underflow_errors", txunderflowerror),
-	STAT_ITEM("tx_lpi_count", txlpitranscntr),
-
-	STAT_ITEM("rx_bytes", rxoctetcount_gb),
-	STAT_ITEM("rx_packets", rxpacketcount_gb),
-	STAT_ITEM("rx_unicast_packets", rxunicastpackets_g),
-	STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g),
-	STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g),
-	STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb),
-	STAT_ITEM("rx_pause_packets", rxpausepackets),
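/*
 * [Editor's note: illustrative aside, not part of the removed driver or of
 * this patch.]  The STAT_ITEM table pairs each ethtool string with the
 * offsetof() of the matching counter in struct dwceqos_mmc_counters, so the
 * ethtool handler can fill its output with one generic loop instead of one
 * line per counter.  A minimal sketch of such a consumer, under the
 * assumption of the names above (stats_table_fill() itself is an invented
 * helper; the driver's real consumer is dwceqos_get_ethtool_stats(),
 * further down in this hunk):
 */
static void stats_table_fill(const struct dwceqos_mmc_counters *counters,
			     u64 *data)
{
	const u8 *base = (const u8 *)counters;
	size_t i;

	/* Copy each u64 counter out at the offset recorded in the table. */
	for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); i++)
		memcpy(&data[i], base + dwceqos_ethtool_stats[i].offset,
		       sizeof(u64));
}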
STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb), - STAT_ITEM("rx_65_to_127_byte_packets", rx65to127octets_gb), - STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb), - STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb), - STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb), - STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb), - STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow), - STAT_ITEM("rx_oversize_packets", rxoversize_g), - STAT_ITEM("rx_undersize_packets", rxundersize_g), - STAT_ITEM("rx_jabbers", rxjabbererror), - STAT_ITEM("rx_align_errors", rxalignmenterror), - STAT_ITEM("rx_crc_errors", rxcrcerror), - STAT_ITEM("rx_lpi_count", rxlpitranscntr), -}; - -/* Configuration of AXI bus parameters. - * These values depend on the parameters set on the MAC core as well - * as the AXI interconnect. - */ -struct dwceqos_bus_cfg { - /* Enable AXI low-power interface. */ - bool en_lpi; - /* Limit on number of outstanding AXI write requests. */ - u32 write_requests; - /* Limit on number of outstanding AXI read requests. */ - u32 read_requests; - /* Bitmap of allowed AXI burst lengths, 4-256 beats. */ - u32 burst_map; - /* DMA Programmable burst length*/ - u32 tx_pbl; - u32 rx_pbl; -}; - -struct dwceqos_flowcontrol { - int autoneg; - int rx; - int rx_current; - int tx; - int tx_current; -}; - -struct net_local { - void __iomem *baseaddr; - struct clk *phy_ref_clk; - struct clk *apb_pclk; - - struct device_node *phy_node; - struct net_device *ndev; - struct platform_device *pdev; - - u32 msg_enable; - - struct tasklet_struct tx_bdreclaim_tasklet; - struct workqueue_struct *txtimeout_handler_wq; - struct work_struct txtimeout_reinit; - - phy_interface_t phy_interface; - struct mii_bus *mii_bus; - - unsigned int link; - unsigned int speed; - unsigned int duplex; - - struct napi_struct napi; - - /* DMA Descriptor Areas */ - struct ring_desc *rx_skb; - struct ring_desc *tx_skb; - - struct dwceqos_dma_desc *tx_descs; - struct dwceqos_dma_desc *rx_descs; - - /* DMA Mapped Descriptor areas*/ - dma_addr_t tx_descs_addr; - dma_addr_t rx_descs_addr; - dma_addr_t tx_descs_tail_addr; - dma_addr_t rx_descs_tail_addr; - - size_t tx_free; - size_t tx_next; - size_t rx_cur; - size_t tx_cur; - - /* Spinlocks for accessing DMA Descriptors */ - spinlock_t tx_lock; - - /* Spinlock for register read-modify-writes. */ - spinlock_t hw_lock; - - u32 feature0; - u32 feature1; - u32 feature2; - - struct dwceqos_bus_cfg bus_cfg; - bool en_tx_lpi_clockgating; - - int eee_enabled; - int eee_active; - int csr_val; - u32 gso_size; - - struct dwceqos_mmc_counters mmc_counters; - /* Protect the mmc_counter updates. */ - spinlock_t stats_lock; - u32 mmc_rx_counters_mask; - u32 mmc_tx_counters_mask; - - struct dwceqos_flowcontrol flowcontrol; - - /* Tracks the intermediate state of phy started but hardware - * init not finished yet. 
- */ - bool phy_defer; -}; - -static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, - u32 tx_mask); - -static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr, - unsigned int reg_n); -static int dwceqos_stop(struct net_device *ndev); -static int dwceqos_open(struct net_device *ndev); -static void dwceqos_tx_poll_demand(struct net_local *lp); - -static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable); -static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable); - -static void dwceqos_reset_state(struct net_local *lp); - -#define dwceqos_read(lp, reg) \ - readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg)) -#define dwceqos_write(lp, reg, val) \ - writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg)) - -static void dwceqos_reset_state(struct net_local *lp) -{ - lp->link = 0; - lp->speed = 0; - lp->duplex = DUPLEX_UNKNOWN; - lp->flowcontrol.rx_current = 0; - lp->flowcontrol.tx_current = 0; - lp->eee_active = 0; - lp->eee_enabled = 0; -} - -static void print_descriptor(struct net_local *lp, int index, int tx) -{ - struct dwceqos_dma_desc *dd; - - if (tx) - dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index]; - else - dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index]; - - pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX", - index, dd); - pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2, - dd->des3); -} - -static void print_status(struct net_local *lp) -{ - size_t desci, i; - - pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free, - lp->tx_cur, lp->tx_next); - - print_descriptor(lp, lp->rx_cur, 0); - - for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0; - i < DWCEQOS_TX_DCNT; - ++i) { - print_descriptor(lp, desci, 1); - desci = (desci + 1) % DWCEQOS_TX_DCNT; - } - - pr_info("DMA_Debug_Status0: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0)); - pr_info("DMA_CH0_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_DMA_IS)); - pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n", - dwceqos_read(lp, 0x1144)); - pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n", - dwceqos_read(lp, 0x1154)); - pr_info("MTL_Debug_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST)); - pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST)); - pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST)); - pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC), - dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC)); -} - -static void dwceqos_mdio_set_csr(struct net_local *lp) -{ - int rate = clk_get_rate(lp->apb_pclk); - - if (rate <= 20000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20; - else if (rate <= 35000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35; - else if (rate <= 60000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60; - else if (rate <= 100000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100; - else if (rate <= 150000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150; - else if (rate <= 250000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250; -} - -/* Simple MDIO functions implementing mii_bus */ -static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg) -{ - struct net_local *lp = bus->priv; - u32 regval; - int i; - int data; - - regval = DWCEQOS_MDIO_PHYADDR(mii_id) | - DWCEQOS_MDIO_PHYREG(phyreg) | - DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | - DWCEQOS_MAC_MDIO_ADDR_GB | - DWCEQOS_MAC_MDIO_ADDR_GOC_READ; - dwceqos_write(lp, 
REG_DWCEQOS_MAC_MDIO_ADDR, regval); - - for (i = 0; i < 5; ++i) { - usleep_range(64, 128); - if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & - DWCEQOS_MAC_MDIO_ADDR_GB)) - break; - } - - data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA); - if (i == 5) { - netdev_warn(lp->ndev, "MDIO read timed out\n"); - data = 0xffff; - } - - return data & 0xffff; -} - -static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg, - u16 value) -{ - struct net_local *lp = bus->priv; - u32 regval; - int i; - - dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value); - - regval = DWCEQOS_MDIO_PHYADDR(mii_id) | - DWCEQOS_MDIO_PHYREG(phyreg) | - DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | - DWCEQOS_MAC_MDIO_ADDR_GB | - DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE; - dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval); - - for (i = 0; i < 5; ++i) { - usleep_range(64, 128); - if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & - DWCEQOS_MAC_MDIO_ADDR_GB)) - break; - } - if (i == 5) - netdev_warn(lp->ndev, "MDIO write timed out\n"); - return 0; -} - -static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) -{ - struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - - if (!netif_running(ndev)) - return -EINVAL; - - if (!phydev) - return -ENODEV; - - switch (cmd) { - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSMIIREG: - return phy_mii_ioctl(phydev, rq, cmd); - default: - dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd); - return -EOPNOTSUPP; - } -} - -static void dwceqos_link_down(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - /* Indicate link down to the LPI state machine */ - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_link_up(struct net_local *lp) -{ - struct net_device *ndev = lp->ndev; - u32 regval; - unsigned long flags; - - /* Indicate link up to the LPI state machine */ - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - - lp->eee_active = !phy_init_eee(ndev->phydev, 0); - - /* Check for changed EEE capability */ - if (!lp->eee_active && lp->eee_enabled) { - lp->eee_enabled = 0; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } -} - -static void dwceqos_set_speed(struct net_local *lp) -{ - struct net_device *ndev = lp->ndev; - struct phy_device *phydev = ndev->phydev; - u32 regval; - - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); - regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES | - DWCEQOS_MAC_CFG_DM); - - if (phydev->duplex) - regval |= DWCEQOS_MAC_CFG_DM; - if (phydev->speed == SPEED_10) { - regval |= DWCEQOS_MAC_CFG_PS; - } else if (phydev->speed == SPEED_100) { - regval |= DWCEQOS_MAC_CFG_PS | - DWCEQOS_MAC_CFG_FES; - } else if (phydev->speed != SPEED_1000) { - netdev_err(lp->ndev, - "unknown PHY speed %d\n", - phydev->speed); - return; - } - - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval); -} - -static void dwceqos_adjust_link(struct net_device *ndev) -{ - 
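	/*
	 * [Editor's note, not part of this patch:] phylib invokes this
	 * adjust_link callback whenever the PHY reports a link, speed, or
	 * duplex change, normally under phydev->lock (which is why
	 * dwceqos_init_hw() takes that mutex before calling this function
	 * directly).  The body below follows the canonical pattern: bail
	 * out while lp->phy_defer marks the window between phy_start() and
	 * the end of hardware init, compare phydev->speed/duplex/link
	 * against the values cached in net_local, reprogram the MAC and
	 * flow control only on a real change, update the cache, and emit a
	 * single phy_print_status() per transition.
	 */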
struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - int status_change = 0; - - if (lp->phy_defer) - return; - - if (phydev->link) { - if ((lp->speed != phydev->speed) || - (lp->duplex != phydev->duplex)) { - dwceqos_set_speed(lp); - - lp->speed = phydev->speed; - lp->duplex = phydev->duplex; - status_change = 1; - } - - if (lp->flowcontrol.autoneg) { - lp->flowcontrol.rx = phydev->pause || - phydev->asym_pause; - lp->flowcontrol.tx = phydev->pause || - phydev->asym_pause; - } - - if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) { - if (netif_msg_link(lp)) - netdev_dbg(ndev, "set rx flow to %d\n", - lp->flowcontrol.rx); - dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx); - lp->flowcontrol.rx_current = lp->flowcontrol.rx; - } - if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) { - if (netif_msg_link(lp)) - netdev_dbg(ndev, "set tx flow to %d\n", - lp->flowcontrol.tx); - dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx); - lp->flowcontrol.tx_current = lp->flowcontrol.tx; - } - } - - if (phydev->link != lp->link) { - lp->link = phydev->link; - status_change = 1; - } - - if (status_change) { - if (phydev->link) { - netif_trans_update(lp->ndev); - dwceqos_link_up(lp); - } else { - dwceqos_link_down(lp); - } - phy_print_status(phydev); - } -} - -static int dwceqos_mii_probe(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = NULL; - - if (lp->phy_node) { - phydev = of_phy_connect(lp->ndev, - lp->phy_node, - &dwceqos_adjust_link, - 0, - lp->phy_interface); - - if (!phydev) { - netdev_err(ndev, "no PHY found\n"); - return -1; - } - } else { - netdev_err(ndev, "no PHY configured\n"); - return -ENODEV; - } - - if (netif_msg_probe(lp)) - phy_attached_info(phydev); - - phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause; - - lp->link = 0; - lp->speed = 0; - lp->duplex = DUPLEX_UNKNOWN; - lp->flowcontrol.autoneg = AUTONEG_ENABLE; - - return 0; -} - -static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index) -{ - struct sk_buff *new_skb; - dma_addr_t new_skb_baddr = 0; - - new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); - if (!new_skb) { - netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index); - goto err_out; - } - - new_skb_baddr = dma_map_single(lp->ndev->dev.parent, - new_skb->data, DWCEQOS_RX_BUF_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { - netdev_err(lp->ndev, "DMA map error\n"); - dev_kfree_skb(new_skb); - new_skb = NULL; - goto err_out; - } - - lp->rx_descs[index].des0 = new_skb_baddr; - lp->rx_descs[index].des1 = 0; - lp->rx_descs[index].des2 = 0; - lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE | - DWCEQOS_DMA_RDES3_BUF1V | - DWCEQOS_DMA_RDES3_OWN; - - lp->rx_skb[index].mapping = new_skb_baddr; - lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE; - -err_out: - lp->rx_skb[index].skb = new_skb; -} - -static void dwceqos_clean_rings(struct net_local *lp) -{ - int i; - - if (lp->rx_skb) { - for (i = 0; i < DWCEQOS_RX_DCNT; i++) { - if (lp->rx_skb[i].skb) { - dma_unmap_single(lp->ndev->dev.parent, - lp->rx_skb[i].mapping, - lp->rx_skb[i].len, - DMA_FROM_DEVICE); - - dev_kfree_skb(lp->rx_skb[i].skb); - lp->rx_skb[i].skb = NULL; - lp->rx_skb[i].mapping = 0; - } - } - } - - if (lp->tx_skb) { - for (i = 0; i < DWCEQOS_TX_DCNT; i++) { - if (lp->tx_skb[i].skb) { - dev_kfree_skb(lp->tx_skb[i].skb); - lp->tx_skb[i].skb = NULL; - } - if (lp->tx_skb[i].mapping) { - dma_unmap_single(lp->ndev->dev.parent, - 
lp->tx_skb[i].mapping, - lp->tx_skb[i].len, - DMA_TO_DEVICE); - lp->tx_skb[i].mapping = 0; - } - } - } -} - -static void dwceqos_descriptor_free(struct net_local *lp) -{ - int size; - - dwceqos_clean_rings(lp); - - kfree(lp->tx_skb); - lp->tx_skb = NULL; - kfree(lp->rx_skb); - lp->rx_skb = NULL; - - size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); - if (lp->rx_descs) { - dma_free_coherent(lp->ndev->dev.parent, size, - (void *)(lp->rx_descs), lp->rx_descs_addr); - lp->rx_descs = NULL; - } - - size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); - if (lp->tx_descs) { - dma_free_coherent(lp->ndev->dev.parent, size, - (void *)(lp->tx_descs), lp->tx_descs_addr); - lp->tx_descs = NULL; - } -} - -static int dwceqos_descriptor_init(struct net_local *lp) -{ - int size; - u32 i; - - lp->gso_size = 0; - - lp->tx_skb = NULL; - lp->rx_skb = NULL; - lp->rx_descs = NULL; - lp->tx_descs = NULL; - - /* Reset the DMA indexes */ - lp->rx_cur = 0; - lp->tx_cur = 0; - lp->tx_next = 0; - lp->tx_free = DWCEQOS_TX_DCNT; - - /* Allocate Ring descriptors */ - size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc); - lp->rx_skb = kzalloc(size, GFP_KERNEL); - if (!lp->rx_skb) - goto err_out; - - size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc); - lp->tx_skb = kzalloc(size, GFP_KERNEL); - if (!lp->tx_skb) - goto err_out; - - /* Allocate DMA descriptors */ - size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); - lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->rx_descs_addr, GFP_KERNEL); - if (!lp->rx_descs) - goto err_out; - lp->rx_descs_tail_addr = lp->rx_descs_addr + - sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT; - - size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); - lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->tx_descs_addr, GFP_KERNEL); - if (!lp->tx_descs) - goto err_out; - lp->tx_descs_tail_addr = lp->tx_descs_addr + - sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT; - - /* Initialize RX Ring Descriptors and buffers */ - for (i = 0; i < DWCEQOS_RX_DCNT; ++i) { - dwceqos_alloc_rxring_desc(lp, i); - if (!(lp->rx_skb[lp->rx_cur].skb)) - goto err_out; - } - - /* Initialize TX Descriptors */ - for (i = 0; i < DWCEQOS_TX_DCNT; ++i) { - lp->tx_descs[i].des0 = 0; - lp->tx_descs[i].des1 = 0; - lp->tx_descs[i].des2 = 0; - lp->tx_descs[i].des3 = 0; - } - - /* Make descriptor writes visible to the DMA. 
*/ - wmb(); - - return 0; - -err_out: - dwceqos_descriptor_free(lp); - return -ENOMEM; -} - -static int dwceqos_packet_avail(struct net_local *lp) -{ - return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN); -} - -static void dwceqos_get_hwfeatures(struct net_local *lp) -{ - lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0); - lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1); - lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2); -} - -static void dwceqos_dma_enable_txirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval |= DWCEQOS_DMA_CH0_IE_TIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_dma_disable_txirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval &= ~DWCEQOS_DMA_CH0_IE_TIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_dma_enable_rxirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval |= DWCEQOS_DMA_CH0_IE_RIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_dma_disable_rxirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval &= ~DWCEQOS_DMA_CH0_IE_RIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_enable_mmc_interrupt(struct net_local *lp) -{ - dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0); - dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0); -} - -static int dwceqos_mii_init(struct net_local *lp) -{ - int ret = -ENXIO; - struct resource res; - struct device_node *mdionode; - - mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio"); - - if (!mdionode) - return 0; - - lp->mii_bus = mdiobus_alloc(); - if (!lp->mii_bus) { - ret = -ENOMEM; - goto err_out; - } - - lp->mii_bus->name = "DWCEQOS MII bus"; - lp->mii_bus->read = &dwceqos_mdio_read; - lp->mii_bus->write = &dwceqos_mdio_write; - lp->mii_bus->priv = lp; - lp->mii_bus->parent = &lp->pdev->dev; - - of_address_to_resource(lp->pdev->dev.of_node, 0, &res); - snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", - (unsigned long long)res.start); - if (of_mdiobus_register(lp->mii_bus, mdionode)) - goto err_out_free_mdiobus; - - return 0; - -err_out_free_mdiobus: - mdiobus_free(lp->mii_bus); -err_out: - of_node_put(mdionode); - return ret; -} - -/* DMA reset. When issued also resets all MTL and MAC registers as well */ -static void dwceqos_reset_hw(struct net_local *lp) -{ - /* Wait (at most) 0.5 seconds for DMA reset*/ - int i = 5000; - u32 reg; - - /* Force gigabit to guarantee a TX clock for GMII. 
*/ - reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); - reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES); - reg |= DWCEQOS_MAC_CFG_DM; - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg); - - dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR); - - do { - udelay(100); - i--; - reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE); - } while ((reg & DWCEQOS_DMA_MODE_SWR) && i); - /* We might experience a timeout if the chip clock mux is broken */ - if (!i) - netdev_err(lp->ndev, "DMA reset timed out!\n"); -} - -static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status) -{ - if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) { - netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n", - dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ? - "read" : "write", - dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ? - "descr" : "data", - dma_status); - - print_status(lp); - } - if (dma_status & DWCEQOS_DMA_CH0_IS_REB) { - netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n", - dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ? - "read" : "write", - dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ? - "descr" : "data", - dma_status); - - print_status(lp); - } -} - -static void dwceqos_mmc_interrupt(struct net_local *lp) -{ - unsigned long flags; - - spin_lock_irqsave(&lp->stats_lock, flags); - - /* A latched mmc interrupt can not be masked, we must read - * all the counters with an interrupt pending. - */ - dwceqos_read_mmc_counters(lp, - dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ), - dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ)); - - spin_unlock_irqrestore(&lp->stats_lock, flags); -} - -static void dwceqos_mac_interrupt(struct net_local *lp) -{ - u32 cause; - - cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS); - - if (cause & DWCEQOS_MAC_IS_MMC_INT) - dwceqos_mmc_interrupt(lp); -} - -static irqreturn_t dwceqos_interrupt(int irq, void *dev_id) -{ - struct net_device *ndev = dev_id; - struct net_local *lp = netdev_priv(ndev); - - u32 cause; - u32 dma_status; - irqreturn_t ret = IRQ_NONE; - - cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS); - /* DMA Channel 0 Interrupt */ - if (cause & DWCEQOS_DMA_IS_DC0IS) { - dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA); - - /* Transmit Interrupt */ - if (dma_status & DWCEQOS_DMA_CH0_IS_TI) { - tasklet_schedule(&lp->tx_bdreclaim_tasklet); - dwceqos_dma_disable_txirq(lp); - } - - /* Receive Interrupt */ - if (dma_status & DWCEQOS_DMA_CH0_IS_RI) { - /* Disable RX IRQs */ - dwceqos_dma_disable_rxirq(lp); - napi_schedule(&lp->napi); - } - - /* Fatal Bus Error interrupt */ - if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) { - dwceqos_fatal_bus_error(lp, dma_status); - - /* errata 9000831707 */ - dma_status |= DWCEQOS_DMA_CH0_IS_TEB | - DWCEQOS_DMA_CH0_IS_REB; - } - - /* Ack all DMA Channel 0 IRQs */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status); - ret = IRQ_HANDLED; - } - - if (cause & DWCEQOS_DMA_IS_MTLIS) { - u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL); - - dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val); - ret = IRQ_HANDLED; - } - - if (cause & DWCEQOS_DMA_IS_MACIS) { - dwceqos_mac_interrupt(lp); - ret = IRQ_HANDLED; - } - return ret; -} - -static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL); - if (enable) - regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE; - else - regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE; - dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval); - - 
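	/*
	 * [Editor's note, not part of this patch:] shared MAC registers in
	 * this driver are always updated with this read-modify-write shape
	 * under lp->hw_lock, since both process context and the IRQ-driven
	 * paths touch them.  A debug trace at this exact step would look
	 * like the following hypothetical addition (all names are in scope
	 * here; netif_msg_link()/netdev_dbg() are the standard helpers the
	 * driver already uses elsewhere):
	 */
	if (netif_msg_link(lp))
		netdev_dbg(lp->ndev, "rx flow control %sabled, RX_FLOW_CTRL=%08x\n",
			   enable ? "en" : "dis", regval);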
spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - - /* MTL flow control */ - regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); - if (enable) - regval |= DWCEQOS_MTL_RXQ_EHFC; - else - regval &= ~DWCEQOS_MTL_RXQ_EHFC; - - dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); - - /* MAC flow control */ - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW); - if (enable) - regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE; - else - regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE; - dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); - - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_configure_flow_control(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - int RQS, RFD, RFA; - - spin_lock_irqsave(&lp->hw_lock, flags); - - regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); - - /* The queue size is in units of 256 bytes. We want 512 bytes units for - * the threshold fields. - */ - RQS = ((regval >> 20) & 0x3FF) + 1; - RQS /= 2; - - /* The thresholds are relative to a full queue, with a bias - * of 1 KiByte below full. - */ - RFD = RQS / 2 - 2; - RFA = RQS / 8 - 2; - - regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8); - - if (RFD >= 0 && RFA >= 0) { - dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); - } else { - netdev_warn(lp->ndev, - "FIFO too small for flow control."); - } - - regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) | - DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS; - - dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); - - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_configure_clock(struct net_local *lp) -{ - unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000; - - BUG_ON(!rate_mhz); - - dwceqos_write(lp, - REG_DWCEQOS_MAC_1US_TIC_COUNTER, - DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1)); -} - -static void dwceqos_configure_bus(struct net_local *lp) -{ - u32 sysbus_reg; - - /* N.B. We do not support the Fixed Burst mode because it - * opens a race window by making HW access to DMA descriptors - * non-atomic. - */ - - sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL; - - if (lp->bus_cfg.en_lpi) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI; - - if (lp->bus_cfg.burst_map) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( - lp->bus_cfg.burst_map); - else - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( - DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT); - - if (lp->bus_cfg.read_requests) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( - lp->bus_cfg.read_requests - 1); - else - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( - DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT); - - if (lp->bus_cfg.write_requests) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( - lp->bus_cfg.write_requests - 1); - else - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( - DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT); - - if (netif_msg_hw(lp)) - netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg); - - dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg); -} - -static void dwceqos_init_hw(struct net_local *lp) -{ - struct net_device *ndev = lp->ndev; - u32 regval; - u32 buswidth; - u32 dma_skip; - - /* Software reset */ - dwceqos_reset_hw(lp); - - dwceqos_configure_bus(lp); - - /* Probe data bus width, 32/64/128 bits. 
*/ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL); - buswidth = (regval ^ 0xF) + 1; - - /* Cache-align dma descriptors. */ - dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL, - DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) | - DWCEQOS_DMA_CH_CTRL_PBLX8); - - /* Initialize DMA Channel 0 */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST, - (u32)lp->tx_descs_addr); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST, - (u32)lp->rx_descs_addr); - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, - lp->tx_descs_tail_addr); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL, - lp->rx_descs_tail_addr); - - if (lp->bus_cfg.tx_pbl) - regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl); - else - regval = DWCEQOS_DMA_CH_CTRL_PBL(2); - - /* Enable TSO if the HW support it */ - if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN) - regval |= DWCEQOS_DMA_CH_TX_TSE; - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval); - - if (lp->bus_cfg.rx_pbl) - regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl); - else - regval = DWCEQOS_DMA_CH_CTRL_PBL(2); - - regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); - - regval |= DWCEQOS_DMA_CH_CTRL_START; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); - - /* Initialize MTL Queues */ - regval = DWCEQOS_MTL_SCHALG_STRICT; - dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval); - - regval = DWCEQOS_MTL_TXQ_SIZE( - DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) | - DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF | - DWCEQOS_MTL_TXQ_TTC512; - dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval); - - regval = DWCEQOS_MTL_RXQ_SIZE( - DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) | - DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF; - dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); - - dwceqos_configure_flow_control(lp); - - /* Initialize MAC */ - dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); - - lp->eee_enabled = 0; - - dwceqos_configure_clock(lp); - - /* MMC counters */ - - /* probe implemented counters */ - dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u); - dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u); - lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK); - lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK); - - dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST | - DWCEQOS_MMC_CTRL_RSTONRD); - dwceqos_enable_mmc_interrupt(lp); - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0); - dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0); - - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC | - DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); - - /* Start TX DMA */ - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, - regval | DWCEQOS_DMA_CH_CTRL_START); - - /* Enable MAC TX/RX */ - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, - regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); - - lp->phy_defer = false; - mutex_lock(&ndev->phydev->lock); - phy_read_status(ndev->phydev); - dwceqos_adjust_link(lp->ndev); - mutex_unlock(&ndev->phydev->lock); -} - -static void dwceqos_tx_reclaim(unsigned long data) -{ - struct net_device *ndev = 
(struct net_device *)data; - struct net_local *lp = netdev_priv(ndev); - unsigned int tx_bytes = 0; - unsigned int tx_packets = 0; - - spin_lock(&lp->tx_lock); - - while (lp->tx_free < DWCEQOS_TX_DCNT) { - struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur]; - struct ring_desc *rd = &lp->tx_skb[lp->tx_cur]; - - /* Descriptor still being held by DMA ? */ - if (dd->des3 & DWCEQOS_DMA_TDES3_OWN) - break; - - if (rd->mapping) - dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len, - DMA_TO_DEVICE); - - if (unlikely(rd->skb)) { - ++tx_packets; - tx_bytes += rd->skb->len; - dev_consume_skb_any(rd->skb); - } - - rd->skb = NULL; - rd->mapping = 0; - lp->tx_free++; - lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT; - - if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) && - (dd->des3 & DWCEQOS_DMA_RDES3_ES)) { - if (netif_msg_tx_err(lp)) - netdev_err(ndev, "TX Error, TDES3 = 0x%x\n", - dd->des3); - if (netif_msg_hw(lp)) - print_status(lp); - } - } - spin_unlock(&lp->tx_lock); - - netdev_completed_queue(ndev, tx_packets, tx_bytes); - - dwceqos_dma_enable_txirq(lp); - netif_wake_queue(ndev); -} - -static int dwceqos_rx(struct net_local *lp, int budget) -{ - struct sk_buff *skb; - u32 tot_size = 0; - unsigned int n_packets = 0; - unsigned int n_descs = 0; - u32 len; - - struct dwceqos_dma_desc *dd; - struct sk_buff *new_skb; - dma_addr_t new_skb_baddr = 0; - - while (n_descs < budget) { - if (!dwceqos_packet_avail(lp)) - break; - - new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); - if (!new_skb) { - netdev_err(lp->ndev, "no memory for new sk_buff\n"); - break; - } - - /* Get dma handle of skb->data */ - new_skb_baddr = (u32)dma_map_single(lp->ndev->dev.parent, - new_skb->data, - DWCEQOS_RX_BUF_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { - netdev_err(lp->ndev, "DMA map error\n"); - dev_kfree_skb(new_skb); - break; - } - - /* Read descriptor data after reading owner bit. */ - dma_rmb(); - - dd = &lp->rx_descs[lp->rx_cur]; - len = DWCEQOS_DMA_RDES3_PL(dd->des3); - skb = lp->rx_skb[lp->rx_cur].skb; - - /* Unmap old buffer */ - dma_unmap_single(lp->ndev->dev.parent, - lp->rx_skb[lp->rx_cur].mapping, - lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE); - - /* Discard packet on reception error or bad checksum */ - if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) || - (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) { - dev_kfree_skb(skb); - skb = NULL; - } else { - skb_put(skb, len); - skb->protocol = eth_type_trans(skb, lp->ndev); - switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) { - case DWCEQOS_DMA_RDES1_PT_UDP: - case DWCEQOS_DMA_RDES1_PT_TCP: - case DWCEQOS_DMA_RDES1_PT_ICMP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - default: - skb->ip_summed = CHECKSUM_NONE; - break; - } - } - - if (unlikely(!skb)) { - if (netif_msg_rx_err(lp)) - netdev_dbg(lp->ndev, "rx error: des3=%X\n", - lp->rx_descs[lp->rx_cur].des3); - } else { - tot_size += skb->len; - n_packets++; - - netif_receive_skb(skb); - } - - lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr; - lp->rx_descs[lp->rx_cur].des1 = 0; - lp->rx_descs[lp->rx_cur].des2 = 0; - /* The DMA must observe des0/1/2 written before des3. 
*/ - wmb(); - lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE | - DWCEQOS_DMA_RDES3_OWN | - DWCEQOS_DMA_RDES3_BUF1V; - - lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr; - lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE; - lp->rx_skb[lp->rx_cur].skb = new_skb; - - n_descs++; - lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT; - } - - /* Make sure any ownership update is written to the descriptors before - * DMA wakeup. - */ - wmb(); - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI); - /* Wake up RX by writing tail pointer */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL, - lp->rx_descs_tail_addr); - - return n_descs; -} - -static int dwceqos_rx_poll(struct napi_struct *napi, int budget) -{ - struct net_local *lp = container_of(napi, struct net_local, napi); - int work_done = 0; - - work_done = dwceqos_rx(lp, budget - work_done); - - if (!dwceqos_packet_avail(lp) && work_done < budget) { - napi_complete(napi); - dwceqos_dma_enable_rxirq(lp); - } else { - work_done = budget; - } - - return work_done; -} - -/* Reinitialize function if a TX timed out */ -static void dwceqos_reinit_for_txtimeout(struct work_struct *data) -{ - struct net_local *lp = container_of(data, struct net_local, - txtimeout_reinit); - - netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n", - DWCEQOS_TX_TIMEOUT); - - if (netif_msg_hw(lp)) - print_status(lp); - - rtnl_lock(); - dwceqos_stop(lp->ndev); - dwceqos_open(lp->ndev); - rtnl_unlock(); -} - -/* DT Probing function called by main probe */ -static inline int dwceqos_probe_config_dt(struct platform_device *pdev) -{ - struct net_device *ndev; - struct net_local *lp; - const void *mac_address; - struct dwceqos_bus_cfg *bus_cfg; - struct device_node *np = pdev->dev.of_node; - - ndev = platform_get_drvdata(pdev); - lp = netdev_priv(ndev); - bus_cfg = &lp->bus_cfg; - - /* Set the MAC address. */ - mac_address = of_get_mac_address(pdev->dev.of_node); - if (mac_address) - ether_addr_copy(ndev->dev_addr, mac_address); - - /* These are all optional parameters */ - lp->en_tx_lpi_clockgating = of_property_read_bool(np, - "snps,en-tx-lpi-clockgating"); - bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi"); - of_property_read_u32(np, "snps,write-requests", - &bus_cfg->write_requests); - of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests); - of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map); - of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl); - of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl); - - netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n", - bus_cfg->en_lpi, - bus_cfg->write_requests, - bus_cfg->read_requests, - bus_cfg->burst_map, - bus_cfg->rx_pbl, - bus_cfg->tx_pbl); - - return 0; -} - -static int dwceqos_open(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - int res; - - dwceqos_reset_state(lp); - res = dwceqos_descriptor_init(lp); - if (res) { - netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res); - return res; - } - netdev_reset_queue(ndev); - - /* The dwceqos reset state machine requires all phy clocks to complete, - * hence the unusual init order with phy_start first. - */ - lp->phy_defer = true; - phy_start(ndev->phydev); - dwceqos_init_hw(lp); - napi_enable(&lp->napi); - - netif_start_queue(ndev); - tasklet_enable(&lp->tx_bdreclaim_tasklet); - - /* Enable Interrupts -- do this only after we enable NAPI and the - * tasklet. 
- */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, - DWCEQOS_DMA_CH0_IE_NIE | - DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE | - DWCEQOS_DMA_CH0_IE_AIE | - DWCEQOS_DMA_CH0_IE_FBEE); - - return 0; -} - -static bool dweqos_is_tx_dma_suspended(struct net_local *lp) -{ - u32 reg; - - reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0); - reg = DMA_GET_TX_STATE_CH0(reg); - - return reg == DMA_TX_CH_SUSPENDED; -} - -static void dwceqos_drain_dma(struct net_local *lp) -{ - /* Wait for all pending TX buffers to be sent. Upper limit based - * on max frame size on a 10 Mbit link. - */ - size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100; - - while (!dweqos_is_tx_dma_suspended(lp) && limit--) - usleep_range(100, 200); -} - -static int dwceqos_stop(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - - tasklet_disable(&lp->tx_bdreclaim_tasklet); - napi_disable(&lp->napi); - - /* Stop all tx before we drain the tx dma. */ - netif_tx_lock_bh(lp->ndev); - netif_stop_queue(ndev); - netif_tx_unlock_bh(lp->ndev); - - dwceqos_drain_dma(lp); - dwceqos_reset_hw(lp); - phy_stop(ndev->phydev); - - dwceqos_descriptor_free(lp); - - return 0; -} - -static void dwceqos_dmadesc_set_ctx(struct net_local *lp, - unsigned short gso_size) -{ - struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next]; - - dd->des0 = 0; - dd->des1 = 0; - dd->des2 = gso_size; - dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV; - - lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; -} - -static void dwceqos_tx_poll_demand(struct net_local *lp) -{ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, - lp->tx_descs_tail_addr); -} - -struct dwceqos_tx { - size_t nr_descriptors; - size_t initial_descriptor; - size_t last_descriptor; - size_t prev_gso_size; - size_t network_header_len; -}; - -static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - size_t n = 1; - size_t i; - - if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) - ++n; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) / - BYTES_PER_DMA_DESC; - } - - tx->nr_descriptors = n; - tx->initial_descriptor = lp->tx_next; - tx->last_descriptor = lp->tx_next; - tx->prev_gso_size = lp->gso_size; - - tx->network_header_len = skb_transport_offset(skb); - if (skb_is_gso(skb)) - tx->network_header_len += tcp_hdrlen(skb); -} - -static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - struct ring_desc *rd; - struct dwceqos_dma_desc *dd; - size_t payload_len; - dma_addr_t dma_handle; - - if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) { - dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size); - lp->gso_size = skb_shinfo(skb)->gso_size; - } - - dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - - if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) { - netdev_err(lp->ndev, "TX DMA Mapping error\n"); - return -ENOMEM; - } - - rd = &lp->tx_skb[lp->tx_next]; - dd = &lp->tx_descs[lp->tx_next]; - - rd->skb = NULL; - rd->len = skb_headlen(skb); - rd->mapping = dma_handle; - - /* Set up DMA Descriptor */ - dd->des0 = dma_handle; - - if (skb_is_gso(skb)) { - payload_len = skb_headlen(skb) - tx->network_header_len; - - if (payload_len) - dd->des1 = dma_handle + tx->network_header_len; - dd->des2 = tx->network_header_len | - DWCEQOS_DMA_DES2_B2L(payload_len); - dd->des3 = DWCEQOS_DMA_TDES3_TSE | - 
DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) | - (skb->len - tx->network_header_len); - } else { - dd->des1 = 0; - dd->des2 = skb_headlen(skb); - dd->des3 = skb->len; - - switch (skb->ip_summed) { - case CHECKSUM_PARTIAL: - dd->des3 |= DWCEQOS_DMA_TDES3_CA; - case CHECKSUM_NONE: - case CHECKSUM_UNNECESSARY: - case CHECKSUM_COMPLETE: - default: - break; - } - } - - dd->des3 |= DWCEQOS_DMA_TDES3_FD; - if (lp->tx_next != tx->initial_descriptor) - dd->des3 |= DWCEQOS_DMA_TDES3_OWN; - - tx->last_descriptor = lp->tx_next; - lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; - - return 0; -} - -static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - struct ring_desc *rd = NULL; - struct dwceqos_dma_desc *dd; - dma_addr_t dma_handle; - size_t i; - - /* Setup more ring and DMA descriptor if the packet is fragmented */ - for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - size_t frag_size; - size_t consumed_size; - - /* Map DMA Area */ - dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0, - skb_frag_size(frag), - DMA_TO_DEVICE); - if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) { - netdev_err(lp->ndev, "DMA Mapping error\n"); - return -ENOMEM; - } - - /* order-3 fragments span more than one descriptor. */ - frag_size = skb_frag_size(frag); - consumed_size = 0; - while (consumed_size < frag_size) { - size_t dma_size = min_t(size_t, 16376, - frag_size - consumed_size); - - rd = &lp->tx_skb[lp->tx_next]; - memset(rd, 0, sizeof(*rd)); - - dd = &lp->tx_descs[lp->tx_next]; - - /* Set DMA Descriptor fields */ - dd->des0 = dma_handle + consumed_size; - dd->des1 = 0; - dd->des2 = dma_size; - - if (skb_is_gso(skb)) - dd->des3 = (skb->len - tx->network_header_len); - else - dd->des3 = skb->len; - - dd->des3 |= DWCEQOS_DMA_TDES3_OWN; - - tx->last_descriptor = lp->tx_next; - lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; - consumed_size += dma_size; - } - - rd->len = skb_frag_size(frag); - rd->mapping = dma_handle; - } - - return 0; -} - -static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD; - lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC; - - lp->tx_skb[tx->last_descriptor].skb = skb; - - /* Make all descriptor updates visible to the DMA before setting the - * owner bit. - */ - wmb(); - - lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN; - - /* Make the owner bit visible before TX wakeup. 
*/ - wmb(); - - dwceqos_tx_poll_demand(lp); -} - -static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx) -{ - size_t i = tx->initial_descriptor; - - while (i != lp->tx_next) { - if (lp->tx_skb[i].mapping) - dma_unmap_single(lp->ndev->dev.parent, - lp->tx_skb[i].mapping, - lp->tx_skb[i].len, - DMA_TO_DEVICE); - - lp->tx_skb[i].mapping = 0; - lp->tx_skb[i].skb = NULL; - - memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i])); - - i = (i + 1) % DWCEQOS_TX_DCNT; - } - - lp->tx_next = tx->initial_descriptor; - lp->gso_size = tx->prev_gso_size; -} - -static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - struct dwceqos_tx trans; - int err; - - dwceqos_tx_prepare(skb, lp, &trans); - if (lp->tx_free < trans.nr_descriptors) { - netif_stop_queue(ndev); - return NETDEV_TX_BUSY; - } - - err = dwceqos_tx_linear(skb, lp, &trans); - if (err) - goto tx_error; - - err = dwceqos_tx_frags(skb, lp, &trans); - if (err) - goto tx_error; - - WARN_ON(lp->tx_next != - ((trans.initial_descriptor + trans.nr_descriptors) % - DWCEQOS_TX_DCNT)); - - spin_lock_bh(&lp->tx_lock); - lp->tx_free -= trans.nr_descriptors; - dwceqos_tx_finalize(skb, lp, &trans); - netdev_sent_queue(ndev, skb->len); - spin_unlock_bh(&lp->tx_lock); - - netif_trans_update(ndev); - return 0; - -tx_error: - dwceqos_tx_rollback(lp, &trans); - dev_kfree_skb_any(skb); - return 0; -} - -/* Set MAC address and then update HW accordingly */ -static int dwceqos_set_mac_address(struct net_device *ndev, void *addr) -{ - struct net_local *lp = netdev_priv(ndev); - struct sockaddr *hwaddr = (struct sockaddr *)addr; - - if (netif_running(ndev)) - return -EBUSY; - - if (!is_valid_ether_addr(hwaddr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len); - - dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); - return 0; -} - -static void dwceqos_tx_timeout(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - - queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit); -} - -static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr, - unsigned int reg_n) -{ - unsigned long data; - - data = (addr[5] << 8) | addr[4]; - dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), - data | DWCEQOS_MAC_MAC_ADDR_HI_EN); - data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; - dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data); -} - -static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n) -{ - /* Do not disable MAC address 0 */ - if (reg_n != 0) - dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0); -} - -static void dwceqos_set_rx_mode(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - u32 regval = 0; - u32 mc_filter[2]; - int reg = 1; - struct netdev_hw_addr *ha; - unsigned int max_mac_addr; - - max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); - - if (ndev->flags & IFF_PROMISC) { - regval = DWCEQOS_MAC_PKT_FILT_PR; - } else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) || - (ndev->flags & IFF_ALLMULTI))) { - regval = DWCEQOS_MAC_PKT_FILT_PM; - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff); - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff); - } else if (!netdev_mc_empty(ndev)) { - regval = DWCEQOS_MAC_PKT_FILT_HMC; - memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(ha, ndev) { - /* The upper 6 bits of the calculated CRC are used to - * index the contens of the hash table - */ - int bit_nr = 
bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; - /* The most significant bit determines the register - * to use (H/L) while the other 5 bits determine - * the bit within the register. - */ - mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - } - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]); - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]); - } - if (netdev_uc_count(ndev) > max_mac_addr) { - regval |= DWCEQOS_MAC_PKT_FILT_PR; - } else { - netdev_for_each_uc_addr(ha, ndev) { - dwceqos_set_umac_addr(lp, ha->addr, reg); - reg++; - } - for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++) - dwceqos_disable_umac_addr(lp, reg); - } - dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void dwceqos_poll_controller(struct net_device *ndev) -{ - disable_irq(ndev->irq); - dwceqos_interrupt(ndev->irq, ndev); - enable_irq(ndev->irq); -} -#endif - -static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, - u32 tx_mask) -{ - if (tx_mask & BIT(27)) - lp->mmc_counters.txlpitranscntr += - dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR); - if (tx_mask & BIT(26)) - lp->mmc_counters.txpiuscntr += - dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR); - if (tx_mask & BIT(25)) - lp->mmc_counters.txoversize_g += - dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G); - if (tx_mask & BIT(24)) - lp->mmc_counters.txvlanpackets_g += - dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G); - if (tx_mask & BIT(23)) - lp->mmc_counters.txpausepackets += - dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS); - if (tx_mask & BIT(22)) - lp->mmc_counters.txexcessdef += - dwceqos_read(lp, DWC_MMC_TXEXCESSDEF); - if (tx_mask & BIT(21)) - lp->mmc_counters.txpacketcount_g += - dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G); - if (tx_mask & BIT(20)) - lp->mmc_counters.txoctetcount_g += - dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G); - if (tx_mask & BIT(19)) - lp->mmc_counters.txcarriererror += - dwceqos_read(lp, DWC_MMC_TXCARRIERERROR); - if (tx_mask & BIT(18)) - lp->mmc_counters.txexcesscol += - dwceqos_read(lp, DWC_MMC_TXEXCESSCOL); - if (tx_mask & BIT(17)) - lp->mmc_counters.txlatecol += - dwceqos_read(lp, DWC_MMC_TXLATECOL); - if (tx_mask & BIT(16)) - lp->mmc_counters.txdeferred += - dwceqos_read(lp, DWC_MMC_TXDEFERRED); - if (tx_mask & BIT(15)) - lp->mmc_counters.txmulticol_g += - dwceqos_read(lp, DWC_MMC_TXMULTICOL_G); - if (tx_mask & BIT(14)) - lp->mmc_counters.txsinglecol_g += - dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G); - if (tx_mask & BIT(13)) - lp->mmc_counters.txunderflowerror += - dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR); - if (tx_mask & BIT(12)) - lp->mmc_counters.txbroadcastpackets_gb += - dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB); - if (tx_mask & BIT(11)) - lp->mmc_counters.txmulticastpackets_gb += - dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB); - if (tx_mask & BIT(10)) - lp->mmc_counters.txunicastpackets_gb += - dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB); - if (tx_mask & BIT(9)) - lp->mmc_counters.tx1024tomaxoctets_gb += - dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB); - if (tx_mask & BIT(8)) - lp->mmc_counters.tx512to1023octets_gb += - dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB); - if (tx_mask & BIT(7)) - lp->mmc_counters.tx256to511octets_gb += - dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB); - if (tx_mask & BIT(6)) - lp->mmc_counters.tx128to255octets_gb += - dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB); - if (tx_mask & BIT(5)) - lp->mmc_counters.tx65to127octets_gb += - dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB); - if (tx_mask & BIT(4)) - lp->mmc_counters.tx64octets_gb 
+= - dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB); - if (tx_mask & BIT(3)) - lp->mmc_counters.txmulticastpackets_g += - dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G); - if (tx_mask & BIT(2)) - lp->mmc_counters.txbroadcastpackets_g += - dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G); - if (tx_mask & BIT(1)) - lp->mmc_counters.txpacketcount_gb += - dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB); - if (tx_mask & BIT(0)) - lp->mmc_counters.txoctetcount_gb += - dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB); - - if (rx_mask & BIT(27)) - lp->mmc_counters.rxlpitranscntr += - dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR); - if (rx_mask & BIT(26)) - lp->mmc_counters.rxlpiuscntr += - dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR); - if (rx_mask & BIT(25)) - lp->mmc_counters.rxctrlpackets_g += - dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G); - if (rx_mask & BIT(24)) - lp->mmc_counters.rxrcverror += - dwceqos_read(lp, DWC_MMC_RXRCVERROR); - if (rx_mask & BIT(23)) - lp->mmc_counters.rxwatchdog += - dwceqos_read(lp, DWC_MMC_RXWATCHDOG); - if (rx_mask & BIT(22)) - lp->mmc_counters.rxvlanpackets_gb += - dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB); - if (rx_mask & BIT(21)) - lp->mmc_counters.rxfifooverflow += - dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW); - if (rx_mask & BIT(20)) - lp->mmc_counters.rxpausepackets += - dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS); - if (rx_mask & BIT(19)) - lp->mmc_counters.rxoutofrangetype += - dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE); - if (rx_mask & BIT(18)) - lp->mmc_counters.rxlengtherror += - dwceqos_read(lp, DWC_MMC_RXLENGTHERROR); - if (rx_mask & BIT(17)) - lp->mmc_counters.rxunicastpackets_g += - dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G); - if (rx_mask & BIT(16)) - lp->mmc_counters.rx1024tomaxoctets_gb += - dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB); - if (rx_mask & BIT(15)) - lp->mmc_counters.rx512to1023octets_gb += - dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB); - if (rx_mask & BIT(14)) - lp->mmc_counters.rx256to511octets_gb += - dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB); - if (rx_mask & BIT(13)) - lp->mmc_counters.rx128to255octets_gb += - dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB); - if (rx_mask & BIT(12)) - lp->mmc_counters.rx65to127octets_gb += - dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB); - if (rx_mask & BIT(11)) - lp->mmc_counters.rx64octets_gb += - dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB); - if (rx_mask & BIT(10)) - lp->mmc_counters.rxoversize_g += - dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G); - if (rx_mask & BIT(9)) - lp->mmc_counters.rxundersize_g += - dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G); - if (rx_mask & BIT(8)) - lp->mmc_counters.rxjabbererror += - dwceqos_read(lp, DWC_MMC_RXJABBERERROR); - if (rx_mask & BIT(7)) - lp->mmc_counters.rxrunterror += - dwceqos_read(lp, DWC_MMC_RXRUNTERROR); - if (rx_mask & BIT(6)) - lp->mmc_counters.rxalignmenterror += - dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR); - if (rx_mask & BIT(5)) - lp->mmc_counters.rxcrcerror += - dwceqos_read(lp, DWC_MMC_RXCRCERROR); - if (rx_mask & BIT(4)) - lp->mmc_counters.rxmulticastpackets_g += - dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G); - if (rx_mask & BIT(3)) - lp->mmc_counters.rxbroadcastpackets_g += - dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G); - if (rx_mask & BIT(2)) - lp->mmc_counters.rxoctetcount_g += - dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G); - if (rx_mask & BIT(1)) - lp->mmc_counters.rxoctetcount_gb += - dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB); - if (rx_mask & BIT(0)) - lp->mmc_counters.rxpacketcount_gb += - dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB); -} - -static void 
-dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s) -{ - unsigned long flags; - struct net_local *lp = netdev_priv(ndev); - struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters; - - spin_lock_irqsave(&lp->stats_lock, flags); - dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask, - lp->mmc_tx_counters_mask); - spin_unlock_irqrestore(&lp->stats_lock, flags); - - s->rx_packets = hwstats->rxpacketcount_gb; - s->rx_bytes = hwstats->rxoctetcount_gb; - s->rx_errors = hwstats->rxpacketcount_gb - - hwstats->rxbroadcastpackets_g - - hwstats->rxmulticastpackets_g - - hwstats->rxunicastpackets_g; - s->multicast = hwstats->rxmulticastpackets_g; - s->rx_length_errors = hwstats->rxlengtherror; - s->rx_crc_errors = hwstats->rxcrcerror; - s->rx_fifo_errors = hwstats->rxfifooverflow; - - s->tx_packets = hwstats->txpacketcount_gb; - s->tx_bytes = hwstats->txoctetcount_gb; - - if (lp->mmc_tx_counters_mask & BIT(21)) - s->tx_errors = hwstats->txpacketcount_gb - - hwstats->txpacketcount_g; - else - s->tx_errors = hwstats->txunderflowerror + - hwstats->txcarriererror; -} - -static void -dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) -{ - const struct net_local *lp = netdev_priv(ndev); - - strcpy(ed->driver, lp->pdev->dev.driver->name); - strcpy(ed->version, DRIVER_VERSION); -} - -static void dwceqos_get_pauseparam(struct net_device *ndev, - struct ethtool_pauseparam *pp) -{ - const struct net_local *lp = netdev_priv(ndev); - - pp->autoneg = lp->flowcontrol.autoneg; - pp->tx_pause = lp->flowcontrol.tx; - pp->rx_pause = lp->flowcontrol.rx; -} - -static int dwceqos_set_pauseparam(struct net_device *ndev, - struct ethtool_pauseparam *pp) -{ - struct net_local *lp = netdev_priv(ndev); - int ret = 0; - - lp->flowcontrol.autoneg = pp->autoneg; - if (pp->autoneg) { - ndev->phydev->advertising |= ADVERTISED_Pause; - ndev->phydev->advertising |= ADVERTISED_Asym_Pause; - } else { - ndev->phydev->advertising &= ~ADVERTISED_Pause; - ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause; - lp->flowcontrol.rx = pp->rx_pause; - lp->flowcontrol.tx = pp->tx_pause; - } - - if (netif_running(ndev)) - ret = phy_start_aneg(ndev->phydev); - - return ret; -} - -static void dwceqos_get_strings(struct net_device *ndev, u32 stringset, - u8 *data) -{ - size_t i; - - if (stringset != ETH_SS_STATS) - return; - - for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { - memcpy(data, dwceqos_ethtool_stats[i].stat_name, - ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } -} - -static void dwceqos_get_ethtool_stats(struct net_device *ndev, - struct ethtool_stats *stats, u64 *data) -{ - struct net_local *lp = netdev_priv(ndev); - unsigned long flags; - size_t i; - u8 *mmcstat = (u8 *)&lp->mmc_counters; - - spin_lock_irqsave(&lp->stats_lock, flags); - dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask, - lp->mmc_tx_counters_mask); - spin_unlock_irqrestore(&lp->stats_lock, flags); - - for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { - memcpy(data, - mmcstat + dwceqos_ethtool_stats[i].offset, - sizeof(u64)); - data++; - } -} - -static int dwceqos_get_sset_count(struct net_device *ndev, int sset) -{ - if (sset == ETH_SS_STATS) - return ARRAY_SIZE(dwceqos_ethtool_stats); - - return -EOPNOTSUPP; -} - -static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *space) -{ - const struct net_local *lp = netdev_priv(dev); - u32 *reg_space = (u32 *)space; - int reg_offset; - int reg_ix = 0; - - /* MAC registers */ - for (reg_offset = START_MAC_REG_OFFSET; - 
reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { - reg_space[reg_ix] = dwceqos_read(lp, reg_offset); - reg_ix++; - } - /* MTL registers */ - for (reg_offset = START_MTL_REG_OFFSET; - reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) { - reg_space[reg_ix] = dwceqos_read(lp, reg_offset); - reg_ix++; - } - - /* DMA registers */ - for (reg_offset = START_DMA_REG_OFFSET; - reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { - reg_space[reg_ix] = dwceqos_read(lp, reg_offset); - reg_ix++; - } - - BUG_ON(4 * reg_ix > REG_SPACE_SIZE); -} - -static int dwceqos_get_regs_len(struct net_device *dev) -{ - return REG_SPACE_SIZE; -} - -static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl) -{ - return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off"; -} - -static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl) -{ - return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off"; -} - -static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata) -{ - struct net_local *lp = netdev_priv(ndev); - u32 lpi_status; - u32 lpi_enabled; - - if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) - return -EOPNOTSUPP; - - edata->eee_active = lp->eee_active; - edata->eee_enabled = lp->eee_enabled; - edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER); - lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA); - edata->tx_lpi_enabled = lpi_enabled; - - if (netif_msg_hw(lp)) { - u32 regval; - - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - - netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n", - dwceqos_get_rx_lpi_state(regval), - dwceqos_get_tx_lpi_state(regval)); - } - - return phy_ethtool_get_eee(ndev->phydev, edata); -} - -static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata) -{ - struct net_local *lp = netdev_priv(ndev); - u32 regval; - unsigned long flags; - - if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) - return -EOPNOTSUPP; - - if (edata->eee_enabled && !lp->eee_active) - return -EOPNOTSUPP; - - if (edata->tx_lpi_enabled) { - if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN || - edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX) - return -EINVAL; - } - - lp->eee_enabled = edata->eee_enabled; - - if (edata->eee_enabled && edata->tx_lpi_enabled) { - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER, - edata->tx_lpi_timer); - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE; - if (lp->en_tx_lpi_clockgating) - regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } else { - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } - - return phy_ethtool_set_eee(ndev->phydev, edata); -} - -static u32 dwceqos_get_msglevel(struct net_device *ndev) -{ - const struct net_local *lp = netdev_priv(ndev); - - return lp->msg_enable; -} - -static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel) -{ - struct net_local *lp = netdev_priv(ndev); - - lp->msg_enable = msglevel; -} - -static const struct ethtool_ops dwceqos_ethtool_ops = { - .get_drvinfo = dwceqos_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_pauseparam 
= dwceqos_get_pauseparam, - .set_pauseparam = dwceqos_set_pauseparam, - .get_strings = dwceqos_get_strings, - .get_ethtool_stats = dwceqos_get_ethtool_stats, - .get_sset_count = dwceqos_get_sset_count, - .get_regs = dwceqos_get_regs, - .get_regs_len = dwceqos_get_regs_len, - .get_eee = dwceqos_get_eee, - .set_eee = dwceqos_set_eee, - .get_msglevel = dwceqos_get_msglevel, - .set_msglevel = dwceqos_set_msglevel, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, -}; - -static const struct net_device_ops netdev_ops = { - .ndo_open = dwceqos_open, - .ndo_stop = dwceqos_stop, - .ndo_start_xmit = dwceqos_start_xmit, - .ndo_set_rx_mode = dwceqos_set_rx_mode, - .ndo_set_mac_address = dwceqos_set_mac_address, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = dwceqos_poll_controller, -#endif - .ndo_do_ioctl = dwceqos_ioctl, - .ndo_tx_timeout = dwceqos_tx_timeout, - .ndo_get_stats64 = dwceqos_get_stats64, -}; - -static const struct of_device_id dwceq_of_match[] = { - { .compatible = "snps,dwc-qos-ethernet-4.10", }, - {} -}; -MODULE_DEVICE_TABLE(of, dwceq_of_match); - -static int dwceqos_probe(struct platform_device *pdev) -{ - struct resource *r_mem = NULL; - struct net_device *ndev; - struct net_local *lp; - int ret = -ENXIO; - - r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r_mem) { - dev_err(&pdev->dev, "no IO resource defined.\n"); - return -ENXIO; - } - - ndev = alloc_etherdev(sizeof(*lp)); - if (!ndev) { - dev_err(&pdev->dev, "etherdev allocation failed.\n"); - return -ENOMEM; - } - - SET_NETDEV_DEV(ndev, &pdev->dev); - - lp = netdev_priv(ndev); - lp->ndev = ndev; - lp->pdev = pdev; - lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT); - - spin_lock_init(&lp->tx_lock); - spin_lock_init(&lp->hw_lock); - spin_lock_init(&lp->stats_lock); - - lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk"); - if (IS_ERR(lp->apb_pclk)) { - dev_err(&pdev->dev, "apb_pclk clock not found.\n"); - ret = PTR_ERR(lp->apb_pclk); - goto err_out_free_netdev; - } - - ret = clk_prepare_enable(lp->apb_pclk); - if (ret) { - dev_err(&pdev->dev, "Unable to enable APER clock.\n"); - goto err_out_free_netdev; - } - - lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem); - if (IS_ERR(lp->baseaddr)) { - dev_err(&pdev->dev, "failed to map baseaddress.\n"); - ret = PTR_ERR(lp->baseaddr); - goto err_out_clk_dis_aper; - } - - ndev->irq = platform_get_irq(pdev, 0); - ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ; - ndev->netdev_ops = &netdev_ops; - ndev->ethtool_ops = &dwceqos_ethtool_ops; - ndev->base_addr = r_mem->start; - - dwceqos_get_hwfeatures(lp); - dwceqos_mdio_set_csr(lp); - - ndev->hw_features = NETIF_F_SG; - - if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN) - ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; - - if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL) - ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - - if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL) - ndev->hw_features |= NETIF_F_RXCSUM; - - ndev->features = ndev->hw_features; - - lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); - if (IS_ERR(lp->phy_ref_clk)) { - dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); - ret = PTR_ERR(lp->phy_ref_clk); - goto err_out_clk_dis_aper; - } - - ret = clk_prepare_enable(lp->phy_ref_clk); - if (ret) { - dev_err(&pdev->dev, "Unable to enable device clock.\n"); - goto err_out_clk_dis_aper; - } - - lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node, - "phy-handle", 0); - if (!lp->phy_node && 
of_phy_is_fixed_link(lp->pdev->dev.of_node)) { - ret = of_phy_register_fixed_link(lp->pdev->dev.of_node); - if (ret < 0) { - dev_err(&pdev->dev, "invalid fixed-link"); - goto err_out_clk_dis_phy; - } - - lp->phy_node = of_node_get(lp->pdev->dev.of_node); - } - - ret = of_get_phy_mode(lp->pdev->dev.of_node); - if (ret < 0) { - dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); - goto err_out_deregister_fixed_link; - } - - lp->phy_interface = ret; - - ret = dwceqos_mii_init(lp); - if (ret) { - dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); - goto err_out_deregister_fixed_link; - } - - ret = dwceqos_mii_probe(ndev); - if (ret != 0) { - netdev_err(ndev, "mii_probe fail.\n"); - ret = -ENXIO; - goto err_out_deregister_fixed_link; - } - - dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); - - tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim, - (unsigned long)ndev); - tasklet_disable(&lp->tx_bdreclaim_tasklet); - - lp->txtimeout_handler_wq = alloc_workqueue(DRIVER_NAME, - WQ_MEM_RECLAIM, 0); - INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout); - - platform_set_drvdata(pdev, ndev); - ret = dwceqos_probe_config_dt(pdev); - if (ret) { - dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", - ret); - goto err_out_deregister_fixed_link; - } - dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", - pdev->id, ndev->base_addr, ndev->irq); - - ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0, - ndev->name, ndev); - if (ret) { - dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", - ndev->irq, ret); - goto err_out_deregister_fixed_link; - } - - if (netif_msg_probe(lp)) - netdev_dbg(ndev, "net_local@%p\n", lp); - - netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT); - - ret = register_netdev(ndev); - if (ret) { - dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); - goto err_out_deregister_fixed_link; - } - - return 0; - -err_out_deregister_fixed_link: - if (of_phy_is_fixed_link(pdev->dev.of_node)) - of_phy_deregister_fixed_link(pdev->dev.of_node); -err_out_clk_dis_phy: - clk_disable_unprepare(lp->phy_ref_clk); -err_out_clk_dis_aper: - clk_disable_unprepare(lp->apb_pclk); -err_out_free_netdev: - of_node_put(lp->phy_node); - free_netdev(ndev); - platform_set_drvdata(pdev, NULL); - return ret; -} - -static int dwceqos_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct net_local *lp; - - if (ndev) { - lp = netdev_priv(ndev); - - if (ndev->phydev) { - phy_disconnect(ndev->phydev); - if (of_phy_is_fixed_link(pdev->dev.of_node)) - of_phy_deregister_fixed_link(pdev->dev.of_node); - } - mdiobus_unregister(lp->mii_bus); - mdiobus_free(lp->mii_bus); - - unregister_netdev(ndev); - - clk_disable_unprepare(lp->phy_ref_clk); - clk_disable_unprepare(lp->apb_pclk); - - free_netdev(ndev); - } - - return 0; -} - -static struct platform_driver dwceqos_driver = { - .probe = dwceqos_probe, - .remove = dwceqos_remove, - .driver = { - .name = DRIVER_NAME, - .of_match_table = dwceq_of_match, - }, -}; - -module_platform_driver(dwceqos_driver); - -MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver"); -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>"); -MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>"); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index baa3e4a5731c..f864fd0663db 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -303,7 
+303,7 @@ static int bdx_poll(struct napi_struct *napi, int budget) * device lock and allow waiting tasks (eg rmmod) to advance) */ priv->napi_stop = 0; - napi_complete(napi); + napi_complete_done(napi, work_done); bdx_enable_interrupts(priv); } return work_done; diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 77c88fcf2b86..9b8a30bf939b 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -1210,7 +1210,7 @@ int cpmac_init(void) goto fail_alloc; } -#warning FIXME: unhardcode gpio&reset bits + /* FIXME: unhardcode gpio&reset bits */ ar7_gpio_disable(26); ar7_gpio_disable(27); ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 02b03ee2e314..35a95dcc755b 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -357,7 +357,6 @@ struct cpsw_slave { struct phy_device *phy; struct net_device *ndev; u32 port_vlan; - u32 open_stat; }; static inline u32 slave_read(struct cpsw_slave *slave, u32 offset) @@ -672,6 +671,18 @@ static void cpsw_intr_disable(struct cpsw_common *cpsw) return; } +static int cpsw_get_usage_count(struct cpsw_common *cpsw) +{ + u32 i; + u32 usage_count = 0; + + for (i = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].ndev && netif_running(cpsw->slaves[i].ndev)) + usage_count++; + + return usage_count; +} + static void cpsw_tx_handler(void *token, int len, int status) { struct netdev_queue *txq; @@ -704,18 +715,10 @@ static void cpsw_rx_handler(void *token, int len, int status) cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb); if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { - bool ndev_status = false; - struct cpsw_slave *slave = cpsw->slaves; - int n; - - if (cpsw->data.dual_emac) { - /* In dual emac mode check for all interfaces */ - for (n = cpsw->data.slaves; n; n--, slave++) - if (netif_running(slave->ndev)) - ndev_status = true; - } - - if (ndev_status && (status >= 0)) { + /* In dual emac mode check for all interfaces */ + if (cpsw->data.dual_emac && + cpsw_get_usage_count(cpsw) && + (status >= 0)) { /* The packet received is for the interface which * is already down and the other interface is up * and running, instead of freeing which results @@ -939,7 +942,7 @@ static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) } if (num_rx < budget) { - napi_complete(napi_rx); + napi_complete_done(napi_rx, num_rx); writel(0xff, &cpsw->wr_regs->rx_en); if (cpsw->quirk_irq && cpsw->rx_irq_disabled) { cpsw->rx_irq_disabled = false; @@ -1235,21 +1238,6 @@ static void cpsw_get_ethtool_stats(struct net_device *ndev, } } -static int cpsw_common_res_usage_state(struct cpsw_common *cpsw) -{ - u32 i; - u32 usage_count = 0; - - if (!cpsw->data.dual_emac) - return 0; - - for (i = 0; i < cpsw->data.slaves; i++) - if (cpsw->slaves[i].open_stat) - usage_count++; - - return usage_count; -} - static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv, struct sk_buff *skb, struct cpdma_chan *txch) @@ -1483,8 +1471,6 @@ static int cpsw_ndo_open(struct net_device *ndev) return ret; } - if (!cpsw_common_res_usage_state(cpsw)) - cpsw_intr_disable(cpsw); netif_carrier_off(ndev); /* Notify the stack of the actual queue counts. */ @@ -1506,8 +1492,11 @@ static int cpsw_ndo_open(struct net_device *ndev) CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg), CPSW_RTL_VERSION(reg)); - /* initialize host and slave ports */ - if (!cpsw_common_res_usage_state(cpsw)) + /* Initialize host and slave ports. 
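The cpsw hunks here drop the driver-private open_stat flag in favour of cpsw_get_usage_count(), which counts slave net_devices in the running state. The subtlety called out in the new comments is ordering: the core sets __LINK_STATE_START before calling ndo_open and clears it before ndo_stop, so in ndo_open a count of 1 means "first opener" (hence the < 2 tests) and in ndo_stop a count of 0 means "last closer". A minimal model of the idiom, with hypothetical structure and helper names:

#include <linux/netdevice.h>

struct two_port {				/* hypothetical */
	struct net_device *slave_ndev[2];
};

static int usage_count(const struct two_port *p)
{
	int i, count = 0;

	for (i = 0; i < 2; i++)
		if (p->slave_ndev[i] && netif_running(p->slave_ndev[i]))
			count++;
	return count;
}

static void init_shared_hw(struct two_port *p) { /* bring-up elided */ }

static int sketch_ndo_open(struct two_port *p)
{
	/* __dev_open() marks the device running before ndo_open runs,
	 * so the opener is already included in the count. */
	if (usage_count(p) < 2)
		init_shared_hw(p);
	return 0;
}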
+ * Given ndev is marked as opened already, so init port only if 1 ndev + * is opened + */ + if (cpsw_get_usage_count(cpsw) < 2) cpsw_init_host_port(priv); for_each_slave(priv, cpsw_slave_open, priv); @@ -1518,7 +1507,10 @@ static int cpsw_ndo_open(struct net_device *ndev) cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); - if (!cpsw_common_res_usage_state(cpsw)) { + /* Given ndev is marked as opened already, so if more ndev + * are opened - no need to init shared resources. + */ + if (cpsw_get_usage_count(cpsw) < 2) { /* disable priority elevation */ __raw_writel(0, &cpsw->regs->ptype); @@ -1561,9 +1553,6 @@ static int cpsw_ndo_open(struct net_device *ndev) cpdma_ctlr_start(cpsw->dma); cpsw_intr_enable(cpsw); - if (cpsw->data.dual_emac) - cpsw->slaves[priv->emac_port].open_stat = true; - return 0; err_cleanup: @@ -1583,7 +1572,10 @@ static int cpsw_ndo_stop(struct net_device *ndev) netif_tx_stop_all_queues(priv->ndev); netif_carrier_off(priv->ndev); - if (cpsw_common_res_usage_state(cpsw) <= 1) { + /* Given ndev is marked as close already, + * so disable shared resources if no open devices + */ + if (!cpsw_get_usage_count(cpsw)) { napi_disable(&cpsw->napi_rx); napi_disable(&cpsw->napi_tx); cpts_unregister(cpsw->cpts); @@ -1597,8 +1589,6 @@ static int cpsw_ndo_stop(struct net_device *ndev) cpsw_split_res(ndev); pm_runtime_put_sync(cpsw->dev); - if (cpsw->data.dual_emac) - cpsw->slaves[priv->emac_port].open_stat = false; return 0; } @@ -2368,17 +2358,11 @@ static int cpsw_update_channels(struct cpsw_priv *priv, return 0; } -static int cpsw_set_channels(struct net_device *ndev, - struct ethtool_channels *chs) +static void cpsw_suspend_data_pass(struct net_device *ndev) { - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); struct cpsw_slave *slave; - int i, ret; - - ret = cpsw_check_ch_settings(cpsw, chs); - if (ret < 0) - return ret; + int i; /* Disable NAPI scheduling */ cpsw_intr_disable(cpsw); @@ -2396,6 +2380,51 @@ static int cpsw_set_channels(struct net_device *ndev, /* Handle rest of tx packets and stop cpdma channels */ cpdma_ctlr_stop(cpsw->dma); +} + +static int cpsw_resume_data_pass(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int i, ret; + + /* Allow rx packets handling */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) + if (slave->ndev && netif_running(slave->ndev)) + netif_dormant_off(slave->ndev); + + /* After this receive is started */ + if (cpsw_get_usage_count(cpsw)) { + ret = cpsw_fill_rx_channels(priv); + if (ret) + return ret; + + cpdma_ctlr_start(cpsw->dma); + cpsw_intr_enable(cpsw); + } + + /* Resume transmit for every affected interface */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) + if (slave->ndev && netif_running(slave->ndev)) + netif_tx_start_all_queues(slave->ndev); + + return 0; +} + +static int cpsw_set_channels(struct net_device *ndev, + struct ethtool_channels *chs) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int i, ret; + + ret = cpsw_check_ch_settings(cpsw, chs); + if (ret < 0) + return ret; + + cpsw_suspend_data_pass(ndev); ret = cpsw_update_channels(priv, chs); if (ret) goto err; @@ -2418,30 +2447,14 @@ static int cpsw_set_channels(struct net_device *ndev, dev_err(priv->dev, "cannot set real number of rx 
queues\n"); goto err; } - - /* Enable rx packets handling */ - netif_dormant_off(slave->ndev); } - if (cpsw_common_res_usage_state(cpsw)) { - ret = cpsw_fill_rx_channels(priv); - if (ret) - goto err; - + if (cpsw_get_usage_count(cpsw)) cpsw_split_res(ndev); - /* After this receive is started */ - cpdma_ctlr_start(cpsw->dma); - cpsw_intr_enable(cpsw); - } - - /* Resume transmit for every affected interface */ - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { - if (!(slave->ndev && netif_running(slave->ndev))) - continue; - netif_tx_start_all_queues(slave->ndev); - } - return 0; + ret = cpsw_resume_data_pass(ndev); + if (!ret) + return 0; err: dev_err(priv->dev, "cannot update channels number, closing device\n"); dev_close(ndev); @@ -2502,8 +2515,7 @@ static int cpsw_set_ringparam(struct net_device *ndev, { struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - int i, ret; + int ret; /* ignore ering->tx_pending - only rx_pending adjustment is supported */ @@ -2515,54 +2527,18 @@ static int cpsw_set_ringparam(struct net_device *ndev, if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma)) return 0; - /* Disable NAPI scheduling */ - cpsw_intr_disable(cpsw); - - /* Stop all transmit queues for every network device. - * Disable re-using rx descriptors with dormant_on. - */ - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { - if (!(slave->ndev && netif_running(slave->ndev))) - continue; - - netif_tx_stop_all_queues(slave->ndev); - netif_dormant_on(slave->ndev); - } - - /* Handle rest of tx packets and stop cpdma channels */ - cpdma_ctlr_stop(cpsw->dma); + cpsw_suspend_data_pass(ndev); cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending); - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { - if (!(slave->ndev && netif_running(slave->ndev))) - continue; - - /* Enable rx packets handling */ - netif_dormant_off(slave->ndev); - } - - if (cpsw_common_res_usage_state(cpsw)) { + if (cpsw_get_usage_count(cpsw)) cpdma_chan_split_pool(cpsw->dma); - ret = cpsw_fill_rx_channels(priv); - if (ret) - goto err; - - /* After this receive is started */ - cpdma_ctlr_start(cpsw->dma); - cpsw_intr_enable(cpsw); - } + ret = cpsw_resume_data_pass(ndev); + if (!ret) + return 0; - /* Resume transmit for every affected interface */ - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { - if (!(slave->ndev && netif_running(slave->ndev))) - continue; - netif_tx_start_all_queues(slave->ndev); - } - return 0; -err: - dev_err(priv->dev, "cannot set ring params, closing device\n"); + dev_err(&ndev->dev, "cannot set ring params, closing device\n"); dev_close(ndev); return ret; } diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index d80bff19d4ec..7ecc6b70e7e8 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -835,8 +835,8 @@ EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate); */ int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate) { - struct cpdma_ctlr *ctlr = ch->ctlr; unsigned long flags, ch_flags; + struct cpdma_ctlr *ctlr; int ret, prio_mode; u32 rmask; @@ -846,6 +846,7 @@ int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate) if (ch->rate == rate) return rate; + ctlr = ch->ctlr; spin_lock_irqsave(&ctlr->lock, flags); spin_lock_irqsave(&ch->lock, ch_flags); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 481c7bf0395b..64d5527feb2a 100644 --- 
a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1295,7 +1295,7 @@ static int emac_poll(struct napi_struct *napi, int budget) &emac_rxhost_errcodes[cause][0], ch); } } else if (num_rx_pkts < budget) { - napi_complete(napi); + napi_complete_done(napi, num_rx_pkts); emac_int_enable(priv); } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 68a75cc0731c..ebab1473f366 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -969,7 +969,7 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget) netcp_rxpool_refill(netcp); if (packets < budget) { - napi_complete(&netcp->rx_napi); + napi_complete_done(&netcp->rx_napi, packets); knav_queue_enable_notify(netcp->rx_queue); } @@ -1909,7 +1909,7 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto, return 0; } -static struct rtnl_link_stats64 * +static void netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats) { struct netcp_intf *netcp = netdev_priv(ndev); @@ -1938,8 +1938,6 @@ netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats) stats->rx_errors = p->rx_errors; stats->rx_dropped = p->rx_dropped; stats->tx_dropped = p->tx_dropped; - - return stats; } static const struct net_device_ops netcp_netdev_ops = { diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 2255f9a6f3bc..7c634bc75615 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -681,7 +681,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget) } /* There are no packets left. */ - napi_complete(&info_mpipe->napi); + napi_complete_done(&info_mpipe->napi, work); md = &mpipe_data[instance]; /* Re-enable hypervisor interrupts. */ diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 30cfea62a356..49ccee4b9aec 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -842,7 +842,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget) } } - napi_complete(&info->napi); + napi_complete_done(&info->napi, work); if (!priv->active) goto done; @@ -2090,12 +2090,8 @@ static void tile_net_get_stats64(struct net_device *dev, stats->tx_bytes = tx_bytes; stats->rx_errors = rx_errors; stats->rx_dropped = rx_dropped; - - return stats; } - - /* * Change the Ethernet Address of the NIC. 
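A conversion that repeats across this whole range — tehuti, cpsw, davinci_emac, netcp, the tile drivers, and the gelic/spider_net/tc35815/tsi108/via/wiznet/fjes hunks around it — is napi_complete(napi) becoming napi_complete_done(napi, work_done), alongside ndo_get_stats64 implementations dropping their now-redundant return value. Reporting the work actually done lets the core make better interrupt-deferral and busy-polling decisions; napi_complete() is just the zero-work special case. The canonical poll shape, with the device-specific parts stubbed out as hypothetical helpers:

#include <linux/netdevice.h>

static int rx_work(struct napi_struct *napi, int budget);	/* hypothetical */
static void rx_irq_enable(struct napi_struct *napi);		/* hypothetical */

static int sketch_poll(struct napi_struct *napi, int budget)
{
	int work_done = rx_work(napi, budget);	/* <= budget by contract */

	if (work_done < budget) {
		/* Tell the core how much was done before re-arming IRQs. */
		napi_complete_done(napi, work_done);
		rx_irq_enable(napi);
	}

	return work_done;
}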
* diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 345316c749e7..72013314bba8 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -1109,7 +1109,7 @@ static int gelic_net_poll(struct napi_struct *napi, int budget) } if (packets_done < budget) { - napi_complete(napi); + napi_complete_done(napi, packets_done); gelic_card_rx_irq_on(card); } return packets_done; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index cb341dfe65ad..cec9e70ab995 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -1270,7 +1270,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget) /* if all packets are in the stack, enable interrupts and return 0 */ /* if not, return 1 */ if (packets_done < budget) { - napi_complete(napi); + napi_complete_done(napi, packets_done); spider_net_rx_irq_on(card); card->ignore_rx_ramfull = 0; } diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 3be61ed28741..a45f98fa4aa7 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -1638,7 +1638,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget) spin_unlock(&lp->rx_lock); if (received < budget) { - napi_complete(napi); + napi_complete_done(napi, received); /* enable interrupts */ tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); } diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index f153ad729ce5..c5583991da4a 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -887,7 +887,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget) if (num_received < budget) { data->rxpending = 0; - napi_complete(napi); + napi_complete_done(napi, num_received); TSI_WRITE(TSI108_EC_INTMASK, TSI_READ(TSI108_EC_INTMASK) diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 453a1fad560c..c068c58428f7 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -861,7 +861,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); iowrite16(enable_mask, ioaddr + IntrEnable); mmiowb(); } diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 4716e60e2ccb..d088788b27a7 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2160,7 +2160,7 @@ static int velocity_poll(struct napi_struct *napi, int budget) velocity_tx_srv(vptr); /* If budget not fully consumed, exit the polling mode */ if (rx_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_done); mac_enable_int(vptr->mac_regs); } spin_unlock_irqrestore(&vptr->lock, flags); diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index e1296ef2cf66..f90267f0519f 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -915,7 +915,7 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_count); w5100_enable_intr(priv); } diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 
724fabd38a23..56ae573001e8 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -417,7 +417,7 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_count); w5300_write(priv, W5300_IMR, IR_S0); mmiowb(); } diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 97dcc0bd5a85..e3070fd88bce 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1051,7 +1051,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s) } } -static struct net_device_ops xemaclite_netdev_ops; +static const struct net_device_ops xemaclite_netdev_ops; /** * xemaclite_of_probe - Probe method for the Emaclite device. @@ -1205,7 +1205,7 @@ xemaclite_poll_controller(struct net_device *ndev) } #endif -static struct net_device_ops xemaclite_netdev_ops = { +static const struct net_device_ops xemaclite_netdev_ops = { .ndo_open = xemaclite_open, .ndo_stop = xemaclite_close, .ndo_start_xmit = xemaclite_send, diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 5028001429c7..b75d9cdcfb0c 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1155,7 +1155,7 @@ static int fjes_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (adapter->unset_rx_last) { adapter->rx_last_jiffies = jiffies; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 8b6810bad54b..bda0c6413450 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -69,7 +69,6 @@ struct gtp_dev { struct socket *sock0; struct socket *sock1u; - struct net *net; struct net_device *dev; unsigned int hash_size; @@ -184,7 +183,6 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, sizeof(struct gtp0_header); struct gtp0_header *gtp0; struct pdp_ctx *pctx; - int ret = 0; if (!pskb_may_pull(skb, hdrlen)) return -1; @@ -197,26 +195,19 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, if (gtp0->type != GTP_TPDU) return 1; - rcu_read_lock(); pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); - ret = -1; - goto out_rcu; + return 1; } if (!gtp_check_src_ms(skb, pctx, hdrlen)) { netdev_dbg(gtp->dev, "No PDP ctx for this MS\n"); - ret = -1; - goto out_rcu; + return 1; } - rcu_read_unlock(); /* Get rid of the GTP + UDP headers. */ return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet); -out_rcu: - rcu_read_unlock(); - return ret; } static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, @@ -226,7 +217,6 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, sizeof(struct gtp1_header); struct gtp1_header *gtp1; struct pdp_ctx *pctx; - int ret = 0; if (!pskb_may_pull(skb, hdrlen)) return -1; @@ -254,26 +244,19 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); - rcu_read_lock(); pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); - ret = -1; - goto out_rcu; + return 1; } if (!gtp_check_src_ms(skb, pctx, hdrlen)) { netdev_dbg(gtp->dev, "No PDP ctx for this MS\n"); - ret = -1; - goto out_rcu; + return 1; } - rcu_read_unlock(); /* Get rid of the GTP + UDP headers. 
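The gtp changes above make two related simplifications to the GTPv0/GTPv1 decap paths. First, the explicit rcu_read_lock()/rcu_read_unlock() pairs go away: the UDP encap_rcv hook already runs inside an RCU read-side critical section, so the nested lock was redundant (similarly, the cached gtp->net is dropped in favour of sock_net(sk) for the xnet check). Second, the "no PDP context" failures now return 1 rather than -1, letting gtp_encap_recv() pass such packets up the UDP socket instead of dropping them. A minimal model of a lookup that relies on the caller's RCU section (names hypothetical):

#include <linux/types.h>
#include <linux/rculist.h>

struct pdp_entry {
	u64 tid;
	struct hlist_node node;
};

/* Caller must hold rcu_read_lock(), as the UDP encap receive path
 * does; no lock is taken (or needed) here. */
static struct pdp_entry *pdp_find(struct hlist_head *head, u64 tid)
{
	struct pdp_entry *p;

	hlist_for_each_entry_rcu(p, head, node)
		if (p->tid == tid)
			return p;
	return NULL;
}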
*/ return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet); -out_rcu: - rcu_read_unlock(); - return ret; } static void gtp_encap_disable(struct gtp_dev *gtp) @@ -316,7 +299,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb) netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); - xnet = !net_eq(gtp->net, dev_net(gtp->dev)); + xnet = !net_eq(sock_net(sk), dev_net(gtp->dev)); switch (udp_sk(sk)->encap_type) { case UDP_ENCAP_GTP0: @@ -612,7 +595,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) pktinfo.fl4.saddr, pktinfo.fl4.daddr, pktinfo.iph->tos, ip4_dst_hoplimit(&pktinfo.rt->dst), - htons(IP_DF), + 0, pktinfo.gtph_port, pktinfo.gtph_port, true, false); break; @@ -658,7 +641,7 @@ static void gtp_link_setup(struct net_device *dev) static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); static void gtp_hashtable_free(struct gtp_dev *gtp); static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, - int fd_gtp0, int fd_gtp1, struct net *src_net); + int fd_gtp0, int fd_gtp1); static int gtp_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) @@ -675,7 +658,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, fd0 = nla_get_u32(data[IFLA_GTP_FD0]); fd1 = nla_get_u32(data[IFLA_GTP_FD1]); - err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net); + err = gtp_encap_enable(dev, gtp, fd0, fd1); if (err < 0) goto out_err; @@ -821,7 +804,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp) } static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, - int fd_gtp0, int fd_gtp1, struct net *src_net) + int fd_gtp0, int fd_gtp1) { struct udp_tunnel_sock_cfg tuncfg = {NULL}; struct socket *sock0, *sock1u; @@ -858,7 +841,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, gtp->sock0 = sock0; gtp->sock1u = sock1u; - gtp->net = src_net; tuncfg.sk_user_data = gtp; tuncfg.encap_rcv = gtp_encap_recv; @@ -1376,3 +1358,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>"); MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic"); MODULE_ALIAS_RTNL_LINK("gtp"); +MODULE_ALIAS_GENL_FAMILY("gtp"); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 3958adade7eb..d3e73ac158ae 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -34,6 +34,7 @@ #define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88 #define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89 +#define NDIS_OBJECT_TYPE_OFFLOAD 0xa7 #define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2 #define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2 @@ -118,6 +119,7 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */ /* Fwd declaration */ struct ndis_tcp_ip_checksum_info; +struct ndis_pkt_8021q_info; /* * Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame @@ -135,8 +137,10 @@ struct hv_netvsc_packet { u8 page_buf_cnt; u16 q_idx; - u32 send_buf_index; + u16 total_packets; + u32 total_bytes; + u32 send_buf_index; u32 total_data_buflen; }; @@ -155,6 +159,8 @@ enum rndis_device_state { RNDIS_DEV_DATAINITIALIZED, }; +#define NETVSC_HASH_KEYLEN 40 + struct rndis_device { struct net_device *ndev; @@ -165,14 +171,17 @@ struct rndis_device { spinlock_t request_lock; struct list_head req_list; - unsigned char hw_mac_adr[ETH_ALEN]; + u8 hw_mac_adr[ETH_ALEN]; + u8 rss_key[NETVSC_HASH_KEYLEN]; + u16 ind_table[ITAB_NUM]; }; /* Interface */ struct rndis_message; struct netvsc_device; -int 
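The hyperv_net.h changes beginning here add total_packets and total_bytes to hv_netvsc_packet so transmit statistics can be charged once per completion — including everything batched into a multi-send — against per-channel counters (see netvsc_send_tx_complete() further down). Those counters use the u64_stats_sync pattern, which keeps 64-bit statistics coherent on 32-bit SMP without a spinlock. A sketch of both sides of that pattern, assuming a minimal stats struct:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct chan_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Writer side: one writer per channel, typically completion context. */
static void charge_tx(struct chan_stats *s, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&s->syncp);
	s->packets += pkts;
	s->bytes += bytes;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retry if a writer raced with the snapshot. */
static void snapshot_tx(const struct chan_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}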
netvsc_device_add(struct hv_device *device, void *additional_info); +int netvsc_device_add(struct hv_device *device, + const struct netvsc_device_info *info); void netvsc_device_remove(struct hv_device *device); int netvsc_send(struct hv_device *device, struct hv_netvsc_packet *packet, @@ -181,22 +190,25 @@ int netvsc_send(struct hv_device *device, struct sk_buff *skb); void netvsc_linkstatus_callback(struct hv_device *device_obj, struct rndis_message *resp); -int netvsc_recv_callback(struct hv_device *device_obj, - struct hv_netvsc_packet *packet, - void **data, - struct ndis_tcp_ip_checksum_info *csum_info, - struct vmbus_channel *channel, - u16 vlan_tci); +int netvsc_recv_callback(struct net_device *net, + struct vmbus_channel *channel, + void *data, u32 len, + const struct ndis_tcp_ip_checksum_info *csum_info, + const struct ndis_pkt_8021q_info *vlan); void netvsc_channel_cb(void *context); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); int rndis_filter_device_add(struct hv_device *dev, - void *additional_info); -void rndis_filter_device_remove(struct hv_device *dev); -int rndis_filter_receive(struct hv_device *dev, - struct hv_netvsc_packet *pkt, - void **data, - struct vmbus_channel *channel); + struct netvsc_device_info *info); +void rndis_filter_device_remove(struct hv_device *dev, + struct netvsc_device *nvdev); +int rndis_filter_set_rss_param(struct rndis_device *rdev, + const u8 *key, int num_queue); +int rndis_filter_receive(struct net_device *ndev, + struct netvsc_device *net_dev, + struct hv_device *dev, + struct vmbus_channel *channel, + void *data, u32 buflen); int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); @@ -622,6 +634,7 @@ struct nvsp_message { #define VRSS_SEND_TAB_SIZE 16 #define VRSS_CHANNEL_MAX 64 +#define VRSS_CHANNEL_DEFAULT 8 #define RNDIS_MAX_PKT_DEFAULT 8 #define RNDIS_PKT_ALIGN_DEFAULT 8 @@ -685,8 +698,7 @@ struct net_device_context { struct work_struct work; u32 msg_enable; /* debug level */ - struct netvsc_stats __percpu *tx_stats; - struct netvsc_stats __percpu *rx_stats; + u32 tx_checksum_mask; /* Ethtool settings */ u8 duplex; @@ -705,11 +717,21 @@ struct net_device_context { u32 vf_serial; }; +/* Per channel data */ +struct netvsc_channel { + struct vmbus_channel *channel; + struct multi_send_data msd; + struct multi_recv_comp mrc; + atomic_t queue_sends; + + struct netvsc_stats tx_stats; + struct netvsc_stats rx_stats; +}; + /* Per netvsc device */ struct netvsc_device { u32 nvsp_version; - atomic_t num_outstanding_sends; wait_queue_head_t wait_drain; bool destroy; @@ -735,32 +757,25 @@ struct netvsc_device { struct nvsp_message revoke_packet; - struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX]; u32 send_table[VRSS_SEND_TAB_SIZE]; u32 max_chn; u32 num_chn; spinlock_t sc_lock; /* Protects num_sc_offered variable */ u32 num_sc_offered; - atomic_t queue_sends[VRSS_CHANNEL_MAX]; /* Holds rndis device info */ void *extension; int ring_size; - /* The primary channel callback buffer */ - unsigned char *cb_buffer; - /* The sub channel callback buffer */ - unsigned char *sub_cb_buf; - - struct multi_send_data msd[VRSS_CHANNEL_MAX]; u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 
8 */ - struct multi_recv_comp mrc[VRSS_CHANNEL_MAX]; atomic_t num_outstanding_recvs; atomic_t open_cnt; + + struct netvsc_channel chan_table[VRSS_CHANNEL_MAX]; }; static inline struct netvsc_device * @@ -939,7 +954,7 @@ struct ndis_pkt_8021q_info { }; }; -struct ndis_oject_header { +struct ndis_object_header { u8 type; u8 revision; u16 size; @@ -947,6 +962,9 @@ struct ndis_oject_header { #define NDIS_OBJECT_TYPE_DEFAULT 0x80 #define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3 +#define NDIS_OFFLOAD_PARAMETERS_REVISION_2 2 +#define NDIS_OFFLOAD_PARAMETERS_REVISION_1 1 + #define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0 #define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1 #define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED 2 @@ -973,8 +991,135 @@ struct ndis_oject_header { #define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */ #define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */ +/* + * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES + * ndis_type: NDIS_OBJTYPE_OFFLOAD + */ + +#define NDIS_OFFLOAD_ENCAP_NONE 0x0000 +#define NDIS_OFFLOAD_ENCAP_NULL 0x0001 +#define NDIS_OFFLOAD_ENCAP_8023 0x0002 +#define NDIS_OFFLOAD_ENCAP_8023PQ 0x0004 +#define NDIS_OFFLOAD_ENCAP_8023PQ_OOB 0x0008 +#define NDIS_OFFLOAD_ENCAP_RFC1483 0x0010 + +struct ndis_csum_offload { + u32 ip4_txenc; + u32 ip4_txcsum; +#define NDIS_TXCSUM_CAP_IP4OPT 0x001 +#define NDIS_TXCSUM_CAP_TCP4OPT 0x004 +#define NDIS_TXCSUM_CAP_TCP4 0x010 +#define NDIS_TXCSUM_CAP_UDP4 0x040 +#define NDIS_TXCSUM_CAP_IP4 0x100 + +#define NDIS_TXCSUM_ALL_TCP4 (NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT) + + u32 ip4_rxenc; + u32 ip4_rxcsum; +#define NDIS_RXCSUM_CAP_IP4OPT 0x001 +#define NDIS_RXCSUM_CAP_TCP4OPT 0x004 +#define NDIS_RXCSUM_CAP_TCP4 0x010 +#define NDIS_RXCSUM_CAP_UDP4 0x040 +#define NDIS_RXCSUM_CAP_IP4 0x100 + u32 ip6_txenc; + u32 ip6_txcsum; +#define NDIS_TXCSUM_CAP_IP6EXT 0x001 +#define NDIS_TXCSUM_CAP_TCP6OPT 0x004 +#define NDIS_TXCSUM_CAP_TCP6 0x010 +#define NDIS_TXCSUM_CAP_UDP6 0x040 + u32 ip6_rxenc; + u32 ip6_rxcsum; +#define NDIS_RXCSUM_CAP_IP6EXT 0x001 +#define NDIS_RXCSUM_CAP_TCP6OPT 0x004 +#define NDIS_RXCSUM_CAP_TCP6 0x010 +#define NDIS_RXCSUM_CAP_UDP6 0x040 + +#define NDIS_TXCSUM_ALL_TCP6 (NDIS_TXCSUM_CAP_TCP6 | \ + NDIS_TXCSUM_CAP_TCP6OPT | \ + NDIS_TXCSUM_CAP_IP6EXT) +}; + +struct ndis_lsov1_offload { + u32 encap; + u32 maxsize; + u32 minsegs; + u32 opts; +}; + +struct ndis_ipsecv1_offload { + u32 encap; + u32 ah_esp; + u32 xport_tun; + u32 ip4_opts; + u32 flags; + u32 ip4_ah; + u32 ip4_esp; +}; + +struct ndis_lsov2_offload { + u32 ip4_encap; + u32 ip4_maxsz; + u32 ip4_minsg; + u32 ip6_encap; + u32 ip6_maxsz; + u32 ip6_minsg; + u32 ip6_opts; +#define NDIS_LSOV2_CAP_IP6EXT 0x001 +#define NDIS_LSOV2_CAP_TCP6OPT 0x004 + +#define NDIS_LSOV2_CAP_IP6 (NDIS_LSOV2_CAP_IP6EXT | \ + NDIS_LSOV2_CAP_TCP6OPT) +}; + +struct ndis_ipsecv2_offload { + u32 encap; + u16 ip6; + u16 ip4opt; + u16 ip6ext; + u16 ah; + u16 esp; + u16 ah_esp; + u16 xport; + u16 tun; + u16 xport_tun; + u16 lso; + u16 extseq; + u32 udp_esp; + u32 auth; + u32 crypto; + u32 sa_caps; +}; + +struct ndis_rsc_offload { + u16 ip4; + u16 ip6; +}; + +struct ndis_encap_offload { + u32 flags; + u32 maxhdr; +}; + +struct ndis_offload { + struct ndis_object_header header; + struct ndis_csum_offload csum; + struct ndis_lsov1_offload lsov1; + struct ndis_ipsecv1_offload ipsecv1; + struct ndis_lsov2_offload lsov2; + u32 flags; + /* NDIS >= 6.1 */ + struct ndis_ipsecv2_offload ipsecv2; + /* NDIS >= 6.30 */ + struct ndis_rsc_offload rsc; + struct ndis_encap_offload encap_gre; +}; + 
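struct ndis_offload just above is a versioned wire structure: an NDIS 6.0 host's reply stops before the ipsecv2 member and a 6.1 host's stops before rsc, which is why the NDIS_OFFLOAD_SIZE_* macros directly below derive the truncated sizes with offsetof() rather than hard-coding them. A hedged sketch of how a reply length might be validated against the negotiated minor version (function shape assumed, not taken from the driver):

#include <linux/types.h>
#include <linux/stddef.h>

/* Assumes struct ndis_offload as defined above. */
static bool offload_reply_len_ok(unsigned int ndis_minor, size_t len)
{
	if (ndis_minor >= 30)				/* NDIS 6.30+ */
		return len >= sizeof(struct ndis_offload);
	if (ndis_minor >= 1)				/* NDIS 6.1 */
		return len >= offsetof(struct ndis_offload, rsc);
	return len >= offsetof(struct ndis_offload, ipsecv2);	/* NDIS 6.0 */
}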
+#define NDIS_OFFLOAD_SIZE sizeof(struct ndis_offload) +#define NDIS_OFFLOAD_SIZE_6_0 offsetof(struct ndis_offload, ipsecv2) +#define NDIS_OFFLOAD_SIZE_6_1 offsetof(struct ndis_offload, rsc) + struct ndis_offload_params { - struct ndis_oject_header header; + struct ndis_object_header header; u8 ip_v4_csum; u8 tcp_ip_v4_csum; u8 udp_ip_v4_csum; @@ -1301,15 +1446,10 @@ struct rndis_message { #define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400 #define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800 -#define INFO_IPV4 2 -#define INFO_IPV6 4 -#define INFO_TCP 2 -#define INFO_UDP 4 - #define TRANSPORT_INFO_NOT_IP 0 -#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP) -#define TRANSPORT_INFO_IPV4_UDP ((INFO_IPV4 << 16) | INFO_UDP) -#define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP) -#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP) +#define TRANSPORT_INFO_IPV4_TCP 0x01 +#define TRANSPORT_INFO_IPV4_UDP 0x02 +#define TRANSPORT_INFO_IPV6_TCP 0x10 +#define TRANSPORT_INFO_IPV6_UDP 0x20 #endif /* _HYPERV_NET_H */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 5a1cc089acb7..5cfdb1a1b4c1 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -67,14 +67,8 @@ static struct netvsc_device *alloc_net_device(void) if (!net_device) return NULL; - net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL); - if (!net_device->cb_buffer) { - kfree(net_device); - return NULL; - } - - net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX * - sizeof(struct recv_comp_data)); + net_device->chan_table[0].mrc.buf + = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; @@ -91,35 +85,28 @@ static void free_netvsc_device(struct netvsc_device *nvdev) int i; for (i = 0; i < VRSS_CHANNEL_MAX; i++) - vfree(nvdev->mrc[i].buf); + vfree(nvdev->chan_table[i].mrc.buf); - kfree(nvdev->cb_buffer); kfree(nvdev); } -static struct netvsc_device *get_outbound_net_device(struct hv_device *device) -{ - struct netvsc_device *net_device = hv_device_to_netvsc_device(device); - if (net_device && net_device->destroy) - net_device = NULL; +static inline bool netvsc_channel_idle(const struct netvsc_device *net_device, + u16 q_idx) +{ + const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; - return net_device; + return atomic_read(&net_device->num_outstanding_recvs) == 0 && + atomic_read(&nvchan->queue_sends) == 0; } -static struct netvsc_device *get_inbound_net_device(struct hv_device *device) +static struct netvsc_device *get_outbound_net_device(struct hv_device *device) { struct netvsc_device *net_device = hv_device_to_netvsc_device(device); - if (!net_device) - goto get_in_err; - - if (net_device->destroy && - atomic_read(&net_device->num_outstanding_sends) == 0 && - atomic_read(&net_device->num_outstanding_recvs) == 0) + if (net_device && net_device->destroy) net_device = NULL; -get_in_err: return net_device; } @@ -584,7 +571,6 @@ void netvsc_device_remove(struct hv_device *device) vmbus_close(device->channel); /* Release all resources */ - vfree(net_device->sub_cb_buf); free_netvsc_device(net_device); } @@ -620,29 +606,35 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *net_device_ctx = netdev_priv(ndev); struct vmbus_channel *channel = device->channel; - int num_outstanding_sends; u16 q_idx = 0; int queue_sends; /* Notify the layer above us */ if (likely(skb)) { - 
struct hv_netvsc_packet *nvsc_packet + const struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)skb->cb; - u32 send_index = nvsc_packet->send_buf_index; + u32 send_index = packet->send_buf_index; + struct netvsc_stats *tx_stats; if (send_index != NETVSC_INVALID_INDEX) netvsc_free_send_slot(net_device, send_index); - q_idx = nvsc_packet->q_idx; + q_idx = packet->q_idx; channel = incoming_channel; + tx_stats = &net_device->chan_table[q_idx].tx_stats; + + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->packets += packet->total_packets; + tx_stats->bytes += packet->total_bytes; + u64_stats_update_end(&tx_stats->syncp); + dev_consume_skb_any(skb); } - num_outstanding_sends = - atomic_dec_return(&net_device->num_outstanding_sends); - queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]); + queue_sends = + atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); - if (net_device->destroy && num_outstanding_sends == 0) + if (net_device->destroy && queue_sends == 0) wake_up(&net_device->wait_drain); if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && @@ -688,27 +680,15 @@ static void netvsc_send_completion(struct netvsc_device *net_device, static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) { - unsigned long index; - u32 max_words = net_device->map_words; - unsigned long *map_addr = (unsigned long *)net_device->send_section_map; - u32 section_cnt = net_device->send_section_cnt; - int ret_val = NETVSC_INVALID_INDEX; - int i; - int prev_val; - - for (i = 0; i < max_words; i++) { - if (!~(map_addr[i])) - continue; - index = ffz(map_addr[i]); - prev_val = sync_test_and_set_bit(index, &map_addr[i]); - if (prev_val) - continue; - if ((index + (i * BITS_PER_LONG)) >= section_cnt) - break; - ret_val = (index + (i * BITS_PER_LONG)); - break; + unsigned long *map_addr = net_device->send_section_map; + unsigned int i; + + for_each_clear_bit(i, map_addr, net_device->map_words) { + if (sync_test_and_set_bit(i, map_addr) == 0) + return i; } - return ret_val; + + return NETVSC_INVALID_INDEX; } static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, @@ -765,9 +745,11 @@ static inline int netvsc_send_pkt( struct sk_buff *skb) { struct nvsp_message nvmsg; - u16 q_idx = packet->q_idx; - struct vmbus_channel *out_channel = net_device->chn_table[q_idx]; + struct netvsc_channel *nvchan + = &net_device->chan_table[packet->q_idx]; + struct vmbus_channel *out_channel = nvchan->channel; struct net_device *ndev = hv_get_drvdata(device); + struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx); u64 req_id; int ret; struct hv_page_buffer *pgbuf; @@ -827,23 +809,14 @@ static inline int netvsc_send_pkt( } if (ret == 0) { - atomic_inc(&net_device->num_outstanding_sends); - atomic_inc(&net_device->queue_sends[q_idx]); - - if (ring_avail < RING_AVAIL_PERCENT_LOWATER) { - netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx)); + atomic_inc_return(&nvchan->queue_sends); - if (atomic_read(&net_device-> - queue_sends[q_idx]) < 1) - netif_tx_wake_queue(netdev_get_tx_queue( - ndev, q_idx)); - } + if (ring_avail < RING_AVAIL_PERCENT_LOWATER) + netif_tx_stop_queue(txq); } else if (ret == -EAGAIN) { - netif_tx_stop_queue(netdev_get_tx_queue( - ndev, q_idx)); - if (atomic_read(&net_device->queue_sends[q_idx]) < 1) { - netif_tx_wake_queue(netdev_get_tx_queue( - ndev, q_idx)); + netif_tx_stop_queue(txq); + if (atomic_read(&nvchan->queue_sends) < 1) { + netif_tx_wake_queue(txq); ret = -ENOSPC; } } else { @@ -874,8 +847,7 @@ int netvsc_send(struct 
hv_device *device, { struct netvsc_device *net_device; int ret = 0; - struct vmbus_channel *out_channel; - u16 q_idx = packet->q_idx; + struct netvsc_channel *nvchan; u32 pktlen = packet->total_data_buflen, msd_len = 0; unsigned int section_index = NETVSC_INVALID_INDEX; struct multi_send_data *msdp; @@ -895,8 +867,7 @@ int netvsc_send(struct hv_device *device, if (!net_device->send_section_map) return -EAGAIN; - out_channel = net_device->chn_table[q_idx]; - + nvchan = &net_device->chan_table[packet->q_idx]; packet->send_buf_index = NETVSC_INVALID_INDEX; packet->cp_partial = false; @@ -908,9 +879,8 @@ int netvsc_send(struct hv_device *device, goto send_now; } - msdp = &net_device->msd[q_idx]; - /* batch packets in send buffer if possible */ + msdp = &nvchan->msd; if (msdp->pkt) msd_len = msdp->pkt->total_data_buflen; @@ -950,6 +920,11 @@ int netvsc_send(struct hv_device *device, packet->total_data_buflen += msd_len; } + if (msdp->pkt) { + packet->total_packets += msdp->pkt->total_packets; + packet->total_bytes += msdp->pkt->total_bytes; + } + if (msdp->skb) dev_consume_skb_any(msdp->skb); @@ -1011,8 +986,9 @@ static int netvsc_send_recv_completion(struct vmbus_channel *channel, static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx, u32 *filled, u32 *avail) { - u32 first = nvdev->mrc[q_idx].first; - u32 next = nvdev->mrc[q_idx].next; + struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; + u32 first = mrc->first; + u32 next = mrc->next; *filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next : next - first; @@ -1024,26 +1000,26 @@ static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx, static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx) { + struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; u32 filled, avail; - if (!nvdev->mrc[q_idx].buf) + if (unlikely(!mrc->buf)) return NULL; count_recv_comp_slot(nvdev, q_idx, &filled, &avail); if (!filled) return NULL; - return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first * - sizeof(struct recv_comp_data); + return mrc->buf + mrc->first * sizeof(struct recv_comp_data); } /* Put the first filled slot back to available pool */ static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx) { + struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; int num_recv; - nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) % - NETVSC_RECVSLOT_MAX; + mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX; num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs); @@ -1078,13 +1054,14 @@ static void netvsc_chk_recv_comp(struct netvsc_device *nvdev, static inline struct recv_comp_data *get_recv_comp_slot( struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx) { + struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; u32 filled, avail, next; struct recv_comp_data *rcd; - if (!nvdev->recv_section) + if (unlikely(!nvdev->recv_section)) return NULL; - if (!nvdev->mrc[q_idx].buf) + if (unlikely(!mrc->buf)) return NULL; if (atomic_read(&nvdev->num_outstanding_recvs) > @@ -1095,60 +1072,44 @@ static inline struct recv_comp_data *get_recv_comp_slot( if (!avail) return NULL; - next = nvdev->mrc[q_idx].next; - rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data); - nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX; + next = mrc->next; + rcd = mrc->buf + next * sizeof(struct recv_comp_data); + mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX; atomic_inc(&nvdev->num_outstanding_recvs); return rcd; } 
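The receive-completion state reworked above (the mrc fields folding into the new struct netvsc_channel) is a single-producer ring: first indexes the oldest filled slot, next the slot to fill, and one slot is sacrificed so that full and empty remain distinguishable — exactly the arithmetic in count_recv_comp_slot(). A standalone model of those index computations, assuming the same convention:

#include <assert.h>

#define SLOT_MAX 64	/* stand-in for NETVSC_RECVSLOT_MAX */

struct ring {
	unsigned int first;	/* oldest filled slot */
	unsigned int next;	/* next slot to fill */
};

static unsigned int ring_filled(const struct ring *r)
{
	return (r->first > r->next) ? SLOT_MAX - r->first + r->next
				    : r->next - r->first;
}

static unsigned int ring_avail(const struct ring *r)
{
	return SLOT_MAX - ring_filled(r) - 1;	/* one slot kept empty */
}

int main(void)
{
	struct ring r = { .first = 60, .next = 2 };	/* wrapped case */

	assert(ring_filled(&r) == 6);
	assert(ring_avail(&r) == SLOT_MAX - 7);
	return 0;
}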
-static void netvsc_receive(struct netvsc_device *net_device, - struct vmbus_channel *channel, - struct hv_device *device, - struct vmpacket_descriptor *packet) +static void netvsc_receive(struct net_device *ndev, + struct netvsc_device *net_device, + struct net_device_context *net_device_ctx, + struct hv_device *device, + struct vmbus_channel *channel, + struct vmtransfer_page_packet_header *vmxferpage_packet, + struct nvsp_message *nvsp) { - struct vmtransfer_page_packet_header *vmxferpage_packet; - struct nvsp_message *nvsp_packet; - struct hv_netvsc_packet nv_pkt; - struct hv_netvsc_packet *netvsc_packet = &nv_pkt; + char *recv_buf = net_device->recv_buf; u32 status = NVSP_STAT_SUCCESS; int i; int count = 0; - struct net_device *ndev = hv_get_drvdata(device); - void *data; int ret; struct recv_comp_data *rcd; u16 q_idx = channel->offermsg.offer.sub_channel_index; - /* - * All inbound packets other than send completion should be xfer page - * packet - */ - if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) { - netdev_err(ndev, "Unknown packet type received - %d\n", - packet->type); - return; - } - - nvsp_packet = (struct nvsp_message *)((unsigned long)packet + - (packet->offset8 << 3)); - /* Make sure this is a valid nvsp packet */ - if (nvsp_packet->hdr.msg_type != - NVSP_MSG1_TYPE_SEND_RNDIS_PKT) { - netdev_err(ndev, "Unknown nvsp packet type received-" - " %d\n", nvsp_packet->hdr.msg_type); + if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) { + netif_err(net_device_ctx, rx_err, ndev, + "Unknown nvsp packet type received %u\n", + nvsp->hdr.msg_type); return; } - vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet; - - if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) { - netdev_err(ndev, "Invalid xfer page set id - " - "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID, - vmxferpage_packet->xfer_pageset_id); + if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) { + netif_err(net_device_ctx, rx_err, ndev, + "Invalid xfer page set id - expecting %x got %x\n", + NETVSC_RECEIVE_BUFFER_ID, + vmxferpage_packet->xfer_pageset_id); return; } @@ -1156,18 +1117,16 @@ static void netvsc_receive(struct netvsc_device *net_device, /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */ for (i = 0; i < count; i++) { - /* Initialize the netvsc packet */ - data = (void *)((unsigned long)net_device-> - recv_buf + vmxferpage_packet->ranges[i].byte_offset); - netvsc_packet->total_data_buflen = - vmxferpage_packet->ranges[i].byte_count; + void *data = recv_buf + + vmxferpage_packet->ranges[i].byte_offset; + u32 buflen = vmxferpage_packet->ranges[i].byte_count; /* Pass it to the upper layer */ - status = rndis_filter_receive(device, netvsc_packet, &data, - channel); + status = rndis_filter_receive(ndev, net_device, device, + channel, data, buflen); } - if (!net_device->mrc[q_idx].buf) { + if (!net_device->chan_table[q_idx].mrc.buf) { ret = netvsc_send_recv_completion(channel, vmxferpage_packet->d.trans_id, status); @@ -1243,11 +1202,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device, u64 request_id, struct vmpacket_descriptor *desc) { - struct nvsp_message *nvmsg; struct net_device_context *net_device_ctx = netdev_priv(ndev); - - nvmsg = (struct nvsp_message *)((unsigned long) - desc + (desc->offset8 << 3)); + struct nvsp_message *nvmsg + = (struct nvsp_message *)((unsigned long)desc + + (desc->offset8 << 3)); switch (desc->type) { case VM_PKT_COMP: @@ -1255,7 +1213,10 @@ static void 
netvsc_process_raw_pkt(struct hv_device *device, break; case VM_PKT_DATA_USING_XFER_PAGES: - netvsc_receive(net_device, channel, device, desc); + netvsc_receive(ndev, net_device, net_device_ctx, + device, channel, + (struct vmtransfer_page_packet_header *)desc, + nvmsg); break; case VM_PKT_DATA_INBAND: @@ -1271,16 +1232,11 @@ static void netvsc_process_raw_pkt(struct hv_device *device, void netvsc_channel_cb(void *context) { - int ret; - struct vmbus_channel *channel = (struct vmbus_channel *)context; + struct vmbus_channel *channel = context; u16 q_idx = channel->offermsg.offer.sub_channel_index; struct hv_device *device; struct netvsc_device *net_device; - u32 bytes_recvd; - u64 request_id; struct vmpacket_descriptor *desc; - unsigned char *buffer; - int bufferlen = NETVSC_PACKET_SIZE; struct net_device *ndev; bool need_to_commit = false; @@ -1289,68 +1245,25 @@ void netvsc_channel_cb(void *context) else device = channel->device_obj; - net_device = get_inbound_net_device(device); - if (!net_device) - return; ndev = hv_get_drvdata(device); - buffer = get_per_channel_state(channel); - - do { - desc = get_next_pkt_raw(channel); - if (desc != NULL) { - netvsc_process_raw_pkt(device, - channel, - net_device, - ndev, - desc->trans_id, - desc); - - put_pkt_raw(channel, desc); - need_to_commit = true; - continue; - } - if (need_to_commit) { - need_to_commit = false; - commit_rd_index(channel); - } + if (unlikely(!ndev)) + return; - ret = vmbus_recvpacket_raw(channel, buffer, bufferlen, - &bytes_recvd, &request_id); - if (ret == 0) { - if (bytes_recvd > 0) { - desc = (struct vmpacket_descriptor *)buffer; - netvsc_process_raw_pkt(device, - channel, - net_device, - ndev, - request_id, - desc); - } else { - /* - * We are done for this pass. - */ - break; - } - - } else if (ret == -ENOBUFS) { - if (bufferlen > NETVSC_PACKET_SIZE) - kfree(buffer); - /* Handle large packet */ - buffer = kmalloc(bytes_recvd, GFP_ATOMIC); - if (buffer == NULL) { - /* Try again next time around */ - netdev_err(ndev, - "unable to allocate buffer of size " - "(%d)!!\n", bytes_recvd); - break; - } - - bufferlen = bytes_recvd; - } - } while (1); + net_device = net_device_to_netvsc_device(ndev); + if (unlikely(net_device->destroy) && + netvsc_channel_idle(net_device, q_idx)) + return; + + while ((desc = get_next_pkt_raw(channel)) != NULL) { + netvsc_process_raw_pkt(device, channel, net_device, + ndev, desc->trans_id, desc); - if (bufferlen > NETVSC_PACKET_SIZE) - kfree(buffer); + put_pkt_raw(channel, desc); + need_to_commit = true; + } + + if (need_to_commit) + commit_rd_index(channel); netvsc_chk_recv_comp(net_device, channel, q_idx); } @@ -1359,11 +1272,11 @@ void netvsc_channel_cb(void *context) * netvsc_device_add - Callback when the device belonging to this * driver is added */ -int netvsc_device_add(struct hv_device *device, void *additional_info) +int netvsc_device_add(struct hv_device *device, + const struct netvsc_device_info *device_info) { int i, ret = 0; - int ring_size = - ((struct netvsc_device_info *)additional_info)->ring_size; + int ring_size = device_info->ring_size; struct netvsc_device *net_device; struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *net_device_ctx = netdev_priv(ndev); @@ -1374,8 +1287,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) net_device->ring_size = ring_size; - set_per_channel_state(device->channel, net_device->cb_buffer); - /* Open the channel */ ret = vmbus_open(device->channel, ring_size * PAGE_SIZE, ring_size * PAGE_SIZE, NULL, 
0, @@ -1394,7 +1305,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) * opened. */ for (i = 0; i < VRSS_CHANNEL_MAX; i++) - net_device->chn_table[i] = device->channel; + net_device->chan_table[i].channel = device->channel; /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is * populated. diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 05374fce7da4..72b0c1f7496e 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -42,21 +42,11 @@ #define RING_SIZE_MIN 64 #define LINKCHANGE_INT (2 * HZ) -#define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \ - NETIF_F_SG | \ - NETIF_F_TSO | \ - NETIF_F_TSO6 | \ - NETIF_F_HW_CSUM) - -/* Restrict GSO size to account for NVGRE */ -#define NETVSC_GSO_MAX_SIZE 62768 static int ring_size = 128; module_param(ring_size, int, S_IRUGO); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); -static int max_num_vrss_chns = 8; - static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | @@ -145,7 +135,7 @@ static int netvsc_close(struct net_device *net) while (true) { aread = 0; for (i = 0; i < nvdev->num_chn; i++) { - chn = nvdev->chn_table[i]; + chn = nvdev->chan_table[i].channel; if (!chn) continue; @@ -201,22 +191,41 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, return ppi; } +/* + * Select queue for transmit. + * + * If a valid queue has already been assigned, then use that. + * Otherwise compute tx queue based on hash and the send table. + * + * This is basically similar to default (__netdev_pick_tx) with the added step + * of using the host send_table when no other queue has been assigned. + * + * TODO support XPS - but get_xps_queue not exported + */ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { struct net_device_context *net_device_ctx = netdev_priv(ndev); struct netvsc_device *nvsc_dev = net_device_ctx->nvdev; - u32 hash; - u16 q_idx = 0; + struct sock *sk = skb->sk; + int q_idx = sk_tx_queue_get(sk); - if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1) - return 0; + if (q_idx < 0 || skb->ooo_okay || + q_idx >= ndev->real_num_tx_queues) { + u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE); + int new_idx; + + new_idx = nvsc_dev->send_table[hash] + % nvsc_dev->num_chn; + + if (q_idx != new_idx && sk && + sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) + sk_tx_queue_set(sk, new_idx); - hash = skb_get_hash(skb); - q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % - ndev->real_num_tx_queues; + q_idx = new_idx; + } - if (!nvsc_dev->chn_table[q_idx]) + if (unlikely(!nvsc_dev->chan_table[q_idx].channel)) q_idx = 0; return q_idx; @@ -323,33 +332,25 @@ static int netvsc_get_slots(struct sk_buff *skb) return slots + frag_slots; } -static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off) +static u32 net_checksum_info(struct sk_buff *skb) { - u32 ret_val = TRANSPORT_INFO_NOT_IP; - - if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) && - (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) { - goto not_ip; - } + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip = ip_hdr(skb); - *trans_off = skb_transport_offset(skb); - - if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) { - struct iphdr *iphdr = ip_hdr(skb); - - if (iphdr->protocol == IPPROTO_TCP) - ret_val = TRANSPORT_INFO_IPV4_TCP; - else if (iphdr->protocol == IPPROTO_UDP) - ret_val = 
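The netvsc_select_queue rewrite above reduces a flow hash through the host-provided send table and then modulo the live channel count, caching the result in the socket when it is safe to do so. A minimal userspace sketch of that two-step reduction follows; the table size of 16 (VRSS_SEND_TAB_SIZE) and the table contents are assumptions for illustration, not the driver's values.

#include <stdint.h>
#include <stdio.h>

#define SEND_TAB_SIZE 16   /* assumption: mirrors VRSS_SEND_TAB_SIZE */

/* Reduce a flow hash to a tx queue via the host's send table. */
static unsigned int pick_tx_queue(uint32_t flow_hash,
                                  const uint32_t *send_table,
                                  unsigned int num_chn)
{
    /* First fold the hash into a send-table slot... */
    uint32_t slot = flow_hash % SEND_TAB_SIZE;

    /* ...then clamp the host's suggestion to the active channels. */
    return send_table[slot] % num_chn;
}

int main(void)
{
    /* A send table the host might publish (illustrative values). */
    uint32_t send_table[SEND_TAB_SIZE] = {
        0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
    };
    uint32_t hash = 0xdeadbeef;

    printf("hash %#x -> queue %u\n", (unsigned)hash,
           pick_tx_queue(hash, send_table, 4));
    return 0;
}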
TRANSPORT_INFO_IPV4_UDP; + if (ip->protocol == IPPROTO_TCP) + return TRANSPORT_INFO_IPV4_TCP; + else if (ip->protocol == IPPROTO_UDP) + return TRANSPORT_INFO_IPV4_UDP; } else { - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - ret_val = TRANSPORT_INFO_IPV6_TCP; + struct ipv6hdr *ip6 = ipv6_hdr(skb); + + if (ip6->nexthdr == IPPROTO_TCP) + return TRANSPORT_INFO_IPV6_TCP; else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) - ret_val = TRANSPORT_INFO_IPV6_UDP; + return TRANSPORT_INFO_IPV6_UDP; } -not_ip: - return ret_val; + return TRANSPORT_INFO_NOT_IP; } static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) @@ -362,11 +363,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) struct rndis_packet *rndis_pkt; u32 rndis_msg_size; struct rndis_per_packet_info *ppi; - struct ndis_tcp_ip_checksum_info *csum_info; - int hdr_offset; - u32 net_trans_info; u32 hash; - u32 skb_length; struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; struct hv_page_buffer *pb = page_buf; @@ -376,7 +373,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) * more pages we try linearizing it. */ - skb_length = skb->len; num_data_pgs = netvsc_get_slots(skb) + 2; if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { @@ -409,6 +405,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) packet->q_idx = skb_get_queue_mapping(skb); packet->total_data_buflen = skb->len; + packet->total_bytes = skb->len; + packet->total_packets = 1; rndis_msg = (struct rndis_message *)skb->head; @@ -445,13 +443,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) VLAN_PRIO_SHIFT; } - net_trans_info = get_net_transport_info(skb, &hdr_offset); - - /* - * Setup the sendside checksum offload only if this is not a - * GSO packet. 
- */ - if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) { + if (skb_is_gso(skb)) { struct ndis_tcp_lso_info *lso_info; rndis_msg_size += NDIS_LSO_PPI_SIZE; @@ -462,7 +454,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) ppi->ppi_offset); lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; - if (net_trans_info & (INFO_IPV4 << 16)) { + if (skb->protocol == htons(ETH_P_IP)) { lso_info->lso_v2_transmit.ip_version = NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4; ip_hdr(skb)->tot_len = 0; @@ -478,10 +470,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } - lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset; + lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb); lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (net_trans_info & INFO_TCP) { + if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) { + struct ndis_tcp_ip_checksum_info *csum_info; + rndis_msg_size += NDIS_CSUM_PPI_SIZE; ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, TCPIP_CHKSUM_PKTINFO); @@ -489,15 +483,25 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi + ppi->ppi_offset); - if (net_trans_info & (INFO_IPV4 << 16)) + csum_info->transmit.tcp_header_offset = skb_transport_offset(skb); + + if (skb->protocol == htons(ETH_P_IP)) { csum_info->transmit.is_ipv4 = 1; - else + + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + csum_info->transmit.tcp_checksum = 1; + else + csum_info->transmit.udp_checksum = 1; + } else { csum_info->transmit.is_ipv6 = 1; - csum_info->transmit.tcp_checksum = 1; - csum_info->transmit.tcp_header_offset = hdr_offset; + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + csum_info->transmit.tcp_checksum = 1; + else + csum_info->transmit.udp_checksum = 1; + } } else { - /* UDP checksum (and other) offload is not supported. */ + /* Can't do offload of this type of checksum */ if (skb_checksum_help(skb)) goto drop; } @@ -513,15 +517,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) skb_tx_timestamp(skb); ret = netvsc_send(net_device_ctx->device_ctx, packet, rndis_msg, &pb, skb); - if (likely(ret == 0)) { - struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats); - - u64_stats_update_begin(&tx_stats->syncp); - tx_stats->packets++; - tx_stats->bytes += skb_length; - u64_stats_update_end(&tx_stats->syncp); + if (likely(ret == 0)) return NETDEV_TX_OK; - } if (ret == -EAGAIN) { ++net_device_ctx->eth_stats.tx_busy; @@ -541,7 +538,6 @@ no_memory: ++net_device_ctx->eth_stats.tx_no_memory; goto drop; } - /* * netvsc_linkstatus_callback - Link up/down notification */ @@ -593,13 +589,13 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, } static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, - struct hv_netvsc_packet *packet, - struct ndis_tcp_ip_checksum_info *csum_info, - void *data, u16 vlan_tci) + const struct ndis_tcp_ip_checksum_info *csum_info, + const struct ndis_pkt_8021q_info *vlan, + void *data, u32 buflen) { struct sk_buff *skb; - skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen); + skb = netdev_alloc_skb_ip_align(net, buflen); if (!skb) return skb; @@ -607,8 +603,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, * Copy to skb. 
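The transmit path above now offloads a checksum only when the packet's transport classification intersects tx_checksum_mask, the set of offloads the host actually advertised; everything else falls back to software via skb_checksum_help(). A small sketch of that decision, assuming illustrative bit values for the TRANSPORT_INFO_* flags:

#include <stdbool.h>
#include <stdio.h>

/* Transport classification bits (values are assumptions). */
#define TRANSPORT_INFO_IPV4_TCP (1u << 0)
#define TRANSPORT_INFO_IPV4_UDP (1u << 1)
#define TRANSPORT_INFO_IPV6_TCP (1u << 2)
#define TRANSPORT_INFO_IPV6_UDP (1u << 3)

/* Offload only what the host advertised; otherwise checksum in
 * software before handing the packet to the device. */
static bool can_offload_csum(unsigned int pkt_info, unsigned int tx_mask)
{
    return (pkt_info & tx_mask) != 0;
}

int main(void)
{
    /* Host advertised TCP checksum offload only (illustrative). */
    unsigned int mask = TRANSPORT_INFO_IPV4_TCP | TRANSPORT_INFO_IPV6_TCP;

    printf("ipv4 tcp: %s\n", can_offload_csum(TRANSPORT_INFO_IPV4_TCP, mask)
           ? "offload" : "software fallback");
    printf("ipv4 udp: %s\n", can_offload_csum(TRANSPORT_INFO_IPV4_UDP, mask)
           ? "offload" : "software fallback");
    return 0;
}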
This copy is needed here since the memory pointed by * hv_netvsc_packet cannot be deallocated */ - memcpy(skb_put(skb, packet->total_data_buflen), data, - packet->total_data_buflen); + memcpy(skb_put(skb, buflen), data, buflen); skb->protocol = eth_type_trans(skb, net); @@ -625,9 +620,12 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, skb->ip_summed = CHECKSUM_UNNECESSARY; } - if (vlan_tci & VLAN_TAG_PRESENT) + if (vlan) { + u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); + } return skb; } @@ -636,18 +634,19 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, * netvsc_recv_callback - Callback when we receive a packet from the * "wire" on the specified device. */ -int netvsc_recv_callback(struct hv_device *device_obj, - struct hv_netvsc_packet *packet, - void **data, - struct ndis_tcp_ip_checksum_info *csum_info, - struct vmbus_channel *channel, - u16 vlan_tci) +int netvsc_recv_callback(struct net_device *net, + struct vmbus_channel *channel, + void *data, u32 len, + const struct ndis_tcp_ip_checksum_info *csum_info, + const struct ndis_pkt_8021q_info *vlan) { - struct net_device *net = hv_get_drvdata(device_obj); struct net_device_context *net_device_ctx = netdev_priv(net); + struct netvsc_device *net_device = net_device_ctx->nvdev; struct net_device *vf_netdev; struct sk_buff *skb; struct netvsc_stats *rx_stats; + u16 q_idx = channel->offermsg.offer.sub_channel_index; + if (net->reg_state != NETREG_REGISTERED) return NVSP_STAT_FAIL; @@ -659,30 +658,31 @@ int netvsc_recv_callback(struct hv_device *device_obj, * policy filters on the host). Deliver these via the VF * interface in the guest. */ + rcu_read_lock(); vf_netdev = rcu_dereference(net_device_ctx->vf_netdev); if (vf_netdev && (vf_netdev->flags & IFF_UP)) net = vf_netdev; /* Allocate a skb - TODO direct I/O to pages? */ - skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci); + skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len); if (unlikely(!skb)) { ++net->stats.rx_dropped; + rcu_read_unlock(); return NVSP_STAT_FAIL; } if (net != vf_netdev) - skb_record_rx_queue(skb, - channel->offermsg.offer.sub_channel_index); + skb_record_rx_queue(skb, q_idx); /* * Even if injecting the packet, record the statistics * on the synthetic device because modifying the VF device * statistics will not work correctly. */ - rx_stats = this_cpu_ptr(net_device_ctx->rx_stats); + rx_stats = &net_device->chan_table[q_idx].rx_stats; u64_stats_update_begin(&rx_stats->syncp); rx_stats->packets++; - rx_stats->bytes += packet->total_data_buflen; + rx_stats->bytes += len; if (skb->pkt_type == PACKET_BROADCAST) ++rx_stats->broadcast; @@ -695,7 +695,8 @@ int netvsc_recv_callback(struct hv_device *device_obj, * is done. * TODO - use NAPI? 
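The receive path above moves the counters from per-cpu to per-channel storage but keeps the u64_stats_sync discipline: a sequence count lets a reader take a consistent 64-bit snapshot without locking out the writer. A userspace model of that pattern, with the memory-ordering details simplified (the kernel primitives insert the proper barriers):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
    atomic_uint seq;      /* odd while an update is in flight */
    uint64_t packets;
    uint64_t bytes;
};

static void stats_update(struct stats *s, uint64_t len)
{
    atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
    s->packets++;
    s->bytes += len;
    atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

/* Retry until a snapshot was taken with no writer active. */
static void stats_read(struct stats *s, uint64_t *packets, uint64_t *bytes)
{
    unsigned int start;

    do {
        start = atomic_load_explicit(&s->seq, memory_order_acquire);
        *packets = s->packets;
        *bytes = s->bytes;
    } while ((start & 1) ||
             start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
    struct stats s = { 0 };
    uint64_t p, b;

    stats_update(&s, 1500);
    stats_update(&s, 60);
    stats_read(&s, &p, &b);
    printf("%llu packets, %llu bytes\n",
           (unsigned long long)p, (unsigned long long)b);
    return 0;
}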
*/ - netif_rx(skb); + netif_receive_skb(skb); + rcu_read_unlock(); return 0; } @@ -719,102 +720,76 @@ static void netvsc_get_channels(struct net_device *net, } } +static int netvsc_set_queues(struct net_device *net, struct hv_device *dev, + u32 num_chn) +{ + struct netvsc_device_info device_info; + int ret; + + memset(&device_info, 0, sizeof(device_info)); + device_info.num_chn = num_chn; + device_info.ring_size = ring_size; + device_info.max_num_vrss_chns = num_chn; + + ret = rndis_filter_device_add(dev, &device_info); + if (ret) + return ret; + + ret = netif_set_real_num_tx_queues(net, num_chn); + if (ret) + return ret; + + ret = netif_set_real_num_rx_queues(net, num_chn); + + return ret; +} + static int netvsc_set_channels(struct net_device *net, struct ethtool_channels *channels) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *dev = net_device_ctx->device_ctx; struct netvsc_device *nvdev = net_device_ctx->nvdev; - struct netvsc_device_info device_info; - u32 num_chn; - u32 max_chn; - int ret = 0; - bool recovering = false; + unsigned int count = channels->combined_count; + int ret; + + /* We do not support separate count for rx, tx, or other */ + if (count == 0 || + channels->rx_count || channels->tx_count || channels->other_count) + return -EINVAL; + + if (count > net->num_tx_queues || count > net->num_rx_queues) + return -EINVAL; if (net_device_ctx->start_remove || !nvdev || nvdev->destroy) return -ENODEV; - num_chn = nvdev->num_chn; - max_chn = min_t(u32, nvdev->max_chn, num_online_cpus()); - - if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) { - pr_info("vRSS unsupported before NVSP Version 5\n"); + if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) return -EINVAL; - } - /* We do not support rx, tx, or other */ - if (!channels || - channels->rx_count || - channels->tx_count || - channels->other_count || - (channels->combined_count < 1)) + if (count > nvdev->max_chn) return -EINVAL; - if (channels->combined_count > max_chn) { - pr_info("combined channels too high, using %d\n", max_chn); - channels->combined_count = max_chn; - } - ret = netvsc_close(net); if (ret) - goto out; + return ret; - do_set: net_device_ctx->start_remove = true; - rndis_filter_device_remove(dev); - - nvdev->num_chn = channels->combined_count; - - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */ - device_info.ring_size = ring_size; - device_info.max_num_vrss_chns = max_num_vrss_chns; + rndis_filter_device_remove(dev, nvdev); - ret = rndis_filter_device_add(dev, &device_info); - if (ret) { - if (recovering) { - netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); - return ret; - } - goto recover; - } - - nvdev = net_device_ctx->nvdev; - - ret = netif_set_real_num_tx_queues(net, nvdev->num_chn); - if (ret) { - if (recovering) { - netdev_err(net, "could not set tx queue count (ret %d)\n", ret); - return ret; - } - goto recover; - } - - ret = netif_set_real_num_rx_queues(net, nvdev->num_chn); - if (ret) { - if (recovering) { - netdev_err(net, "could not set rx queue count (ret %d)\n", ret); - return ret; - } - goto recover; - } + ret = netvsc_set_queues(net, dev, count); + if (ret == 0) + nvdev->num_chn = count; + else + netvsc_set_queues(net, dev, nvdev->num_chn); - out: netvsc_open(net); net_device_ctx->start_remove = false; + /* We may have missed link change notifications */ schedule_delayed_work(&net_device_ctx->dwork, 0); return ret; - - recover: - /* If the above failed, we attempt to recover through the 
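The netvsc_set_channels rewrite above replaces the open-coded "recovering" retry with a simpler commit-or-rollback: try the requested count, commit it on success, otherwise re-apply the old count. A compact sketch of that shape, with a stand-in for netvsc_set_queues():

#include <stdio.h>

struct dev { unsigned int num_chn; unsigned int max_chn; };

/* Stand-in for the real reconfiguration call: fails above the max. */
static int set_queue_count(struct dev *d, unsigned int count)
{
    return count <= d->max_chn ? 0 : -1;
}

static int change_channels(struct dev *d, unsigned int new_count)
{
    int ret = set_queue_count(d, new_count);

    if (ret == 0)
        d->num_chn = new_count;          /* commit the new count */
    else
        set_queue_count(d, d->num_chn);  /* roll back to the old one */

    return ret;
}

int main(void)
{
    struct dev d = { .num_chn = 2, .max_chn = 4 };

    printf("grow to 4: %d (num_chn=%u)\n", change_channels(&d, 4), d.num_chn);
    printf("grow to 8: %d (num_chn=%u)\n", change_channels(&d, 8), d.num_chn);
    return 0;
}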
same - * process but with the original number of channels. - */ - netdev_err(net, "could not set channels, recovering\n"); - recovering = true; - channels->combined_count = num_chn; - goto do_set; } static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd) @@ -875,8 +850,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct netvsc_device *nvdev = ndevctx->nvdev; struct hv_device *hdev = ndevctx->device_ctx; struct netvsc_device_info device_info; - u32 num_chn; - int ret = 0; + int ret; if (ndevctx->start_remove || !nvdev || nvdev->destroy) return -ENODEV; @@ -885,17 +859,15 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) if (ret) goto out; - num_chn = nvdev->num_chn; - ndevctx->start_remove = true; - rndis_filter_device_remove(hdev); + rndis_filter_device_remove(hdev, nvdev); ndev->mtu = mtu; memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; - device_info.num_chn = num_chn; - device_info.max_num_vrss_chns = max_num_vrss_chns; + device_info.num_chn = nvdev->num_chn; + device_info.max_num_vrss_chns = nvdev->num_chn; rndis_filter_device_add(hdev, &device_info); out: @@ -912,34 +884,39 @@ static void netvsc_get_stats64(struct net_device *net, struct rtnl_link_stats64 *t) { struct net_device_context *ndev_ctx = netdev_priv(net); - int cpu; - - for_each_possible_cpu(cpu) { - struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats, - cpu); - struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats, - cpu); - u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast; + struct netvsc_device *nvdev = ndev_ctx->nvdev; + int i; + + if (!nvdev) + return; + + for (i = 0; i < nvdev->num_chn; i++) { + const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; + const struct netvsc_stats *stats; + u64 packets, bytes, multicast; unsigned int start; + stats = &nvchan->tx_stats; do { - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); - tx_packets = tx_stats->packets; - tx_bytes = tx_stats->bytes; - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + t->tx_bytes += bytes; + t->tx_packets += packets; + + stats = &nvchan->rx_stats; do { - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); - rx_packets = rx_stats->packets; - rx_bytes = rx_stats->bytes; - rx_multicast = rx_stats->multicast + rx_stats->broadcast; - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); - - t->tx_bytes += tx_bytes; - t->tx_packets += tx_packets; - t->rx_bytes += rx_bytes; - t->rx_packets += rx_packets; - t->multicast += rx_multicast; + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + multicast = stats->multicast + stats->broadcast; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + t->rx_bytes += bytes; + t->rx_packets += packets; + t->multicast += multicast; } t->tx_dropped = net->stats.tx_dropped; @@ -984,11 +961,19 @@ static const struct { { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, }; +#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats) + +/* 4 statistics per queue (rx/tx packets/bytes) */ +#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4) + static int netvsc_get_sset_count(struct net_device *dev, int string_set) { + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *nvdev = ndc->nvdev; + switch (string_set) { case 
ETH_SS_STATS: - return ARRAY_SIZE(netvsc_stats); + return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev); default: return -EINVAL; } @@ -998,26 +983,109 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *nvdev = ndc->nvdev; const void *nds = &ndc->eth_stats; - int i; + const struct netvsc_stats *qstats; + unsigned int start; + u64 packets, bytes; + int i, j; - for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) + for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++) data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset); + + for (j = 0; j < nvdev->num_chn; j++) { + qstats = &nvdev->chan_table[j].tx_stats; + + do { + start = u64_stats_fetch_begin_irq(&qstats->syncp); + packets = qstats->packets; + bytes = qstats->bytes; + } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); + data[i++] = packets; + data[i++] = bytes; + + qstats = &nvdev->chan_table[j].rx_stats; + do { + start = u64_stats_fetch_begin_irq(&qstats->syncp); + packets = qstats->packets; + bytes = qstats->bytes; + } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); + data[i++] = packets; + data[i++] = bytes; + } } static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) { + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *nvdev = ndc->nvdev; + u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) - memcpy(data + i * ETH_GSTRING_LEN, + memcpy(p + i * ETH_GSTRING_LEN, netvsc_stats[i].name, ETH_GSTRING_LEN); + + p += i * ETH_GSTRING_LEN; + for (i = 0; i < nvdev->num_chn; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + break; } } +static int +netvsc_get_rss_hash_opts(struct netvsc_device *nvdev, + struct ethtool_rxnfc *info) +{ + info->data = RXH_IP_SRC | RXH_IP_DST; + + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fallthrough */ + case UDP_V4_FLOW: + case UDP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: + break; + default: + info->data = 0; + break; + } + + return 0; +} + +static int +netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules) +{ + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *nvdev = ndc->nvdev; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = nvdev->num_chn; + return 0; + + case ETHTOOL_GRXFH: + return netvsc_get_rss_hash_opts(nvdev, info); + } + return -EOPNOTSUPP; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void netvsc_poll_controller(struct net_device *net) { @@ -1027,6 +1095,68 @@ static void netvsc_poll_controller(struct net_device *net) } #endif +static u32 netvsc_get_rxfh_key_size(struct net_device *dev) +{ + return NETVSC_HASH_KEYLEN; +} + +static u32 netvsc_rss_indir_size(struct net_device *dev) +{ + return ITAB_NUM; +} + +static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *ndev = ndc->nvdev; + struct rndis_device *rndis_dev = ndev->extension; + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ + + if (indir) { + for (i = 0; i < ITAB_NUM; i++) + indir[i] = rndis_dev->ind_table[i]; + 
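The per-queue ethtool statistics added above depend on get_strings emitting names in exactly the order get_ethtool_stats later writes values, with get_sset_count reporting the matching total (4 stats per queue). A standalone sketch of the string layout; ETH_GSTRING_LEN is set to 32 here to match the kernel's value, but treat it as an assumption:

#include <stdio.h>

#define ETH_GSTRING_LEN 32
#define STATS_PER_QUEUE 4   /* tx/rx packets and bytes */

/* Fill the per-queue stat names in the same order the values are
 * later written, so names and numbers stay aligned. */
static void fill_queue_strings(char *p, unsigned int num_chn)
{
    unsigned int i;

    for (i = 0; i < num_chn; i++) {
        snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i);
        p += ETH_GSTRING_LEN;
        snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
        p += ETH_GSTRING_LEN;
        snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i);
        p += ETH_GSTRING_LEN;
        snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
        p += ETH_GSTRING_LEN;
    }
}

int main(void)
{
    char buf[2 * STATS_PER_QUEUE * ETH_GSTRING_LEN];
    unsigned int i;

    fill_queue_strings(buf, 2);
    for (i = 0; i < 2 * STATS_PER_QUEUE; i++)
        puts(buf + i * ETH_GSTRING_LEN);
    return 0;
}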
} + if (key) + memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN); + return 0; +} + +static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *ndev = ndc->nvdev; + struct rndis_device *rndis_dev = ndev->extension; + int i; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (indir) { + for (i = 0; i < ITAB_NUM; i++) + if (indir[i] >= dev->num_rx_queues) + return -EINVAL; + + for (i = 0; i < ITAB_NUM; i++) + rndis_dev->ind_table[i] = indir[i]; + } + + if (!key) { + if (!indir) + return 0; + + key = rndis_dev->rss_key; + } + + return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn); +} + static const struct ethtool_ops ethtool_ops = { .get_drvinfo = netvsc_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1038,6 +1168,11 @@ static const struct ethtool_ops ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .get_settings = netvsc_get_settings, .set_settings = netvsc_set_settings, + .get_rxnfc = netvsc_get_rxnfc, + .get_rxfh_key_size = netvsc_get_rxfh_key_size, + .get_rxfh_indir_size = netvsc_rss_indir_size, + .get_rxfh = netvsc_get_rxfh, + .set_rxfh = netvsc_set_rxfh, }; static const struct net_device_ops device_ops = { @@ -1158,15 +1293,6 @@ out_unlock: rtnl_unlock(); } -static void netvsc_free_netdev(struct net_device *netdev) -{ - struct net_device_context *net_device_ctx = netdev_priv(netdev); - - free_percpu(net_device_ctx->tx_stats); - free_percpu(net_device_ctx->rx_stats); - free_netdev(netdev); -} - static struct net_device *get_netvsc_bymac(const u8 *mac) { struct net_device *dev; @@ -1303,7 +1429,6 @@ static int netvsc_vf_down(struct net_device *vf_netdev) static int netvsc_unregister_vf(struct net_device *vf_netdev) { struct net_device *ndev; - struct netvsc_device *netvsc_dev; struct net_device_context *net_device_ctx; ndev = get_netvsc_byref(vf_netdev); @@ -1311,7 +1436,6 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) return NOTIFY_DONE; net_device_ctx = netdev_priv(ndev); - netvsc_dev = net_device_ctx->nvdev; netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); @@ -1331,7 +1455,7 @@ static int netvsc_probe(struct hv_device *dev, int ret; net = alloc_etherdev_mq(sizeof(struct net_device_context), - num_online_cpus()); + VRSS_CHANNEL_MAX); if (!net) return -ENOMEM; @@ -1346,18 +1470,6 @@ static int netvsc_probe(struct hv_device *dev, netdev_dbg(net, "netvsc msg_enable: %d\n", net_device_ctx->msg_enable); - net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats); - if (!net_device_ctx->tx_stats) { - free_netdev(net); - return -ENOMEM; - } - net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats); - if (!net_device_ctx->rx_stats) { - free_percpu(net_device_ctx->tx_stats); - free_netdev(net); - return -ENOMEM; - } - hv_set_drvdata(dev, net); net_device_ctx->start_remove = false; @@ -1369,10 +1481,6 @@ static int netvsc_probe(struct hv_device *dev, INIT_LIST_HEAD(&net_device_ctx->reconfig_events); net->netdev_ops = &device_ops; - - net->hw_features = NETVSC_HW_FEATURES; - net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX; - net->ethtool_ops = &ethtool_ops; SET_NETDEV_DEV(net, &dev->device); @@ -1382,20 +1490,26 @@ static int netvsc_probe(struct hv_device *dev, /* Notify the netvsc driver of the new device */ memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; - device_info.max_num_vrss_chns = 
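netvsc_set_rxfh above validates the whole user-supplied RSS indirection table before writing any of it, so a bad entry cannot leave the table half-updated. A self-contained sketch of that two-pass validate-then-commit shape; the ITAB_NUM value of 128 matches the kernel's constant but is stated here as an assumption:

#include <stdio.h>

#define ITAB_NUM 128   /* assumption: RSS indirection table entries */

/* Every entry must name a real rx queue; nothing is written until
 * the whole table has been checked. */
static int set_indir_table(unsigned int *dst, const unsigned int *src,
                           unsigned int num_rx_queues)
{
    unsigned int i;

    for (i = 0; i < ITAB_NUM; i++)
        if (src[i] >= num_rx_queues)
            return -1;          /* reject; table untouched */

    for (i = 0; i < ITAB_NUM; i++)
        dst[i] = src[i];
    return 0;
}

int main(void)
{
    unsigned int cur[ITAB_NUM], req[ITAB_NUM];
    unsigned int i;

    for (i = 0; i < ITAB_NUM; i++)
        req[i] = i % 4;

    printf("valid table: %d\n", set_indir_table(cur, req, 4));
    req[0] = 7;   /* out of range for 4 rx queues */
    printf("bad table:   %d\n", set_indir_table(cur, req, 4));
    return 0;
}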
max_num_vrss_chns; + device_info.max_num_vrss_chns = min_t(u32, VRSS_CHANNEL_DEFAULT, + num_online_cpus()); ret = rndis_filter_device_add(dev, &device_info); if (ret != 0) { netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); - netvsc_free_netdev(net); + free_netdev(net); hv_set_drvdata(dev, NULL); return ret; } memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); + /* hw_features computed in rndis_filter_device_add */ + net->features = net->hw_features | + NETIF_F_HIGHDMA | NETIF_F_SG | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + net->vlan_features = net->features; + nvdev = net_device_ctx->nvdev; netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); - netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE); /* MTU range: 68 - 1500 or 65521 */ net->min_mtu = NETVSC_MTU_MIN; @@ -1407,8 +1521,8 @@ static int netvsc_probe(struct hv_device *dev, ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); - rndis_filter_device_remove(dev); - netvsc_free_netdev(net); + rndis_filter_device_remove(dev, nvdev); + free_netdev(net); } return ret; @@ -1418,7 +1532,6 @@ static int netvsc_remove(struct hv_device *dev) { struct net_device *net; struct net_device_context *ndev_ctx; - struct netvsc_device *net_device; net = hv_get_drvdata(dev); @@ -1428,7 +1541,6 @@ static int netvsc_remove(struct hv_device *dev) } ndev_ctx = netdev_priv(net); - net_device = ndev_ctx->nvdev; /* Avoid racing with netvsc_change_mtu()/netvsc_set_channels() * removing the device. @@ -1449,11 +1561,11 @@ static int netvsc_remove(struct hv_device *dev) * Call to the vsc driver to let it know that the device is being * removed */ - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, ndev_ctx->nvdev); hv_set_drvdata(dev, NULL); - netvsc_free_netdev(net); + free_netdev(net); return 0; } diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 8d90904e0e49..19356f56b7b1 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -57,6 +57,14 @@ struct rndis_request { u8 request_ext[RNDIS_EXT_LEN]; }; +static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, + 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, + 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, + 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa +}; + static struct rndis_device *get_rndis_device(void) { struct rndis_device *device; @@ -124,7 +132,7 @@ static void put_rndis_request(struct rndis_device *dev, } static void dump_rndis_message(struct hv_device *hv_dev, - struct rndis_message *rndis_msg) + const struct rndis_message *rndis_msg) { struct net_device *netdev = hv_get_drvdata(hv_dev); @@ -339,102 +347,78 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type) return NULL; } -static int rndis_filter_receive_data(struct rndis_device *dev, - struct rndis_message *msg, - struct hv_netvsc_packet *pkt, - void **data, - struct vmbus_channel *channel) +static int rndis_filter_receive_data(struct net_device *ndev, + struct rndis_device *dev, + struct rndis_message *msg, + struct vmbus_channel *channel, + void *data, u32 data_buflen) { - struct rndis_packet *rndis_pkt; + struct rndis_packet *rndis_pkt = &msg->msg.pkt; + const struct ndis_tcp_ip_checksum_info *csum_info; + const struct ndis_pkt_8021q_info *vlan; u32 data_offset; - struct ndis_pkt_8021q_info *vlan; - struct ndis_tcp_ip_checksum_info 
*csum_info; - u16 vlan_tci = 0; - struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); - - rndis_pkt = &msg->msg.pkt; /* Remove the rndis header and pass it back up the stack */ data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset; - pkt->total_data_buflen -= data_offset; + data_buflen -= data_offset; /* * Make sure we got a valid RNDIS message, now total_data_buflen * should be the data packet size plus the trailer padding size */ - if (pkt->total_data_buflen < rndis_pkt->data_len) { + if (unlikely(data_buflen < rndis_pkt->data_len)) { netdev_err(dev->ndev, "rndis message buffer " "overflow detected (got %u, min %u)" "...dropping this message!\n", - pkt->total_data_buflen, rndis_pkt->data_len); + data_buflen, rndis_pkt->data_len); return NVSP_STAT_FAIL; } + vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO); + /* * Remove the rndis trailer padding from rndis packet message * rndis_pkt->data_len tell us the real data length, we only copy * the data packet to the stack, without the rndis trailer padding */ - pkt->total_data_buflen = rndis_pkt->data_len; - *data = (void *)((unsigned long)(*data) + data_offset); - - vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO); - if (vlan) { - vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid | - (vlan->pri << VLAN_PRIO_SHIFT); - } - + data = (void *)((unsigned long)data + data_offset); csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO); - return netvsc_recv_callback(net_device_ctx->device_ctx, pkt, data, - csum_info, channel, vlan_tci); + return netvsc_recv_callback(ndev, channel, + data, rndis_pkt->data_len, + csum_info, vlan); } -int rndis_filter_receive(struct hv_device *dev, - struct hv_netvsc_packet *pkt, - void **data, - struct vmbus_channel *channel) +int rndis_filter_receive(struct net_device *ndev, + struct netvsc_device *net_dev, + struct hv_device *dev, + struct vmbus_channel *channel, + void *data, u32 buflen) { - struct net_device *ndev = hv_get_drvdata(dev); struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *net_dev = net_device_ctx->nvdev; - struct rndis_device *rndis_dev; - struct rndis_message *rndis_msg; - int ret = 0; - - if (!net_dev) { - ret = NVSP_STAT_FAIL; - goto exit; - } + struct rndis_device *rndis_dev = net_dev->extension; + struct rndis_message *rndis_msg = data; /* Make sure the rndis device state is initialized */ - if (!net_dev->extension) { - netdev_err(ndev, "got rndis message but no rndis device - " - "dropping this message!\n"); - ret = NVSP_STAT_FAIL; - goto exit; + if (unlikely(!rndis_dev)) { + netif_err(net_device_ctx, rx_err, ndev, + "got rndis message but no rndis device!\n"); + return NVSP_STAT_FAIL; } - rndis_dev = (struct rndis_device *)net_dev->extension; - if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) { - netdev_err(ndev, "got rndis message but rndis device " - "uninitialized...dropping this message!\n"); - ret = NVSP_STAT_FAIL; - goto exit; + if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) { + netif_err(net_device_ctx, rx_err, ndev, + "got rndis message uninitialized\n"); + return NVSP_STAT_FAIL; } - rndis_msg = *data; - - if (netif_msg_rx_err(net_device_ctx)) + if (netif_msg_rx_status(net_device_ctx)) dump_rndis_message(dev, rndis_msg); switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: - /* data msg */ - ret = rndis_filter_receive_data(rndis_dev, rndis_msg, pkt, - data, channel); - break; - + return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg, + channel, data, buflen); case RNDIS_MSG_INIT_C: case RNDIS_MSG_QUERY_C: case 
RNDIS_MSG_SET_C: @@ -454,8 +438,7 @@ int rndis_filter_receive(struct hv_device *dev, break; } -exit: - return ret; + return 0; } static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, @@ -485,7 +468,35 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, query->info_buflen = 0; query->dev_vc_handle = 0; - if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) { + if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) { + struct net_device_context *ndevctx = netdev_priv(dev->ndev); + struct netvsc_device *nvdev = ndevctx->nvdev; + struct ndis_offload *hwcaps; + u32 nvsp_version = nvdev->nvsp_version; + u8 ndis_rev; + size_t size; + + if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) { + ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3; + size = NDIS_OFFLOAD_SIZE; + } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) { + ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2; + size = NDIS_OFFLOAD_SIZE_6_1; + } else { + ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1; + size = NDIS_OFFLOAD_SIZE_6_0; + } + + request->request_msg.msg_len += size; + query->info_buflen = size; + hwcaps = (struct ndis_offload *) + ((unsigned long)query + query->info_buf_offset); + + hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD; + hwcaps->header.revision = ndis_rev; + hwcaps->header.size = size; + + } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) { struct ndis_recv_scale_cap *cap; request->request_msg.msg_len += @@ -526,6 +537,44 @@ cleanup: return ret; } +/* Get the hardware offload capabilities */ +static int +rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps) +{ + u32 caps_len = sizeof(*caps); + int ret; + + memset(caps, 0, sizeof(*caps)); + + ret = rndis_filter_query_device(dev, + OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES, + caps, &caps_len); + if (ret) + return ret; + + if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) { + netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n", + caps->header.type); + return -EINVAL; + } + + if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) { + netdev_warn(dev->ndev, "invalid NDIS objrev %x\n", + caps->header.revision); + return -EINVAL; + } + + if (caps->header.size > caps_len || + caps->header.size < NDIS_OFFLOAD_SIZE_6_0) { + netdev_warn(dev->ndev, + "invalid NDIS objsize %u, data size %u\n", + caps->header.size, caps_len); + return -EINVAL; + } + + return 0; +} + static int rndis_filter_query_device_mac(struct rndis_device *dev) { u32 size = ETH_ALEN; @@ -663,23 +712,15 @@ cleanup: return ret; } -static const u8 netvsc_hash_key[] = { - 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, - 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, - 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, - 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, - 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa -}; -#define HASH_KEYLEN ARRAY_SIZE(netvsc_hash_key) - -static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) +int rndis_filter_set_rss_param(struct rndis_device *rdev, + const u8 *rss_key, int num_queue) { struct net_device *ndev = rdev->ndev; struct rndis_request *request; struct rndis_set_request *set; struct rndis_set_complete *set_complete; u32 extlen = sizeof(struct ndis_recv_scale_param) + - 4*ITAB_NUM + HASH_KEYLEN; + 4 * ITAB_NUM + NETVSC_HASH_KEYLEN; struct ndis_recv_scale_param *rssp; u32 *itab; u8 *keyp; @@ -707,19 +748,18 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) NDIS_HASH_TCP_IPV6; rssp->indirect_tabsize = 4*ITAB_NUM; rssp->indirect_taboffset = sizeof(struct 
ndis_recv_scale_param); - rssp->hashkey_size = HASH_KEYLEN; + rssp->hashkey_size = NETVSC_HASH_KEYLEN; rssp->kashkey_offset = rssp->indirect_taboffset + rssp->indirect_tabsize; /* Set indirection table entries */ itab = (u32 *)(rssp + 1); for (i = 0; i < ITAB_NUM; i++) - itab[i] = i % num_queue; + itab[i] = rdev->ind_table[i]; /* Set hask key values */ keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset); - for (i = 0; i < HASH_KEYLEN; i++) - keyp[i] = netvsc_hash_key[i]; + memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN); ret = rndis_filter_send_request(rdev, request); if (ret != 0) @@ -727,7 +767,9 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; - if (set_complete->status != RNDIS_STATUS_SUCCESS) { + if (set_complete->status == RNDIS_STATUS_SUCCESS) + memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); + else { netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", set_complete->status); ret = -EINVAL; @@ -778,7 +820,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) struct rndis_request *request; struct rndis_set_request *set; struct rndis_set_complete *set_complete; - u32 status; int ret; request = get_rndis_request(dev, RNDIS_MSG_SET, @@ -805,8 +846,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; - status = set_complete->status; - cleanup: if (request) put_rndis_request(dev, request); @@ -864,6 +903,23 @@ cleanup: return ret; } +static bool netvsc_device_idle(const struct netvsc_device *nvdev) +{ + int i; + + if (atomic_read(&nvdev->num_outstanding_recvs) > 0) + return false; + + for (i = 0; i < nvdev->num_chn; i++) { + const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; + + if (atomic_read(&nvchan->queue_sends) > 0) + return false; + } + + return true; +} + static void rndis_filter_halt_device(struct rndis_device *dev) { struct rndis_request *request; @@ -894,9 +950,7 @@ cleanup: spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags); /* Wait for all send completions */ - wait_event(nvdev->wait_drain, - atomic_read(&nvdev->num_outstanding_sends) == 0 && - atomic_read(&nvdev->num_outstanding_recvs) == 0); + wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev)); if (request) put_rndis_request(dev, request); @@ -948,18 +1002,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) if (chn_index >= nvscdev->num_chn) return; - set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) * - NETVSC_PACKET_SIZE); - - nvscdev->mrc[chn_index].buf = vzalloc(NETVSC_RECVSLOT_MAX * - sizeof(struct recv_comp_data)); + nvscdev->chan_table[chn_index].mrc.buf + = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE, nvscdev->ring_size * PAGE_SIZE, NULL, 0, netvsc_channel_cb, new_sc); if (ret == 0) - nvscdev->chn_table[chn_index] = new_sc; + nvscdev->chan_table[chn_index].channel = new_sc; spin_lock_irqsave(&nvscdev->sc_lock, flags); nvscdev->num_sc_offered--; @@ -969,24 +1020,25 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) } int rndis_filter_device_add(struct hv_device *dev, - void *additional_info) + struct netvsc_device_info *device_info) { - int ret; struct net_device *net = hv_get_drvdata(dev); struct net_device_context *net_device_ctx = netdev_priv(net); struct netvsc_device *net_device; struct 
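The new netvsc_device_idle above folds the per-channel send counters and the receive counter into one predicate, which the halt path then hands to wait_event() to block until the device has drained. A userspace model of that predicate over C11 atomics; the channel count and field names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_CHN 4

struct chan { atomic_int queue_sends; };

struct dev {
    atomic_int outstanding_recvs;
    struct chan chan[NUM_CHN];
};

/* Idle only when every channel has drained; this is the condition a
 * teardown path would wait on before freeing the device. */
static bool device_idle(struct dev *d)
{
    int i;

    if (atomic_load(&d->outstanding_recvs) > 0)
        return false;

    for (i = 0; i < NUM_CHN; i++)
        if (atomic_load(&d->chan[i].queue_sends) > 0)
            return false;

    return true;
}

int main(void)
{
    static struct dev d;   /* zero-initialized: everything drained */

    printf("idle: %d\n", device_idle(&d));
    atomic_fetch_add(&d.chan[2].queue_sends, 1);  /* a send in flight */
    printf("idle: %d\n", device_idle(&d));
    return 0;
}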
rndis_device *rndis_device; - struct netvsc_device_info *device_info = additional_info; + struct ndis_offload hwcaps; struct ndis_offload_params offloads; struct nvsp_message *init_packet; struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); + unsigned int gso_max_size = GSO_MAX_SIZE; u32 mtu, size; u32 num_rss_qs; u32 sc_delta; const struct cpumask *node_cpu_mask; u32 num_possible_rss_qs; unsigned long flags; + int i, ret; rndis_device = get_rndis_device(); if (!rndis_device) @@ -997,7 +1049,7 @@ int rndis_filter_device_add(struct hv_device *dev, * NOTE! Once the channel is created, we may get a receive callback * (RndisFilterOnReceive()) before this call is completed */ - ret = netvsc_device_add(dev, additional_info); + ret = netvsc_device_add(dev, device_info); if (ret != 0) { kfree(rndis_device); return ret; @@ -1016,7 +1068,7 @@ int rndis_filter_device_add(struct hv_device *dev, /* Send the rndis initialization message */ ret = rndis_filter_init_device(rndis_device); if (ret != 0) { - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, net_device); return ret; } @@ -1031,25 +1083,71 @@ int rndis_filter_device_add(struct hv_device *dev, /* Get the mac address */ ret = rndis_filter_query_device_mac(rndis_device); if (ret != 0) { - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, net_device); return ret; } memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); - /* Turn on the offloads; the host supports all of the relevant - * offloads. - */ + /* Find HW offload capabilities */ + ret = rndis_query_hwcaps(rndis_device, &hwcaps); + if (ret != 0) { + rndis_filter_device_remove(dev, net_device); + return ret; + } + + /* A value of zero means "no change"; now turn on what we want. */ memset(&offloads, 0, sizeof(struct ndis_offload_params)); - /* A value of zero means "no change"; now turn on what we - * want. 
- */ - offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; + + /* Linux does not care about IP checksum, always does in kernel */ + offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED; + + /* Compute tx offload settings based on hw capabilities */ + net->hw_features = NETIF_F_RXCSUM; + + if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) { + /* Can checksum TCP */ + net->hw_features |= NETIF_F_IP_CSUM; + net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP; + + offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; + + if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) { + offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; + net->hw_features |= NETIF_F_TSO; + + if (hwcaps.lsov2.ip4_maxsz < gso_max_size) + gso_max_size = hwcaps.lsov2.ip4_maxsz; + } + + if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) { + offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; + net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP; + } + } + + if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) { + net->hw_features |= NETIF_F_IPV6_CSUM; + + offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; + net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP; + + if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) && + (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) { + offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; + net->hw_features |= NETIF_F_TSO6; + + if (hwcaps.lsov2.ip6_maxsz < gso_max_size) + gso_max_size = hwcaps.lsov2.ip6_maxsz; + } + + if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) { + offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; + net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP; + } + } + + netif_set_gso_max_size(net, gso_max_size); ret = rndis_filter_set_offload_params(net, &offloads); if (ret) @@ -1094,19 +1192,16 @@ int rndis_filter_device_add(struct hv_device *dev, net_device->num_chn = min(num_possible_rss_qs, num_rss_qs); num_rss_qs = net_device->num_chn - 1; + + for (i = 0; i < ITAB_NUM; i++) + rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, + net_device->num_chn); + net_device->num_sc_offered = num_rss_qs; if (net_device->num_chn == 1) goto out; - net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) * - NETVSC_PACKET_SIZE); - if (!net_device->sub_cb_buf) { - net_device->num_chn = 1; - dev_info(&dev->device, "No memory for subchannels.\n"); - goto out; - } - vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); init_packet = &net_device->channel_init_pkt; @@ -1132,7 +1227,8 @@ int rndis_filter_device_add(struct hv_device *dev, net_device->num_chn = 1 + init_packet->msg.v5_msg.subchn_comp.num_subchannels; - ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn); + ret = rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, + net_device->num_chn); /* * Set the number of sub-channels to be received. 
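The hunk above stops assuming the host supports every offload: feature flags are derived from the queried capability structure, and the GSO limit is clamped to the hardware maximum. A reduced sketch of that derivation; the struct layout, flag bits, and 64 KiB default are assumptions standing in for the NDIS capability report:

#include <stdint.h>
#include <stdio.h>

#define GSO_MAX_SIZE 65536u   /* assumption: stack default */

/* Illustrative subset of a device capability report. */
struct hw_caps {
    int tcp4_csum;        /* can checksum IPv4 TCP */
    int tcp4_lso;         /* can segment IPv4 TCP */
    uint32_t lso4_maxsz;  /* hardware segmentation limit */
};

#define F_IP_CSUM (1u << 0)
#define F_TSO     (1u << 1)

/* Advertise only what the device reported, and shrink the GSO limit
 * to the hardware maximum when segmentation is enabled. */
static uint32_t features_from_caps(const struct hw_caps *c,
                                   uint32_t *gso_max)
{
    uint32_t features = 0;

    *gso_max = GSO_MAX_SIZE;
    if (c->tcp4_csum) {
        features |= F_IP_CSUM;
        if (c->tcp4_lso) {
            features |= F_TSO;
            if (c->lso4_maxsz < *gso_max)
                *gso_max = c->lso4_maxsz;
        }
    }
    return features;
}

int main(void)
{
    struct hw_caps c = { .tcp4_csum = 1, .tcp4_lso = 1,
                         .lso4_maxsz = 62780 };
    uint32_t gso, f = features_from_caps(&c, &gso);

    printf("features %#x, gso_max %u\n", f, gso);
    return 0;
}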
@@ -1152,13 +1248,13 @@ out: return 0; /* return 0 because primary channel can be used alone */ err_dev_remv: - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, net_device); return ret; } -void rndis_filter_device_remove(struct hv_device *dev) +void rndis_filter_device_remove(struct hv_device *dev, + struct netvsc_device *net_dev) { - struct netvsc_device *net_dev = hv_device_to_netvsc_device(dev); struct rndis_device *rndis_dev = net_dev->extension; /* If not all subchannel offers are complete, wait for them until diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 46d53a6c8cf8..76ba7ecfe142 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1715,9 +1715,9 @@ static int at86rf230_probe(struct spi_device *spi) /* Reset */ if (gpio_is_valid(rstn)) { udelay(1); - gpio_set_value(rstn, 0); + gpio_set_value_cansleep(rstn, 0); udelay(1); - gpio_set_value(rstn, 1); + gpio_set_value_cansleep(rstn, 1); usleep_range(120, 240); } diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 1253f864737a..ef688518ad77 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -117,13 +117,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg) { struct usb_device *usb_dev = atusb->usb_dev; int ret; + uint8_t *buffer; uint8_t value; + buffer = kmalloc(1, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg); ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, - 0, reg, &value, 1, 1000); - return ret >= 0 ? value : ret; + 0, reg, buffer, 1, 1000); + + if (ret >= 0) { + value = buffer[0]; + kfree(buffer); + return value; + } else { + kfree(buffer); + return ret; + } } static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask, @@ -549,13 +562,6 @@ static int atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { struct atusb *atusb = hw->priv; - struct device *dev = &atusb->usb_dev->dev; - - if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) { - dev_info(dev, "Automatic frame retransmission is only available from " - "firmware version 0.3. 
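The atusb hunks above replace stack buffers with kmalloc'd ones because USB control transfers may DMA into the buffer, and DMA to an on-stack array is not safe. A userspace model of the reworked read-one-register shape (allocate, transfer, copy out, free, return value-or-negative-errno); the transfer itself is faked here:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the USB control transfer; the real one DMAs into buf,
 * which is why buf must be heap memory, never a stack array. */
static int fake_control_msg(unsigned char *buf, size_t len)
{
    memset(buf, 0xab, len);   /* pretend the device answered */
    return (int)len;
}

/* Fold buffer contents and transfer status into a single
 * value-or-negative-errno return. */
static int read_reg(void)
{
    unsigned char *buffer = malloc(1);
    int ret, value;

    if (!buffer)
        return -ENOMEM;

    ret = fake_control_msg(buffer, 1);
    value = buffer[0];
    free(buffer);

    return ret >= 0 ? value : ret;
}

int main(void)
{
    printf("reg = %#x\n", read_reg());
    return 0;
}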
Please update if you want this feature."); - return -EINVAL; - } return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries); } @@ -608,9 +614,13 @@ static const struct ieee802154_ops atusb_ops = { static int atusb_get_and_show_revision(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; - unsigned char buffer[3]; + unsigned char *buffer; int ret; + buffer = kmalloc(3, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + /* Get a couple of the ATMega Firmware values */ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0, @@ -631,15 +641,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb) dev_info(&usb_dev->dev, "Please update to version 0.2 or newer"); } + kfree(buffer); return ret; } static int atusb_get_and_show_build(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; - char build[ATUSB_BUILD_SIZE + 1]; + char *build; int ret; + build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL); + if (!build) + return -ENOMEM; + ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000); @@ -648,6 +663,7 @@ static int atusb_get_and_show_build(struct atusb *atusb) dev_info(&usb_dev->dev, "Firmware: build %s\n", build); } + kfree(build); return ret; } @@ -698,7 +714,7 @@ fail: static int atusb_set_extended_addr(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; - unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN]; + unsigned char *buffer; __le64 extended_addr; u64 addr; int ret; @@ -710,12 +726,20 @@ static int atusb_set_extended_addr(struct atusb *atusb) return 0; } + buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + /* Firmware is new enough so we fetch the address from EEPROM */ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0, buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000); - if (ret < 0) - dev_err(&usb_dev->dev, "failed to fetch extended address\n"); + if (ret < 0) { + dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n"); + ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr); + kfree(buffer); + return ret; + } memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN); /* Check if read address is not empty and the unicast bit is set correctly */ @@ -729,6 +753,7 @@ static int atusb_set_extended_addr(struct atusb *atusb) &addr); } + kfree(buffer); return ret; } @@ -770,8 +795,7 @@ static int atusb_probe(struct usb_interface *interface, hw->parent = &usb_dev->dev; hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | - IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS | - IEEE802154_HW_FRAME_RETRIES; + IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS; hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | WPAN_PHY_FLAG_CCA_MODE; @@ -800,6 +824,9 @@ static int atusb_probe(struct usb_interface *interface, atusb_get_and_show_build(atusb); atusb_set_extended_addr(atusb); + if (atusb->fw_ver_maj >= 0 && atusb->fw_ver_min >= 3) + hw->flags |= IEEE802154_HW_FRAME_RETRIES; + ret = atusb_get_and_clear_error(atusb); if (ret) { dev_err(&atusb->usb_dev->dev, diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 92b221a03350..95b18f4602cf 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -102,8 +102,8 @@ static int ipvlan_port_create(struct net_device *dev) return -EINVAL; } - if 
(netif_is_macvlan_port(dev)) { - netdev_err(dev, "Master is a macvlan port.\n"); + if (netdev_is_rx_handler_busy(dev)) { + netdev_err(dev, "Device is already in use.\n"); return -EBUSY; } @@ -550,6 +550,9 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, err = ida_simple_get(&port->ida, port->dev_id_start, 0xFFFE, GFP_KERNEL); if (err < 0) + err = ida_simple_get(&port->ida, 0x1, port->dev_id_start, + GFP_KERNEL); + if (err < 0) goto destroy_ipvlan_port; dev->dev_id = err; /* Increment id-base to the next slot for the future assignment */ diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index be5bb0b7f29c..3151b580dbd6 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c @@ -22,7 +22,7 @@ static int max_rate = 57600; static int max_rate = 115200; #endif -static void turnaround_delay(unsigned long last_jif, int mtt) +static void turnaround_delay(int mtt) { long ticks; @@ -209,7 +209,6 @@ static void bfin_sir_rx_chars(struct net_device *dev) UART_CLEAR_LSR(port); ch = UART_GET_CHAR(port); async_unwrap_char(dev, &self->stats, &self->rx_buff, ch); - dev->last_rx = jiffies; } static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id) @@ -510,7 +509,7 @@ static void bfin_sir_send_work(struct work_struct *work) int tx_cnt = 10; while (bfin_sir_is_receiving(dev) && --tx_cnt) - turnaround_delay(dev->last_rx, self->mtt); + turnaround_delay(self->mtt); bfin_sir_stop_rx(port); diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index e3fe9a286136..fede6864c737 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c @@ -547,7 +547,6 @@ static void sh_sir_rx(struct sh_sir_self *self) async_unwrap_char(self->ndev, &self->ndev->stats, &self->rx_buff, (u8)data); - self->ndev->last_rx = jiffies; if (EOFD & sh_sir_read(self, IRIF_SIR_FRM)) continue; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 440ab3d8adf7..cbfc1be23a0e 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1110,7 +1110,7 @@ static int macvlan_port_create(struct net_device *dev) if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) return -EINVAL; - if (netif_is_ipvlan_port(dev)) + if (netdev_is_rx_handler_busy(dev)) return -EBUSY; port = kzalloc(sizeof(*port), GFP_KERNEL); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 5c26653eceb5..402618565838 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -825,7 +825,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, return -EINVAL; if (virtio_net_hdr_from_skb(skb, &vnet_hdr, - macvtap_is_little_endian(q))) + macvtap_is_little_endian(q), true)) BUG(); if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index d361835b315d..8dbd59baa34d 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -279,6 +279,7 @@ config MARVELL_PHY config MESON_GXL_PHY tristate "Amlogic Meson GXL Internal PHY" + depends on ARCH_MESON || COMPILE_TEST ---help--- Currently has a driver for the Amlogic Meson GXL Internal PHY diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index e741bf614c4e..b0492ef2cdaa 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver"); MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); MODULE_LICENSE("GPL"); +static int bcm63xx_config_intr(struct phy_device *phydev) +{ + int reg, err; + + reg = phy_read(phydev, MII_BCM63XX_IR); + if (reg < 0) 
+ return reg; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + reg &= ~MII_BCM63XX_IR_GMASK; + else + reg |= MII_BCM63XX_IR_GMASK; + + err = phy_write(phydev, MII_BCM63XX_IR, reg); + return err; +} + static int bcm63xx_config_init(struct phy_device *phydev) { int reg, err; @@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = { .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, - .config_intr = bcm_phy_config_intr, + .config_intr = bcm63xx_config_intr, }, { /* same phy as above, with just a different OUI */ .phy_id = 0x002bdc00, @@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = { .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, - .config_intr = bcm_phy_config_intr, + .config_intr = bcm63xx_config_intr, } }; module_phy_driver(bcm63xx_driver); diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 264b085d796b..aa01020ab1b9 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -167,6 +167,31 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev) return 0; } +static int bcm7xxx_28nm_a0_patch_afe_config_init(struct phy_device *phydev) +{ + /* +1 RC_CAL codes for RL centering for both LT and HT conditions */ + bcm_phy_write_misc(phydev, AFE_RXCONFIG_2, 0xd003); + + /* Cut master bias current by 2% to compensate for RC_CAL offset */ + bcm_phy_write_misc(phydev, DSP_TAP10, 0x791b); + + /* Improve hybrid leakage */ + bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x10e3); + + /* Change rx_on_tune 8 to 0xf */ + bcm_phy_write_misc(phydev, 0x21, 0x2, 0x87f6); + + /* Change 100Tx EEE bandwidth */ + bcm_phy_write_misc(phydev, 0x22, 0x2, 0x017d); + + /* Enable ffe zero detection for Vitesse interoperability */ + bcm_phy_write_misc(phydev, 0x26, 0x2, 0x0015); + + r_rc_cal_reset(phydev); + + return 0; +} + static int bcm7xxx_28nm_config_init(struct phy_device *phydev) { u8 rev = PHY_BRCM_7XXX_REV(phydev->dev_flags); @@ -174,6 +199,12 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) u8 count; int ret = 0; + /* Newer devices have moved the revision information back into a + * standard location in MII_PHYS_ID[23] + */ + if (rev == 0) + rev = phydev->phy_id & ~phydev->drv->phy_id_mask; + pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n", phydev_name(phydev), phydev->drv->name, rev, patch); @@ -197,6 +228,9 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) case 0x10: ret = bcm7xxx_28nm_e0_plus_afe_config_init(phydev); break; + case 0x01: + ret = bcm7xxx_28nm_a0_patch_afe_config_init(phydev); + break; default: break; } @@ -416,6 +450,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"), + BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), @@ -430,6 +465,7 @@ static struct phy_driver bcm7xxx_driver[] = { static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { { PHY_ID_BCM7250, 0xfffffff0, }, + { PHY_ID_BCM7278, 0xfffffff0, }, { PHY_ID_BCM7364, 0xfffffff0, }, { PHY_ID_BCM7366, 0xfffffff0, }, { PHY_ID_BCM7346, 0xfffffff0, }, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 4223e35490b0..794b9ec81ba5 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ 
-30,6 +30,22 @@ MODULE_DESCRIPTION("Broadcom PHY driver"); MODULE_AUTHOR("Maciej W. Rozycki"); MODULE_LICENSE("GPL"); +static int bcm54210e_config_init(struct phy_device *phydev) +{ + int val; + + val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC); + val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN; + val |= MII_BCM54XX_AUXCTL_MISC_WREN; + bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, val); + + val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL); + val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN; + bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val); + + return 0; +} + static int bcm54810_config(struct phy_device *phydev) { int rc, val; @@ -230,7 +246,11 @@ static int bcm54xx_config_init(struct phy_device *phydev) (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE)) bcm54xx_adjust_rxrefclk(phydev); - if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) { + if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E) { + err = bcm54210e_config_init(phydev); + if (err) + return err; + } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) { err = bcm54810_config(phydev); if (err) return err; @@ -395,12 +415,10 @@ static int bcm54612e_config_aneg(struct phy_device *phydev) (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) { u16 reg; - /* Errata: reads require filling in the write selector field */ - bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, - MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC); - reg = phy_read(phydev, MII_BCM54XX_AUX_CTL); + reg = bcm54xx_auxctl_read(phydev, + MII_BCM54XX_AUXCTL_SHDWSEL_MISC); /* Disable RXD to RXC delay (default set) */ - reg &= ~MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW; + reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN; /* Clear shadow selector field */ reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK; bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, @@ -544,6 +562,17 @@ static struct phy_driver broadcom_drivers[] = { .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { + .phy_id = PHY_ID_BCM54210E, + .phy_id_mask = 0xfffffff0, + .name = "Broadcom BCM54210E", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = bcm54xx_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = bcm_phy_ack_intr, + .config_intr = bcm_phy_config_intr, +}, { .phy_id = PHY_ID_BCM5461, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5461", @@ -682,6 +711,7 @@ module_phy_driver(broadcom_drivers); static struct mdio_device_id __maybe_unused broadcom_tbl[] = { { PHY_ID_BCM5411, 0xfffffff0 }, { PHY_ID_BCM5421, 0xfffffff0 }, + { PHY_ID_BCM54210E, 0xfffffff0 }, { PHY_ID_BCM5461, 0xfffffff0 }, { PHY_ID_BCM54612E, 0xfffffff0 }, { PHY_ID_BCM54616S, 0xfffffff0 }, diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 800b39f06279..a10d0e7fc5f7 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -17,6 +17,7 @@ #include <linux/phy.h> #define TI_DP83848C_PHY_ID 0x20005ca0 +#define TI_DP83620_PHY_ID 0x20005ce0 #define NS_DP83848C_PHY_ID 0x20005c90 #define TLK10X_PHY_ID 0x2000a210 #define TI_DP83822_PHY_ID 0x2000a240 @@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev) static struct mdio_device_id __maybe_unused dp83848_tbl[] = { { TI_DP83848C_PHY_ID, 0xfffffff0 }, { NS_DP83848C_PHY_ID, 0xfffffff0 }, + { TI_DP83620_PHY_ID, 0xfffffff0 }, { TLK10X_PHY_ID, 0xfffffff0 }, { TI_DP83822_PHY_ID, 0xfffffff0 }, { } @@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl); static struct phy_driver 
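The broadcom.c changes route every MISC shadow-register access through bcm54xx_auxctl_read()/bcm54xx_auxctl_write(), with the read helper assumed to handle the shadow read-selector sequence (the old open-coded errata workaround) internally. A sketch of the resulting read-modify-write that disables the RGMII RXC skew, reusing the constants from the hunks above:

static int example_disable_rgmii_skew(struct phy_device *phydev)
{
	int reg = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);

	if (reg < 0)
		return reg;
	reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN; /* no RXD-to-RXC delay */
	reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK;               /* clear selector bits */
	return bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
				    MII_BCM54XX_AUXCTL_MISC_WREN | reg);
}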
dp83848_driver[] = { DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"), + DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"), DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"), }; diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index e84ae084e259..ca1b462bf7b2 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -132,12 +132,16 @@ static int dp83867_of_init(struct phy_device *phydev) ret = of_property_read_u32(of_node, "ti,rx-internal-delay", &dp83867->rx_id_delay); - if (ret) + if (ret && + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)) return ret; ret = of_property_read_u32(of_node, "ti,tx-internal-delay", &dp83867->tx_id_delay); - if (ret) + if (ret && + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) return ret; return of_property_read_u32(of_node, "ti,fifo-depth", diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index e269262471a4..a3e3733813a7 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -17,8 +17,10 @@ */ #include <linux/kernel.h> #include <linux/string.h> +#include <linux/ctype.h> #include <linux/errno.h> #include <linux/unistd.h> +#include <linux/hwmon.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> @@ -90,6 +92,17 @@ #define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4) #define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4)) +#define MII_88E1121_MISC_TEST 0x1a +#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK 0x1f00 +#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT 8 +#define MII_88E1510_MISC_TEST_TEMP_IRQ_EN BIT(7) +#define MII_88E1510_MISC_TEST_TEMP_IRQ BIT(6) +#define MII_88E1121_MISC_TEST_TEMP_SENSOR_EN BIT(5) +#define MII_88E1121_MISC_TEST_TEMP_MASK 0x1f + +#define MII_88E1510_TEMP_SENSOR 0x1b +#define MII_88E1510_TEMP_SENSOR_MASK 0xff + #define MII_88E1318S_PHY_MSCR1_REG 16 #define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6) @@ -172,6 +185,8 @@ static struct marvell_hw_stat marvell_hw_stats[] = { struct marvell_priv { u64 stats[ARRAY_SIZE(marvell_hw_stats)]; + char *hwmon_name; + struct device *hwmon_dev; }; static int marvell_ack_interrupt(struct phy_device *phydev) @@ -1192,7 +1207,8 @@ static int marvell_read_status(struct phy_device *phydev) int err; /* Check the fiber mode first */ - if (phydev->supported & SUPPORTED_FIBRE) { + if (phydev->supported & SUPPORTED_FIBRE && + phydev->interface != PHY_INTERFACE_MODE_SGMII) { err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER); if (err < 0) goto error; @@ -1467,6 +1483,371 @@ static void marvell_get_stats(struct phy_device *phydev, data[i] = marvell_get_stat(phydev, i); } +#ifdef CONFIG_HWMON +static int m88e1121_get_temp(struct phy_device *phydev, long *temp) +{ + int ret; + int val; + + *temp = 0; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + /* Enable temperature sensor */ + ret = phy_read(phydev, MII_88E1121_MISC_TEST); + if (ret < 0) + goto error; + + ret = phy_write(phydev, MII_88E1121_MISC_TEST, + ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); + if (ret < 0) + goto error; + + /* Wait for temperature to stabilize */ + usleep_range(10000, 12000); + + val = phy_read(phydev, MII_88E1121_MISC_TEST); + if (val < 0) { + ret = 
val; + goto error; + } + + /* Disable temperature sensor */ + ret = phy_write(phydev, MII_88E1121_MISC_TEST, + ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); + if (ret < 0) + goto error; + + *temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000; + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +static int m88e1121_hwmon_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *temp) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int err; + + switch (attr) { + case hwmon_temp_input: + err = m88e1121_get_temp(phydev, temp); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + +static umode_t m88e1121_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + return 0444; + default: + return 0; + } +} + +static u32 m88e1121_hwmon_chip_config[] = { + HWMON_C_REGISTER_TZ, + 0 +}; + +static const struct hwmon_channel_info m88e1121_hwmon_chip = { + .type = hwmon_chip, + .config = m88e1121_hwmon_chip_config, +}; + +static u32 m88e1121_hwmon_temp_config[] = { + HWMON_T_INPUT, + 0 +}; + +static const struct hwmon_channel_info m88e1121_hwmon_temp = { + .type = hwmon_temp, + .config = m88e1121_hwmon_temp_config, +}; + +static const struct hwmon_channel_info *m88e1121_hwmon_info[] = { + &m88e1121_hwmon_chip, + &m88e1121_hwmon_temp, + NULL +}; + +static const struct hwmon_ops m88e1121_hwmon_hwmon_ops = { + .is_visible = m88e1121_hwmon_is_visible, + .read = m88e1121_hwmon_read, +}; + +static const struct hwmon_chip_info m88e1121_hwmon_chip_info = { + .ops = &m88e1121_hwmon_hwmon_ops, + .info = m88e1121_hwmon_info, +}; + +static int m88e1510_get_temp(struct phy_device *phydev, long *temp) +{ + int ret; + + *temp = 0; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR); + if (ret < 0) + goto error; + + *temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000; + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp) +{ + int ret; + + *temp = 0; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + ret = phy_read(phydev, MII_88E1121_MISC_TEST); + if (ret < 0) + goto error; + + *temp = (((ret & MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) >> + MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT) * 5) - 25; + /* convert to mC */ + *temp *= 1000; + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +int m88e1510_set_temp_critical(struct phy_device *phydev, long temp) +{ + int ret; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + ret = phy_read(phydev, MII_88E1121_MISC_TEST); + if (ret < 0) + goto error; + + temp = temp / 1000; + temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); + ret = phy_write(phydev, MII_88E1121_MISC_TEST, + (ret & ~MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) | + (temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT)); + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +int m88e1510_get_temp_alarm(struct phy_device *phydev, 
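The raw-to-millidegree conversions in these helpers are easy to sanity-check by hand; the register values below are illustrative, not chip data:

/* m88e1121_get_temp:          raw 0x14 (20) -> (20 - 5) * 5000 = 75000 mC (75 C)
 * m88e1510_get_temp:          raw 0x64 (100) -> (100 - 25) * 1000 = 75000 mC
 * m88e1510_get_temp_critical: threshold field 20 -> (20 * 5) - 25 = 75 C
 * m88e1510_set_temp_critical: 75000 mC -> 75000 / 1000 = 75,
 *                             DIV_ROUND_CLOSEST(75, 5) + 5 = 20,
 *                             clamp_val(20, 0, 0x1f) = 20, a clean round trip.
 */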
long *alarm) +{ + int ret; + + *alarm = false; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + ret = phy_read(phydev, MII_88E1121_MISC_TEST); + if (ret < 0) + goto error; + *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ); + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +static int m88e1510_hwmon_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *temp) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int err; + + switch (attr) { + case hwmon_temp_input: + err = m88e1510_get_temp(phydev, temp); + break; + case hwmon_temp_crit: + err = m88e1510_get_temp_critical(phydev, temp); + break; + case hwmon_temp_max_alarm: + err = m88e1510_get_temp_alarm(phydev, temp); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + +static int m88e1510_hwmon_write(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long temp) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int err; + + switch (attr) { + case hwmon_temp_crit: + err = m88e1510_set_temp_critical(phydev, temp); + break; + default: + return -EOPNOTSUPP; + } + return err; +} + +static umode_t m88e1510_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_max_alarm: + return 0444; + case hwmon_temp_crit: + return 0644; + default: + return 0; + } +} + +static u32 m88e1510_hwmon_temp_config[] = { + HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM, + 0 +}; + +static const struct hwmon_channel_info m88e1510_hwmon_temp = { + .type = hwmon_temp, + .config = m88e1510_hwmon_temp_config, +}; + +static const struct hwmon_channel_info *m88e1510_hwmon_info[] = { + &m88e1121_hwmon_chip, + &m88e1510_hwmon_temp, + NULL +}; + +static const struct hwmon_ops m88e1510_hwmon_hwmon_ops = { + .is_visible = m88e1510_hwmon_is_visible, + .read = m88e1510_hwmon_read, + .write = m88e1510_hwmon_write, +}; + +static const struct hwmon_chip_info m88e1510_hwmon_chip_info = { + .ops = &m88e1510_hwmon_hwmon_ops, + .info = m88e1510_hwmon_info, +}; + +static int marvell_hwmon_name(struct phy_device *phydev) +{ + struct marvell_priv *priv = phydev->priv; + struct device *dev = &phydev->mdio.dev; + const char *devname = dev_name(dev); + size_t len = strlen(devname); + int i, j; + + priv->hwmon_name = devm_kzalloc(dev, len, GFP_KERNEL); + if (!priv->hwmon_name) + return -ENOMEM; + + for (i = j = 0; i < len && devname[i]; i++) { + if (isalnum(devname[i])) + priv->hwmon_name[j++] = devname[i]; + } + + return 0; +} + +static int marvell_hwmon_probe(struct phy_device *phydev, + const struct hwmon_chip_info *chip) +{ + struct marvell_priv *priv = phydev->priv; + struct device *dev = &phydev->mdio.dev; + int err; + + err = marvell_hwmon_name(phydev); + if (err) + return err; + + priv->hwmon_dev = devm_hwmon_device_register_with_info( + dev, priv->hwmon_name, phydev, chip, NULL); + + return PTR_ERR_OR_ZERO(priv->hwmon_dev); +} + +static int m88e1121_hwmon_probe(struct phy_device *phydev) +{ + return marvell_hwmon_probe(phydev, &m88e1121_hwmon_chip_info); +} + +static int m88e1510_hwmon_probe(struct phy_device *phydev) +{ + return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info); +} +#else +static int m88e1121_hwmon_probe(struct phy_device *phydev) +{ + return 0; +} + +static int m88e1510_hwmon_probe(struct 
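marvell_hwmon_name() copies only alphanumeric characters because the hwmon core refuses device names containing characters such as '-' or whitespace, and dev_name() of an MDIO device typically contains both ':' and '-'. A condensed sketch of the same sanitize-then-register flow (example_hwmon_register is an illustrative name; assumes <linux/ctype.h> and <linux/hwmon.h>):

static int example_hwmon_register(struct phy_device *phydev,
				  const struct hwmon_chip_info *chip)
{
	struct device *dev = &phydev->mdio.dev;
	const char *src = dev_name(dev);
	struct device *hwmon;
	const char *name;
	char buf[32];
	int i, j = 0;

	/* Keep only [a-zA-Z0-9]; hwmon rejects '-' and whitespace. */
	for (i = 0; src[i] && j < (int)sizeof(buf) - 1; i++)
		if (isalnum(src[i]))
			buf[j++] = src[i];
	buf[j] = '\0';

	/* The name must outlive this function; duplicate it devm-style. */
	name = devm_kstrdup(dev, buf, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	hwmon = devm_hwmon_device_register_with_info(dev, name, phydev,
						     chip, NULL);
	return PTR_ERR_OR_ZERO(hwmon);
}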
phy_device *phydev) +{ + return 0; +} +#endif + static int marvell_probe(struct phy_device *phydev) { struct marvell_priv *priv; @@ -1480,14 +1861,47 @@ static int marvell_probe(struct phy_device *phydev) return 0; } +static int m88e1121_probe(struct phy_device *phydev) +{ + int err; + + err = marvell_probe(phydev); + if (err) + return err; + + return m88e1121_hwmon_probe(phydev); +} + +static int m88e1510_probe(struct phy_device *phydev) +{ + int err; + + err = marvell_probe(phydev); + if (err) + return err; + + return m88e1510_hwmon_probe(phydev); +} + +static void marvell_remove(struct phy_device *phydev) +{ +#ifdef CONFIG_HWMON + + struct marvell_priv *priv = phydev->priv; + + if (priv && priv->hwmon_dev) + hwmon_device_unregister(priv->hwmon_dev); +#endif +} + static struct phy_driver marvell_drivers[] = { { .phy_id = MARVELL_PHY_ID_88E1101, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1101", .features = PHY_GBIT_FEATURES, - .probe = marvell_probe, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_init = &marvell_config_init, .config_aneg = &marvell_config_aneg, .read_status = &genphy_read_status, @@ -1559,7 +1973,8 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1121R", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .probe = marvell_probe, + .probe = &m88e1121_probe, + .remove = &marvell_remove, .config_init = &m88e1121_config_init, .config_aneg = &m88e1121_config_aneg, .read_status = &marvell_read_status, @@ -1671,13 +2086,16 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1510", .features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE, .flags = PHY_HAS_INTERRUPT, - .probe = marvell_probe, + .probe = &m88e1510_probe, + .remove = &marvell_remove, .config_init = &m88e1510_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, .did_interrupt = &m88e1121_did_interrupt, + .get_wol = &m88e1318_get_wol, + .set_wol = &m88e1318_set_wol, .resume = &marvell_resume, .suspend = &marvell_suspend, .get_sset_count = marvell_get_sset_count, @@ -1690,7 +2108,8 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1540", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .probe = marvell_probe, + .probe = m88e1510_probe, + .remove = &marvell_remove, .config_init = &marvell_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 27ab63064f95..7faa79b254ef 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -32,8 +32,7 @@ struct mdio_gpio_info { struct mdiobb_ctrl ctrl; - int mdc, mdio, mdo; - int mdc_active_low, mdio_active_low, mdo_active_low; + struct gpio_desc *mdc, *mdio, *mdo; }; static void *mdio_gpio_of_get_data(struct platform_device *pdev) @@ -80,16 +79,14 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) * assume the pin serves as pull-up. If direction is * output, the default value is high. 
*/ - gpio_set_value_cansleep(bitbang->mdo, - 1 ^ bitbang->mdo_active_low); + gpiod_set_value(bitbang->mdo, 1); return; } if (dir) - gpio_direction_output(bitbang->mdio, - 1 ^ bitbang->mdio_active_low); + gpiod_direction_output(bitbang->mdio, 1); else - gpio_direction_input(bitbang->mdio); + gpiod_direction_input(bitbang->mdio); } static int mdio_get(struct mdiobb_ctrl *ctrl) @@ -97,8 +94,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - return gpio_get_value_cansleep(bitbang->mdio) ^ - bitbang->mdio_active_low; + return gpiod_get_value(bitbang->mdio); } static void mdio_set(struct mdiobb_ctrl *ctrl, int what) @@ -107,11 +103,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) container_of(ctrl, struct mdio_gpio_info, ctrl); if (bitbang->mdo) - gpio_set_value_cansleep(bitbang->mdo, - what ^ bitbang->mdo_active_low); + gpiod_set_value(bitbang->mdo, what); else - gpio_set_value_cansleep(bitbang->mdio, - what ^ bitbang->mdio_active_low); + gpiod_set_value(bitbang->mdio, what); } static void mdc_set(struct mdiobb_ctrl *ctrl, int what) @@ -119,7 +113,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low); + gpiod_set_value(bitbang->mdc, what); } static struct mdiobb_ops mdio_gpio_ops = { @@ -137,6 +131,10 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, struct mii_bus *new_bus; struct mdio_gpio_info *bitbang; int i; + int mdc, mdio, mdo; + unsigned long mdc_flags = GPIOF_OUT_INIT_LOW; + unsigned long mdio_flags = GPIOF_DIR_IN; + unsigned long mdo_flags = GPIOF_OUT_INIT_HIGH; bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL); if (!bitbang) @@ -144,12 +142,20 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, bitbang->ctrl.ops = &mdio_gpio_ops; bitbang->ctrl.reset = pdata->reset; - bitbang->mdc = pdata->mdc; - bitbang->mdc_active_low = pdata->mdc_active_low; - bitbang->mdio = pdata->mdio; - bitbang->mdio_active_low = pdata->mdio_active_low; - bitbang->mdo = pdata->mdo; - bitbang->mdo_active_low = pdata->mdo_active_low; + mdc = pdata->mdc; + bitbang->mdc = gpio_to_desc(mdc); + if (pdata->mdc_active_low) + mdc_flags = GPIOF_OUT_INIT_HIGH | GPIOF_ACTIVE_LOW; + mdio = pdata->mdio; + bitbang->mdio = gpio_to_desc(mdio); + if (pdata->mdio_active_low) + mdio_flags |= GPIOF_ACTIVE_LOW; + mdo = pdata->mdo; + if (mdo) { + bitbang->mdo = gpio_to_desc(mdo); + if (pdata->mdo_active_low) + mdo_flags = GPIOF_OUT_INIT_LOW | GPIOF_ACTIVE_LOW; + } new_bus = alloc_mdio_bitbang(&bitbang->ctrl); if (!new_bus) @@ -174,20 +180,14 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, else strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE); - if (devm_gpio_request(dev, bitbang->mdc, "mdc")) + if (devm_gpio_request_one(dev, mdc, mdc_flags, "mdc")) goto out_free_bus; - if (devm_gpio_request(dev, bitbang->mdio, "mdio")) + if (devm_gpio_request_one(dev, mdio, mdio_flags, "mdio")) goto out_free_bus; - if (bitbang->mdo) { - if (devm_gpio_request(dev, bitbang->mdo, "mdo")) - goto out_free_bus; - gpio_direction_output(bitbang->mdo, 1); - gpio_direction_input(bitbang->mdio); - } - - gpio_direction_output(bitbang->mdc, 0); + if (mdo && devm_gpio_request_one(dev, mdo, mdo_flags, "mdo")) + goto out_free_bus; dev_set_drvdata(dev, new_bus); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 9a77289109b7..e55809c5beb7 100644 
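The mdio-gpio rework swaps raw GPIO numbers for gpio_desc handles: active-low polarity is declared once at request time via GPIOF_ACTIVE_LOW, so the bit-banging fast paths above can use plain gpiod_set_value()/gpiod_get_value() instead of XOR-ing the *_active_low flags on every edge. A condensed sketch of the request-time setup for one line (the MDC clock), using the same platform-data fields:

static int example_request_mdc(struct device *dev,
			       struct mdio_gpio_platform_data *pdata,
			       struct gpio_desc **desc)
{
	unsigned long flags = GPIOF_OUT_INIT_LOW;
	int ret;

	/* Polarity is folded into the descriptor, not the callers. */
	if (pdata->mdc_active_low)
		flags = GPIOF_OUT_INIT_HIGH | GPIOF_ACTIVE_LOW;

	ret = devm_gpio_request_one(dev, pdata->mdc, flags, "mdc");
	if (ret)
		return ret;

	/* Legacy pdata hands out numbers; convert once, then use gpiod_*. */
	*desc = gpio_to_desc(pdata->mdc);
	return *desc ? 0 : -EINVAL;
}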
--- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -1008,6 +1008,20 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, +}, { + .phy_id = PHY_ID_KSZ8795, + .phy_id_mask = MICREL_PHY_ID_MASK, + .name = "Micrel KSZ8795", + .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = kszphy_config_init, + .config_aneg = ksz8873mll_config_aneg, + .read_status = ksz8873mll_read_status, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, } }; module_phy_driver(ksphy_driver); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 48da6e93c3f7..7cc1b7dcfe05 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -29,6 +29,7 @@ #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/phy.h> +#include <linux/phy_led_triggers.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/mdio.h> @@ -649,14 +650,18 @@ void phy_start_machine(struct phy_device *phydev) * phy_trigger_machine - trigger the state machine to run * * @phydev: the phy_device struct + * @sync: indicate whether we should wait for the workqueue cancelation * * Description: There has been a change in state which requires that the * state machine runs. */ -static void phy_trigger_machine(struct phy_device *phydev) +static void phy_trigger_machine(struct phy_device *phydev, bool sync) { - cancel_delayed_work_sync(&phydev->state_queue); + if (sync) + cancel_delayed_work_sync(&phydev->state_queue); + else + cancel_delayed_work(&phydev->state_queue); queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0); } @@ -693,7 +698,7 @@ static void phy_error(struct phy_device *phydev) phydev->state = PHY_HALTED; mutex_unlock(&phydev->lock); - phy_trigger_machine(phydev); + phy_trigger_machine(phydev, false); } /** @@ -840,7 +845,7 @@ void phy_change(struct phy_device *phydev) } /* reschedule state queue work to run as soon as possible */ - phy_trigger_machine(phydev); + phy_trigger_machine(phydev, true); return; ignore: @@ -942,7 +947,7 @@ void phy_start(struct phy_device *phydev) if (do_resume) phy_resume(phydev); - phy_trigger_machine(phydev); + phy_trigger_machine(phydev, true); } EXPORT_SYMBOL(phy_start); diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c index fa62bdf2f526..94ca42e630bb 100644 --- a/drivers/net/phy/phy_led_triggers.c +++ b/drivers/net/phy/phy_led_triggers.c @@ -12,6 +12,7 @@ */ #include <linux/leds.h> #include <linux/phy.h> +#include <linux/phy_led_triggers.h> #include <linux/netdevice.h> static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy, @@ -102,8 +103,10 @@ int phy_led_triggers_register(struct phy_device *phy) sizeof(struct phy_led_trigger) * phy->phy_num_led_triggers, GFP_KERNEL); - if (!phy->phy_led_triggers) - return -ENOMEM; + if (!phy->phy_led_triggers) { + err = -ENOMEM; + goto out_clear; + } for (i = 0; i < phy->phy_num_led_triggers; i++) { err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i], @@ -120,6 +123,8 @@ out_unreg: while (i--) phy_led_trigger_unregister(&phy->phy_led_triggers[i]); devm_kfree(&phy->mdio.dev, phy->phy_led_triggers); +out_clear: + phy->phy_num_led_triggers = 0; return err; } EXPORT_SYMBOL_GPL(phy_led_triggers_register); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 8c1d3bd6b4d0..8a7d6b905362 
100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -218,6 +218,7 @@ struct tun_struct { struct list_head disabled; void *security; u32 flow_count; + u32 rx_batched; struct tun_pcpu_stats __percpu *pcpu_stats; }; @@ -522,6 +523,7 @@ static void tun_queue_purge(struct tun_file *tfile) while ((skb = skb_array_consume(&tfile->tx_array)) != NULL) kfree_skb(skb); + skb_queue_purge(&tfile->sk.sk_write_queue); skb_queue_purge(&tfile->sk.sk_error_queue); } @@ -1139,10 +1141,46 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, return skb; } +static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, + struct sk_buff *skb, int more) +{ + struct sk_buff_head *queue = &tfile->sk.sk_write_queue; + struct sk_buff_head process_queue; + u32 rx_batched = tun->rx_batched; + bool rcv = false; + + if (!rx_batched || (!more && skb_queue_empty(queue))) { + local_bh_disable(); + netif_receive_skb(skb); + local_bh_enable(); + return; + } + + spin_lock(&queue->lock); + if (!more || skb_queue_len(queue) == rx_batched) { + __skb_queue_head_init(&process_queue); + skb_queue_splice_tail_init(queue, &process_queue); + rcv = true; + } else { + __skb_queue_tail(queue, skb); + } + spin_unlock(&queue->lock); + + if (rcv) { + struct sk_buff *nskb; + + local_bh_disable(); + while ((nskb = __skb_dequeue(&process_queue))) + netif_receive_skb(nskb); + netif_receive_skb(skb); + local_bh_enable(); + } +} + /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, struct iov_iter *from, - int noblock) + int noblock, bool more) { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; @@ -1283,9 +1321,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, rxhash = skb_get_hash(skb); #ifndef CONFIG_4KSTACKS - local_bh_disable(); - netif_receive_skb(skb); - local_bh_enable(); + tun_rx_batched(tun, tfile, skb, more); #else netif_rx_ni(skb); #endif @@ -1311,7 +1347,8 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) if (!tun) return -EBADFD; - result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK); + result = tun_get_user(tun, tfile, NULL, from, + file->f_flags & O_NONBLOCK, false); tun_put(tun); return result; @@ -1359,7 +1396,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, return -EINVAL; if (virtio_net_hdr_from_skb(skb, &gso, - tun_is_little_endian(tun))) { + tun_is_little_endian(tun), true)) { struct skb_shared_info *sinfo = skb_shinfo(skb); pr_err("unexpected GSO type: " "0x%x, gso_size %d, hdr_len %d\n", @@ -1569,7 +1606,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) return -EBADFD; ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, - m->msg_flags & MSG_DONTWAIT); + m->msg_flags & MSG_DONTWAIT, + m->msg_flags & MSG_MORE); tun_put(tun); return ret; } @@ -1770,6 +1808,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun->align = NET_SKB_PAD; tun->filter_attached = false; tun->sndbuf = tfile->socket.sk->sk_sndbuf; + tun->rx_batched = 0; tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); if (!tun->pcpu_stats) { @@ -2438,6 +2477,29 @@ static void tun_set_msglevel(struct net_device *dev, u32 value) #endif } +static int tun_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct tun_struct *tun = netdev_priv(dev); + + ec->rx_max_coalesced_frames = tun->rx_batched; + + return 0; +} + +static int 
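tun_rx_batched() above holds small bursts on sk_write_queue and flushes them to the stack inside a single local_bh_disable() section; MSG_MORE from the sender is the hint that more packets are coming. Tracing the logic with rx_batched == 4 and a six-packet burst that sets MSG_MORE on all but the last packet (queue initially empty):

/* pkt 1-4: more set, queue length below rx_batched -> queued (len 1..4)
 * pkt 5  : skb_queue_len(queue) == rx_batched      -> pkts 1-4 spliced out
 *          and delivered together with pkt 5 in one BH-disabled section
 * pkt 6  : !more and queue now empty               -> delivered directly
 */

The batch size is exposed through the standard ethtool coalescing knob, rx_max_coalesced_frames, capped at NAPI_POLL_WEIGHT by tun_set_coalesce() just below.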
tun_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct tun_struct *tun = netdev_priv(dev); + + if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) + tun->rx_batched = NAPI_POLL_WEIGHT; + else + tun->rx_batched = ec->rx_max_coalesced_frames; + + return 0; +} + static const struct ethtool_ops tun_ethtool_ops = { .get_settings = tun_get_settings, .get_drvinfo = tun_get_drvinfo, @@ -2445,6 +2507,8 @@ static const struct ethtool_ops tun_ethtool_ops = { .set_msglevel = tun_set_msglevel, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_coalesce = tun_get_coalesce, + .set_coalesce = tun_set_coalesce, }; static int tun_queue_resize(struct tun_struct *tun) diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index fe7b2886cb6b..f5552aaaa77a 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -466,7 +466,7 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) * connected. This causes the link state to be incorrect. Work around this by * always setting the state to off, then on. */ -void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb) +static void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb) { struct usb_cdc_notification *event; @@ -531,6 +531,7 @@ static const struct driver_info wwan_info = { #define SAMSUNG_VENDOR_ID 0x04e8 #define LENOVO_VENDOR_ID 0x17ef #define NVIDIA_VENDOR_ID 0x0955 +#define HP_VENDOR_ID 0x03f0 static const struct usb_device_id products[] = { /* BLACKLIST !! @@ -677,6 +678,13 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* HP lt2523 (Novatel E371) - handled by qmi_wwan */ +{ + USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* AnyDATA ADU960S - handled by qmi_wwan */ { USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 6fe1cdb0174f..24d5272cdce5 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -654,6 +654,13 @@ static const struct usb_device_id products[] = { USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&qmi_wwan_info, }, + { /* HP lt2523 (Novatel E371) */ + USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, + USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&qmi_wwan_info, + }, { /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), .driver_info = (unsigned long)&qmi_wwan_info, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 7dc61228c55b..986243c932cc 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -32,7 +32,7 @@ #define NETNEXT_VERSION "08" /* Information for net */ -#define NET_VERSION "6" +#define NET_VERSION "8" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." 
NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" @@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) u8 checksum = CHECKSUM_NONE; u32 opts2, opts3; - if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02) + if (!(tp->netdev->features & NETIF_F_RXCSUM)) goto return_result; opts2 = le32_to_cpu(rx_desc->opts2); @@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget) napi_complete(napi); if (!list_empty(&tp->rx_done)) napi_schedule(napi); + else if (!skb_queue_empty(&tp->tx_queue) && + !list_empty(&tp->tx_free)) + napi_schedule(napi); } return work_done; @@ -3155,10 +3158,13 @@ static void set_carrier(struct r8152 *tp) if (!netif_carrier_ok(netdev)) { tp->rtl_ops.enable(tp); set_bit(RTL8152_SET_RX_MODE, &tp->flags); + netif_stop_queue(netdev); napi_disable(&tp->napi); netif_carrier_on(netdev); rtl_start_rx(tp); napi_enable(&tp->napi); + netif_wake_queue(netdev); + netif_info(tp, link, netdev, "carrier on\n"); } } else { if (netif_carrier_ok(netdev)) { @@ -3166,6 +3172,7 @@ static void set_carrier(struct r8152 *tp) napi_disable(&tp->napi); tp->rtl_ops.disable(tp); napi_enable(&tp->napi); + netif_info(tp, link, netdev, "carrier off\n"); } } } @@ -3515,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf) if (!netif_running(netdev)) return 0; + netif_stop_queue(netdev); napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); if (netif_carrier_ok(netdev)) { - netif_stop_queue(netdev); mutex_lock(&tp->control); tp->rtl_ops.disable(tp); mutex_unlock(&tp->control); @@ -3545,12 +3552,17 @@ static int rtl8152_post_reset(struct usb_interface *intf) if (netif_carrier_ok(netdev)) { mutex_lock(&tp->control); tp->rtl_ops.enable(tp); + rtl_start_rx(tp); rtl8152_set_rx_mode(netdev); mutex_unlock(&tp->control); - netif_wake_queue(netdev); } napi_enable(&tp->napi); + netif_wake_queue(netdev); + usb_submit_urb(tp->intr_urb, GFP_KERNEL); + + if (!list_empty(&tp->rx_done)) + napi_schedule(&tp->napi); return 0; } @@ -3572,43 +3584,98 @@ static bool delay_autosuspend(struct r8152 *tp) */ if (!sw_linking && tp->rtl_ops.in_nway(tp)) return true; + else if (!skb_queue_empty(&tp->tx_queue)) + return true; else return false; } -static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) +static int rtl8152_runtime_suspend(struct r8152 *tp) { - struct r8152 *tp = usb_get_intfdata(intf); struct net_device *netdev = tp->netdev; int ret = 0; - mutex_lock(&tp->control); + set_bit(SELECTIVE_SUSPEND, &tp->flags); + smp_mb__after_atomic(); + + if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { + u32 rcr = 0; - if (PMSG_IS_AUTO(message)) { - if (netif_running(netdev) && delay_autosuspend(tp)) { + if (delay_autosuspend(tp)) { + clear_bit(SELECTIVE_SUSPEND, &tp->flags); + smp_mb__after_atomic(); ret = -EBUSY; goto out1; } - set_bit(SELECTIVE_SUSPEND, &tp->flags); - } else { - netif_device_detach(netdev); + if (netif_carrier_ok(netdev)) { + u32 ocp_data; + + rcr = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data = rcr & ~RCR_ACPT_ALL; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + rxdy_gated_en(tp, true); + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, + PLA_OOB_CTRL); + if (!(ocp_data & RXFIFO_EMPTY)) { + rxdy_gated_en(tp, false); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr); + clear_bit(SELECTIVE_SUSPEND, &tp->flags); + smp_mb__after_atomic(); + ret = -EBUSY; + goto out1; + } + } + + 
clear_bit(WORK_ENABLE, &tp->flags); + usb_kill_urb(tp->intr_urb); + + tp->rtl_ops.autosuspend_en(tp, true); + + if (netif_carrier_ok(netdev)) { + napi_disable(&tp->napi); + rtl_stop_rx(tp); + rxdy_gated_en(tp, false); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr); + napi_enable(&tp->napi); + } } +out1: + return ret; +} + +static int rtl8152_system_suspend(struct r8152 *tp) +{ + struct net_device *netdev = tp->netdev; + int ret = 0; + + netif_device_detach(netdev); + if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); napi_disable(&tp->napi); - if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { - rtl_stop_rx(tp); - tp->rtl_ops.autosuspend_en(tp, true); - } else { - cancel_delayed_work_sync(&tp->schedule); - tp->rtl_ops.down(tp); - } + cancel_delayed_work_sync(&tp->schedule); + tp->rtl_ops.down(tp); napi_enable(&tp->napi); } -out1: + + return ret; +} + +static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct r8152 *tp = usb_get_intfdata(intf); + int ret; + + mutex_lock(&tp->control); + + if (PMSG_IS_AUTO(message)) + ret = rtl8152_runtime_suspend(tp); + else + ret = rtl8152_system_suspend(tp); + mutex_unlock(&tp->control); return ret; @@ -3629,12 +3696,15 @@ static int rtl8152_resume(struct usb_interface *intf) if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { tp->rtl_ops.autosuspend_en(tp, false); - clear_bit(SELECTIVE_SUSPEND, &tp->flags); napi_disable(&tp->napi); set_bit(WORK_ENABLE, &tp->flags); if (netif_carrier_ok(tp->netdev)) rtl_start_rx(tp); napi_enable(&tp->napi); + clear_bit(SELECTIVE_SUSPEND, &tp->flags); + smp_mb__after_atomic(); + if (!list_empty(&tp->rx_done)) + napi_schedule(&tp->napi); } else { tp->rtl_ops.up(tp); netif_carrier_off(tp->netdev); @@ -4308,6 +4378,11 @@ static int rtl8152_probe(struct usb_interface *intf, NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + if (tp->version == RTL_VER_01) { + netdev->features &= ~NETIF_F_RXCSUM; + netdev->hw_features &= ~NETIF_F_RXCSUM; + } + netdev->ethtool_ops = &ops; netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 37db91d1a0a3..bd22cf306a92 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -23,6 +23,7 @@ #include <linux/virtio.h> #include <linux/virtio_net.h> #include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <linux/scatterlist.h> #include <linux/if_vlan.h> #include <linux/slab.h> @@ -48,8 +49,16 @@ module_param(gso, bool, 0444); */ DECLARE_EWMA(pkt_len, 1, 64) +/* With mergeable buffers we align buffer address and use the low bits to + * encode its true size. Buffer size is up to 1 page so we need to align to + * square root of page size to ensure we reserve enough bits to encode the true + * size. + */ +#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2) + /* Minimum alignment for mergeable packet buffers. 
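Checking the arithmetic that the new virtio-net alignment macro (just below) encodes, assuming 4 KiB pages:

/* PAGE_SHIFT = 12
 * MERGEABLE_BUFFER_MIN_ALIGN_SHIFT = (12 + 1) / 2 = 6
 * 1 << 6 = 64, so MERGEABLE_BUFFER_ALIGN = max(L1_CACHE_BYTES, 64)
 *
 * 64-byte alignment frees the low 6 address bits, and one page holds
 * 4096 / 64 = 64 = 2^6 distinct 64-byte size granules, exactly enough
 * to encode a buffer's true size alongside its address.
 */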
*/ -#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) +#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \ + 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT) #define VIRTNET_DRIVER_VERSION "1.0.0" @@ -330,7 +339,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, return skb; } -static void virtnet_xdp_xmit(struct virtnet_info *vi, +static bool virtnet_xdp_xmit(struct virtnet_info *vi, struct receive_queue *rq, struct send_queue *sq, struct xdp_buff *xdp, @@ -382,10 +391,12 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi, put_page(page); } else /* small buffer */ kfree_skb(data); - return; // On error abort to avoid unnecessary kick + /* On error abort to avoid unnecessary kick */ + return false; } virtqueue_kick(sq->vq); + return true; } static u32 do_xdp_prog(struct virtnet_info *vi, @@ -421,11 +432,14 @@ static u32 do_xdp_prog(struct virtnet_info *vi, vi->xdp_queue_pairs + smp_processor_id(); xdp.data = buf; - virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data); + if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, + data))) + trace_xdp_exception(vi->dev, xdp_prog, act); return XDP_TX; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(vi->dev, xdp_prog, act); case XDP_DROP: return XDP_DROP; } @@ -1104,7 +1118,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) hdr = skb_vnet_hdr(skb); if (virtio_net_hdr_from_skb(skb, &hdr->hdr, - virtio_is_little_endian(vi->vdev))) + virtio_is_little_endian(vi->vdev), false)) BUG(); if (vi->mergeable_rx_bufs) @@ -1704,6 +1718,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) u16 xdp_qp = 0, curr_qp; int i, err; + if (prog && prog->xdp_adjust_head) { + netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n"); + return -EOPNOTSUPP; + } + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || @@ -1887,8 +1906,12 @@ static void free_receive_page_frags(struct virtnet_info *vi) put_page(vi->rq[i].alloc_frag.page); } -static bool is_xdp_queue(struct virtnet_info *vi, int q) +static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) { + /* For small receive mode always use kfree_skb variants */ + if (!vi->mergeable_rx_bufs) + return false; + if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) return false; else if (q < vi->curr_queue_pairs) @@ -1905,7 +1928,7 @@ static void free_unused_bufs(struct virtnet_info *vi) for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->sq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { - if (!is_xdp_queue(vi, i)) + if (!is_xdp_raw_buffer_queue(vi, i)) dev_kfree_skb(buf); else put_page(virt_to_head_page(buf)); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index e34b1297c96a..25bc764ae7dc 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1851,7 +1851,7 @@ vmxnet3_poll(struct napi_struct *napi, int budget) rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget); if (rxd_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rxd_done); vmxnet3_enable_all_intrs(rx_queue->adapter); } return rxd_done; @@ -1882,7 +1882,7 @@ vmxnet3_poll_rx_only(struct napi_struct *napi, int budget) rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget); if (rxd_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rxd_done); vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx); } return 
rxd_done; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 895e3e258543..264fc1585b3c 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -262,7 +262,9 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, .flowi4_iif = LOOPBACK_IFINDEX, .flowi4_tos = RT_TOS(ip4h->tos), .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF, + .flowi4_proto = ip4h->protocol, .daddr = ip4h->daddr, + .saddr = ip4h->saddr, }; struct net *net = dev_net(vrf_dev); struct rtable *rt; @@ -1249,6 +1251,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, return -EINVAL; vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]); + if (vrf->tb_id == RT_TABLE_UNSPEC) + return -EINVAL; dev->priv_flags |= IFF_L3MDEV_MASTER; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index bb70dd5723b5..2e48ce22eabf 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1798,7 +1798,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, struct vxlan_sock *sock4, struct sk_buff *skb, int oif, u8 tos, - __be32 daddr, __be32 *saddr, + __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport, struct dst_cache *dst_cache, const struct ip_tunnel_info *info) { @@ -1824,6 +1824,8 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device fl4.flowi4_proto = IPPROTO_UDP; fl4.daddr = daddr; fl4.saddr = *saddr; + fl4.fl4_dport = dport; + fl4.fl4_sport = sport; rt = ip_route_output_key(vxlan->net, &fl4); if (likely(!IS_ERR(rt))) { @@ -1851,6 +1853,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, __be32 label, const struct in6_addr *daddr, struct in6_addr *saddr, + __be16 dport, __be16 sport, struct dst_cache *dst_cache, const struct ip_tunnel_info *info) { @@ -1877,6 +1880,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); fl6.flowi6_mark = skb->mark; fl6.flowi6_proto = IPPROTO_UDP; + fl6.fl6_dport = dport; + fl6.fl6_sport = sport; err = ipv6_stub->ipv6_dst_lookup(vxlan->net, sock6->sock->sk, @@ -1946,7 +1951,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, struct vxlan_dev *vxlan, union vxlan_addr *daddr, - __be32 dst_port, __be32 vni, struct dst_entry *dst, + __be16 dst_port, __be32 vni, struct dst_entry *dst, u32 rt_flags) { #if IS_ENABLED(CONFIG_IPV6) @@ -2068,6 +2073,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, rdst ? rdst->remote_ifindex : 0, tos, dst->sin.sin_addr.s_addr, &src->sin.sin_addr.s_addr, + dst_port, src_port, dst_cache, info); if (IS_ERR(rt)) { err = PTR_ERR(rt); @@ -2104,6 +2110,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, rdst ? 
rdst->remote_ifindex : 0, tos, label, &dst->sin6.sin6_addr, &src->sin6.sin6_addr, + dst_port, src_port, dst_cache, info); if (IS_ERR(ndst)) { err = PTR_ERR(ndst); @@ -2261,7 +2268,7 @@ static void vxlan_cleanup(unsigned long arg) = container_of(p, struct vxlan_fdb, hlist); unsigned long timeout; - if (f->state & NUD_PERMANENT) + if (f->state & (NUD_PERMANENT | NUD_NOARP)) continue; timeout = f->used + vxlan->cfg.age_interval * HZ; @@ -2347,7 +2354,7 @@ static int vxlan_open(struct net_device *dev) } /* Purge the forwarding table */ -static void vxlan_flush(struct vxlan_dev *vxlan) +static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) { unsigned int h; @@ -2357,6 +2364,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan) hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { struct vxlan_fdb *f = container_of(p, struct vxlan_fdb, hlist); + if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP))) + continue; /* the all_zeros_mac entry is deleted at vxlan_uninit */ if (!is_zero_ether_addr(f->eth_addr)) vxlan_fdb_destroy(vxlan, f); @@ -2378,7 +2387,7 @@ static int vxlan_stop(struct net_device *dev) del_timer_sync(&vxlan->age_timer); - vxlan_flush(vxlan); + vxlan_flush(vxlan, false); vxlan_sock_release(vxlan); return ret; @@ -2430,7 +2439,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, info->key.u.ipv4.dst, - &info->key.u.ipv4.src, NULL, info); + &info->key.u.ipv4.src, dport, sport, NULL, info); if (IS_ERR(rt)) return PTR_ERR(rt); ip_rt_put(rt); @@ -2441,7 +2450,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, info->key.label, &info->key.u.ipv6.dst, - &info->key.u.ipv6.src, NULL, info); + &info->key.u.ipv6.src, dport, sport, NULL, info); if (IS_ERR(ndst)) return PTR_ERR(ndst); dst_release(ndst); @@ -2883,7 +2892,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, memcpy(&vxlan->cfg, conf, sizeof(*conf)); if (!vxlan->cfg.dst_port) { if (conf->flags & VXLAN_F_GPE) - vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */ + vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */ else vxlan->cfg.dst_port = default_port; } @@ -3051,6 +3060,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); + vxlan_flush(vxlan, true); + spin_lock(&vn->sock_lock); if (!hlist_unhashed(&vxlan->hlist)) hlist_del_rcu(&vxlan->hlist); diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index e38ce4da3efb..d869533d2e79 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -573,7 +573,7 @@ static int ucc_hdlc_poll(struct napi_struct *napi, int budget) howmany += hdlc_rx_done(priv, budget - howmany); if (howmany < budget) { - napi_complete(napi); + napi_complete_done(napi, howmany); qe_setbits32(priv->uccf->p_uccm, (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16); } diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c index 7ef49dab6855..cff0cfadd650 100644 --- a/drivers/net/wan/hd64572.c +++ b/drivers/net/wan/hd64572.c @@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct *napi, int budget) received = sca_rx_done(port, budget); if (received < budget) { - napi_complete(napi); + napi_complete_done(napi, received); enable_intr(port); } diff --git 
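Several hunks in this span (vmxnet3, fsl_ucc_hdlc, hd64572, and the ath10k/wil6210/xen hunks that follow) make the same mechanical substitution: napi_complete(napi) becomes napi_complete_done(napi, work_done). Reporting the real work count lets the core's GRO-flush and busy-poll heuristics see how busy the queue was instead of assuming zero. The poll-function shape is unchanged; a generic sketch with hypothetical helpers:

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = example_rx_process(napi, budget);	/* hypothetical helper */

	if (done < budget) {
		napi_complete_done(napi, done);	/* pass the real count */
		example_enable_irq(napi);	/* hypothetical helper */
	}
	return done;
}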
a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 12e67c46ea0a..2743a9bcd556 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2458,8 +2458,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) u32 phymode = __le32_to_cpu(resp->chan_change.phymode); u32 freq = __le32_to_cpu(resp->chan_change.freq); - ar->tgt_oper_chan = - __ieee80211_get_channel(ar->hw->wiphy, freq); + ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt chan change freq %u phymode %s\n", freq, ath10k_wmi_phymode_str(phymode)); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index d2aa9e5ec05d..79e61459bc6c 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -2799,7 +2799,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) done = ath10k_htt_txrx_compl_task(ar, budget); if (done < budget) { - napi_complete(ctx); + napi_complete_done(ctx, done); /* In case of MSI, it is possible that interrupts are received * while NAPI poll is inprogress. So pending interrupts that are * received after processing all copy engine pipes by NAPI poll diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 1843d98136d9..708facd5f667 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -69,7 +69,7 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget) done = budget - quota; if (done < budget) { - napi_complete(napi); + napi_complete_done(napi, done); wil6210_unmask_irq_rx(wil); wil_dbg_txrx(wil, "NAPI RX complete\n"); } diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 145cc4b5103b..1e3bd435a694 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -2078,7 +2078,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv) ie_len = ie_buf[1] + sizeof(struct ieee_types_header); band = mwifiex_band_to_radio_type(priv->curr_bss_params.band); - chan = __ieee80211_get_channel(priv->wdev.wiphy, + chan = ieee80211_get_channel(priv->wdev.wiphy, ieee80211_channel_to_frequency(bss_info.bss_chan, band)); diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 44bdb2bdeeb1..4d989b8ab185 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1065,6 +1065,7 @@ int rtl_usb_probe(struct usb_interface *intf, return -ENOMEM; } rtlpriv = hw->priv; + rtlpriv->hw = hw; rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32), GFP_KERNEL); if (!rtlpriv->usb_data) diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index e30ffd29b7e9..1073b27e54aa 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -104,7 +104,7 @@ static int xenvif_poll(struct napi_struct *napi, int budget) work_done = xenvif_tx_action(queue, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); xenvif_napi_schedule_or_enable_events(queue); } @@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); struct xenvif_queue *queue = NULL; - unsigned int num_queues 
= vif->num_queues; unsigned long rx_bytes = 0; unsigned long rx_packets = 0; unsigned long tx_bytes = 0; unsigned long tx_packets = 0; unsigned int index; + spin_lock(&vif->lock); if (vif->queues == NULL) goto out; /* Aggregate tx and rx stats from each queue */ - for (index = 0; index < num_queues; ++index) { + for (index = 0; index < vif->num_queues; ++index) { queue = &vif->queues[index]; rx_bytes += queue->stats.rx_bytes; rx_packets += queue->stats.rx_packets; @@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) } out: + spin_unlock(&vif->lock); + vif->dev->stats.rx_bytes = rx_bytes; vif->dev->stats.rx_packets = rx_packets; vif->dev->stats.tx_bytes = tx_bytes; diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 3124eaec9427..85b742e1c42f 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -493,11 +493,22 @@ static int backend_create_xenvif(struct backend_info *be) static void backend_disconnect(struct backend_info *be) { if (be->vif) { + unsigned int queue_index; + xen_unregister_watchers(be->vif); #ifdef CONFIG_DEBUG_FS xenvif_debugfs_delif(be->vif); #endif /* CONFIG_DEBUG_FS */ xenvif_disconnect_data(be->vif); + for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) + xenvif_deinit_queue(&be->vif->queues[queue_index]); + + spin_lock(&be->vif->lock); + vfree(be->vif->queues); + be->vif->num_queues = 0; + be->vif->queues = NULL; + spin_unlock(&be->vif->lock); + xenvif_disconnect_ctrl(be->vif); } } @@ -1034,6 +1045,8 @@ static void connect(struct backend_info *be) err: if (be->vif->num_queues > 0) xenvif_disconnect_data(be->vif); /* Clean up existing queues */ + for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) + xenvif_deinit_queue(&be->vif->queues[queue_index]); vfree(be->vif->queues); be->vif->queues = NULL; be->vif->num_queues = 0; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 40f26b69beb1..cf82b5b42056 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) queue->rx.req_prod_pvt = req_prod; /* Not enough requests? Try again later. 
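The xen-netback pair of hunks establishes one invariant: the queues array may now be freed and NULLed by backend_disconnect(), so anything that walks it, like xenvif_get_stats() above, must do so under vif->lock and cope with vif->queues == NULL and num_queues == 0. An illustrative reader following that rule:

static unsigned long example_sum_rx_bytes(struct xenvif *vif)
{
	unsigned long total = 0;
	unsigned int i;

	spin_lock(&vif->lock);
	/* num_queues is zeroed under this same lock at disconnect time. */
	for (i = 0; i < vif->num_queues; ++i)
		total += vif->queues[i].stats.rx_bytes;
	spin_unlock(&vif->lock);

	return total;
}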
*/ - if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { + if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) { mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); return; } @@ -1051,7 +1051,7 @@ err: if (work_done < budget) { int more_to_do = 0; - napi_complete(napi); + napi_complete_done(napi, work_done); RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); if (more_to_do) diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 6307088b375f..a518cb1b59d4 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -957,6 +957,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) { resource_size_t allocated = 0, available = 0; struct nd_region *nd_region = to_nd_region(dev->parent); + struct nd_namespace_common *ndns = to_ndns(dev); struct nd_mapping *nd_mapping; struct nvdimm_drvdata *ndd; struct nd_label_id label_id; @@ -964,7 +965,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) u8 *uuid = NULL; int rc, i; - if (dev->driver || to_ndns(dev)->claim) + if (dev->driver || ndns->claim) return -EBUSY; if (is_namespace_pmem(dev)) { @@ -1034,20 +1035,16 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) nd_namespace_pmem_set_resource(nd_region, nspm, val * nd_region->ndr_mappings); - } else if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - /* - * Try to delete the namespace if we deleted all of its - * allocation, this is not the seed device for the - * region, and it is not actively claimed by a btt - * instance. - */ - if (val == 0 && nd_region->ns_seed != dev - && !nsblk->common.claim) - nd_device_unregister(dev, ND_ASYNC); } + /* + * Try to delete the namespace if we deleted all of its + * allocation, this is not the seed device for the region, and + * it is not actively claimed by a btt instance. + */ + if (val == 0 && nd_region->ns_seed != dev && !ndns->claim) + nd_device_unregister(dev, ND_ASYNC); + return rc; } diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 7282d7495bf1..5b536be5a12e 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -90,7 +90,9 @@ static int read_pmem(struct page *page, unsigned int off, rc = memcpy_from_pmem(mem + off, pmem_addr, len); kunmap_atomic(mem); - return rc; + if (rc) + return -EIO; + return 0; } static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 2fc86dc7a8df..8a3c3e32a704 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1106,12 +1106,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap) if (ret) return ret; - /* Checking for ctrl->tagset is a trick to avoid sleeping on module - * load, since we only need the quirk on reset_controller. Notice - * that the HGST device needs this delay only in firmware activation - * procedure; unfortunately we have no (easy) way to verify this. 
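The small pmem hunk above normalizes read_pmem()'s return value: memcpy_from_pmem() reports a media error with a nonzero result, and callers now see a plain -EIO instead of that raw value. The essence of the change as a standalone sketch (signature approximated from the surrounding code):

static int example_read_pmem(void *dst, const void *pmem_addr, size_t len)
{
	/* nonzero means the badblock/media-error path flagged the range */
	return memcpy_from_pmem(dst, pmem_addr, len) ? -EIO : 0;
}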
- */ - if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset) + if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) msleep(NVME_QUIRK_DELAY_AMOUNT); return nvme_wait_ready(ctrl, cap, false); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index aa0bc60810a7..e65041c640cb 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1654,23 +1654,22 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, struct nvme_fc_fcp_op *op) { struct nvmefc_fcp_req *freq = &op->fcp_req; - u32 map_len = nvme_map_len(rq); enum dma_data_direction dir; int ret; freq->sg_cnt = 0; - if (!map_len) + if (!blk_rq_payload_bytes(rq)) return 0; freq->sg_table.sgl = freq->first_sgl; - ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments, - freq->sg_table.sgl); + ret = sg_alloc_table_chained(&freq->sg_table, + blk_rq_nr_phys_segments(rq), freq->sg_table.sgl); if (ret) return -ENOMEM; op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); - WARN_ON(op->nents > rq->nr_phys_segments); + WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, dir); @@ -1854,7 +1853,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, if (ret) return ret; - data_len = nvme_map_len(rq); + data_len = blk_rq_payload_bytes(rq); if (data_len) io_dir = ((rq_data_dir(rq) == WRITE) ? NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 6377e14586dc..aead6d08ed2c 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -225,14 +225,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) return (sector >> (ns->lba_shift - 9)); } -static inline unsigned nvme_map_len(struct request *rq) -{ - if (req_op(rq) == REQ_OP_DISCARD) - return sizeof(struct nvme_dsm_range); - else - return blk_rq_bytes(rq); -} - static inline void nvme_cleanup_cmd(struct request *req) { if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 19beeb7b2ac2..3faefabf339c 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -306,11 +306,11 @@ static __le64 **iod_list(struct request *req) return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req)); } -static int nvme_init_iod(struct request *rq, unsigned size, - struct nvme_dev *dev) +static int nvme_init_iod(struct request *rq, struct nvme_dev *dev) { struct nvme_iod *iod = blk_mq_rq_to_pdu(rq); int nseg = blk_rq_nr_phys_segments(rq); + unsigned int size = blk_rq_payload_bytes(rq); if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC); @@ -420,12 +420,11 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) } #endif -static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req, - int total_len) +static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct dma_pool *pool; - int length = total_len; + int length = blk_rq_payload_bytes(req); struct scatterlist *sg = iod->sg; int dma_len = sg_dma_len(sg); u64 dma_addr = sg_dma_address(sg); @@ -501,7 +500,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req, } static int nvme_map_data(struct nvme_dev *dev, struct request *req, - unsigned size, struct nvme_command *cmnd) + struct nvme_command *cmnd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 
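Every nvme hunk in this stretch replaces the driver-local nvme_map_len() (deleted from nvme.h above) with the block layer's blk_rq_payload_bytes(). The point is the special-payload case: for a discard, the bytes to map are the DSM range descriptor carried in rq->special_vec, not the LBA span that blk_rq_bytes() reports. The generic helper is roughly the following; see linux/blkdev.h for the authoritative definition:

static inline unsigned int example_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;	/* e.g. DSM range for discard */
	return blk_rq_bytes(rq);
}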
struct request_queue *q = req->q; @@ -519,7 +518,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req, DMA_ATTR_NO_WARN)) goto out; - if (!nvme_setup_prps(dev, req, size)) + if (!nvme_setup_prps(dev, req)) goto out_unmap; ret = BLK_MQ_RQ_QUEUE_ERROR; @@ -580,7 +579,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, struct nvme_dev *dev = nvmeq->dev; struct request *req = bd->rq; struct nvme_command cmnd; - unsigned map_len; int ret = BLK_MQ_RQ_QUEUE_OK; /* @@ -600,13 +598,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, if (ret != BLK_MQ_RQ_QUEUE_OK) return ret; - map_len = nvme_map_len(req); - ret = nvme_init_iod(req, map_len, dev); + ret = nvme_init_iod(req, dev); if (ret != BLK_MQ_RQ_QUEUE_OK) goto out_free_cmd; if (blk_rq_nr_phys_segments(req)) - ret = nvme_map_data(dev, req, map_len, &cmnd); + ret = nvme_map_data(dev, req, &cmnd); if (ret != BLK_MQ_RQ_QUEUE_OK) goto out_cleanup_iod; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index f587af345889..557f29b1f1bb 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -981,8 +981,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, } static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, - struct request *rq, unsigned int map_len, - struct nvme_command *c) + struct request *rq, struct nvme_command *c) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_device *dev = queue->device; @@ -1014,9 +1013,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, } if (count == 1) { - if (rq_data_dir(rq) == WRITE && - map_len <= nvme_rdma_inline_data_size(queue) && - nvme_rdma_queue_idx(queue)) + if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) && + blk_rq_payload_bytes(rq) <= + nvme_rdma_inline_data_size(queue)) return nvme_rdma_map_sg_inline(queue, req, c); if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) @@ -1422,7 +1421,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq) { if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { - struct nvme_command *cmd = (struct nvme_command *)rq->cmd; + struct nvme_command *cmd = nvme_req(rq)->cmd; if (rq->cmd_type != REQ_TYPE_DRV_PRIV || cmd->common.opcode != nvme_fabrics_command || @@ -1444,7 +1443,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, struct nvme_command *c = sqe->data; bool flush = false; struct ib_device *dev; - unsigned int map_len; int ret; WARN_ON_ONCE(rq->tag < 0); @@ -1462,8 +1460,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, blk_mq_start_request(rq); - map_len = nvme_map_len(rq); - ret = nvme_rdma_map_data(queue, rq, map_len, c); + ret = nvme_rdma_map_data(queue, rq, c); if (ret < 0) { dev_err(queue->ctrl->ctrl.device, "Failed to map data (%d)\n", ret); diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 6f5074153dcd..be8c800078e2 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item) { struct nvmet_subsys *subsys = to_subsys(item); + nvmet_subsys_del_ctrls(subsys); nvmet_subsys_put(subsys); } diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index b1d66ed655c9..fc5ba2f9e15f 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work) pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n", ctrl->cntlid, ctrl->kato); - 
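/*
 * Illustrative sketch, not part of the patch: the reordered condition
 * in the nvme_rdma_map_data() hunk above decides when a write is small
 * enough to be carried as inline data. All three tests are real driver
 * calls; only the helper name is hypothetical, factored out here to
 * show the shape of the decision:
 */
static bool nvme_rdma_use_inline_data(struct nvme_rdma_queue *queue,
				      struct request *rq)
{
	return rq_data_dir(rq) == WRITE &&
	       nvme_rdma_queue_idx(queue) &&	/* never on the admin queue (index 0) */
	       blk_rq_payload_bytes(rq) <=
			nvme_rdma_inline_data_size(queue);
}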
ctrl->ops->delete_ctrl(ctrl); + nvmet_ctrl_fatal_error(ctrl); } static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) @@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref) list_del(&ctrl->subsys_entry); mutex_unlock(&subsys->lock); + flush_work(&ctrl->async_event_work); + cancel_work_sync(&ctrl->fatal_err_work); + ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid); nvmet_subsys_put(subsys); @@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref) kfree(subsys); } +void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys) +{ + struct nvmet_ctrl *ctrl; + + mutex_lock(&subsys->lock); + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) + ctrl->ops->delete_ctrl(ctrl); + mutex_unlock(&subsys->lock); +} + void nvmet_subsys_put(struct nvmet_subsys *subsys) { kref_put(&subsys->ref, nvmet_subsys_free); diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 173e842f19c9..ba57f9852bde 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf; struct fcnvme_ls_disconnect_acc *acc = (struct fcnvme_ls_disconnect_acc *)iod->rspbuf; - struct nvmet_fc_tgt_queue *queue; + struct nvmet_fc_tgt_queue *queue = NULL; struct nvmet_fc_tgt_assoc *assoc; int ret = 0; bool del_assoc = false; @@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, assoc = nvmet_fc_find_target_assoc(tgtport, be64_to_cpu(rqst->associd.association_id)); iod->assoc = assoc; - if (!assoc) + if (assoc) { + if (rqst->discon_cmd.scope == + FCNVME_DISCONN_CONNECTION) { + queue = nvmet_fc_find_target_queue(tgtport, + be64_to_cpu( + rqst->discon_cmd.id)); + if (!queue) { + nvmet_fc_tgt_a_put(assoc); + ret = VERR_NO_CONN; + } + } + } else ret = VERR_NO_ASSOC; } @@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, FCNVME_LS_DISCONNECT); - if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) { - queue = nvmet_fc_find_target_queue(tgtport, - be64_to_cpu(rqst->discon_cmd.id)); - if (queue) { - int qid = queue->qid; + /* are we to delete a Connection ID (queue) */ + if (queue) { + int qid = queue->qid; - nvmet_fc_delete_target_queue(queue); + nvmet_fc_delete_target_queue(queue); - /* release the get taken by find_target_queue */ - nvmet_fc_tgt_q_put(queue); + /* release the get taken by find_target_queue */ + nvmet_fc_tgt_q_put(queue); - /* tear association down if io queue terminated */ - if (!qid) - del_assoc = true; - } + /* tear association down if io queue terminated */ + if (!qid) + del_assoc = true; } /* release get taken in nvmet_fc_find_target_assoc */ diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 23d5eb1c944f..cc7ad06b43a7 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl); struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, enum nvme_subsys_type type); void nvmet_subsys_put(struct nvmet_subsys *subsys); +void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys); struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid); void nvmet_put_namespace(struct nvmet_ns *ns); diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 8c3760a78ac0..60990220bd83 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, { struct ib_recv_wr 
*bad_wr; + ib_dma_sync_single_for_device(ndev->device, + cmd->sge[0].addr, cmd->sge[0].length, + DMA_FROM_DEVICE); + if (ndev->srq) return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr); return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr); @@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req) first_wr = &rsp->send_wr; nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); + + ib_dma_sync_single_for_device(rsp->queue->dev->device, + rsp->send_sge.addr, rsp->send_sge.length, + DMA_TO_DEVICE); + if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) { pr_err("sending cmd response failed\n"); nvmet_rdma_release_rsp(rsp); @@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, cmd->n_rdma = 0; cmd->req.port = queue->port; + + ib_dma_sync_single_for_cpu(queue->dev->device, + cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, + DMA_FROM_DEVICE); + ib_dma_sync_single_for_cpu(queue->dev->device, + cmd->send_sge.addr, cmd->send_sge.length, + DMA_TO_DEVICE); + if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, &queue->nvme_sq, &nvmet_rdma_ops)) return; diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c index 1f38d0836751..f1b633bce525 100644 --- a/drivers/pci/host/pci-xgene-msi.c +++ b/drivers/pci/host/pci-xgene-msi.c @@ -517,7 +517,7 @@ static int xgene_msi_probe(struct platform_device *pdev) rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online", xgene_msi_hwirq_alloc, NULL); - if (rc) + if (rc < 0) goto err_cpuhp; pci_xgene_online = rc; rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL, diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index bed19994c1e9..af8f6e92e885 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp) { u32 val; - /* get iATU unroll support */ - pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp); - dev_dbg(pp->dev, "iATU unroll: %s\n", - pp->iatu_unroll_enabled ? "enabled" : "disabled"); - /* set the number of lanes */ val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL); val &= ~PORT_LINK_MODE_MASK; @@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp) * we should not program the ATU here. */ if (!pp->ops->rd_other_conf) { + /* get iATU unroll support */ + pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp); + dev_dbg(pp->dev, "iATU unroll: %s\n", + pp->iatu_unroll_enabled ? 
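/*
 * Illustrative sketch, not part of the patch: the ib_dma_sync_single_*
 * calls added to the nvmet-rdma hunks above follow the streaming-DMA
 * ownership rule: sync_for_cpu before the CPU reads a buffer the HCA
 * filled, sync_for_device before handing the buffer back for posting.
 * The function name and the addr/len pair below are placeholders for
 * the pattern:
 */
static void recv_buffer_lifecycle(struct ib_device *ibdev, u64 addr,
				  size_t len)
{
	/* device wrote the command buffer; claim it for CPU access */
	ib_dma_sync_single_for_cpu(ibdev, addr, len, DMA_FROM_DEVICE);

	/* ... parse the received command while the CPU owns it ... */

	/* return ownership to the device before re-posting the recv */
	ib_dma_sync_single_for_device(ibdev, addr, len, DMA_FROM_DEVICE);
}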
"enabled" : "disabled"); + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, PCIE_ATU_TYPE_MEM, pp->mem_base, pp->mem_bus_addr, pp->mem_size); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 1ccce1cd6aca..63d8e18fb6b1 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1432,6 +1432,11 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } +static int pci_bus_num_vf(struct device *dev) +{ + return pci_num_vf(to_pci_dev(dev)); +} + struct bus_type pci_bus_type = { .name = "pci", .match = pci_bus_match, @@ -1443,6 +1448,7 @@ struct bus_type pci_bus_type = { .bus_groups = pci_bus_groups, .drv_groups = pci_drv_groups, .pm = PCI_PM_OPS_PTR, + .num_vf = pci_bus_num_vf, }; EXPORT_SYMBOL(pci_bus_type); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e164b5c9f0f0..204960e70333 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1169,6 +1169,7 @@ void set_pcie_port_type(struct pci_dev *pdev) pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (!pos) return; + pdev->pcie_cap = pos; pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); pdev->pcie_flags_reg = reg16; @@ -1176,13 +1177,14 @@ void set_pcie_port_type(struct pci_dev *pdev) pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; /* - * A Root Port is always the upstream end of a Link. No PCIe - * component has two Links. Two Links are connected by a Switch - * that has a Port on each Link and internal logic to connect the - * two Ports. + * A Root Port or a PCI-to-PCIe bridge is always the upstream end + * of a Link. No PCIe component has two Links. Two Links are + * connected by a Switch that has a Port on each Link and internal + * logic to connect the two Ports. */ type = pci_pcie_type(pdev); - if (type == PCI_EXP_TYPE_ROOT_PORT) + if (type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_PCIE_BRIDGE) pdev->has_secondary_link = 1; else if (type == PCI_EXP_TYPE_UPSTREAM || type == PCI_EXP_TYPE_DOWNSTREAM) { diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 37300634b7d2..c123488266ce 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -1092,6 +1092,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, enum pin_config_param param = pinconf_to_config_param(*config); void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); + void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG); unsigned long flags; u32 conf, pull, val, debounce; u16 arg = 0; @@ -1128,7 +1129,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, return -EINVAL; raw_spin_lock_irqsave(&vg->lock, flags); - debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG)); + debounce = readl(db_reg); raw_spin_unlock_irqrestore(&vg->lock, flags); switch (debounce & BYT_DEBOUNCE_PULSE_MASK) { @@ -1176,6 +1177,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, unsigned int param, arg; void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); + void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG); unsigned long flags; u32 conf, val, debounce; int i, ret = 0; @@ -1238,36 +1240,40 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, break; case PIN_CONFIG_INPUT_DEBOUNCE: - debounce = readl(byt_gpio_reg(vg, offset, - BYT_DEBOUNCE_REG)); - 
conf &= ~BYT_DEBOUNCE_PULSE_MASK; + debounce = readl(db_reg); + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; switch (arg) { + case 0: + conf &= BYT_DEBOUNCE_EN; + break; case 375: - conf |= BYT_DEBOUNCE_PULSE_375US; + debounce |= BYT_DEBOUNCE_PULSE_375US; break; case 750: - conf |= BYT_DEBOUNCE_PULSE_750US; + debounce |= BYT_DEBOUNCE_PULSE_750US; break; case 1500: - conf |= BYT_DEBOUNCE_PULSE_1500US; + debounce |= BYT_DEBOUNCE_PULSE_1500US; break; case 3000: - conf |= BYT_DEBOUNCE_PULSE_3MS; + debounce |= BYT_DEBOUNCE_PULSE_3MS; break; case 6000: - conf |= BYT_DEBOUNCE_PULSE_6MS; + debounce |= BYT_DEBOUNCE_PULSE_6MS; break; case 12000: - conf |= BYT_DEBOUNCE_PULSE_12MS; + debounce |= BYT_DEBOUNCE_PULSE_12MS; break; case 24000: - conf |= BYT_DEBOUNCE_PULSE_24MS; + debounce |= BYT_DEBOUNCE_PULSE_24MS; break; default: ret = -EINVAL; } + if (!ret) + writel(debounce, db_reg); break; default: ret = -ENOTSUPP; @@ -1617,6 +1623,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc) static void byt_gpio_irq_init_hw(struct byt_gpio *vg) { + struct gpio_chip *gc = &vg->chip; + struct device *dev = &vg->pdev->dev; void __iomem *reg; u32 base, value; int i; @@ -1638,10 +1646,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg) } value = readl(reg); - if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) && - !(value & BYT_DIRECT_IRQ_EN)) { + if (value & BYT_DIRECT_IRQ_EN) { + clear_bit(i, gc->irq_valid_mask); + dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i); + } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) { byt_gpio_clear_triggering(vg, i); - dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i); + dev_dbg(dev, "disabling GPIO %d\n", i); } } @@ -1680,6 +1690,7 @@ static int byt_gpio_probe(struct byt_gpio *vg) gc->can_sleep = false; gc->parent = &vg->pdev->dev; gc->ngpio = vg->soc_data->npins; + gc->irq_need_valid_mask = true; #ifdef CONFIG_PM_SLEEP vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio, diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c index 59cb7a6fc5be..901b356b09d7 100644 --- a/drivers/pinctrl/intel/pinctrl-broxton.c +++ b/drivers/pinctrl/intel/pinctrl-broxton.c @@ -19,7 +19,7 @@ #define BXT_PAD_OWN 0x020 #define BXT_HOSTSW_OWN 0x080 -#define BXT_PADCFGLOCK 0x090 +#define BXT_PADCFGLOCK 0x060 #define BXT_GPI_IE 0x110 #define BXT_COMMUNITY(s, e) \ diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 1e139672f1af..6df35dcb29ae 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, return 0; } +static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) +{ + u32 value; + + value = readl(padcfg0); + if (input) { + value &= ~PADCFG0_GPIORXDIS; + value |= PADCFG0_GPIOTXDIS; + } else { + value &= ~PADCFG0_GPIOTXDIS; + value |= PADCFG0_GPIORXDIS; + } + writel(value, padcfg0); +} + static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned pin) @@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, /* Disable SCI/SMI/NMI generation */ value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); - /* Disable TX buffer and enable RX (this will be input) */ - value &= ~PADCFG0_GPIORXDIS; - value |= PADCFG0_GPIOTXDIS; writel(value, padcfg0); + /* Disable TX buffer and enable RX (this will be input) 
*/ + __intel_gpio_set_direction(padcfg0, true); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; @@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev, struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); void __iomem *padcfg0; unsigned long flags; - u32 value; raw_spin_lock_irqsave(&pctrl->lock, flags); padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); - - value = readl(padcfg0); - if (input) - value |= PADCFG0_GPIOTXDIS; - else - value &= ~PADCFG0_GPIOTXDIS; - writel(value, padcfg0); + __intel_gpio_set_direction(padcfg0, input); raw_spin_unlock_irqrestore(&pctrl->lock, flags); diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c index c3928aa3fefa..e0bca4df2a2f 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c @@ -253,9 +253,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) }; static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; -static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; -static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), - PIN(GPIOAO_5, 0) }; +static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) }; +static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) }; static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; @@ -498,7 +497,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = { GPIO_GROUP(GPIOAO_13, 0), /* bank AO */ - GROUP(uart_tx_ao_b, 0, 26), + GROUP(uart_tx_ao_b, 0, 24), GROUP(uart_rx_ao_b, 0, 25), GROUP(uart_tx_ao_a, 0, 12), GROUP(uart_rx_ao_a, 0, 11), diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c index 25694f7094c7..b69743b07a1d 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c @@ -214,9 +214,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) }; static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; -static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; -static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), - PIN(GPIOAO_5, 0) }; +static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) }; +static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) }; static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; @@ -409,7 +408,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = { GPIO_GROUP(GPIOAO_9, 0), /* bank AO */ - GROUP(uart_tx_ao_b, 0, 26), + GROUP(uart_tx_ao_b, 0, 24), GROUP(uart_rx_ao_b, 0, 25), GROUP(uart_tx_ao_a, 0, 12), GROUP(uart_rx_ao_a, 0, 11), diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index c9a146948192..537b52055756 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -202,6 +202,8 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc) i = 128; pin_num = AMD_GPIO_PINS_BANK2 + i; break; + default: + return; } for (; i < pin_num; i++) { diff --git 
a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c index aa8bd9794683..96686336e3a3 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c @@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39, 41, 42, 45}; -static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; +static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1}; static const unsigned i2c0_pins[] = {63, 64}; static const int i2c0_muxvals[] = {0, 0}; static const unsigned i2c1_pins[] = {65, 66}; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 410741acb3c9..f46ece2ce3c4 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) case 8: case 7: case 6: + case 1: ideapad_input_report(priv, vpc_bit); break; case 5: diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index 1fc0de870ff8..361770568ad0 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev) input_set_capability(input, EV_KEY, KEY_POWER); - error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0, + error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT, DRIVER_NAME, input); if (error) { dev_err(&pdev->dev, "Unable to request irq %d for mfld power" diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 97b4c3a219c0..25f15df5c2d7 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -326,7 +326,7 @@ static int __init mlxplat_init(void) return 0; fail_platform_mux_register: - for (i--; i > 0 ; i--) + while (--i >= 0) platform_device_unregister(priv->pdev_mux[i]); platform_device_unregister(priv->pdev_i2c); fail_alloc: diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c index cbf4d83a7271..25b176996cb7 100644 --- a/drivers/platform/x86/surface3-wmi.c +++ b/drivers/platform/x86/surface3-wmi.c @@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle, static int s3_wmi_check_platform_device(struct device *dev, void *data) { - struct acpi_device *adev, *ts_adev; + struct acpi_device *adev, *ts_adev = NULL; acpi_handle handle; acpi_status status; @@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device) return 0; } -#ifdef CONFIG_PM -static int s3_wmi_resume(struct device *dev) +static int __maybe_unused s3_wmi_resume(struct device *dev) { s3_wmi_send_lid_state(); return 0; } -#endif static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume); static struct platform_driver s3_wmi_driver = { diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 9a507e77eced..90b05c72186c 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -396,9 +396,6 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, goto unwind_vring_allocations; } - /* track the rvdevs list reference */ - kref_get(&rvdev->refcount); - list_add_tail(&rvdev->node, &rproc->rvdevs); rproc_add_subdev(rproc, &rvdev->subdev, @@ -889,13 
+886,15 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) /* * Create a copy of the resource table. When a virtio device starts * and calls vring_new_virtqueue() the address of the allocated vring - * will be stored in the table_ptr. Before the device is started, - * table_ptr will be copied into device memory. + * will be stored in the cached_table. Before the device is started, + * cached_table will be copied into device memory. */ - rproc->table_ptr = kmemdup(table, tablesz, GFP_KERNEL); - if (!rproc->table_ptr) + rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL); + if (!rproc->cached_table) goto clean_up; + rproc->table_ptr = rproc->cached_table; + /* reset max_notifyid */ rproc->max_notifyid = -1; @@ -914,16 +913,18 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) } /* - * The starting device has been given the rproc->table_ptr as the + * The starting device has been given the rproc->cached_table as the * resource table. The address of the vring along with the other - * allocated resources (carveouts etc) is stored in table_ptr. + * allocated resources (carveouts etc) is stored in cached_table. * In order to pass this information to the remote device we must copy * this information to device memory. We also update the table_ptr so * that any subsequent changes will be applied to the loaded version. */ loaded_table = rproc_find_loaded_rsc_table(rproc, fw); - if (loaded_table) - memcpy(loaded_table, rproc->table_ptr, tablesz); + if (loaded_table) { + memcpy(loaded_table, rproc->cached_table, tablesz); + rproc->table_ptr = loaded_table; + } /* power up the remote processor */ ret = rproc->ops->start(rproc); @@ -951,7 +952,8 @@ stop_rproc: clean_up_resources: rproc_resource_cleanup(rproc); clean_up: - kfree(rproc->table_ptr); + kfree(rproc->cached_table); + rproc->cached_table = NULL; rproc->table_ptr = NULL; rproc_disable_iommu(rproc); @@ -1185,7 +1187,8 @@ void rproc_shutdown(struct rproc *rproc) rproc_disable_iommu(rproc); /* Free the copy of the resource table */ - kfree(rproc->table_ptr); + kfree(rproc->cached_table); + rproc->cached_table = NULL; rproc->table_ptr = NULL; /* if in crash state, unlock crash handler */ diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c index a79cb5a9e5f2..1cfb775e8e82 100644 --- a/drivers/rpmsg/rpmsg_core.c +++ b/drivers/rpmsg/rpmsg_core.c @@ -453,8 +453,8 @@ int rpmsg_register_device(struct rpmsg_device *rpdev) struct device *dev = &rpdev->dev; int ret; - dev_set_name(&rpdev->dev, "%s:%s", - dev_name(dev->parent), rpdev->id.name); + dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent), + rpdev->id.name, rpdev->src, rpdev->dst); rpdev->dev.bus = &rpmsg_bus; rpdev->dev.release = rpmsg_release_device; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 6d4b68c483f3..e7addea8741b 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -281,8 +281,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, #define QETH_HIGH_WATERMARK_PACK 5 #define QETH_WATERMARK_PACK_FUZZ 1 -#define QETH_IP_HEADER_SIZE 40 - /* large receive scatter gather copy break */ #define QETH_RX_SG_CB (PAGE_SIZE >> 1) #define QETH_RX_PULL_LEN 256 @@ -674,8 +672,6 @@ struct qeth_card_info { int broadcast_capable; int unique_id; struct qeth_card_blkt blkt; - __u32 csum_mask; - __u32 tx_csum_mask; enum qeth_ipa_promisc_modes promisc_mode; __u32 diagass_support; __u32 hwtrap; @@ -917,7 +913,6 @@ void qeth_clear_thread_running_bit(struct 
qeth_card *, unsigned long); int qeth_core_hardsetup_card(struct qeth_card *); void qeth_print_status_message(struct qeth_card *); int qeth_init_qdio_queues(struct qeth_card *); -int qeth_send_startlan(struct qeth_card *); int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, int (*reply_cb) (struct qeth_card *, struct qeth_reply *, unsigned long), diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e33558313834..315d8a2db7c0 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2944,7 +2944,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, } EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); -int qeth_send_startlan(struct qeth_card *card) +static int qeth_send_startlan(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; @@ -2957,7 +2957,6 @@ int qeth_send_startlan(struct qeth_card *card) rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); return rc; } -EXPORT_SYMBOL_GPL(qeth_send_startlan); static int qeth_default_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -5087,6 +5086,20 @@ retriable: goto out; } + rc = qeth_send_startlan(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + if (rc == IPA_RC_LAN_OFFLINE) { + dev_warn(&card->gdev->dev, + "The LAN is offline\n"); + card->lan_online = 0; + } else { + rc = -ENODEV; + goto out; + } + } else + card->lan_online = 1; + card->options.ipa4.supported_funcs = 0; card->options.ipa6.supported_funcs = 0; card->options.adp.supported_funcs = 0; @@ -5098,14 +5111,14 @@ retriable: if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { rc = qeth_query_setadapterparms(card); if (rc < 0) { - QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); goto out; } } if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { rc = qeth_query_setdiagass(card); if (rc < 0) { - QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); goto out; } } @@ -5289,18 +5302,6 @@ int qeth_setassparms_cb(struct qeth_card *card, if (cmd->hdr.prot_version == QETH_PROT_IPV6) card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; } - if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM && - cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { - card->info.csum_mask = cmd->data.setassparms.data.flags_32bit; - QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask); - } - if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM && - cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { - card->info.tx_csum_mask = - cmd->data.setassparms.data.flags_32bit; - QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask); - } - return 0; } EXPORT_SYMBOL_GPL(qeth_setassparms_cb); @@ -6060,23 +6061,96 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev, } EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings); +/* Callback to handle checksum offload command reply from OSA card. + * Verify that required features have been enabled on the card. + * Return error in hdr->return_code as this value is checked by caller. + * + * Always returns zero to indicate no further messages from the OSA card. 
+ */ +static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_checksum_cmd *chksum_cb = + (struct qeth_checksum_cmd *)reply->param; + + QETH_CARD_TEXT(card, 4, "chkdoccb"); + if (cmd->hdr.return_code) + return 0; + + memset(chksum_cb, 0, sizeof(*chksum_cb)); + if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { + chksum_cb->supported = + cmd->data.setassparms.data.chksum.supported; + QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported); + } + if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) { + chksum_cb->supported = + cmd->data.setassparms.data.chksum.supported; + chksum_cb->enabled = + cmd->data.setassparms.data.chksum.enabled; + QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported); + QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled); + } + return 0; +} + +/* Send command to OSA card and check results. */ +static int qeth_ipa_checksum_run_cmd(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + __u16 cmd_code, long data, + struct qeth_checksum_cmd *chksum_cb) +{ + struct qeth_cmd_buffer *iob; + int rc = -ENOMEM; + + QETH_CARD_TEXT(card, 4, "chkdocmd"); + iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, + sizeof(__u32), QETH_PROT_IPV4); + if (iob) + rc = qeth_send_setassparms(card, iob, sizeof(__u32), data, + qeth_ipa_checksum_run_cmd_cb, + chksum_cb); + return rc; +} + static int qeth_send_checksum_on(struct qeth_card *card, int cstype) { - long rxtx_arg; + const __u32 required_features = QETH_IPA_CHECKSUM_IP_HDR | + QETH_IPA_CHECKSUM_UDP | + QETH_IPA_CHECKSUM_TCP; + struct qeth_checksum_cmd chksum_cb; int rc; - rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_START, 0); + rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0, + &chksum_cb); + if (!rc) { + if ((required_features & chksum_cb.supported) != + required_features) + rc = -EIO; + else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) && + cstype == IPA_INBOUND_CHECKSUM) + dev_warn(&card->gdev->dev, + "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n", + QETH_CARD_IFNAME(card)); + } if (rc) { + qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0); dev_warn(&card->gdev->dev, "Starting HW checksumming for %s failed, using SW checksumming\n", QETH_CARD_IFNAME(card)); return rc; } - rxtx_arg = (cstype == IPA_OUTBOUND_CHECKSUM) ? card->info.tx_csum_mask - : card->info.csum_mask; - rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_ENABLE, - rxtx_arg); + rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE, + chksum_cb.supported, &chksum_cb); + if (!rc) { + if ((required_features & chksum_cb.enabled) != + required_features) + rc = -EIO; + } if (rc) { + qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0); dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s failed, using SW checksumming\n", QETH_CARD_IFNAME(card)); @@ -6090,19 +6164,10 @@ static int qeth_send_checksum_on(struct qeth_card *card, int cstype) static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype) { - int rc; - - if (on) { - rc = qeth_send_checksum_on(card, cstype); - if (rc) - return -EIO; - } else { - rc = qeth_send_simple_setassparms(card, cstype, - IPA_CMD_ASS_STOP, 0); - if (rc) - return -EIO; - } - return 0; + int rc = (on) ? 
qeth_send_checksum_on(card, cstype) + : qeth_send_simple_setassparms(card, cstype, + IPA_CMD_ASS_STOP, 0); + return rc ? -EIO : 0; } static int qeth_set_ipa_tso(struct qeth_card *card, int on) diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 6cccc9a49ede..bc69d0a338ad 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -352,11 +352,28 @@ struct qeth_arp_query_info { char *udata; }; +/* IPA set assist segmentation bit definitions for receive and + * transmit checksum offloading. + */ +enum qeth_ipa_checksum_bits { + QETH_IPA_CHECKSUM_IP_HDR = 0x0002, + QETH_IPA_CHECKSUM_UDP = 0x0008, + QETH_IPA_CHECKSUM_TCP = 0x0010, + QETH_IPA_CHECKSUM_LP2LP = 0x0020 +}; + +/* IPA Assist checksum offload reply layout. */ +struct qeth_checksum_cmd { + __u32 supported; + __u32 enabled; +} __packed; + /* SETASSPARMS IPA Command: */ struct qeth_ipacmd_setassparms { struct qeth_ipacmd_setassparms_hdr hdr; union { __u32 flags_32bit; + struct qeth_checksum_cmd chksum; struct qeth_arp_cache_entry add_arp_entry; struct qeth_arp_query_data query_arp; __u8 ip[16]; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 9c921c2833f1..bea483307618 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -27,9 +27,6 @@ static int qeth_l2_set_offline(struct ccwgroup_device *); static int qeth_l2_stop(struct net_device *); -static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); -static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, - enum qeth_ipa_cmds); static void qeth_l2_set_rx_mode(struct net_device *); static int qeth_l2_recover(void *); static void qeth_bridgeport_query_support(struct qeth_card *card); @@ -165,13 +162,70 @@ static int qeth_setdel_makerc(struct qeth_card *card, int retcode) return rc; } +static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, + enum qeth_ipa_cmds ipacmd) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "L2sdmac"); + iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; + memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); + return qeth_setdel_makerc(card, qeth_send_ipa_cmd(card, iob, + NULL, NULL)); +} + +static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) +{ + int rc; + + QETH_CARD_TEXT(card, 2, "L2Setmac"); + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); + if (rc == 0) { + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; + memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN); + dev_info(&card->gdev->dev, + "MAC address %pM successfully registered on device %s\n", + card->dev->dev_addr, card->dev->name); + } else { + card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; + switch (rc) { + case -EEXIST: + dev_warn(&card->gdev->dev, + "MAC address %pM already exists\n", mac); + break; + case -EPERM: + dev_warn(&card->gdev->dev, + "MAC address %pM is not authorized\n", mac); + break; + } + } + return rc; +} + +static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) +{ + int rc; + + QETH_CARD_TEXT(card, 2, "L2Delmac"); + if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) + return 0; + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC); + if (rc == 0) + card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; + return rc; +} + static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) { int rc; 
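/*
 * Illustrative sketch, not part of the patch: the new checksum
 * start/enable path above treats the card's setassparms reply as a
 * feature bitmask and fails unless every required feature came back
 * set. The helper name is hypothetical; the masks are the ones this
 * series defines in qeth_core_mpc.h:
 */
static int qeth_csum_reply_ok(u32 reply_mask)
{
	const u32 required = QETH_IPA_CHECKSUM_IP_HDR |
			     QETH_IPA_CHECKSUM_UDP |
			     QETH_IPA_CHECKSUM_TCP;

	/* subset test: all required bits must be set in the reply */
	return (reply_mask & required) == required ? 0 : -EIO;
}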
QETH_CARD_TEXT(card, 2, "L2Sgmac"); - rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, - IPA_CMD_SETGMAC)); + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC); if (rc == -EEXIST) QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n", mac, QETH_CARD_IFNAME(card)); @@ -186,8 +240,7 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) int rc; QETH_CARD_TEXT(card, 2, "L2Dgmac"); - rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, - IPA_CMD_DELGMAC)); + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC); if (rc) QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %d\n", @@ -195,28 +248,27 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) return rc; } -static inline u32 qeth_l2_mac_hash(const u8 *addr) +static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac) { - return get_unaligned((u32 *)(&addr[2])); + if (mac->is_uc) { + return qeth_l2_send_setdelmac(card, mac->mac_addr, + IPA_CMD_SETVMAC); + } else { + return qeth_l2_send_setgroupmac(card, mac->mac_addr); + } } -static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac) +static int qeth_l2_remove_mac(struct qeth_card *card, struct qeth_mac *mac) { - - int rc; - if (mac->is_uc) { - rc = qeth_setdel_makerc(card, - qeth_l2_send_setdelmac(card, mac->mac_addr, - IPA_CMD_SETVMAC)); + return qeth_l2_send_setdelmac(card, mac->mac_addr, + IPA_CMD_DELVMAC); } else { - rc = qeth_setdel_makerc(card, - qeth_l2_send_setgroupmac(card, mac->mac_addr)); + return qeth_l2_send_delgroupmac(card, mac->mac_addr); } - return rc; } -static void qeth_l2_del_all_macs(struct qeth_card *card, int del) +static void qeth_l2_del_all_macs(struct qeth_card *card) { struct qeth_mac *mac; struct hlist_node *tmp; @@ -224,19 +276,17 @@ static void qeth_l2_del_all_macs(struct qeth_card *card, int del) spin_lock_bh(&card->mclock); hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { - if (del) { - if (mac->is_uc) - qeth_l2_send_setdelmac(card, mac->mac_addr, - IPA_CMD_DELVMAC); - else - qeth_l2_send_delgroupmac(card, mac->mac_addr); - } hash_del(&mac->hnode); kfree(mac); } spin_unlock_bh(&card->mclock); } +static inline u32 qeth_l2_mac_hash(const u8 *addr) +{ + return get_unaligned((u32 *)(&addr[2])); +} + static inline int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { @@ -425,7 +475,7 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) card->state = CARD_STATE_SOFTSETUP; } if (card->state == CARD_STATE_SOFTSETUP) { - qeth_l2_del_all_macs(card, 0); + qeth_l2_del_all_macs(card); qeth_clear_ipacmd_list(card); card->state = CARD_STATE_HARDSETUP; } @@ -577,65 +627,6 @@ out: return work_done; } -static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, - enum qeth_ipa_cmds ipacmd) -{ - struct qeth_ipa_cmd *cmd; - struct qeth_cmd_buffer *iob; - - QETH_CARD_TEXT(card, 2, "L2sdmac"); - iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); - if (!iob) - return -ENOMEM; - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; - memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); - return qeth_send_ipa_cmd(card, iob, NULL, NULL); -} - -static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) -{ - int rc; - - QETH_CARD_TEXT(card, 2, "L2Setmac"); - rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, - IPA_CMD_SETVMAC)); - if (rc == 0) { - card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; - 
memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN); - dev_info(&card->gdev->dev, - "MAC address %pM successfully registered on device %s\n", - card->dev->dev_addr, card->dev->name); - } else { - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - switch (rc) { - case -EEXIST: - dev_warn(&card->gdev->dev, - "MAC address %pM already exists\n", mac); - break; - case -EPERM: - dev_warn(&card->gdev->dev, - "MAC address %pM is not authorized\n", mac); - break; - } - } - return rc; -} - -static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) -{ - int rc; - - QETH_CARD_TEXT(card, 2, "L2Delmac"); - if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) - return 0; - rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, - IPA_CMD_DELVMAC)); - if (rc == 0) - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - return rc; -} - static int qeth_l2_request_initial_mac(struct qeth_card *card) { int rc = 0; @@ -794,14 +785,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { if (mac->disp_flag == QETH_DISP_ADDR_DELETE) { - if (!mac->is_uc) - rc = qeth_l2_send_delgroupmac(card, - mac->mac_addr); - else { - rc = qeth_l2_send_setdelmac(card, mac->mac_addr, - IPA_CMD_DELVMAC); - } - + qeth_l2_remove_mac(card, mac); hash_del(&mac->hnode); kfree(mac); @@ -1193,21 +1177,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) /* softsetup */ QETH_DBF_TEXT(SETUP, 2, "softsetp"); - rc = qeth_send_startlan(card); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - if (rc == 0xe080) { - dev_warn(&card->gdev->dev, - "The LAN is offline\n"); - card->lan_online = 0; - goto contin; - } - rc = -ENODEV; - goto out_remove; - } else - card->lan_online = 1; - -contin: if ((card->info.type == QETH_CARD_TYPE_OSD) || (card->info.type == QETH_CARD_TYPE_OSX)) { rc = qeth_l2_start_ipassists(card); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ac37d050e765..06d0addcc058 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3227,21 +3227,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) /* softsetup */ QETH_DBF_TEXT(SETUP, 2, "softsetp"); - rc = qeth_send_startlan(card); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - if (rc == 0xe080) { - dev_warn(&card->gdev->dev, - "The LAN is offline\n"); - card->lan_online = 0; - goto contin; - } - rc = -ENODEV; - goto out_remove; - } else - card->lan_online = 1; - -contin: rc = qeth_l3_setadapter_parms(card); if (rc) QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 0e00a5ce0f00..05e9471e3d3f 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -250,9 +250,6 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev, if (card->info.type != QETH_CARD_TYPE_IQD) return -EPERM; - if (card->state == CARD_STATE_DOWN) - return -EPERM; - memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid)); EBCASC(tmp_hsuid, 8); return sprintf(buf, "%s\n", tmp_hsuid); @@ -692,15 +689,15 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipaddr *ipaddr; - struct hlist_node *tmp; char addr_str[40]; + int str_len = 0; int entry_len; /* length of 1 entry string, differs between v4 and v6 */ - int i = 0; + int i; entry_len = (proto == QETH_PROT_IPV4)? 
12 : 40; entry_len += 2; /* \n + terminator */ spin_lock_bh(&card->ip_lock); - hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) { + hash_for_each(card->ip_htable, i, ipaddr, hnode) { if (ipaddr->proto != proto) continue; if (ipaddr->type != QETH_IP_TYPE_VIPA) @@ -708,16 +705,17 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card, /* String must not be longer than PAGE_SIZE. So we check if * string length gets near PAGE_SIZE. Then we can savely display * the next IPv6 address (worst case, compared to IPv4) */ - if ((PAGE_SIZE - i) <= entry_len) + if ((PAGE_SIZE - str_len) <= entry_len) break; qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str); - i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n", + addr_str); } spin_unlock_bh(&card->ip_lock); - i += snprintf(buf + i, PAGE_SIZE - i, "\n"); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n"); - return i; + return str_len; } static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev, @@ -854,15 +852,15 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipaddr *ipaddr; - struct hlist_node *tmp; char addr_str[40]; + int str_len = 0; int entry_len; /* length of 1 entry string, differs between v4 and v6 */ - int i = 0; + int i; entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; entry_len += 2; /* \n + terminator */ spin_lock_bh(&card->ip_lock); - hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) { + hash_for_each(card->ip_htable, i, ipaddr, hnode) { if (ipaddr->proto != proto) continue; if (ipaddr->type != QETH_IP_TYPE_RXIP) @@ -870,16 +868,17 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card, /* String must not be longer than PAGE_SIZE. So we check if * string length gets near PAGE_SIZE. Then we can savely display * the next IPv6 address (worst case, compared to IPv4) */ - if ((PAGE_SIZE - i) <= entry_len) + if ((PAGE_SIZE - str_len) <= entry_len) break; qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str); - i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n", + addr_str); } spin_unlock_bh(&card->ip_lock); - i += snprintf(buf + i, PAGE_SIZE - i, "\n"); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n"); - return i; + return str_len; } static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev, diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 639ed4e6afd1..070c4da95f48 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -145,6 +145,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; #define CCW_CMD_WRITE_CONF 0x21 #define CCW_CMD_WRITE_STATUS 0x31 #define CCW_CMD_READ_VQ_CONF 0x32 +#define CCW_CMD_READ_STATUS 0x72 #define CCW_CMD_SET_IND_ADAPTER 0x73 #define CCW_CMD_SET_VIRTIO_REV 0x83 @@ -160,6 +161,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; #define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000 #define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000 #define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000 +#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000 #define VIRTIO_CCW_INTPARM_MASK 0xffff0000 static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev) @@ -452,7 +454,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) * This may happen on device detach. 
*/ if (ret && (ret != -ENODEV)) - dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d", + dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n", ret, index); vring_del_virtqueue(vq); @@ -892,6 +894,28 @@ out_free: static u8 virtio_ccw_get_status(struct virtio_device *vdev) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); + u8 old_status = *vcdev->status; + struct ccw1 *ccw; + + if (vcdev->revision < 1) + return *vcdev->status; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return old_status; + + ccw->cmd_code = CCW_CMD_READ_STATUS; + ccw->flags = 0; + ccw->count = sizeof(*vcdev->status); + ccw->cda = (__u32)(unsigned long)vcdev->status; + ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS); +/* + * If the channel program failed (should only happen if the device + * was hotunplugged, and then we clean up via the machine check + * handler anyway), vcdev->status was not overwritten and we just + * return the old status, which is fine. +*/ + kfree(ccw); return *vcdev->status; } @@ -920,7 +944,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status) kfree(ccw); } -static struct virtio_config_ops virtio_ccw_config_ops = { +static const struct virtio_config_ops virtio_ccw_config_ops = { .get_features = virtio_ccw_get_features, .finalize_features = virtio_ccw_finalize_features, .get = virtio_ccw_get_config, @@ -987,6 +1011,7 @@ static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev, case VIRTIO_CCW_DOING_READ_CONFIG: case VIRTIO_CCW_DOING_WRITE_CONFIG: case VIRTIO_CCW_DOING_WRITE_STATUS: + case VIRTIO_CCW_DOING_READ_STATUS: case VIRTIO_CCW_DOING_SET_VQ: case VIRTIO_CCW_DOING_SET_IND: case VIRTIO_CCW_DOING_SET_CONF_IND: diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index d9e15210b110..5caf5f3ff642 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -64,9 +64,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS; u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; -#define BFAD_FW_FILE_CB "cbfw-3.2.3.0.bin" -#define BFAD_FW_FILE_CT "ctfw-3.2.3.0.bin" -#define BFAD_FW_FILE_CT2 "ct2fw-3.2.3.0.bin" +#define BFAD_FW_FILE_CB "cbfw-3.2.5.1.bin" +#define BFAD_FW_FILE_CT "ctfw-3.2.5.1.bin" +#define BFAD_FW_FILE_CT2 "ct2fw-3.2.5.1.bin" static u32 *bfad_load_fwimg(struct pci_dev *pdev); static void bfad_free_fwimg(void); diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index a9a00169ad91..b2e8c0dfc79c 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -3363,7 +3363,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job) struct bfad_fcxp *drv_fcxp; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; - struct fc_bsg_request *bsg_request = bsg_request; + struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; uint32_t command_type = bsg_request->msgcode; unsigned long flags; diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index f9e862093a25..cfcfff48e8e1 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -58,7 +58,7 @@ #ifdef BFA_DRIVER_VERSION #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION #else -#define BFAD_DRIVER_VERSION "3.2.25.0" +#define BFAD_DRIVER_VERSION "3.2.25.1" #endif #define BFAD_PROTO_NAME FCPI_NAME diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 9ddc9200e0a4..9e4b7709043e 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ 
-248,6 +248,7 @@ struct fnic { struct completion *remove_wait; /* device remove thread blocks */ atomic_t in_flight; /* io counter */ + bool internal_reset_inprogress; u32 _reserved; /* fill hole */ unsigned long state_flags; /* protected by host lock */ enum fnic_state state; diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 2544a37ece0a..adb3d5871e74 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -2581,6 +2581,19 @@ int fnic_host_reset(struct scsi_cmnd *sc) unsigned long wait_host_tmo; struct Scsi_Host *shost = sc->device->host; struct fc_lport *lp = shost_priv(shost); + struct fnic *fnic = lport_priv(lp); + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->internal_reset_inprogress == 0) { + fnic->internal_reset_inprogress = 1; + } else { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "host reset in progress skipping another host reset\n"); + return SUCCESS; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); /* * If fnic_reset is successful, wait for fabric login to complete @@ -2601,6 +2614,9 @@ int fnic_host_reset(struct scsi_cmnd *sc) } } + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->internal_reset_inprogress = 0; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); return ret; } diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 3d3768aaab4f..99b747cedbeb 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -46,6 +46,7 @@ #define INITIAL_SRP_LIMIT 800 #define DEFAULT_MAX_SECTORS 256 +#define MAX_TXU 1024 * 1024 static uint max_vdma_size = MAX_H_COPY_RDMA; @@ -1391,7 +1392,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi, } info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token, - GFP_KERNEL); + GFP_ATOMIC); if (!info) { dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", iue->target); @@ -1443,7 +1444,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi, info->mad_version = cpu_to_be32(MAD_VERSION_1); info->os_type = cpu_to_be32(LINUX); memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu)); - info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE); + info->port_max_txu[0] = cpu_to_be32(MAX_TXU); dma_wmb(); rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn, @@ -1509,7 +1510,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue) } cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token, - GFP_KERNEL); + GFP_ATOMIC); if (!cap) { dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", iue->target); @@ -3585,7 +3586,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd) 1, 1); if (rc) { pr_err("srp_transfer_data() failed: %d\n", rc); - return -EAGAIN; + return -EIO; } /* * We now tell TCM to add this WRITE CDB directly into the TCM storage diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 236e4e51d161..7b6bd8ed0d0b 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) } else { buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; lpfc_els_free_data(phba, buf_ptr1); + elsiocb->context2 = NULL; } } if (elsiocb->context3) { buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; lpfc_els_free_bpl(phba, buf_ptr); + elsiocb->context3 = NULL; } lpfc_sli_release_iocbq(phba, elsiocb); return 0; diff --git 
a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 4faa7672fc1d..a78a3df68f67 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -5954,18 +5954,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) free_vfi_bmask: kfree(phba->sli4_hba.vfi_bmask); + phba->sli4_hba.vfi_bmask = NULL; free_xri_ids: kfree(phba->sli4_hba.xri_ids); + phba->sli4_hba.xri_ids = NULL; free_xri_bmask: kfree(phba->sli4_hba.xri_bmask); + phba->sli4_hba.xri_bmask = NULL; free_vpi_ids: kfree(phba->vpi_ids); + phba->vpi_ids = NULL; free_vpi_bmask: kfree(phba->vpi_bmask); + phba->vpi_bmask = NULL; free_rpi_ids: kfree(phba->sli4_hba.rpi_ids); + phba->sli4_hba.rpi_ids = NULL; free_rpi_bmask: kfree(phba->sli4_hba.rpi_bmask); + phba->sli4_hba.rpi_bmask = NULL; err_exit: return rc; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 394fe1338d09..dcb33f4fa687 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -393,6 +393,7 @@ struct MPT3SAS_TARGET { * @eedp_enable: eedp support enable bit * @eedp_type: 0(type_1), 1(type_2), 2(type_3) * @eedp_block_length: block size + * @ata_command_pending: SATL passthrough outstanding for device */ struct MPT3SAS_DEVICE { struct MPT3SAS_TARGET *sas_target; @@ -404,6 +405,17 @@ struct MPT3SAS_DEVICE { u8 ignore_delay_remove; /* Iopriority Command Handling */ u8 ncq_prio_enable; + /* + * Bug workaround for SATL handling: the mpt2/3sas firmware + * doesn't return BUSY or TASK_SET_FULL for subsequent + * commands while a SATL pass through is in operation as the + * spec requires, it simply does nothing with them until the + * pass through completes, causing them possibly to timeout if + * the passthrough is a long executing command (like format or + * secure erase). This variable allows us to do the right + * thing while a SATL command is pending. + */ + unsigned long ata_command_pending; }; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index b5c966e319d3..75f3fce1c867 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -3899,9 +3899,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, } } -static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) +static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) { - return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); + struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; + + if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) + return 0; + + if (pending) + return test_and_set_bit(0, &priv->ata_command_pending); + + clear_bit(0, &priv->ata_command_pending); + return 0; } /** @@ -3925,9 +3934,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) if (!scmd) continue; count++; - if (ata_12_16_cmd(scmd)) - scsi_internal_device_unblock(scmd->device, - SDEV_RUNNING); + _scsih_set_satl_pending(scmd, false); mpt3sas_base_free_smid(ioc, smid); scsi_dma_unmap(scmd); if (ioc->pci_error_recovery) @@ -4063,13 +4070,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (ioc->logging_level & MPT_DEBUG_SCSI) scsi_print_command(scmd); - /* - * Lock the device for any subsequent command until command is - * done. 
- */ - if (ata_12_16_cmd(scmd)) - scsi_internal_device_block(scmd->device); - sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { scmd->result = DID_NO_CONNECT << 16; @@ -4083,6 +4083,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) return 0; } + /* + * Bug work around for firmware SATL handling. The loop + * is based on atomic operations and ensures consistency + * since we're lockless at this point + */ + do { + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { + scmd->result = SAM_STAT_BUSY; + scmd->scsi_done(scmd); + return 0; + } + } while (_scsih_set_satl_pending(scmd, true)); + sas_target_priv_data = sas_device_priv_data->sas_target; /* invalid device handle */ @@ -4650,8 +4663,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) if (scmd == NULL) return 1; - if (ata_12_16_cmd(scmd)) - scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); + _scsih_set_satl_pending(scmd, false); mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig index 23ca8a274586..21331453db7b 100644 --- a/drivers/scsi/qedi/Kconfig +++ b/drivers/scsi/qedi/Kconfig @@ -1,6 +1,6 @@ config QEDI tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support" - depends on PCI && SCSI + depends on PCI && SCSI && UIO depends on QED select SCSI_ISCSI_ATTRS select QED_LL2 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 47eb4d545d13..f201f4099620 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj, struct qla_hw_data *ha = vha->hw; ssize_t rval = 0; + mutex_lock(&ha->optrom_mutex); + if (ha->optrom_state != QLA_SREADING) - return 0; + goto out; - mutex_lock(&ha->optrom_mutex); rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, ha->optrom_region_size); + +out: mutex_unlock(&ha->optrom_mutex); return rval; @@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; - if (ha->optrom_state != QLA_SWRITING) + mutex_lock(&ha->optrom_mutex); + + if (ha->optrom_state != QLA_SWRITING) { + mutex_unlock(&ha->optrom_mutex); return -EINVAL; - if (off > ha->optrom_region_size) + } + if (off > ha->optrom_region_size) { + mutex_unlock(&ha->optrom_mutex); return -ERANGE; + } if (off + count > ha->optrom_region_size) count = ha->optrom_region_size - off; - mutex_lock(&ha->optrom_mutex); memcpy(&ha->optrom_buffer[off], buf, count); mutex_unlock(&ha->optrom_mutex); @@ -753,7 +761,6 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj, struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); int type; - int rval = 0; port_id_t did; type = simple_strtol(buf, NULL, 10); @@ -767,7 +774,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type); - rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); + qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); return count; } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index f7df01b76714..5b1287a63c49 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1556,7 +1556,8 @@ typedef struct { struct atio { uint8_t entry_type; /* Entry type. */ uint8_t entry_count; /* Entry count. 
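The mpt3sas rework above replaces blocking the whole device during an ATA_12/ATA_16 passthrough with a per-device ata_command_pending bit: queuecommand returns SAM_STAT_BUSY while the bit is set, and a passthrough claims the bit with test_and_set_bit() in a retry loop so the check and the claim are race-free. A C11 userspace analogue of that gate (names are illustrative):

/* Admit a command only if no passthrough owns the gate; a passthrough
 * claims the gate atomically, retrying if it loses the race. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint ata_pending;	/* bit 0 mirrors ata_command_pending */

/* Returns the previous value of bit 0, like test_and_set_bit(). */
static bool claim_satl(void)
{
	return atomic_fetch_or(&ata_pending, 1u) & 1u;
}

static void release_satl(void)
{
	atomic_fetch_and(&ata_pending, ~1u);
}

static bool queue_command(bool is_passthrough)
{
	do {
		if (atomic_load(&ata_pending) & 1u)
			return false;		/* SAM_STAT_BUSY in the driver */
		if (!is_passthrough)
			return true;		/* ordinary command proceeds */
	} while (claim_satl());			/* retry if we lost the race */
	return true;				/* we now own the gate */
}

int main(void)
{
	printf("%d\n", queue_command(true));	/* 1: gate claimed */
	printf("%d\n", queue_command(false));	/* 0: busy */
	release_satl();
	return 0;
}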
*/ - uint8_t data[58]; + __le16 attr_n_length; + uint8_t data[56]; uint32_t signature; #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ }; @@ -2732,7 +2733,7 @@ struct isp_operations { #define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7) #define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1) -#define QLA_MSIX_DEFAULT 0x00 +#define QLA_BASE_VECTORS 2 /* default + RSP */ #define QLA_MSIX_RSP_Q 0x01 #define QLA_ATIO_VECTOR 0x02 #define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03 @@ -2754,7 +2755,6 @@ struct qla_msix_entry { uint16_t entry; char name[30]; void *handle; - struct irq_affinity_notify irq_notify; int cpuid; }; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 632d5f30386a..7b6317c8c2e9 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1191,7 +1191,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) /* Wait for soft-reset to complete. */ RD_REG_DWORD(®->ctrl_status); - for (cnt = 0; cnt < 6000000; cnt++) { + for (cnt = 0; cnt < 60; cnt++) { barrier(); if ((RD_REG_DWORD(®->ctrl_status) & CSRX_ISP_SOFT_RESET) == 0) @@ -1234,7 +1234,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) RD_REG_DWORD(®->hccr); RD_REG_WORD(®->mailbox0); - for (cnt = 6000000; RD_REG_WORD(®->mailbox0) != 0 && + for (cnt = 60; RD_REG_WORD(®->mailbox0) != 0 && rval == QLA_SUCCESS; cnt--) { barrier(); if (cnt) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 5093ca9b02ec..dc88a09f9043 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -19,10 +19,6 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, sts_entry_t *); -static void qla_irq_affinity_notify(struct irq_affinity_notify *, - const cpumask_t *); -static void qla_irq_affinity_release(struct kref *); - /** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. @@ -2496,6 +2492,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) if (pkt->entry_status & RF_BUSY) res = DID_BUS_BUSY << 16; + if (pkt->entry_type == NOTIFY_ACK_TYPE && + pkt->handle == QLA_TGT_SKIP_HANDLE) + return; + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { sp->done(ha, sp, res); @@ -2572,14 +2572,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (!vha->flags.online) return; - if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) { - /* if kernel does not notify qla of IRQ's CPU change, - * then set it here. 
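The qla24xx_reset_risc() hunks cap the two busy-wait loops at 60 iterations instead of 6,000,000; the per-iteration delay is not visible in this hunk, so presumably it grew to keep the total wait comparable while hammering the register far less. The general shape of such a bounded readback poll, as a standalone sketch (poll_until_clear() and its parameters are illustrative):

/* Retry a readback a fixed number of times with a fixed delay, then
 * give up; the caller maps failure to a timeout status. */
#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

static bool poll_until_clear(const atomic_uint *reg, unsigned int mask,
			     int attempts, long delay_ns)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = delay_ns };

	while (attempts-- > 0) {
		if ((atomic_load(reg) & mask) == 0)
			return true;
		nanosleep(&ts, NULL);
	}
	return false;
}

int main(void)
{
	atomic_uint fake_reg = 0;	/* already clear: succeeds at once */
	return poll_until_clear(&fake_reg, 0x1, 60, 10000) ? 0 : 1;
}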
- */ - rsp->msix->cpuid = smp_processor_id(); - ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid; - } - while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { pkt = (struct sts_entry_24xx *)rsp->ring_ptr; @@ -3018,13 +3010,20 @@ static struct qla_init_msix_entry qla82xx_msix_entries[] = { static int qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) { -#define MIN_MSIX_COUNT 2 int i, ret; struct qla_msix_entry *qentry; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + struct irq_affinity desc = { + .pre_vectors = QLA_BASE_VECTORS, + }; + + if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) + desc.pre_vectors++; + + ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS, + ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, + &desc); - ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count, - PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); if (ret < 0) { ql_log(ql_log_fatal, vha, 0x00c7, "MSI-X: Failed to enable support, " @@ -3069,13 +3068,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) qentry->have_irq = 0; qentry->in_use = 0; qentry->handle = NULL; - qentry->irq_notify.notify = qla_irq_affinity_notify; - qentry->irq_notify.release = qla_irq_affinity_release; - qentry->cpuid = -1; } /* Enable MSI-X vectors for the base queue */ - for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) { + for (i = 0; i < QLA_BASE_VECTORS; i++) { qentry = &ha->msix_entries[i]; qentry->handle = rsp; rsp->msix = qentry; @@ -3093,18 +3089,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) goto msix_register_fail; qentry->have_irq = 1; qentry->in_use = 1; - - /* Register for CPU affinity notification. */ - irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify); - - /* Schedule work (ie. trigger a notification) to read cpu - * mask for this specific irq. - * kref_get is required because - * irq_affinity_notify() will do - * kref_put(). 
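The qla2xxx MSI-X conversion above drops the hand-rolled irq_affinity_notify machinery in favor of pci_alloc_irq_vectors_affinity() with struct irq_affinity.pre_vectors set to QLA_BASE_VECTORS (plus one for the ATIO vector in target mode), so the base vectors are excluded from automatic spreading. A rough userspace picture of what that split means, with round-robin standing in for managed-affinity spreading (all names and counts are illustrative):

#include <stdio.h>

static void assign_vectors(int total, int pre, int ncpus)
{
	for (int v = 0; v < total; v++) {
		if (v < pre)
			printf("vector %d: base (not spread)\n", v);
		else
			printf("vector %d: cpu %d\n", v, (v - pre) % ncpus);
	}
}

int main(void)
{
	int pre = 2;		/* QLA_BASE_VECTORS: default + RSP */
	int tgt_mode = 1;	/* assumption for the example */

	if (tgt_mode)
		pre++;		/* ATIO vector also excluded from spread */
	assign_vectors(8, pre, 4);
	return 0;
}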
- */ - kref_get(&qentry->irq_notify.kref); - schedule_work(&qentry->irq_notify.work); } /* @@ -3301,49 +3285,3 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, msix->handle = qpair; return ret; } - - -/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */ -static void qla_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) -{ - struct qla_msix_entry *e = - container_of(notify, struct qla_msix_entry, irq_notify); - struct qla_hw_data *ha; - struct scsi_qla_host *base_vha; - struct rsp_que *rsp = e->handle; - - /* user is recommended to set mask to just 1 cpu */ - e->cpuid = cpumask_first(mask); - - ha = rsp->hw; - base_vha = pci_get_drvdata(ha->pdev); - - ql_dbg(ql_dbg_init, base_vha, 0xffff, - "%s: host %ld : vector %d cpu %d \n", __func__, - base_vha->host_no, e->vector, e->cpuid); - - if (e->have_irq) { - if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && - (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) { - ha->tgt.rspq_vector_cpuid = e->cpuid; - ql_dbg(ql_dbg_init, base_vha, 0xffff, - "%s: host%ld: rspq vector %d cpu %d runtime change\n", - __func__, base_vha->host_no, e->vector, e->cpuid); - } - } -} - -static void qla_irq_affinity_release(struct kref *ref) -{ - struct irq_affinity_notify *notify = - container_of(ref, struct irq_affinity_notify, kref); - struct qla_msix_entry *e = - container_of(notify, struct qla_msix_entry, irq_notify); - struct rsp_que *rsp = e->handle; - struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev); - - ql_dbg(ql_dbg_init, base_vha, 0xffff, - "%s: host%ld: vector %d cpu %d\n", __func__, - base_vha->host_no, e->vector, e->cpuid); -} diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 2819ceb96041..67f64db390b0 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -10,7 +10,7 @@ #include <linux/delay.h> #include <linux/gfp.h> -struct rom_cmd { +static struct rom_cmd { uint16_t cmd; } rom_cmds[] = { { MBC_LOAD_RAM }, @@ -101,12 +101,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) return QLA_FUNCTION_TIMEOUT; } - /* if PCI error, then avoid mbx processing.*/ - if (test_bit(PCI_ERR, &base_vha->dpc_flags)) { + /* if PCI error, then avoid mbx processing.*/ + if (test_bit(PCI_ERR, &base_vha->dpc_flags)) { ql_log(ql_log_warn, vha, 0x1191, "PCI error, exiting.\n"); return QLA_FUNCTION_TIMEOUT; - } + } reg = ha->iobase; io_lock_on = base_vha->flags.init_done; @@ -323,20 +323,33 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) } } else { - uint16_t mb0; - uint32_t ictrl; + uint16_t mb[8]; + uint32_t ictrl, host_status, hccr; uint16_t w; if (IS_FWI2_CAPABLE(ha)) { - mb0 = RD_REG_WORD(®->isp24.mailbox0); + mb[0] = RD_REG_WORD(®->isp24.mailbox0); + mb[1] = RD_REG_WORD(®->isp24.mailbox1); + mb[2] = RD_REG_WORD(®->isp24.mailbox2); + mb[3] = RD_REG_WORD(®->isp24.mailbox3); + mb[7] = RD_REG_WORD(®->isp24.mailbox7); ictrl = RD_REG_DWORD(®->isp24.ictrl); + host_status = RD_REG_DWORD(®->isp24.host_status); + hccr = RD_REG_DWORD(®->isp24.hccr); + + ql_log(ql_log_warn, vha, 0x1119, + "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " + "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n", + command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3], + mb[7], host_status, hccr); + } else { - mb0 = RD_MAILBOX_REG(ha, ®->isp, 0); + mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0); ictrl = RD_REG_WORD(®->isp.ictrl); + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, + "MBX Command timeout for cmd %x, 
iocontrol=%x jiffies=%lx " + "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]); } - ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, - "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " - "mb[0]=0x%x\n", command, ictrl, jiffies, mb0); ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); /* Capture FW dump only, if PCI device active */ @@ -684,7 +697,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - int configured_count; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a, "Entered %s.\n", __func__); @@ -707,7 +719,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval); } else { - configured_count = mcp->mb[11]; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, "Done %s.\n", __func__); } diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 54380b434b30..0a1723cc08cf 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -42,6 +42,11 @@ static int qla82xx_crb_table_initialized; (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) +const int MD_MIU_TEST_AGT_RDDATA[] = { + 0x410000A8, 0x410000AC, + 0x410000B8, 0x410000BC +}; + static void qla82xx_crb_addr_transform_setup(void) { qla82xx_crb_addr_transform(XDMA); diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 6201dce3553b..77624eac95a4 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -1176,8 +1176,7 @@ struct qla82xx_md_entry_queue { #define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 #define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 -static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, - 0x410000B8, 0x410000BC }; +extern const int MD_MIU_TEST_AGT_RDDATA[4]; #define CRB_NIU_XG_PAUSE_CTL_P0 0x1 #define CRB_NIU_XG_PAUSE_CTL_P1 0x8 diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c index 007192d7bad8..dc1ec9b61027 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.c +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -15,6 +15,23 @@ #define TIMEOUT_100_MS 100 +static const uint32_t qla8044_reg_tbl[] = { + QLA8044_PEG_HALT_STATUS1, + QLA8044_PEG_HALT_STATUS2, + QLA8044_PEG_ALIVE_COUNTER, + QLA8044_CRB_DRV_ACTIVE, + QLA8044_CRB_DEV_STATE, + QLA8044_CRB_DRV_STATE, + QLA8044_CRB_DRV_SCRATCH, + QLA8044_CRB_DEV_PART_INFO1, + QLA8044_CRB_IDC_VER_MAJOR, + QLA8044_FW_VER_MAJOR, + QLA8044_FW_VER_MINOR, + QLA8044_FW_VER_SUB, + QLA8044_CMDPEG_STATE, + QLA8044_ASIC_TEMP, +}; + /* 8044 Flash Read/Write functions */ uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h index 02fe3c4cdf55..83c1b7e17c80 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.h +++ b/drivers/scsi/qla2xxx/qla_nx2.h @@ -535,23 +535,6 @@ enum qla_regs { #define CRB_CMDPEG_CHECK_RETRY_COUNT 60 #define CRB_CMDPEG_CHECK_DELAY 500 -static const uint32_t qla8044_reg_tbl[] = { - QLA8044_PEG_HALT_STATUS1, - QLA8044_PEG_HALT_STATUS2, - QLA8044_PEG_ALIVE_COUNTER, - QLA8044_CRB_DRV_ACTIVE, - QLA8044_CRB_DEV_STATE, - QLA8044_CRB_DRV_STATE, - QLA8044_CRB_DRV_SCRATCH, - QLA8044_CRB_DEV_PART_INFO1, - QLA8044_CRB_IDC_VER_MAJOR, - QLA8044_FW_VER_MAJOR, - QLA8044_FW_VER_MINOR, - QLA8044_FW_VER_SUB, - QLA8044_CMDPEG_STATE, - QLA8044_ASIC_TEMP, -}; - /* MiniDump Structures */ /* Driver_code is for driver to write some info about the entry diff --git a/drivers/scsi/qla2xxx/qla_os.c 
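Both the MD_MIU_TEST_AGT_RDDATA and qla8044_reg_tbl moves above are the same cleanup: a static const array defined in a header gives every including translation unit its own copy, plus defined-but-unused warnings, so the definition moves into one .c file behind an extern declaration. The pattern, collapsed into a single file so the example stands alone (the file names in the comment are illustrative):

/* regs.h would carry:  extern const int md_rddata[4];
 * regs.c would carry the one definition below. */
#include <stdio.h>

extern const int md_rddata[4];			/* header declaration */
const int md_rddata[4] = { 0x410000A8, 0x410000AC,
			   0x410000B8, 0x410000BC };	/* one definition */

int main(void)
{
	printf("0x%08X\n", (unsigned)md_rddata[0]);
	return 0;
}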
b/drivers/scsi/qla2xxx/qla_os.c index 8521cfe302e9..0a000ecf0881 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -466,7 +466,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) continue; rsp = ha->rsp_q_map[cnt]; - clear_bit(cnt, ha->req_qid_map); + clear_bit(cnt, ha->rsp_qid_map); ha->rsp_q_map[cnt] = NULL; spin_unlock_irqrestore(&ha->hardware_lock, flags); qla2x00_free_rsp_que(ha, rsp); @@ -3662,7 +3662,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, sizeof(struct ct6_dsd), 0, SLAB_HWCACHE_ALIGN, NULL); if (!ctx_cachep) - goto fail_free_gid_list; + goto fail_free_srb_mempool; } ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, ctx_cachep); @@ -3815,7 +3815,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long), GFP_KERNEL); if (!ha->loop_id_map) - goto fail_async_pd; + goto fail_loop_id_map; else { qla2x00_set_reserved_loop_ids(ha); ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, @@ -3824,6 +3824,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, return 0; +fail_loop_id_map: + dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); fail_async_pd: dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); fail_ex_init_cb: @@ -3851,6 +3853,10 @@ fail_free_ms_iocb: dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); ha->ms_iocb = NULL; ha->ms_iocb_dma = 0; + + if (ha->sns_cmd) + dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), + ha->sns_cmd, ha->sns_cmd_dma); fail_dma_pool: if (IS_QLA82XX(ha) || ql2xenabledif) { dma_pool_destroy(ha->fcp_cmnd_dma_pool); @@ -3868,10 +3874,12 @@ fail_free_nvram: kfree(ha->nvram); ha->nvram = NULL; fail_free_ctx_mempool: - mempool_destroy(ha->ctx_mempool); + if (ha->ctx_mempool) + mempool_destroy(ha->ctx_mempool); ha->ctx_mempool = NULL; fail_free_srb_mempool: - mempool_destroy(ha->srb_mempool); + if (ha->srb_mempool) + mempool_destroy(ha->srb_mempool); ha->srb_mempool = NULL; fail_free_gid_list: dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index bff9689f5ca9..e4fda84b959e 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) { struct qla_hw_data *ha = vha->hw; struct qla_tgt_sess *sess = NULL; - uint32_t unpacked_lun, lun = 0; uint16_t loop_id; int res = 0; struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; - struct atio_from_isp *a = (struct atio_from_isp *)iocb; unsigned long flags; loop_id = le16_to_cpu(n->u.isp24.nport_handle); @@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) "loop_id %d)\n", vha->host_no, sess, sess->port_name, mcmd, loop_id); - lun = a->u.isp24.fcp_cmnd.lun; - unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); - - return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd, - iocb, QLA24XX_MGMT_SEND_NACK); + return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK); } /* ha->tgt.sess_lock supposed to be held on entry */ @@ -3067,7 +3061,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, pkt->entry_type = NOTIFY_ACK_TYPE; pkt->entry_count = 1; - pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + pkt->handle = QLA_TGT_SKIP_HANDLE; nack = (struct nack_to_isp *)pkt; nack->ox_id = 
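The qla2x00_mem_alloc() fixes above keep the goto unwind chain honest: a new fail_loop_id_map label frees async_pd, and the sns_cmd buffer is now released on the dma-pool failure path, so each label undoes exactly the allocations made before the jump. The convention in miniature:

/* Reverse-order unwind with goto labels: each label frees the
 * allocation that succeeded just before the failing one. */
#include <stdlib.h>

static int setup(void)
{
	void *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto fail;
	b = malloc(16);
	if (!b)
		goto fail_a;
	c = malloc(16);
	if (!c)
		goto fail_b;

	free(c); free(b); free(a);	/* normal teardown for the demo */
	return 0;

fail_b:
	free(b);
fail_a:
	free(a);
fail:
	return -1;
}

int main(void) { return setup(); }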
ntfy->ox_id; @@ -3110,6 +3104,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, #if 0 /* Todo */ if (rc == -ENOMEM) qlt_alloc_qfull_cmd(vha, imm, 0, 0); +#else + if (rc) { + } #endif goto done; } @@ -6457,12 +6454,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) if (!vha->flags.online) return; - while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { + while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || + fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) { pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; cnt = pkt->u.raw.entry_count; - qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt, - ha_locked); + if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) { + /* + * This packet is corrupted. The header + payload + * can not be trusted. There is no point in passing + * it further up. + */ + ql_log(ql_log_warn, vha, 0xffff, + "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", + pkt->u.isp24.fcp_hdr.s_id, + be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), + le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); + + adjust_corrupted_atio(pkt); + qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0); + } else { + qlt_24xx_atio_pkt_all_vps(vha, + (struct atio_from_isp *)pkt, ha_locked); + } for (i = 0; i < cnt; i++) { ha->tgt.atio_ring_index++; @@ -6545,6 +6559,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) /* Disable Full Login after LIP */ nv->host_p &= cpu_to_le32(~BIT_10); + + /* + * clear BIT 15 explicitly as we have seen at least + * a couple of instances where this was set and this + * was causing the firmware to not be initialized. + */ + nv->firmware_options_1 &= cpu_to_le32(~BIT_15); /* Enable target PRLI control */ nv->firmware_options_2 |= cpu_to_le32(BIT_14); } else { @@ -6560,9 +6581,6 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) return; } - /* out-of-order frames reassembly */ - nv->firmware_options_3 |= BIT_6|BIT_9; - if (ha->tgt.enable_class_2) { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = @@ -6629,11 +6647,17 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) /* Disable ini mode, if requested */ if (!qla_ini_mode_enabled(vha)) nv->firmware_options_1 |= cpu_to_le32(BIT_5); - /* Disable Full Login after LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_13); /* Enable initial LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_9); + /* + * clear BIT 15 explicitly as we have seen at + * least a couple of instances where this was set + * and this was causing the firmware to not be + * initialized. + */ + nv->firmware_options_1 &= cpu_to_le32(~BIT_15); if (ql2xtgt_tape_enable) /* Enable FC tape support */ nv->firmware_options_2 |= cpu_to_le32(BIT_12); @@ -6658,9 +6682,6 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) return; } - /* out-of-order frames reassembly */ - nv->firmware_options_3 |= BIT_6|BIT_9; - if (ha->tgt.enable_class_2) { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index f26c5f60eedd..0824a8164a24 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -427,13 +427,33 @@ struct atio_from_isp { struct { uint8_t entry_type; /* Entry type. */ uint8_t entry_count; /* Entry count. 
*/ - uint8_t data[58]; + __le16 attr_n_length; +#define FCP_CMD_LENGTH_MASK 0x0fff +#define FCP_CMD_LENGTH_MIN 0x38 + uint8_t data[56]; uint32_t signature; #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ } raw; } u; } __packed; +static inline int fcpcmd_is_corrupted(struct atio *atio) +{ + if (atio->entry_type == ATIO_TYPE7 && + (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) < + FCP_CMD_LENGTH_MIN)) + return 1; + else + return 0; +} + +/* adjust corrupted atio so we won't trip over the same entry again. */ +static inline void adjust_corrupted_atio(struct atio_from_isp *atio) +{ + atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN); + atio->u.isp24.fcp_cmnd.add_cdb_len = 0; +} + #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ /* diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 36935c9ed669..8a58ef3adab4 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -433,6 +433,18 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, count++; } } + } else if (QLA_TGT_MODE_ENABLED() && + ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) { + struct qla_hw_data *ha = vha->hw; + struct atio *atr = ha->tgt.atio_ring; + + if (atr || !buf) { + length = ha->tgt.atio_q_length; + qla27xx_insert16(0, buf, len); + qla27xx_insert16(length, buf, len); + qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len); + count++; + } } else { ql_dbg(ql_dbg_misc, vha, 0xd026, "%s: unknown queue %x\n", __func__, ent->t263.queue_type); @@ -676,6 +688,18 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, count++; } } + } else if (QLA_TGT_MODE_ENABLED() && + ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) { + struct qla_hw_data *ha = vha->hw; + struct atio *atr = ha->tgt.atio_ring_ptr; + + if (atr || !buf) { + qla27xx_insert16(0, buf, len); + qla27xx_insert16(1, buf, len); + qla27xx_insert32(ha->tgt.atio_q_in ? 
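The target-mode changes above teach the ATIO ring consumer to validate entries before use: fcpcmd_is_corrupted() rejects a type-7 entry whose 12-bit length field is below FCP_CMD_LENGTH_MIN (0x38) before anyone dereferences the payload, and adjust_corrupted_atio() rewrites the length so the terminated entry can be stepped over without tripping the same check again. The validate-before-use shape, with an illustrative layout (not the real ATIO format):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define ENTRY_TYPE_CMD	0x06	/* illustrative stand-in for ATIO_TYPE7 */
#define LEN_MASK	0x0fff
#define LEN_MIN		0x38

struct ring_entry {
	uint8_t type;
	uint16_t attr_n_length;	/* low 12 bits carry the payload length */
};

static bool entry_is_corrupted(const struct ring_entry *e)
{
	return e->type == ENTRY_TYPE_CMD &&
	       (e->attr_n_length & LEN_MASK) < LEN_MIN;
}

int main(void)
{
	struct ring_entry bad = { .type = ENTRY_TYPE_CMD,
				  .attr_n_length = 0x10 };

	if (entry_is_corrupted(&bad)) {
		bad.attr_n_length = LEN_MIN;	/* adjust so we can skip it */
		printf("terminating corrupted entry\n");
	}
	return 0;
}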
+ readl(ha->tgt.atio_q_in) : 0, buf, len); + count++; + } } else { ql_dbg(ql_dbg_misc, vha, 0xd02f, "%s: unknown queue %x\n", __func__, ent->t274.queue_type); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 6643f6fc7795..d925910be761 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -1800,7 +1800,7 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item, { return sprintf(page, "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on " - UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, + UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname, utsname()->machine); } @@ -1906,7 +1906,7 @@ static int tcm_qla2xxx_register_configfs(void) int ret; pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on " - UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, + UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname, utsname()->machine); ret = target_register_template(&tcm_qla2xxx_ops); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 37e026a4823d..cf8430be183b 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -1,7 +1,6 @@ #include <target/target_core_base.h> #include <linux/btree.h> -#define TCM_QLA2XXX_VERSION "v0.1" /* length of ASCII WWPNs including pad */ #define TCM_QLA2XXX_NAMELEN 32 /* diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index c35b6de4ca64..e9e1e141af9c 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1018,7 +1018,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb) count = blk_rq_map_sg(req->q, req, sdb->table.sgl); BUG_ON(count > sdb->table.nents); sdb->table.nents = count; - sdb->length = blk_rq_bytes(req); + sdb->length = blk_rq_payload_bytes(req); return BLKPREP_OK; } @@ -2893,7 +2893,7 @@ scsi_internal_device_block(struct scsi_device *sdev) * request queue. */ if (q->mq_ops) { - blk_mq_stop_hw_queues(q); + blk_mq_quiesce_queue(q); } else { spin_lock_irqsave(q->queue_lock, flags); blk_stop_queue(q); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b1933041da39..1f5d92a25a49 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -871,11 +871,11 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) cmd->allowed = SD_MAX_RETRIES; /* - * For WRITE_SAME the data transferred in the DATA IN buffer is + * For WRITE SAME the data transferred via the DATA OUT buffer is * different from the amount of data actually written to the target. * - * We set up __data_len to the amount of data transferred from the - * DATA IN buffer so that blk_rq_map_sg set up the proper S/G list + * We set up __data_len to the amount of data transferred via the + * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list * to transfer a single sector of data first, but then reset it to * the amount of data to be written right after so that the I/O path * knows how much to actually write. 
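The sd_read_block_characteristics() rework above makes the zoned-model decision explicit: TYPE_ZBC devices are host-managed regardless of the VPD bits, otherwise bits 4-5 of byte 8 mark host-aware, and drive-managed devices are treated as regular disks. The same decision as a standalone function (the enum names are illustrative stand-ins for BLK_ZONED_*):

#include <stdio.h>
#include <stdint.h>

enum zoned_model { ZONED_NONE, ZONED_HA, ZONED_HM };

static enum zoned_model classify(int is_zbc_type, uint8_t vpd_byte8)
{
	if (is_zbc_type)
		return ZONED_HM;		/* host-managed */
	if (((vpd_byte8 >> 4) & 3) == 1)
		return ZONED_HA;		/* host-aware */
	return ZONED_NONE;	/* drive-managed looks like a normal disk */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(1, 0x00),	/* 2: HM */
	       classify(0, 0x10),	/* 1: HA */
	       classify(0, 0x20));	/* 0: none */
	return 0;
}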
@@ -2600,7 +2600,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) if (sdp->broken_fua) { sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); sdkp->DPOFUA = 0; - } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { + } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && + !sdkp->device->use_16_for_rw) { sd_first_printk(KERN_NOTICE, sdkp, "Uses READ/WRITE(6), disabling FUA\n"); sdkp->DPOFUA = 0; @@ -2783,13 +2784,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); } - sdkp->zoned = (buffer[8] >> 4) & 3; - if (sdkp->zoned == 1) - q->limits.zoned = BLK_ZONED_HA; - else if (sdkp->device->type == TYPE_ZBC) + if (sdkp->device->type == TYPE_ZBC) { + /* Host-managed */ q->limits.zoned = BLK_ZONED_HM; - else - q->limits.zoned = BLK_ZONED_NONE; + } else { + sdkp->zoned = (buffer[8] >> 4) & 3; + if (sdkp->zoned == 1) + /* Host-aware */ + q->limits.zoned = BLK_ZONED_HA; + else + /* + * Treat drive-managed devices as + * regular block devices. + */ + q->limits.zoned = BLK_ZONED_NONE; + } if (blk_queue_is_zoned(q) && sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 8c9a35c91705..50adabbb5808 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev, ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); - if (scsi_is_sas_rphy(&sdev->sdev_gendev)) + if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent)) efd.addr = sas_get_address(sdev); if (efd.addr) { diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c index 396b32dca074..7cf70aaec0ba 100644 --- a/drivers/scsi/snic/snic_main.c +++ b/drivers/scsi/snic/snic_main.c @@ -591,6 +591,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!pool) { SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n"); + ret = -ENOMEM; goto err_free_res; } @@ -601,6 +602,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!pool) { SNIC_HOST_ERR(shost, "max sgl pool creation failed\n"); + ret = -ENOMEM; goto err_free_dflt_sgl_pool; } @@ -611,6 +613,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!pool) { SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n"); + ret = -ENOMEM; goto err_free_max_sgl_pool; } diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index 8823cc81ae45..5bb376009d98 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) if (IS_ERR(task)) { dev_err(dev, "can't create rproc_boot thread\n"); + ret = PTR_ERR(task); goto err_put_rproc; } diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index ec4aa252d6e8..2922a9908302 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -378,6 +378,7 @@ config SPI_FSL_SPI config SPI_FSL_DSPI tristate "Freescale DSPI controller" select REGMAP_MMIO + depends on HAS_DMA depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST help This enables support for the Freescale DSPI controller in master diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index e89da0af45d2..0314c6b9e044 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -800,7 +800,7 @@ static int a3700_spi_probe(struct 
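The snic_probe() and wkup_m3_ipc_probe() fixes above are one bug class: an allocation or thread creation failed but the code jumped to cleanup with ret still holding 0 (or never taking PTR_ERR()), so probe reported success on a dead device. In miniature (names are illustrative):

#include <stdlib.h>
#include <errno.h>

static void *create_pool(int fail) { return fail ? NULL : malloc(32); }

static int probe(int fail)
{
	int ret = 0;
	void *pool;

	pool = create_pool(fail);
	if (!pool) {
		ret = -ENOMEM;	/* the fix: without this, probe "succeeds" */
		goto err;
	}
	free(pool);
err:
	return ret;
}

int main(void) { return probe(1) == -ENOMEM ? 0 : 1; }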
platform_device *pdev) struct spi_master *master; struct a3700_spi *spi; u32 num_cs = 0; - int ret = 0; + int irq, ret = 0; master = spi_alloc_master(dev, sizeof(*spi)); if (!master) { @@ -825,7 +825,7 @@ static int a3700_spi_probe(struct platform_device *pdev) master->unprepare_message = a3700_spi_unprepare_message; master->set_cs = a3700_spi_set_cs; master->flags = SPI_MASTER_HALF_DUPLEX; - master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL | + master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL | SPI_RX_QUAD | SPI_TX_QUAD); platform_set_drvdata(pdev, master); @@ -846,12 +846,13 @@ static int a3700_spi_probe(struct platform_device *pdev) goto error; } - spi->irq = platform_get_irq(pdev, 0); - if (spi->irq < 0) { - dev_err(dev, "could not get irq: %d\n", spi->irq); + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "could not get irq: %d\n", irq); ret = -ENXIO; goto error; } + spi->irq = irq; init_completion(&spi->done); diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index 319225d7e761..6ab4c7700228 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev) SPI_ENGINE_VERSION_MAJOR(version), SPI_ENGINE_VERSION_MINOR(version), SPI_ENGINE_VERSION_PATCH(version)); - return -ENODEV; + ret = -ENODEV; + goto err_put_master; } spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk"); diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index d36c11b73a35..02fb96797ac8 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = t->rx_buf; t->rx_dma = dma_map_single(&spi->dev, buf, t->len, DMA_FROM_DEVICE); - if (!t->rx_dma) { + if (dma_mapping_error(&spi->dev, !t->rx_dma)) { ret = -EFAULT; goto err_rx_map; } @@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = (void *)t->tx_buf; t->tx_dma = dma_map_single(&spi->dev, buf, t->len, DMA_TO_DEVICE); - if (!t->tx_dma) { + if (dma_mapping_error(&spi->dev, t->tx_dma)) { ret = -EFAULT; goto err_tx_map; } diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index e31971f91475..837cb8d0bac6 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -274,11 +274,11 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) static void mid_spi_dma_stop(struct dw_spi *dws) { if (test_bit(TX_BUSY, &dws->dma_chan_busy)) { - dmaengine_terminate_all(dws->txchan); + dmaengine_terminate_sync(dws->txchan); clear_bit(TX_BUSY, &dws->dma_chan_busy); } if (test_bit(RX_BUSY, &dws->dma_chan_busy)) { - dmaengine_terminate_all(dws->rxchan); + dmaengine_terminate_sync(dws->rxchan); clear_bit(RX_BUSY, &dws->dma_chan_busy); } } diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index b715a26a9148..054012f87567 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = { static int dw_spi_debugfs_init(struct dw_spi *dws) { - dws->debugfs = debugfs_create_dir("dw_spi", NULL); + char name[128]; + + snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev)); + dws->debugfs = debugfs_create_dir(name, NULL); if (!dws->debugfs) return -ENOMEM; diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index dd7b5b47291d..d6239fa718be 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1690,6 +1690,7 @@ static 
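Two related robustness fixes above: spi-davinci switches to dma_mapping_error() instead of comparing the handle against zero (though note the rx-side call as shown in this hunk passes !t->rx_dma rather than the handle itself, which would defeat the check; the tx side shows the conventional form), and spi-armada-3700 reads platform_get_irq() into a signed temporary before assigning it, since a < 0 test on an unsigned struct field can never fire. The latter in miniature (types and names are illustrative):

#include <stdio.h>

struct spi_ctrl { unsigned int irq; };

static int fake_get_irq(void) { return -6; }	/* -ENXIO-style failure */

int main(void)
{
	struct spi_ctrl spi;
	int irq = fake_get_irq();	/* check in a signed temporary */

	if (irq < 0) {
		printf("could not get irq: %d\n", irq);
		return 1;
	}
	spi.irq = (unsigned int)irq;	/* assign only after validation */
	return 0;
}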
int pxa2xx_spi_probe(struct platform_device *pdev) pxa2xx_spi_write(drv_data, SSCR1, tmp); tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8); pxa2xx_spi_write(drv_data, SSCR0, tmp); + break; default: tmp = SSCR1_RxTresh(RX_THRESH_DFLT) | SSCR1_TxTresh(TX_THRESH_DFLT); diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 0012ad02e569..1f00eeb0b5a3 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -973,14 +973,16 @@ static const struct sh_msiof_chipdata r8a779x_data = { }; static const struct of_device_id sh_msiof_match[] = { - { .compatible = "renesas,sh-msiof", .data = &sh_data }, { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data }, { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data }, + { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data }, + { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data }, + { .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */ {}, }; MODULE_DEVICE_TABLE(of, sh_msiof_match); diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c index 1fbd495e5e63..c7652c35be19 100644 --- a/drivers/staging/ks7010/ks_hostif.c +++ b/drivers/staging/ks7010/ks_hostif.c @@ -461,7 +461,6 @@ void hostif_data_indication(struct ks_wlan_private *priv) skb->protocol = eth_type_trans(skb, skb->dev); priv->nstats.rx_packets++; priv->nstats.rx_bytes += rx_ind_size; - skb->dev->last_rx = jiffies; netif_rx(skb); } else { priv->nstats.rx_dropped++; @@ -494,7 +493,6 @@ void hostif_data_indication(struct ks_wlan_private *priv) skb->protocol = eth_type_trans(skb, skb->dev); priv->nstats.rx_packets++; priv->nstats.rx_bytes += rx_ind_size; - skb->dev->last_rx = jiffies; netif_rx(skb); } else { priv->nstats.rx_dropped++; diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index f84069ffa8c6..781ef623233e 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c @@ -155,7 +155,6 @@ static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code, skb_reserve(skb, BYTE_OFFSET); skb_put(skb, length); skb->protocol = eth_type_trans(skb, skb->dev); - skb->dev->last_rx = jiffies; netif_rx(skb); /* Fill rx ring */ skb_data = xlr_alloc_skb(); diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index f0900d1c4d7b..fc849d4a1b5d 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -429,7 +429,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) if (rx_count < budget) { /* No more work */ - napi_complete(napi); + napi_complete_done(napi, rx_count); enable_irq(rx_group->irq); } return rx_count; diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index e5ba7d1a809f..43a77745e6fb 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -1375,7 +1375,6 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb, ieee->LinkDetectInfo.NumRecvDataInPeriod++; ieee->LinkDetectInfo.NumRxOkInPeriod++; } - dev->last_rx = jiffies; /* Data frame - extract src/dst addresses */ 
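The pxa2xx_spi_probe() one-liner above adds the break that keeps the quirk case from falling through into the default case and clobbering the register value it just computed. The bug shape:

#include <stdio.h>

static int configure(int quirk)
{
	int tmp = 0;

	switch (quirk) {
	case 1:
		tmp = 42;	/* quirk-specific setup */
		break;		/* without this, default overwrites tmp */
	default:
		tmp = 7;	/* generic defaults */
		break;
	}
	return tmp;
}

int main(void)
{
	printf("%d\n", configure(1));	/* 42 with the break, 7 without */
	return 0;
}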
rtllib_rx_extract_addr(ieee, hdr, dst, src, bssid); diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c index 82f654305414..b1f2fdfcb718 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c @@ -1103,11 +1103,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, stats = hostap_get_stats(dev); from_assoc_ap = 1; } -#endif - - dev->last_rx = jiffies; -#ifdef NOT_YET if ((ieee->iw_mode == IW_MODE_MASTER || ieee->iw_mode == IW_MODE_REPEAT) && !from_assoc_ap) { diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c index c1f674f5268c..ca3743d273e0 100644 --- a/drivers/staging/unisys/visornic/visornic_main.c +++ b/drivers/staging/unisys/visornic/visornic_main.c @@ -1657,7 +1657,7 @@ static int visornic_poll(struct napi_struct *napi, int budget) /* If there aren't any more packets to receive stop the poll */ if (rx_count < budget) - napi_complete(napi); + napi_complete_done(napi, rx_count); return rx_count; } diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 4fe037aeef12..6134eba5cad4 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -3409,7 +3409,6 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb) &usbin->rxfrm.desc.frame_control, hdrlen); skb->dev = wlandev->netdev; - skb->dev->last_rx = jiffies; /* And set the frame length properly */ skb_trim(skb, data_len + hdrlen); diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index 73fcf07254fe..53dbbd69e552 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c @@ -252,7 +252,6 @@ static int p80211_convert_to_ether(struct wlandevice *wlandev, } if (skb_p80211_to_ether(wlandev, wlandev->ethconv, skb) == 0) { - skb->dev->last_rx = jiffies; wlandev->netdev->stats.rx_packets++; wlandev->netdev->stats.rx_bytes += skb->len; netif_rx_ni(skb); @@ -287,7 +286,6 @@ static void p80211netdev_rx_bh(unsigned long arg) skb->ip_summed = CHECKSUM_NONE; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_80211_RAW); - dev->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 7dfefd66df93..1cadc9eefa21 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1693,6 +1693,10 @@ void transport_generic_request_failure(struct se_cmd *cmd, case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: + case TCM_TOO_MANY_TARGET_DESCS: + case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: + case TCM_TOO_MANY_SEGMENT_DESCS: + case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: break; case TCM_OUT_OF_RESOURCES: sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -2808,6 +2812,26 @@ static const struct sense_info sense_info_table[] = { .key = ILLEGAL_REQUEST, .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ }, + [TCM_TOO_MANY_TARGET_DESCS] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ + }, + [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ + }, + [TCM_TOO_MANY_SEGMENT_DESCS] = { + .key = 
ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ + }, + [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ + }, [TCM_PARAMETER_LIST_LENGTH_ERROR] = { .key = ILLEGAL_REQUEST, .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 37d5caebffa6..d828b3b5000b 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -53,18 +53,13 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) return 0; } -static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, - bool src) +static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn, + struct se_device **found_dev) { struct se_device *se_dev; - unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; + unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; int rc; - if (src) - dev_wwn = &xop->dst_tid_wwn[0]; - else - dev_wwn = &xop->src_tid_wwn[0]; - mutex_lock(&g_device_mutex); list_for_each_entry(se_dev, &g_device_list, g_dev_node) { @@ -78,15 +73,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op if (rc != 0) continue; - if (src) { - xop->dst_dev = se_dev; - pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located" - " se_dev\n", xop->dst_dev); - } else { - xop->src_dev = se_dev; - pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located" - " se_dev\n", xop->src_dev); - } + *found_dev = se_dev; + pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); rc = target_depend_item(&se_dev->dev_group.cg_item); if (rc != 0) { @@ -110,7 +98,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op } static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, - unsigned char *p, bool src) + unsigned char *p, unsigned short cscd_index) { unsigned char *desc = p; unsigned short ript; @@ -155,7 +143,13 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op return -EINVAL; } - if (src) { + if (cscd_index != xop->stdi && cscd_index != xop->dtdi) { + pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor " + "dest\n", cscd_index); + return 0; + } + + if (cscd_index == xop->stdi) { memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); /* * Determine if the source designator matches the local device @@ -167,10 +161,15 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source" " received xop\n", xop->src_dev); } - } else { + } + + if (cscd_index == xop->dtdi) { memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); /* - * Determine if the destination designator matches the local device + * Determine if the destination designator matches the local + * device. If @cscd_index corresponds to both source (stdi) and + * destination (dtdi), or dtdi comes after stdi, then + * XCOL_DEST_RECV_OP wins. 
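The xcopy refactor above changes target_xcopy_locate_se_dev_e4() from a bool src flag with side effects on two different xop fields into an explicit input WWN plus a struct se_device ** out-parameter, so each call site states what it is looking up and where the result goes. The shape of the change (the device list and names are illustrative):

#include <string.h>
#include <stdio.h>

struct dev { char wwn[8]; };

static struct dev g_devs[] = { { "naa.1" }, { "naa.2" } };

static int locate_dev(const char *wwn, struct dev **found)
{
	for (size_t i = 0; i < sizeof(g_devs) / sizeof(g_devs[0]); i++) {
		if (strcmp(g_devs[i].wwn, wwn) == 0) {
			*found = &g_devs[i];
			return 0;
		}
	}
	return -1;	/* maps to COPY ABORTED sense in the driver */
}

int main(void)
{
	struct dev *dst = NULL;

	if (locate_dev("naa.2", &dst) == 0)
		printf("found %s\n", dst->wwn);
	return 0;
}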
*/ if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0], XCOPY_NAA_IEEE_REGEX_LEN)) { @@ -190,20 +189,23 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, { struct se_device *local_dev = se_cmd->se_dev; unsigned char *desc = p; - int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0; + int offset = tdll % XCOPY_TARGET_DESC_LEN, rc; + unsigned short cscd_index = 0; unsigned short start = 0; - bool src = true; *sense_ret = TCM_INVALID_PARAMETER_LIST; if (offset != 0) { pr_err("XCOPY target descriptor list length is not" " multiple of %d\n", XCOPY_TARGET_DESC_LEN); + *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE; return -EINVAL; } - if (tdll > 64) { + if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) { pr_err("XCOPY target descriptor supports a maximum" " two src/dest descriptors, tdll: %hu too large..\n", tdll); + /* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */ + *sense_ret = TCM_TOO_MANY_TARGET_DESCS; return -EINVAL; } /* @@ -215,37 +217,43 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, while (start < tdll) { /* - * Check target descriptor identification with 0xE4 type with - * use VPD 0x83 WWPN matching .. + * Check target descriptor identification with 0xE4 type, and + * compare the current index with the CSCD descriptor IDs in + * the segment descriptor. Use VPD 0x83 WWPN matching .. */ switch (desc[0]) { case 0xe4: rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop, - &desc[0], src); + &desc[0], cscd_index); if (rc != 0) goto out; - /* - * Assume target descriptors are in source -> destination order.. - */ - if (src) - src = false; - else - src = true; start += XCOPY_TARGET_DESC_LEN; desc += XCOPY_TARGET_DESC_LEN; - ret++; + cscd_index++; break; default: pr_err("XCOPY unsupported descriptor type code:" " 0x%02x\n", desc[0]); + *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE; goto out; } } - if (xop->op_origin == XCOL_SOURCE_RECV_OP) - rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true); - else - rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false); + switch (xop->op_origin) { + case XCOL_SOURCE_RECV_OP: + rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn, + &xop->dst_dev); + break; + case XCOL_DEST_RECV_OP: + rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn, + &xop->src_dev); + break; + default: + pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - " + "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi); + rc = -EINVAL; + break; + } /* * If a matching IEEE NAA 0x83 descriptor for the requested device * is not located on this node, return COPY_ABORTED with ASQ/ASQC @@ -262,7 +270,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n", xop->dst_dev, &xop->dst_tid_wwn[0]); - return ret; + return cscd_index; out: return -EINVAL; @@ -284,6 +292,14 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op xop->stdi = get_unaligned_be16(&desc[4]); xop->dtdi = get_unaligned_be16(&desc[6]); + + if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX || + xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) { + pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n", + XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi); + return -EINVAL; + } + pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n", desc_len, xop->stdi, xop->dtdi, dc); @@ -306,15 +322,25 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op static int 
target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, struct xcopy_op *xop, unsigned char *p, - unsigned int sdll) + unsigned int sdll, sense_reason_t *sense_ret) { unsigned char *desc = p; unsigned int start = 0; int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0; + *sense_ret = TCM_INVALID_PARAMETER_LIST; + if (offset != 0) { pr_err("XCOPY segment descriptor list length is not" " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN); + *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE; + return -EINVAL; + } + if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) { + pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too" + " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll); + /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */ + *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS; return -EINVAL; } @@ -335,6 +361,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, default: pr_err("XCOPY unsupported segment descriptor" "type: 0x%02x\n", desc[0]); + *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE; goto out; } } @@ -861,6 +888,16 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) return TCM_UNSUPPORTED_SCSI_OPCODE; } + if (se_cmd->data_length == 0) { + target_complete_cmd(se_cmd, SAM_STAT_GOOD); + return TCM_NO_SENSE; + } + if (se_cmd->data_length < XCOPY_HDR_LEN) { + pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n", + se_cmd->data_length, XCOPY_HDR_LEN); + return TCM_PARAMETER_LIST_LENGTH_ERROR; + } + xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); if (!xop) { pr_err("Unable to allocate xcopy_op\n"); @@ -883,6 +920,12 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) */ tdll = get_unaligned_be16(&p[2]); sdll = get_unaligned_be32(&p[8]); + if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) { + pr_err("XCOPY descriptor list length %u exceeds maximum %u\n", + tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN); + ret = TCM_PARAMETER_LIST_LENGTH_ERROR; + goto out; + } inline_dl = get_unaligned_be32(&p[12]); if (inline_dl != 0) { @@ -890,10 +933,32 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) goto out; } + if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) { + pr_err("XCOPY parameter truncation: data length %u too small " + "for tdll: %hu sdll: %u inline_dl: %u\n", + se_cmd->data_length, tdll, sdll, inline_dl); + ret = TCM_PARAMETER_LIST_LENGTH_ERROR; + goto out; + } + pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x" " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, tdll, sdll, inline_dl); + /* + * skip over the target descriptors until segment descriptors + * have been passed - CSCD ids are needed to determine src and dest. 
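The parser hardening above validates descriptor list lengths before touching the payload: the length must be a multiple of the fixed descriptor size and within the advertised maximum count, and each failure maps to its own ILLEGAL REQUEST additional sense code. In outline (the constants are illustrative stand-ins for XCOPY_SEGMENT_DESC_LEN and RCR_OP_MAX_SG_DESC_COUNT):

#include <stdio.h>

#define DESC_LEN	28
#define MAX_DESCS	8

static int check_list_len(unsigned int sdll)
{
	if (sdll % DESC_LEN != 0)
		return -1;	/* unsupported descriptor type/format */
	if (sdll > MAX_DESCS * DESC_LEN)
		return -2;	/* TOO MANY SEGMENT DESCRIPTORS */
	return sdll / DESC_LEN;	/* number of descriptors to parse */
}

int main(void)
{
	printf("%d %d %d\n",
	       check_list_len(56),	/* 2 descriptors */
	       check_list_len(30),	/* -1: not a multiple */
	       check_list_len(2800));	/* -2: too many */
	return 0;
}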
+ */ + seg_desc = &p[16] + tdll; + + rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, + sdll, &ret); + if (rc <= 0) + goto out; + + pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, + rc * XCOPY_SEGMENT_DESC_LEN); + rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret); if (rc <= 0) goto out; @@ -911,18 +976,8 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, rc * XCOPY_TARGET_DESC_LEN); - seg_desc = &p[16]; - seg_desc += (rc * XCOPY_TARGET_DESC_LEN); - - rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll); - if (rc <= 0) { - xcopy_pt_undepend_remotedev(xop); - goto out; - } transport_kunmap_data_sg(se_cmd); - pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, - rc * XCOPY_SEGMENT_DESC_LEN); INIT_WORK(&xop->xop_work, target_xcopy_do_work); queue_work(xcopy_wq, &xop->xop_work); return TCM_NO_SENSE; diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h index 4d3d4dd060f2..7c0b105cbe1b 100644 --- a/drivers/target/target_core_xcopy.h +++ b/drivers/target/target_core_xcopy.h @@ -1,10 +1,17 @@ #include <target/target_core_base.h> +#define XCOPY_HDR_LEN 16 #define XCOPY_TARGET_DESC_LEN 32 #define XCOPY_SEGMENT_DESC_LEN 28 #define XCOPY_NAA_IEEE_REGEX_LEN 16 #define XCOPY_MAX_SECTORS 1024 +/* + * SPC4r37 6.4.6.1 + * Table 150 — CSCD descriptor ID values + */ +#define XCOPY_CSCD_DESC_ID_LIST_OFF_MAX 0x07FF + enum xcopy_origin_list { XCOL_SOURCE_RECV_OP = 0x01, XCOL_DEST_RECV_OP = 0x02, diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index b811b0fb61b1..4c7796512453 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -118,12 +118,12 @@ struct rockchip_tsadc_chip { void (*control)(void __iomem *reg, bool on); /* Per-sensor methods */ - int (*get_temp)(struct chip_tsadc_table table, + int (*get_temp)(const struct chip_tsadc_table *table, int chn, void __iomem *reg, int *temp); - void (*set_alarm_temp)(struct chip_tsadc_table table, - int chn, void __iomem *reg, int temp); - void (*set_tshut_temp)(struct chip_tsadc_table table, - int chn, void __iomem *reg, int temp); + int (*set_alarm_temp)(const struct chip_tsadc_table *table, + int chn, void __iomem *reg, int temp); + int (*set_tshut_temp)(const struct chip_tsadc_table *table, + int chn, void __iomem *reg, int temp); void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); /* Per-table methods */ @@ -317,6 +317,7 @@ static const struct tsadc_table rk3288_code_table[] = { {3452, 115000}, {3437, 120000}, {3421, 125000}, + {0, 125000}, }; static const struct tsadc_table rk3368_code_table[] = { @@ -397,59 +398,80 @@ static const struct tsadc_table rk3399_code_table[] = { {TSADCV3_DATA_MASK, 125000}, }; -static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table, +static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table, int temp) { int high, low, mid; - u32 error = 0; + unsigned long num; + unsigned int denom; + u32 error = table->data_mask; low = 0; - high = table.length - 1; + high = (table->length - 1) - 1; /* ignore the last check for table */ mid = (high + low) / 2; /* Return mask code data when the temp is over table range */ - if (temp < table.id[low].temp || temp > table.id[high].temp) { - error = table.data_mask; + if (temp < table->id[low].temp || temp > table->id[high].temp) goto exit; - } while (low <= high) { - if (temp == 
table.id[mid].temp) - return table.id[mid].code; - else if (temp < table.id[mid].temp) + if (temp == table->id[mid].temp) + return table->id[mid].code; + else if (temp < table->id[mid].temp) high = mid - 1; else low = mid + 1; mid = (low + high) / 2; } + /* + * The conversion code granularity provided by the table. Let's + * assume that the relationship between temperature and + * analog value between 2 table entries is linear and interpolate + * to produce less granular result. + */ + num = abs(table->id[mid + 1].code - table->id[mid].code); + num *= temp - table->id[mid].temp; + denom = table->id[mid + 1].temp - table->id[mid].temp; + + switch (table->mode) { + case ADC_DECREMENT: + return table->id[mid].code - (num / denom); + case ADC_INCREMENT: + return table->id[mid].code + (num / denom); + default: + pr_err("%s: unknown table mode: %d\n", __func__, table->mode); + return error; + } + exit: - pr_err("Invalid the conversion, error=%d\n", error); + pr_err("%s: invalid temperature, temp=%d error=%d\n", + __func__, temp, error); return error; } -static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, - int *temp) +static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table, + u32 code, int *temp) { unsigned int low = 1; - unsigned int high = table.length - 1; + unsigned int high = table->length - 1; unsigned int mid = (low + high) / 2; unsigned int num; unsigned long denom; - WARN_ON(table.length < 2); + WARN_ON(table->length < 2); - switch (table.mode) { + switch (table->mode) { case ADC_DECREMENT: - code &= table.data_mask; - if (code < table.id[high].code) + code &= table->data_mask; + if (code <= table->id[high].code) return -EAGAIN; /* Incorrect reading */ while (low <= high) { - if (code >= table.id[mid].code && - code < table.id[mid - 1].code) + if (code >= table->id[mid].code && + code < table->id[mid - 1].code) break; - else if (code < table.id[mid].code) + else if (code < table->id[mid].code) low = mid + 1; else high = mid - 1; @@ -458,15 +480,15 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, } break; case ADC_INCREMENT: - code &= table.data_mask; - if (code < table.id[low].code) + code &= table->data_mask; + if (code < table->id[low].code) return -EAGAIN; /* Incorrect reading */ while (low <= high) { - if (code <= table.id[mid].code && - code > table.id[mid - 1].code) + if (code <= table->id[mid].code && + code > table->id[mid - 1].code) break; - else if (code > table.id[mid].code) + else if (code > table->id[mid].code) low = mid + 1; else high = mid - 1; @@ -475,7 +497,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, } break; default: - pr_err("Invalid the conversion table\n"); + pr_err("%s: unknown table mode: %d\n", __func__, table->mode); + return -EINVAL; } /* @@ -484,10 +507,10 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, * temperature between 2 table entries is linear and interpolate * to produce less granular result. 
*/ - num = table.id[mid].temp - table.id[mid - 1].temp; - num *= abs(table.id[mid - 1].code - code); - denom = abs(table.id[mid - 1].code - table.id[mid].code); - *temp = table.id[mid - 1].temp + (num / denom); + num = table->id[mid].temp - table->id[mid - 1].temp; + num *= abs(table->id[mid - 1].code - code); + denom = abs(table->id[mid - 1].code - table->id[mid].code); + *temp = table->id[mid - 1].temp + (num / denom); return 0; } @@ -638,7 +661,7 @@ static void rk_tsadcv3_control(void __iomem *regs, bool enable) writel_relaxed(val, regs + TSADCV2_AUTO_CON); } -static int rk_tsadcv2_get_temp(struct chip_tsadc_table table, +static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table, int chn, void __iomem *regs, int *temp) { u32 val; @@ -648,39 +671,57 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table, return rk_tsadcv2_code_to_temp(table, val, temp); } -static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table, - int chn, void __iomem *regs, int temp) +static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table, + int chn, void __iomem *regs, int temp) { - u32 alarm_value, int_en; + u32 alarm_value; + u32 int_en, int_clr; + + /* + * In some cases, some sensors didn't need the trip points, the + * set_trips will pass {-INT_MAX, INT_MAX} to trigger tsadc alarm + * in the end, ignore this case and disable the high temperature + * interrupt. + */ + if (temp == INT_MAX) { + int_clr = readl_relaxed(regs + TSADCV2_INT_EN); + int_clr &= ~TSADCV2_INT_SRC_EN(chn); + writel_relaxed(int_clr, regs + TSADCV2_INT_EN); + return 0; + } /* Make sure the value is valid */ alarm_value = rk_tsadcv2_temp_to_code(table, temp); - if (alarm_value == table.data_mask) - return; + if (alarm_value == table->data_mask) + return -ERANGE; - writel_relaxed(alarm_value & table.data_mask, + writel_relaxed(alarm_value & table->data_mask, regs + TSADCV2_COMP_INT(chn)); int_en = readl_relaxed(regs + TSADCV2_INT_EN); int_en |= TSADCV2_INT_SRC_EN(chn); writel_relaxed(int_en, regs + TSADCV2_INT_EN); + + return 0; } -static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table, - int chn, void __iomem *regs, int temp) +static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table, + int chn, void __iomem *regs, int temp) { u32 tshut_value, val; /* Make sure the value is valid */ tshut_value = rk_tsadcv2_temp_to_code(table, temp); - if (tshut_value == table.data_mask) - return; + if (tshut_value == table->data_mask) + return -ERANGE; writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn)); /* TSHUT will be valid */ val = readl_relaxed(regs + TSADCV2_AUTO_CON); writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON); + + return 0; } static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs, @@ -883,10 +924,8 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high) dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n", __func__, sensor->id, low, high); - tsadc->set_alarm_temp(tsadc->table, - sensor->id, thermal->regs, high); - - return 0; + return tsadc->set_alarm_temp(&tsadc->table, + sensor->id, thermal->regs, high); } static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) @@ -896,7 +935,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip; int retval; - retval = tsadc->get_temp(tsadc->table, + retval = tsadc->get_temp(&tsadc->table, sensor->id, thermal->regs, out_temp); dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n", 
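Both conversion directions above follow the same pattern: binary-search the calibration table for the pair of entries that bracket the input, then interpolate linearly between them. A self-contained sketch of the code-to-temperature direction for a decreasing-code (ADC_DECREMENT-style) table; the table values here are invented for the demo:

#include <stdio.h>

struct entry { unsigned int code; int temp; /* millicelsius */ };

/* Codes decrease as temperature rises, as in ADC_DECREMENT mode. */
static const struct entry table[] = {
        {3800, -40000}, {3600, 0}, {3400, 40000}, {3200, 80000},
};

#define TABLE_LEN (sizeof(table) / sizeof(table[0]))

static int code_to_temp(unsigned int code, int *temp)
{
        unsigned int low = 1, high = TABLE_LEN - 1;
        unsigned int mid = (low + high) / 2;
        unsigned long num, denom;

        if (code <= table[high].code || code > table[low - 1].code)
                return -1;                      /* outside the table */

        while (low <= high) {
                if (code >= table[mid].code && code < table[mid - 1].code)
                        break;                  /* bracketed by mid-1, mid */
                else if (code < table[mid].code)
                        low = mid + 1;
                else
                        high = mid - 1;
                mid = (low + high) / 2;
        }

        /* Linear interpolation between the bracketing entries. */
        num = (unsigned long)(table[mid].temp - table[mid - 1].temp);
        num *= table[mid - 1].code - code;
        denom = table[mid - 1].code - table[mid].code;
        *temp = table[mid - 1].temp + (int)(num / denom);
        return 0;
}

int main(void)
{
        int t;

        if (!code_to_temp(3500, &t))
                printf("code 3500 -> %d mC\n", t);      /* prints 20000 */
        return 0;
}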
sensor->id, *out_temp, retval); @@ -982,8 +1021,12 @@ rockchip_thermal_register_sensor(struct platform_device *pdev, int error; tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); - tsadc->set_tshut_temp(tsadc->table, id, thermal->regs, + + error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs, thermal->tshut_temp); + if (error) + dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n", + __func__, thermal->tshut_temp, error); sensor->thermal = thermal; sensor->id = id; @@ -1196,9 +1239,13 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev) thermal->chip->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); - thermal->chip->set_tshut_temp(thermal->chip->table, + + error = thermal->chip->set_tshut_temp(&thermal->chip->table, id, thermal->regs, thermal->tshut_temp); + if (error) + dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n", + __func__, thermal->tshut_temp, error); } thermal->chip->control(thermal->regs, true); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 641faab6e24b..655591316a88 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -799,6 +799,11 @@ static void thermal_release(struct device *dev) if (!strncmp(dev_name(dev), "thermal_zone", sizeof("thermal_zone") - 1)) { tz = to_thermal_zone(dev); + kfree(tz->trip_type_attrs); + kfree(tz->trip_temp_attrs); + kfree(tz->trip_hyst_attrs); + kfree(tz->trips_attribute_group.attrs); + kfree(tz->device.groups); kfree(tz); } else if (!strncmp(dev_name(dev), "cooling_device", sizeof("cooling_device") - 1)) { @@ -1305,10 +1310,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) thermal_zone_device_set_polling(tz, 0); - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - kfree(tz->trip_hyst_attrs); - kfree(tz->trips_attribute_group.attrs); thermal_set_governor(tz, NULL); thermal_remove_hwmon_sysfs(tz); @@ -1316,7 +1317,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) idr_destroy(&tz->idr); mutex_destroy(&tz->lock); device_unregister(&tz->device); - kfree(tz->device.groups); } EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 61569a765d9e..76e03a7de9cc 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -675,7 +675,7 @@ static struct console univ8250_console = { .device = uart_console_device, .setup = univ8250_console_setup, .match = univ8250_console_match, - .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_CONSDEV, + .flags = CON_PRINTBUFFER | CON_ANYTIME, .index = -1, .data = &serial8250_reg, }; diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index aa0166b6d450..116436b7fa52 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -5642,17 +5642,15 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev) static void serial8250_io_resume(struct pci_dev *dev) { struct serial_private *priv = pci_get_drvdata(dev); - const struct pciserial_board *board; + struct serial_private *new; if (!priv) return; - board = priv->board; - kfree(priv); - priv = pciserial_init_ports(dev, board); - - if (!IS_ERR(priv)) { - pci_set_drvdata(dev, priv); + new = pciserial_init_ports(dev, priv->board); + if (!IS_ERR(new)) { + pci_set_drvdata(dev, new); + kfree(priv); } } diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 
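The serial8250_io_resume() rework above applies a general rule: build the replacement state first and free the old state only once the replacement exists, so a failed re-init leaves the device with valid (if stale) driver data rather than a dangling drvdata pointer. A minimal sketch of the pattern, with illustrative names:

#include <stdlib.h>
#include <string.h>

struct ports { int config; };

/* Initialize the replacement first; free the old state only on
 * success, mirroring the reordering in serial8250_io_resume(). */
static int reinit_ports(struct ports **slot)
{
        struct ports *old = *slot, *fresh;

        fresh = malloc(sizeof(*fresh));
        if (!fresh)
                return -1;              /* keep the old state intact */

        memcpy(fresh, old, sizeof(*fresh));
        *slot = fresh;
        free(old);
        return 0;
}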
fe4399b41df6..c13fec451d03 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1413,7 +1413,7 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p) * Enable previously disabled RX interrupts. */ if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) { - serial8250_clear_fifos(p); + serial8250_clear_and_reinit_fifos(p); p->ier |= UART_IER_RLSI | UART_IER_RDI; serial_port_out(&p->port, UART_IER, p->ier); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 168b10cad47b..fabbe76203bb 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -481,6 +481,14 @@ static void atmel_stop_tx(struct uart_port *port) /* disable PDC transmit */ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); } + + /* + * Disable the transmitter. + * This is mandatory when DMA is used, otherwise the DMA buffer + * is fully transmitted. + */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS); + /* Disable interrupts */ atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); @@ -513,6 +521,9 @@ static void atmel_start_tx(struct uart_port *port) /* Enable interrupts */ atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); + + /* re-enable the transmitter */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); } /* @@ -798,6 +809,11 @@ static void atmel_complete_tx_dma(void *arg) */ if (!uart_circ_empty(xmit)) atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); + else if ((port->rs485.flags & SER_RS485_ENABLED) && + !(port->rs485.flags & SER_RS485_RX_DURING_TX)) { + /* DMA done, stop TX, start RX for RS485 */ + atmel_start_rx(port); + } spin_unlock_irqrestore(&port->lock, flags); } @@ -900,12 +916,6 @@ static void atmel_tx_dma(struct uart_port *port) desc->callback = atmel_complete_tx_dma; desc->callback_param = atmel_port; atmel_port->cookie_tx = dmaengine_submit(desc); - - } else { - if (port->rs485.flags & SER_RS485_ENABLED) { - /* DMA done, stop TX, start RX for RS485 */ - atmel_start_rx(port); - } } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 52bbd27e93ae..701c085bb19b 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -946,8 +946,8 @@ static const struct input_device_id sysrq_ids[] = { { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, - .evbit = { BIT_MASK(EV_KEY) }, - .keybit = { BIT_MASK(KEY_LEFTALT) }, + .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) }, }, { }, }; diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 9548d3e03453..302b8f5f7d27 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -513,8 +513,8 @@ struct dwc2_core_params { /* Gadget parameters */ bool g_dma; bool g_dma_desc; - u16 g_rx_fifo_size; - u16 g_np_tx_fifo_size; + u32 g_rx_fifo_size; + u32 g_np_tx_fifo_size; u32 g_tx_fifo_size[MAX_EPS_CHANNELS]; }; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index c55db4aa54d6..77c5fcf3a5bf 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -3169,7 +3169,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, /* keep other bits untouched (so e.g. 
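The sysrq_ids fix above matters because evbit/keybit are arrays of unsigned long bitmap words: BIT_MASK() only yields the bit position within a single word, so the mask must be stored at index BIT_WORD(). With KEY_LEFTALT == 56, the old initializer always wrote word 0, which happens to be correct on 64-bit but lands in the wrong word on 32-bit builds. A small standalone demonstration, assuming the usual BIT_MASK/BIT_WORD definitions:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)  (1UL << ((nr) % BITS_PER_LONG))

#define KEY_LEFTALT 56

int main(void)
{
        /* Wrong: always writes word 0, so on 32-bit the mask ends up
         * describing bit 24 of word 0 instead of bit 24 of word 1. */
        unsigned long wrong[2] = { BIT_MASK(KEY_LEFTALT) };

        /* Right: the designated initializer places the mask in the
         * word that actually holds bit 56 on this architecture. */
        unsigned long right[2] = {
                [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT)
        };

        printf("bit %d lives in word %zu\n",
               KEY_LEFTALT, (size_t)BIT_WORD(KEY_LEFTALT));
        (void)wrong; (void)right;
        return 0;
}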
forced modes are not lost) */ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | - GUSBCFG_HNPCAP); + GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK); if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS && (hsotg->params.speed == DWC2_SPEED_PARAM_FULL || @@ -3749,8 +3749,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, __func__, epctrl, epctrl_reg); /* Allocate DMA descriptor chain for non-ctrl endpoints */ - if (using_desc_dma(hsotg)) { - hs_ep->desc_list = dma_alloc_coherent(hsotg->dev, + if (using_desc_dma(hsotg) && !hs_ep->desc_list) { + hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * sizeof(struct dwc2_dma_desc), &hs_ep->desc_list_dma, GFP_ATOMIC); @@ -3872,7 +3872,7 @@ error1: error2: if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) { - dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * + dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * sizeof(struct dwc2_dma_desc), hs_ep->desc_list, hs_ep->desc_list_dma); hs_ep->desc_list = NULL; @@ -3902,14 +3902,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep) return -EINVAL; } - /* Remove DMA memory allocated for non-control Endpoints */ - if (using_desc_dma(hsotg)) { - dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * - sizeof(struct dwc2_dma_desc), - hs_ep->desc_list, hs_ep->desc_list_dma); - hs_ep->desc_list = NULL; - } - epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); spin_lock_irqsave(&hsotg->lock, flags); @@ -4131,7 +4123,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg) /* keep other bits untouched (so e.g. forced modes are not lost) */ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | - GUSBCFG_HNPCAP); + GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK); /* set the PLL on, remove the HNP/SRP and set the PHY */ trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 911c3b36ac06..46d0ad5105e4 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -4367,6 +4367,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd) if (!HCD_HW_ACCESSIBLE(hcd)) goto unlock; + if (hsotg->op_state == OTG_STATE_B_PERIPHERAL) + goto unlock; + if (!hsotg->params.hibernation) goto skip_power_saving; @@ -4489,8 +4492,8 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb, { #ifdef VERBOSE_DEBUG struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); - char *pipetype; - char *speed; + char *pipetype = NULL; + char *speed = NULL; dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb); dev_vdbg(hsotg->dev, " Device address: %d\n", diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 11fe68a4627b..bcd1e19b4076 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -385,16 +385,16 @@ static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param, } /** - * dwc2_set_param_u16() - Set a u16 parameter + * dwc2_set_param_u32() - Set a u32 parameter * * See dwc2_set_param(). */ -static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param, +static void dwc2_set_param_u32(struct dwc2_hsotg *hsotg, u32 *param, bool lookup, char *property, u16 legacy, u16 def, u16 min, u16 max) { dwc2_set_param(hsotg, param, lookup, property, - legacy, def, min, max, 2); + legacy, def, min, max, 4); } /** @@ -1178,12 +1178,12 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg, * auto-detect if the hardware does not support the * default. 
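The dwc2 hunks above switch the endpoint descriptor chain to a device-managed (devres) allocation made at most once, on the first ep_enable; ep_disable no longer frees it, and devres releases it automatically when the device is torn down. A kernel-style sketch of that allocate-once pattern (the helper is illustrative, not the driver's own):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Allocate the DMA descriptor chain at most once; later enables see
 * the buffer left in place by a previous disable and reuse it. */
static int ep_desc_alloc_once(struct device *dev, void **desc_list,
                              dma_addr_t *desc_dma, size_t size)
{
        if (*desc_list)
                return 0;       /* left in place by a previous disable */

        *desc_list = dmam_alloc_coherent(dev, size, desc_dma, GFP_ATOMIC);
        return *desc_list ? 0 : -ENOMEM;
}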
*/ - dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size, + dwc2_set_param_u32(hsotg, &p->g_rx_fifo_size, true, "g-rx-fifo-size", 2048, hw->rx_fifo_size, 16, hw->rx_fifo_size); - dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size, + dwc2_set_param_u32(hsotg, &p->g_np_tx_fifo_size, true, "g-np-tx-fifo-size", 1024, hw->dev_nperio_tx_fifo_size, 16, hw->dev_nperio_tx_fifo_size); diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index e27899bb5706..e956306d9b0f 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -138,7 +138,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev) exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk"); if (IS_ERR(exynos->axius_clk)) { dev_err(dev, "no AXI UpScaler clk specified\n"); - return -ENODEV; + ret = -ENODEV; + goto axius_clk_err; } clk_prepare_enable(exynos->axius_clk); } else { @@ -196,6 +197,7 @@ err3: regulator_disable(exynos->vdd33); err2: clk_disable_unprepare(exynos->axius_clk); +axius_clk_err: clk_disable_unprepare(exynos->susp_clk); clk_disable_unprepare(exynos->clk); return ret; diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 002822d98fda..49d685ad0da9 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -2147,7 +2147,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); if (!cdev->os_desc_req->buf) { ret = -ENOMEM; - kfree(cdev->os_desc_req); + usb_ep_free_request(ep0, cdev->os_desc_req); goto end; } cdev->os_desc_req->context = cdev; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 5e746adc8a2d..5490fc51638e 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1806,7 +1806,7 @@ static void ffs_func_eps_disable(struct ffs_function *func) unsigned long flags; spin_lock_irqsave(&func->ffs->eps_lock, flags); - do { + while (count--) { /* pending requests get nuked */ if (likely(ep->ep)) usb_ep_disable(ep->ep); @@ -1817,7 +1817,7 @@ static void ffs_func_eps_disable(struct ffs_function *func) __ffs_epfile_read_buffer_free(epfile); ++epfile; } - } while (--count); + } spin_unlock_irqrestore(&func->ffs->eps_lock, flags); } @@ -1831,7 +1831,7 @@ static int ffs_func_eps_enable(struct ffs_function *func) int ret = 0; spin_lock_irqsave(&func->ffs->eps_lock, flags); - do { + while(count--) { struct usb_endpoint_descriptor *ds; int desc_idx; @@ -1867,7 +1867,7 @@ static int ffs_func_eps_enable(struct ffs_function *func) ++ep; ++epfile; - } while (--count); + } spin_unlock_irqrestore(&func->ffs->eps_lock, flags); return ret; @@ -3448,12 +3448,12 @@ static void ffs_func_unbind(struct usb_configuration *c, /* cleanup after autoconfig */ spin_lock_irqsave(&func->ffs->eps_lock, flags); - do { + while (count--) { if (ep->ep && ep->req) usb_ep_free_request(ep->ep, ep->req); ep->req = NULL; ++ep; - } while (--count); + } spin_unlock_irqrestore(&func->ffs->eps_lock, flags); kfree(func->eps); func->eps = NULL; diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index f3212db9bc37..12c7687216e6 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret); goto err; } - ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index); + sprintf(ep->name, "ep%d", ep->index); + 
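The f_fs loop conversions above fix a classic counter bug: do { ... } while (--count) executes its body at least once, and when count is 0 the pre-decrement wraps the unsigned counter, so the loop would run roughly four billion extra passes. while (count--) tests before the body. A standalone demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int count, iters;

        /* Old form: the body runs even for count == 0, and --count
         * then wraps to UINT_MAX; cut the runaway loop short here. */
        count = 0;
        iters = 0;
        do {
                if (++iters > 3)
                        break;
        } while (--count);
        printf("do/while body ran %u times for count=0\n", iters);

        /* New form: tested first, so zero endpoints means zero
         * iterations (the wrap of count afterwards is harmless). */
        count = 0;
        iters = 0;
        while (count--)
                ++iters;
        printf("while body ran %u times for count=0\n", iters);
        return 0;
}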
ep->ep.name = ep->name; ep->ep_regs = udc->regs + USBA_EPT_BASE(i); ep->dma_regs = udc->regs + USBA_DMA_BASE(i); diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h index 3e1c9d589dfa..b03b2ebfc53a 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.h +++ b/drivers/usb/gadget/udc/atmel_usba_udc.h @@ -280,6 +280,7 @@ struct usba_ep { void __iomem *ep_regs; void __iomem *dma_regs; void __iomem *fifo; + char name[8]; struct usb_ep ep; struct usba_udc *udc; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index ddfab301e366..e5834dd9bcde 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -165,7 +165,7 @@ static int xhci_plat_probe(struct platform_device *pdev) return -ENODEV; /* Try to set 64-bit DMA first */ - if (WARN_ON(!pdev->dev.dma_mask)) + if (!pdev->dev.dma_mask) /* Platform did not initialize dma_mask */ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 25f522b09dd9..e32029a31ca4 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -913,17 +913,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) spin_lock_irqsave(&xhci->lock, flags); ep->stop_cmds_pending--; - if (xhci->xhc_state & XHCI_STATE_REMOVING) { - spin_unlock_irqrestore(&xhci->lock, flags); - return; - } - if (xhci->xhc_state & XHCI_STATE_DYING) { - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Stop EP timer ran, but another timer marked " - "xHCI as DYING, exiting."); - spin_unlock_irqrestore(&xhci->lock, flags); - return; - } if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Stop EP timer ran, but no command pending, " diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 0c8deb9ed42d..9a0ec116654a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1534,19 +1534,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) xhci_urb_free_priv(urb_priv); return ret; } - if ((xhci->xhc_state & XHCI_STATE_DYING) || - (xhci->xhc_state & XHCI_STATE_HALTED)) { - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Ep 0x%x: URB %p to be canceled on " - "non-responsive xHCI host.", - urb->ep->desc.bEndpointAddress, urb); - /* Let the stop endpoint command watchdog timer (which set this - * state) finish cleaning up the endpoint TD lists. We must - * have caught it in the middle of dropping a lock and giving - * back an URB. 
- */ - goto done; - } ep_index = xhci_get_endpoint_index(&urb->ep->desc); ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index 4fef50e5c8c1..dd70c88419d2 100644 --- a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c @@ -114,6 +114,7 @@ static int musb_regdump_show(struct seq_file *s, void *unused) unsigned i; seq_printf(s, "MUSB (M)HDRC Register Dump\n"); + pm_runtime_get_sync(musb->controller); for (i = 0; i < ARRAY_SIZE(musb_regmap); i++) { switch (musb_regmap[i].size) { @@ -132,6 +133,8 @@ static int musb_regdump_show(struct seq_file *s, void *unused) } } + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); return 0; } @@ -145,7 +148,10 @@ static int musb_test_mode_show(struct seq_file *s, void *unused) struct musb *musb = s->private; unsigned test; + pm_runtime_get_sync(musb->controller); test = musb_readb(musb->mregs, MUSB_TESTMODE); + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); if (test & MUSB_TEST_FORCE_HOST) seq_printf(s, "force host\n"); @@ -194,11 +200,12 @@ static ssize_t musb_test_mode_write(struct file *file, u8 test; char buf[18]; + pm_runtime_get_sync(musb->controller); test = musb_readb(musb->mregs, MUSB_TESTMODE); if (test) { dev_err(musb->controller, "Error: test mode is already set. " "Please do USB Bus Reset to start a new test.\n"); - return count; + goto ret; } memset(buf, 0x00, sizeof(buf)); @@ -234,6 +241,9 @@ static ssize_t musb_test_mode_write(struct file *file, musb_writeb(musb->mregs, MUSB_TESTMODE, test); +ret: + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); return count; } @@ -254,8 +264,13 @@ static int musb_softconnect_show(struct seq_file *s, void *unused) switch (musb->xceiv->otg->state) { case OTG_STATE_A_HOST: case OTG_STATE_A_WAIT_BCON: + pm_runtime_get_sync(musb->controller); + reg = musb_readb(musb->mregs, MUSB_DEVCTL); connect = reg & MUSB_DEVCTL_SESSION ? 
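The musb_debugfs changes above all add the same bracket: take a runtime-PM reference before touching controller registers and drop it via autosuspend afterwards, since a debugfs read can arrive while the controller is runtime-suspended. A kernel-style sketch of the bracket, assuming a device with runtime PM enabled (the helper is illustrative):

#include <linux/io.h>
#include <linux/pm_runtime.h>

/* Hold a runtime-PM reference across the register access, then let
 * autosuspend power the controller back down. */
static u8 read_reg_powered(struct device *dev, void __iomem *reg)
{
        u8 val;

        pm_runtime_get_sync(dev);
        val = readb(reg);
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);

        return val;
}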
1 : 0; + + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); break; default: connect = -1; @@ -284,6 +299,7 @@ static ssize_t musb_softconnect_write(struct file *file, if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; + pm_runtime_get_sync(musb->controller); if (!strncmp(buf, "0", 1)) { switch (musb->xceiv->otg->state) { case OTG_STATE_A_HOST: @@ -314,6 +330,8 @@ static ssize_t musb_softconnect_write(struct file *file, } } + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); return count; } diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 2597b83a8ae2..95aa5233726c 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -95,6 +95,7 @@ struct ch341_private { unsigned baud_rate; /* set baud rate */ u8 line_control; /* set line control value RTS/DTR */ u8 line_status; /* active status of modem control inputs */ + u8 lcr; }; static void ch341_set_termios(struct tty_struct *tty, @@ -112,6 +113,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request, r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, value, index, NULL, 0, DEFAULT_TIMEOUT); + if (r < 0) + dev_err(&dev->dev, "failed to send control message: %d\n", r); return r; } @@ -129,11 +132,24 @@ static int ch341_control_in(struct usb_device *dev, r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, value, index, buf, bufsize, DEFAULT_TIMEOUT); - return r; + if (r < bufsize) { + if (r >= 0) { + dev_err(&dev->dev, + "short control message received (%d < %u)\n", + r, bufsize); + r = -EIO; + } + + dev_err(&dev->dev, "failed to receive control message: %d\n", + r); + return r; + } + + return 0; } -static int ch341_init_set_baudrate(struct usb_device *dev, - struct ch341_private *priv, unsigned ctrl) +static int ch341_set_baudrate_lcr(struct usb_device *dev, + struct ch341_private *priv, u8 lcr) { short a; int r; @@ -156,9 +172,19 @@ static int ch341_init_set_baudrate(struct usb_device *dev, factor = 0x10000 - factor; a = (factor & 0xff00) | divisor; - /* 0x9c is "enable SFR_UART Control register and timer" */ - r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, - 0x9c | (ctrl << 8), a | 0x80); + /* + * CH341A buffers data until a full endpoint-size packet (32 bytes) + * has been received unless bit 7 is set. 
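The reworked ch341_control_in() above treats a short-but-successful control transfer as an I/O error, because every caller requests exactly the number of bytes it needs. The same check in generic form (xfer stands in for the USB control read; names are illustrative):

#include <errno.h>

/* Ask for exactly len bytes; convert a short read into -EIO instead
 * of passing the byte count through to callers that can't use it. */
static int read_exact(int (*xfer)(void *buf, unsigned int len),
                      void *buf, unsigned int len)
{
        int r = xfer(buf, len);

        if (r < 0)
                return r;               /* transport error */
        if ((unsigned int)r < len)
                return -EIO;            /* short read */
        return 0;
}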
+ */ + a |= BIT(7); + + r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a); + if (r) + return r; + + r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, lcr); + if (r) + return r; return r; } @@ -170,9 +196,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control) static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) { + const unsigned int size = 2; char *buffer; int r; - const unsigned size = 8; unsigned long flags; buffer = kmalloc(size, GFP_KERNEL); @@ -183,14 +209,9 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) if (r < 0) goto out; - /* setup the private status if available */ - if (r == 2) { - r = 0; - spin_lock_irqsave(&priv->lock, flags); - priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT; - spin_unlock_irqrestore(&priv->lock, flags); - } else - r = -EPROTO; + spin_lock_irqsave(&priv->lock, flags); + priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT; + spin_unlock_irqrestore(&priv->lock, flags); out: kfree(buffer); return r; @@ -200,9 +221,9 @@ out: kfree(buffer); static int ch341_configure(struct usb_device *dev, struct ch341_private *priv) { + const unsigned int size = 2; char *buffer; int r; - const unsigned size = 8; buffer = kmalloc(size, GFP_KERNEL); if (!buffer) @@ -232,7 +253,7 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv) if (r < 0) goto out; - r = ch341_init_set_baudrate(dev, priv, 0); + r = ch341_set_baudrate_lcr(dev, priv, priv->lcr); if (r < 0) goto out; @@ -258,7 +279,6 @@ static int ch341_port_probe(struct usb_serial_port *port) spin_lock_init(&priv->lock); priv->baud_rate = DEFAULT_BAUD_RATE; - priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; r = ch341_configure(port->serial->dev, priv); if (r < 0) @@ -320,7 +340,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) r = ch341_configure(serial->dev, priv); if (r) - goto out; + return r; if (tty) ch341_set_termios(tty, port, NULL); @@ -330,12 +350,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) if (r) { dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n", __func__, r); - goto out; + return r; } r = usb_serial_generic_open(tty, port); + if (r) + goto err_kill_interrupt_urb; + + return 0; + +err_kill_interrupt_urb: + usb_kill_urb(port->interrupt_in_urb); -out: return r; + return r; } /* Old_termios contains the original termios settings and @@ -356,7 +383,6 @@ static void ch341_set_termios(struct tty_struct *tty, baud_rate = tty_get_baud_rate(tty); - priv->baud_rate = baud_rate; ctrl = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX; switch (C_CSIZE(tty)) { @@ -386,22 +412,25 @@ static void ch341_set_termios(struct tty_struct *tty, ctrl |= CH341_LCR_STOP_BITS_2; if (baud_rate) { - spin_lock_irqsave(&priv->lock, flags); - priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS); - spin_unlock_irqrestore(&priv->lock, flags); - r = ch341_init_set_baudrate(port->serial->dev, priv, ctrl); + priv->baud_rate = baud_rate; + + r = ch341_set_baudrate_lcr(port->serial->dev, priv, ctrl); if (r < 0 && old_termios) { priv->baud_rate = tty_termios_baud_rate(old_termios); tty_termios_copy_hw(&tty->termios, old_termios); + } else if (r == 0) { + priv->lcr = ctrl; } - } else { - spin_lock_irqsave(&priv->lock, flags); - priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS); - spin_unlock_irqrestore(&priv->lock, flags); } - ch341_set_handshake(port->serial->dev, priv->line_control); + spin_lock_irqsave(&priv->lock, flags); + if 
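The ch341_open() changes above adopt the standard unwind idiom: once the interrupt URB has been submitted, any later failure must kill that URB before returning, hence the dedicated err_kill_interrupt_urb label instead of a shared exit that skipped cleanup. The shape of the idiom, with placeholder steps:

#include <stdio.h>

static int grab(const char *what) { printf("grab %s\n", what); return 0; }
static void undo(const char *what) { printf("undo %s\n", what); }

/* Each failure jumps to a label that unwinds exactly what was set up
 * before it; earlier failures simply return. */
static int open_sketch(void)
{
        int r;

        r = grab("configure");
        if (r)
                return r;               /* nothing to unwind yet */

        r = grab("interrupt urb");
        if (r)
                return r;

        r = grab("generic open");
        if (r)
                goto err_kill_urb;      /* urb is live; kill it */

        return 0;

err_kill_urb:
        undo("interrupt urb");
        return r;
}

int main(void) { return open_sketch(); }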
(C_BAUD(tty) == B0) + priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS); + else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) + priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS); + spin_unlock_irqrestore(&priv->lock, flags); + ch341_set_handshake(port->serial->dev, priv->line_control); } static void ch341_break_ctl(struct tty_struct *tty, int break_state) @@ -576,14 +605,23 @@ static int ch341_tiocmget(struct tty_struct *tty) static int ch341_reset_resume(struct usb_serial *serial) { - struct ch341_private *priv; - - priv = usb_get_serial_port_data(serial->port[0]); + struct usb_serial_port *port = serial->port[0]; + struct ch341_private *priv = usb_get_serial_port_data(port); + int ret; /* reconfigure ch341 serial port after bus-reset */ ch341_configure(serial->dev, priv); - return 0; + if (tty_port_initialized(&port->port)) { + ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); + if (ret) { + dev_err(&port->dev, "failed to submit interrupt urb: %d\n", + ret); + return ret; + } + } + + return usb_serial_generic_resume(serial); } static struct usb_serial_driver ch341_device = { diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index 0ee190fc1bf8..6cb45757818f 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -192,10 +192,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port, status_buf, KLSI_STATUSBUF_LEN, 10000 ); - if (rc < 0) - dev_err(&port->dev, "Reading line status failed (error = %d)\n", - rc); - else { + if (rc != KLSI_STATUSBUF_LEN) { + dev_err(&port->dev, "reading line status failed: %d\n", rc); + if (rc >= 0) + rc = -EIO; + } else { status = get_unaligned_le16(status_buf); dev_info(&port->serial->dev->dev, "read status %x %x\n", diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c index 79451f7ef1b7..062c205f0046 100644 --- a/drivers/usb/wusbcore/crypto.c +++ b/drivers/usb/wusbcore/crypto.c @@ -216,7 +216,6 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc, struct scatterlist sg[4], sg_dst; void *dst_buf; size_t dst_size; - const u8 bzero[16] = { 0 }; u8 iv[crypto_skcipher_ivsize(tfm_cbc)]; size_t zero_padding; @@ -261,7 +260,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc, sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1)); sg_set_buf(&sg[2], b, blen); /* 0 if well behaved :) */ - sg_set_buf(&sg[3], bzero, zero_padding); + sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0); sg_init_one(&sg_dst, dst_buf, dst_size); skcipher_request_set_tfm(req, tfm_cbc); diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index c8823578a1b2..128d10282d16 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -1270,6 +1270,10 @@ static int tce_iommu_attach_group(void *iommu_data, /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n", iommu_group_id(iommu_group), iommu_group); */ table_group = iommu_group_get_iommudata(iommu_group); + if (!table_group) { + ret = -ENODEV; + goto unlock_exit; + } if (tce_groups_attached(container) && (!table_group->ops || !table_group->ops->take_ownership || diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 9266271a787a..b3cc33fa6d26 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -36,7 +36,6 @@ #include <linux/uaccess.h> #include <linux/vfio.h> #include <linux/workqueue.h> -#include <linux/pid_namespace.h> #include <linux/mdev.h> #include <linux/notifier.h> @@ -495,8 
+494,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, unsigned long *pfn_base, bool do_accounting) { unsigned long limit; - bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns, - CAP_IPC_LOCK); + bool lock_cap = has_capability(dma->task, CAP_IPC_LOCK); struct mm_struct *mm; int ret; bool rsvd; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 5dc34653274a..c42e9c305134 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -351,6 +351,15 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net, return r; } +static bool vhost_exceeds_maxpend(struct vhost_net *net) +{ + struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; + struct vhost_virtqueue *vq = &nvq->vq; + + return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV + == nvq->done_idx; +} + /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ static void handle_tx(struct vhost_net *net) @@ -394,8 +403,7 @@ static void handle_tx(struct vhost_net *net) /* If more outstanding DMAs, queue the work. * Handle upend_idx wrap around */ - if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND) - % UIO_MAXIOV == nvq->done_idx)) + if (unlikely(vhost_exceeds_maxpend(net))) break; head = vhost_net_tx_get_vq_desc(net, vq, vq->iov, @@ -454,6 +462,16 @@ static void handle_tx(struct vhost_net *net) msg.msg_control = NULL; ubufs = NULL; } + + total_len += len; + if (total_len < VHOST_NET_WEIGHT && + !vhost_vq_avail_empty(&net->dev, vq) && + likely(!vhost_exceeds_maxpend(net))) { + msg.msg_flags |= MSG_MORE; + } else { + msg.msg_flags &= ~MSG_MORE; + } + /* TODO: Check specific error and bomb out unless ENOBUFS? */ err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { @@ -472,7 +490,6 @@ static void handle_tx(struct vhost_net *net) vhost_add_used_and_signal(&net->dev, vq, head, 0); else vhost_zerocopy_signal_used(net, vq); - total_len += len; vhost_net_tx_packet(net); if (unlikely(total_len >= VHOST_NET_WEIGHT)) { vhost_poll_queue(&vq->poll); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 253310cdaaca..fd6c8b66f06f 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -843,7 +843,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) struct iov_iter out_iter, in_iter, prot_iter, data_iter; u64 tag; u32 exp_data_len, data_direction; - unsigned out, in; + unsigned int out = 0, in = 0; int head, ret, prot_bytes; size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp); size_t out_size, in_size; @@ -2087,7 +2087,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = { NULL, }; -static struct target_core_fabric_ops vhost_scsi_ops = { +static const struct target_core_fabric_ops vhost_scsi_ops = { .module = THIS_MODULE, .name = "vhost", .get_fabric_name = vhost_scsi_get_fabric_name, diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index d6432603880c..9f118388a5b7 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -2241,11 +2241,15 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) __virtio16 avail_idx; int r; + if (vq->avail_idx != vq->last_avail_idx) + return false; + r = vhost_get_user(vq, avail_idx, &vq->avail->idx); - if (r) + if (unlikely(r)) return false; + vq->avail_idx = vhost16_to_cpu(vq, avail_idx); - return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx; + return vq->avail_idx == vq->last_avail_idx; } EXPORT_SYMBOL_GPL(vhost_vq_avail_empty); diff --git a/drivers/vhost/vsock.c 
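vhost_exceeds_maxpend() above factors out a wrap-around ring test: with producer and consumer indices that wrap modulo UIO_MAXIOV, the number of outstanding zero-copy buffers is bounded by comparing their modular distance against a threshold. A simplified occupancy check in the same spirit (constants invented for the demo; the kernel's actual test uses a vq-size-dependent threshold):

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8     /* stands in for UIO_MAXIOV */

/* Slots in use between consumer (done) and producer (upend) on a
 * ring of RING_SIZE entries, robust against index wrap-around. */
static unsigned int ring_used(unsigned int done, unsigned int upend)
{
        return (upend + RING_SIZE - done) % RING_SIZE;
}

static bool exceeds_maxpend(unsigned int done, unsigned int upend,
                            unsigned int max_pend)
{
        return ring_used(done, upend) >= max_pend;
}

int main(void)
{
        /* upend wrapped past the end of the ring: 3 slots in use. */
        printf("%d\n", (int)exceeds_maxpend(6, 1, 3));  /* prints 1 */
        return 0;
}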
b/drivers/vhost/vsock.c index bbbf588540ed..ce5e63d2c66a 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work) static int vhost_vsock_start(struct vhost_vsock *vsock) { + struct vhost_virtqueue *vq; size_t i; int ret; @@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) goto err; for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { - struct vhost_virtqueue *vq = &vsock->vqs[i]; + vq = &vsock->vqs[i]; mutex_lock(&vq->mutex); if (!vhost_vq_access_ok(vq)) { ret = -EFAULT; - mutex_unlock(&vq->mutex); goto err_vq; } if (!vq->private_data) { vq->private_data = vsock; - vhost_vq_init_access(vq); + ret = vhost_vq_init_access(vq); + if (ret) + goto err_vq; } mutex_unlock(&vq->mutex); @@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) return 0; err_vq: + vq->private_data = NULL; + mutex_unlock(&vq->mutex); + for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { - struct vhost_virtqueue *vq = &vsock->vqs[i]; + vq = &vsock->vqs[i]; mutex_lock(&vq->mutex); vq->private_data = NULL; diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c index f89245b8ba8e..68a113594808 100644 --- a/drivers/video/fbdev/core/fbcmap.c +++ b/drivers/video/fbdev/core/fbcmap.c @@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap) int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) { - int tooff = 0, fromoff = 0; - int size; + unsigned int tooff = 0, fromoff = 0; + size_t size; if (to->start > from->start) fromoff = to->start - from->start; else tooff = from->start - to->start; - size = to->len - tooff; - if (size > (int) (from->len - fromoff)) - size = from->len - fromoff; - if (size <= 0) + if (fromoff >= from->len || tooff >= to->len) + return -EINVAL; + + size = min_t(size_t, to->len - tooff, from->len - fromoff); + if (size == 0) return -EINVAL; size *= sizeof(u16); @@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to) { - int tooff = 0, fromoff = 0; - int size; + unsigned int tooff = 0, fromoff = 0; + size_t size; if (to->start > from->start) fromoff = to->start - from->start; else tooff = from->start - to->start; - size = to->len - tooff; - if (size > (int) (from->len - fromoff)) - size = from->len - fromoff; - if (size <= 0) + if (fromoff >= from->len || tooff >= to->len) + return -EINVAL; + + size = min_t(size_t, to->len - tooff, from->len - fromoff); + if (size == 0) return -EINVAL; size *= sizeof(u16); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index d47a2fcef818..c71fde5fe835 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -59,6 +59,7 @@ #define pr_fmt(fmt) "virtio-mmio: " fmt #include <linux/acpi.h> +#include <linux/dma-mapping.h> #include <linux/highmem.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev) struct virtio_mmio_device *vm_dev; struct resource *mem; unsigned long magic; + int rc; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) @@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev) } vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); - if (vm_dev->version == 1) + if (vm_dev->version == 1) { writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + /* + * In the 
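The fb_copy_cmap()/fb_cmap_to_user() hardening above replaces signed length arithmetic with unsigned offsets that are bounds-checked before use, and clamps the copy length to what both sides actually have. A self-contained sketch of the computation (struct and helper names are illustrative):

#include <errno.h>
#include <stddef.h>
#include <string.h>

struct cmap { unsigned int start; unsigned int len; unsigned short *data; };

/* Copy the overlapping range of two colormaps; reject maps whose
 * start offsets place one entirely outside the other. */
static int cmap_copy(const struct cmap *from, struct cmap *to)
{
        unsigned int tooff = 0, fromoff = 0;
        size_t n;

        if (to->start > from->start)
                fromoff = to->start - from->start;
        else
                tooff = from->start - to->start;

        /* Both offsets must land inside their respective maps. */
        if (fromoff >= from->len || tooff >= to->len)
                return -EINVAL;

        n = from->len - fromoff;
        if (n > to->len - tooff)
                n = to->len - tooff;
        if (n == 0)
                return -EINVAL;

        memcpy(to->data + tooff, from->data + fromoff,
               n * sizeof(*to->data));
        return 0;
}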
legacy case, ensure our coherently-allocated virtio + * ring will be at an address expressable as a 32-bit PFN. + */ + if (!rc) + dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32 + PAGE_SHIFT)); + } else { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + } + if (rc) + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) + dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n"); + platform_set_drvdata(pdev, vm_dev); return register_virtio_device(&vm_dev->vdev); diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 409aeaa49246..7e38ed79c3fc 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -159,6 +159,13 @@ static bool vring_use_dma_api(struct virtio_device *vdev) if (xen_domain()) return true; + /* + * On ARM-based machines, the DMA ops will do the right thing, + * so always use them with legacy devices. + */ + if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64)) + return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1); + return false; } diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c index 6b5ee896af63..7cc51223db1c 100644 --- a/drivers/vme/bridges/vme_ca91cx42.c +++ b/drivers/vme/bridges/vme_ca91cx42.c @@ -464,7 +464,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled, vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]); pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]); - *pci_base = (dma_addr_t)vme_base + pci_offset; + *pci_base = (dma_addr_t)*vme_base + pci_offset; *size = (unsigned long long)((vme_bound - *vme_base) + granularity); *enabled = 0; diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c index 112ce422dc22..2a165cc8a43c 100644 --- a/drivers/xen/platform-pci.c +++ b/drivers/xen/platform-pci.c @@ -42,6 +42,7 @@ static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; +static uint64_t callback_via; static unsigned long alloc_xen_mmio(unsigned long len) { @@ -54,6 +55,51 @@ static unsigned long alloc_xen_mmio(unsigned long len) return addr; } +static uint64_t get_callback_via(struct pci_dev *pdev) +{ + u8 pin; + int irq; + + irq = pdev->irq; + if (irq < 16) + return irq; /* ISA IRQ */ + + pin = pdev->pin; + + /* We don't know the GSI. Specify the PCI INTx line instead. */ + return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */ + ((uint64_t)pci_domain_nr(pdev->bus) << 32) | + ((uint64_t)pdev->bus->number << 16) | + ((uint64_t)(pdev->devfn & 0xff) << 8) | + ((uint64_t)(pin - 1) & 3); +} + +static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id) +{ + xen_hvm_evtchn_do_upcall(); + return IRQ_HANDLED; +} + +static int xen_allocate_irq(struct pci_dev *pdev) +{ + return request_irq(pdev->irq, do_hvm_evtchn_intr, + IRQF_NOBALANCING | IRQF_TRIGGER_RISING, + "xen-platform-pci", pdev); +} + +static int platform_pci_resume(struct pci_dev *pdev) +{ + int err; + if (!xen_pv_domain()) + return 0; + err = xen_set_callback_via(callback_via); + if (err) { + dev_err(&pdev->dev, "platform_pci_resume failure!\n"); + return err; + } + return 0; +} + static int platform_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -92,6 +138,28 @@ static int platform_pci_probe(struct pci_dev *pdev, platform_mmio = mmio_addr; platform_mmiolen = mmio_len; + /* + * Xen HVM guests always use the vector callback mechanism. 
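The virtio_mmio probe change above negotiates DMA masks with a graceful fallback: prefer 64-bit DMA (constraining the coherent mask to a 32-bit PFN for legacy devices), retry with 32 bits, and only warn rather than fail if neither sticks. A kernel-style sketch of that decision chain (the helper is illustrative):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Prefer 64-bit masks, fall back to 32-bit, and keep probing even if
 * both attempts fail -- mirroring the virtio_mmio probe logic. */
static void setup_dma_masks(struct device *dev, bool legacy)
{
        int rc;

        if (legacy) {
                rc = dma_set_mask(dev, DMA_BIT_MASK(64));
                /* Legacy ring addresses are exchanged as 32-bit PFNs. */
                if (!rc)
                        dma_set_coherent_mask(dev,
                                        DMA_BIT_MASK(32 + PAGE_SHIFT));
        } else {
                rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        }
        if (rc)
                rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (rc)
                dev_warn(dev, "no usable DMA mask, continuing anyway\n");
}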
+ * L1 Dom0 in a nested Xen environment is a PV guest inside an + * HVM environment. It needs the platform-pci driver to get + * notifications from L0 Xen, but it cannot use the vector callback + * as it is not exported by L1 Xen. + */ + if (xen_pv_domain()) { + ret = xen_allocate_irq(pdev); + if (ret) { + dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret); + goto out; + } + callback_via = get_callback_via(pdev); + ret = xen_set_callback_via(callback_via); + if (ret) { + dev_warn(&pdev->dev, "Unable to set the evtchn callback " "err=%d\n", ret); + goto out; + } + } + max_nr_gframes = gnttab_max_grant_frames(); grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); ret = gnttab_setup_auto_xlat_frames(grant_frames); @@ -123,6 +191,9 @@ static struct pci_driver platform_driver = { .name = DRV_NAME, .probe = platform_pci_probe, .id_table = platform_pci_tbl, +#ifdef CONFIG_PM + .resume_early = platform_pci_resume, +#endif }; builtin_pci_driver(platform_driver); diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index f905d6eeb048..f8afc6dcc29f 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -414,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, if (map == SWIOTLB_MAP_ERROR) return DMA_ERROR_CODE; + dev_addr = xen_phys_to_bus(map); xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), dev_addr, map & ~PAGE_MASK, size, dir, attrs); - dev_addr = xen_phys_to_bus(map); /* * Ensure that the address returned is DMA'ble @@ -575,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, sg_dma_len(sgl) = 0; return 0; } + dev_addr = xen_phys_to_bus(map); xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), dev_addr, map & ~PAGE_MASK, sg->length, dir, attrs); - sg->dma_address = xen_phys_to_bus(map); + sg->dma_address = dev_addr; } else { /* we are not interested in the dma_addr returned by * xen_dma_map_page, only in the potential cache flushes executed
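For reference, get_callback_via() earlier in this hunk packs the PCI INTx identity into the 64-bit callback-via value: type 1 in the top byte (HVM_CALLBACK_VIA_TYPE_SHIFT is 56 in the Xen interface headers), then the PCI domain, bus, devfn, and the zero-based INTx pin. A standalone sketch of the encoding (function name is illustrative):

#include <stdint.h>
#include <stdio.h>

#define HVM_CALLBACK_VIA_TYPE_SHIFT 56

/* Encode a PCI INTx line as a callback-via value: type 1, then
 * domain[32:47], bus[16:23], devfn[8:15], pin-1 in the low 2 bits. */
static uint64_t pci_intx_callback_via(uint16_t domain, uint8_t bus,
                                      uint8_t devfn, uint8_t pin)
{
        return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) |
               ((uint64_t)domain << 32) |
               ((uint64_t)bus << 16) |
               ((uint64_t)devfn << 8) |
               ((uint64_t)(pin - 1) & 3);
}

int main(void)
{
        /* Device 00:03.0, INTA on domain 0. */
        printf("via=%#llx\n", (unsigned long long)
               pci_intx_callback_via(0, 0, 3 << 3, 1));
        return 0;
}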