Diffstat (limited to 'drivers/net')
993 files changed, 64832 insertions, 27345 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 944ec3c9282c..891846655000 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -149,9 +149,9 @@ config MACVTAP
 config IPVLAN
 	tristate "IP-VLAN support"
 	depends on INET
-	depends on IPV6
+	depends on IPV6 || !IPV6
 	depends on NETFILTER
-	depends on NET_L3_MASTER_DEV
+	select NET_L3_MASTER_DEV
 	---help---
 	  This allows one to create virtual devices off of a main interface
 	  and packets will be delivered based on the dest L3 (IPv6/IPv4 addr)
@@ -500,6 +500,7 @@ source "drivers/net/hyperv/Kconfig"
 config NETDEVSIM
 	tristate "Simulated networking device"
 	depends on DEBUG_FS
+	depends on MAY_USE_DEVLINK
 	help
 	  This driver is a developer testing tool and software model that can
 	  be used to test various control path networking APIs, especially
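A note on the IPVLAN hunk: "depends on IPV6 || !IPV6" looks like a tautology, but it is the standard Kconfig idiom for "follow IPV6's tristate". IPVLAN may only be built-in (=y) when IPV6 is built-in, and is forced to =m when IPV6 is =m, so built-in IPVLAN code can never reference IPv6 symbols that live in a module. On the C side such optional dependencies are usually tested with IS_ENABLED(); a minimal sketch, where the helper is hypothetical and not part of this patch:

#include <linux/kconfig.h>

static void example_register_ipv6_hooks(void);	/* hypothetical helper */

static void example_setup(void)
{
	/* IS_ENABLED(CONFIG_IPV6) is true for both =y and =m; thanks to
	 * the "IPV6 || !IPV6" idiom this code is never built-in while
	 * IPv6 itself is a module, so the call below always links.
	 */
	if (IS_ENABLED(CONFIG_IPV6))
		example_register_ipv6_hooks();
}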
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 04c3b747812c..91e67e375dd4 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_ARCNET) += arcnet/
 obj-$(CONFIG_DEV_APPLETALK) += appletalk/
 obj-$(CONFIG_CAIF) += caif/
 obj-$(CONFIG_CAN) += can/
-obj-$(CONFIG_ETRAX_ETHERNET) += cris/
 obj-$(CONFIG_NET_DSA) += dsa/
 obj-$(CONFIG_ETHERNET) += ethernet/
 obj-$(CONFIG_FDDI) += fddi/
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 11fe71278f40..3afda6561434 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -114,12 +114,6 @@ static struct devprobe2 m68k_probes[] __initdata = {
 #ifdef CONFIG_MVME147_NET	/* MVME147 internal Ethernet */
 	{mvme147lance_probe, 0},
 #endif
-#ifdef CONFIG_MAC8390		/* NuBus NS8390-based cards */
-	{mac8390_probe, 0},
-#endif
-#ifdef CONFIG_MAC89x0
-	{mac89x0_probe, 0},
-#endif
 	{NULL, 0},
 };
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c669554d70bb..b7b113018853 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1528,39 +1528,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
 			goto err_close;
 	}
 
-	/* If the mode uses primary, then the following is handled by
-	 * bond_change_active_slave().
-	 */
-	if (!bond_uses_primary(bond)) {
-		/* set promiscuity level to new slave */
-		if (bond_dev->flags & IFF_PROMISC) {
-			res = dev_set_promiscuity(slave_dev, 1);
-			if (res)
-				goto err_close;
-		}
-
-		/* set allmulti level to new slave */
-		if (bond_dev->flags & IFF_ALLMULTI) {
-			res = dev_set_allmulti(slave_dev, 1);
-			if (res)
-				goto err_close;
-		}
-
-		netif_addr_lock_bh(bond_dev);
-
-		dev_mc_sync_multiple(slave_dev, bond_dev);
-		dev_uc_sync_multiple(slave_dev, bond_dev);
-
-		netif_addr_unlock_bh(bond_dev);
-	}
-
-	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
-		/* add lacpdu mc addr to mc list */
-		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
-		dev_mc_add(slave_dev, lacpdu_multicast);
-	}
-
 	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
 	if (res) {
 		netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
@@ -1725,6 +1692,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
 		goto err_upper_unlink;
 	}
 
+	/* If the mode uses primary, then the following is handled by
+	 * bond_change_active_slave().
+	 */
+	if (!bond_uses_primary(bond)) {
+		/* set promiscuity level to new slave */
+		if (bond_dev->flags & IFF_PROMISC) {
+			res = dev_set_promiscuity(slave_dev, 1);
+			if (res)
+				goto err_sysfs_del;
+		}
+
+		/* set allmulti level to new slave */
+		if (bond_dev->flags & IFF_ALLMULTI) {
+			res = dev_set_allmulti(slave_dev, 1);
+			if (res) {
+				if (bond_dev->flags & IFF_PROMISC)
+					dev_set_promiscuity(slave_dev, -1);
+				goto err_sysfs_del;
+			}
+		}
+
+		netif_addr_lock_bh(bond_dev);
+		dev_mc_sync_multiple(slave_dev, bond_dev);
+		dev_uc_sync_multiple(slave_dev, bond_dev);
+		netif_addr_unlock_bh(bond_dev);
+
+		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+			/* add lacpdu mc addr to mc list */
+			u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+
+			dev_mc_add(slave_dev, lacpdu_multicast);
+		}
+	}
+
 	bond->slave_cnt++;
 	bond_compute_features(bond);
 	bond_set_carrier(bond);
@@ -1748,6 +1749,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
 	return 0;
 
 /* Undo stages on error */
+err_sysfs_del:
+	bond_sysfs_slave_del(new_slave);
+
 err_upper_unlink:
 	bond_upper_dev_unlink(bond, new_slave);
 
@@ -1755,9 +1759,6 @@ err_unregister:
 	netdev_rx_handler_unregister(slave_dev);
 
 err_detach:
-	if (!bond_uses_primary(bond))
-		bond_hw_addr_flush(bond_dev, slave_dev);
-
 	vlan_vids_del_by_dev(slave_dev, bond_dev);
 	if (rcu_access_pointer(bond->primary_slave) == new_slave)
 		RCU_INIT_POINTER(bond->primary_slave, NULL);
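The bond_enslave() hunks above are easiest to read as an instance of the kernel's standard unwind discipline: each acquisition gets a matching label in the error path, and a failure jumps to the label that releases exactly what has been acquired so far, in reverse order. Moving the promisc/allmulti/address-sync block after the rx-handler and sysfs registration is what lets the old rollback in err_detach (bond_hw_addr_flush) disappear, while the new err_sysfs_del label undoes the sysfs entry created just before. A generic sketch of the pattern, with placeholder names rather than the bonding code itself:

static int example_attach(struct example *ex)
{
	int err;

	err = acquire_first(ex);
	if (err)
		return err;		/* nothing to undo yet */

	err = acquire_second(ex);
	if (err)
		goto err_first;		/* undo only the first step */

	err = acquire_third(ex);
	if (err)
		goto err_second;

	return 0;

err_second:
	release_second(ex);		/* reverse order of acquisition */
err_first:
	release_first(ex);
	return err;
}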
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index f7799321dffb..01059f1a7bca 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -287,7 +287,7 @@ void bond_create_proc_entry(struct bonding *bond)
 
 	if (bn->proc_dir) {
 		bond->proc_entry = proc_create_data(bond_dev->name,
-						    S_IRUGO, bn->proc_dir,
+						    0444, bn->proc_dir,
 						    &bond_info_fops, bond);
 		if (bond->proc_entry == NULL)
 			netdev_warn(bond_dev, "Cannot create /proc/net/%s/%s\n",
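The S_IRUGO/S_IWUSR substitutions here and in the files below are mechanical: checkpatch now warns about symbolic permissions because an OR-chain of macros is harder to read than the octal value it expands to. The equivalences this patch relies on can be verified at compile time; a small sketch assuming only <linux/stat.h> and <linux/build_bug.h>:

#include <linux/stat.h>
#include <linux/build_bug.h>

static inline void mode_equivalences(void)
{
	BUILD_BUG_ON(S_IRUGO != 0444);			/* world-readable */
	BUILD_BUG_ON((S_IRUGO | S_IWUSR) != 0644);	/* plus owner write */
	BUILD_BUG_ON(S_IRUSR != 0400);			/* owner read only */
}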
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 040b493f60ae..6096440e96ea 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -147,7 +147,7 @@ err_no_cmd:
 static const struct class_attribute class_attr_bonding_masters = {
 	.attr = {
 		.name = "bonding_masters",
-		.mode = S_IWUSR | S_IRUGO,
+		.mode = 0644,
 	},
 	.show = bonding_show_bonds,
 	.store = bonding_store_bonds,
@@ -202,7 +202,7 @@ static ssize_t bonding_show_slaves(struct device *d,
 
 	return res;
 }
-static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
+static DEVICE_ATTR(slaves, 0644, bonding_show_slaves,
 		   bonding_sysfs_store_option);
 
 /* Show the bonding mode. */
@@ -216,8 +215,7 @@ static ssize_t bonding_show_mode(struct device *d,
 
 	return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
 }
-static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
-		   bonding_show_mode, bonding_sysfs_store_option);
+static DEVICE_ATTR(mode, 0644, bonding_show_mode, bonding_sysfs_store_option);
 
 /* Show the bonding transmit hash method. */
 static ssize_t bonding_show_xmit_hash(struct device *d,
@@ -231,7 +230,7 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
 
 	return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
 }
-static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(xmit_hash_policy, 0644,
 		   bonding_show_xmit_hash, bonding_sysfs_store_option);
 
 /* Show arp_validate. */
@@ -247,7 +246,7 @@ static ssize_t bonding_show_arp_validate(struct device *d,
 
 	return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
 }
-static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
+static DEVICE_ATTR(arp_validate, 0644, bonding_show_arp_validate,
 		   bonding_sysfs_store_option);
 
 /* Show arp_all_targets. */
@@ -263,7 +262,7 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
 
 	return sprintf(buf, "%s %d\n", val->string, bond->params.arp_all_targets);
 }
-static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(arp_all_targets, 0644,
 		   bonding_show_arp_all_targets, bonding_sysfs_store_option);
 
 /* Show fail_over_mac. */
@@ -279,7 +278,7 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
 
 	return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
 }
-static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(fail_over_mac, 0644,
 		   bonding_show_fail_over_mac, bonding_sysfs_store_option);
 
 /* Show the arp timer interval. */
@@ -291,7 +290,7 @@ static ssize_t bonding_show_arp_interval(struct device *d,
 
 	return sprintf(buf, "%d\n", bond->params.arp_interval);
 }
-static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(arp_interval, 0644,
 		   bonding_show_arp_interval, bonding_sysfs_store_option);
 
 /* Show the arp targets. */
@@ -312,7 +311,7 @@ static ssize_t bonding_show_arp_targets(struct device *d,
 
 	return res;
 }
-static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(arp_ip_target, 0644,
 		   bonding_show_arp_targets, bonding_sysfs_store_option);
 
 /* Show the up and down delays. */
@@ -324,7 +323,7 @@ static ssize_t bonding_show_downdelay(struct device *d,
 
 	return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
 }
-static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(downdelay, 0644,
 		   bonding_show_downdelay, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_updelay(struct device *d,
@@ -336,7 +335,7 @@ static ssize_t bonding_show_updelay(struct device *d,
 
 	return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
 }
-static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(updelay, 0644,
 		   bonding_show_updelay, bonding_sysfs_store_option);
 
 /* Show the LACP interval. */
@@ -351,7 +350,7 @@ static ssize_t bonding_show_lacp(struct device *d,
 
 	return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
 }
-static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(lacp_rate, 0644,
 		   bonding_show_lacp, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_min_links(struct device *d,
@@ -362,7 +361,7 @@ static ssize_t bonding_show_min_links(struct device *d,
 
 	return sprintf(buf, "%u\n", bond->params.min_links);
 }
-static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(min_links, 0644,
 		   bonding_show_min_links, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_ad_select(struct device *d,
@@ -376,7 +375,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
 
 	return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
 }
-static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(ad_select, 0644,
 		   bonding_show_ad_select, bonding_sysfs_store_option);
 
 /* Show the number of peer notifications to send after a failover event. */
@@ -387,9 +386,9 @@ static ssize_t bonding_show_num_peer_notif(struct device *d,
 	struct bonding *bond = to_bond(d);
 	return sprintf(buf, "%d\n", bond->params.num_peer_notif);
 }
-static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(num_grat_arp, 0644,
 		   bonding_show_num_peer_notif, bonding_sysfs_store_option);
-static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(num_unsol_na, 0644,
 		   bonding_show_num_peer_notif, bonding_sysfs_store_option);
 
 /* Show the MII monitor interval. */
@@ -401,7 +400,7 @@ static ssize_t bonding_show_miimon(struct device *d,
 
 	return sprintf(buf, "%d\n", bond->params.miimon);
 }
-static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(miimon, 0644,
 		   bonding_show_miimon, bonding_sysfs_store_option);
 
 /* Show the primary slave. */
@@ -421,7 +420,7 @@ static ssize_t bonding_show_primary(struct device *d,
 
 	return count;
 }
-static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(primary, 0644,
 		   bonding_show_primary, bonding_sysfs_store_option);
 
 /* Show the primary_reselect flag. */
@@ -438,7 +437,7 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
 
 	return sprintf(buf, "%s %d\n", val->string, bond->params.primary_reselect);
 }
-static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(primary_reselect, 0644,
 		   bonding_show_primary_reselect, bonding_sysfs_store_option);
 
 /* Show the use_carrier flag. */
@@ -450,7 +449,7 @@ static ssize_t bonding_show_carrier(struct device *d,
 
 	return sprintf(buf, "%d\n", bond->params.use_carrier);
 }
-static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(use_carrier, 0644,
 		   bonding_show_carrier, bonding_sysfs_store_option);
 
 
@@ -471,7 +470,7 @@ static ssize_t bonding_show_active_slave(struct device *d,
 
 	return count;
 }
-static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(active_slave, 0644,
 		   bonding_show_active_slave, bonding_sysfs_store_option);
 
 /* Show link status of the bond interface. */
@@ -484,7 +483,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
 
 	return sprintf(buf, "%s\n", active ? "up" : "down");
 }
-static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
+static DEVICE_ATTR(mii_status, 0444, bonding_show_mii_status, NULL);
 
 /* Show current 802.3ad aggregator ID. */
 static ssize_t bonding_show_ad_aggregator(struct device *d,
@@ -503,7 +502,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
 
 	return count;
 }
-static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
+static DEVICE_ATTR(ad_aggregator, 0444, bonding_show_ad_aggregator, NULL);
 
 
 /* Show number of active 802.3ad ports. */
@@ -523,7 +522,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
 
 	return count;
 }
-static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
+static DEVICE_ATTR(ad_num_ports, 0444, bonding_show_ad_num_ports, NULL);
 
 
 /* Show current 802.3ad actor key. */
@@ -543,7 +542,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
 
 	return count;
 }
-static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
+static DEVICE_ATTR(ad_actor_key, 0444, bonding_show_ad_actor_key, NULL);
 
 
 /* Show current 802.3ad partner key. */
@@ -563,7 +562,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
 
 	return count;
 }
-static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
+static DEVICE_ATTR(ad_partner_key, 0444, bonding_show_ad_partner_key, NULL);
 
 
 /* Show current 802.3ad partner mac. */
@@ -582,7 +581,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
 
 	return count;
 }
-static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
+static DEVICE_ATTR(ad_partner_mac, 0444, bonding_show_ad_partner_mac, NULL);
 
 /* Show the queue_ids of the slaves in the current bond. */
 static ssize_t bonding_show_queue_id(struct device *d,
@@ -615,7 +614,7 @@ static ssize_t bonding_show_queue_id(struct device *d,
 
 	return res;
 }
-static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
+static DEVICE_ATTR(queue_id, 0644, bonding_show_queue_id,
 		   bonding_sysfs_store_option);
 
 
@@ -628,7 +627,7 @@ static ssize_t bonding_show_slaves_active(struct device *d,
 
 	return sprintf(buf, "%d\n", bond->params.all_slaves_active);
 }
-static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(all_slaves_active, 0644,
 		   bonding_show_slaves_active, bonding_sysfs_store_option);
 
 /* Show the number of IGMP membership reports to send on link failure */
@@ -640,7 +639,7 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
 
 	return sprintf(buf, "%d\n", bond->params.resend_igmp);
 }
-static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(resend_igmp, 0644,
 		   bonding_show_resend_igmp, bonding_sysfs_store_option);
 
 
@@ -652,7 +651,7 @@ static ssize_t bonding_show_lp_interval(struct device *d,
 
 	return sprintf(buf, "%d\n", bond->params.lp_interval);
 }
-static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(lp_interval, 0644,
 		   bonding_show_lp_interval, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
@@ -662,7 +661,7 @@ static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
 	struct bonding *bond = to_bond(d);
 	return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
 }
-static DEVICE_ATTR(tlb_dynamic_lb, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(tlb_dynamic_lb, 0644,
 		   bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_packets_per_slave(struct device *d,
@@ -674,7 +673,7 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
 
 	return sprintf(buf, "%u\n", packets_per_slave);
 }
-static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(packets_per_slave, 0644,
 		   bonding_show_packets_per_slave, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
@@ -688,7 +687,7 @@ static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
 
 	return 0;
 }
-static DEVICE_ATTR(ad_actor_sys_prio, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(ad_actor_sys_prio, 0644,
 		   bonding_show_ad_actor_sys_prio, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_ad_actor_system(struct device *d,
@@ -703,7 +702,7 @@ static ssize_t bonding_show_ad_actor_system(struct device *d,
 
 	return 0;
 }
-static DEVICE_ATTR(ad_actor_system, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(ad_actor_system, 0644,
 		   bonding_show_ad_actor_system, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_ad_user_port_key(struct device *d,
@@ -717,7 +716,7 @@ static ssize_t bonding_show_ad_user_port_key(struct device *d,
 
 	return 0;
 }
-static DEVICE_ATTR(ad_user_port_key, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(ad_user_port_key, 0644,
 		   bonding_show_ad_user_port_key, bonding_sysfs_store_option);
 
 static struct attribute *per_bond_attrs[] = {
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 7d16c51e6913..2f120b2ffef0 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -25,8 +25,8 @@ const struct slave_attribute slave_attr_##_name = {	\
 		.mode = _mode },				\
 	.show	= _show,					\
 };
-#define SLAVE_ATTR_RO(_name) \
-	SLAVE_ATTR(_name, S_IRUGO, _name##_show)
+#define SLAVE_ATTR_RO(_name) \
+	SLAVE_ATTR(_name, 0444, _name##_show)
 
 static ssize_t state_show(struct slave *slave, char *buf)
 {
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 709838e4c062..a0f954f36c09 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -40,20 +40,20 @@ static LIST_HEAD(ser_list);
 static LIST_HEAD(ser_release_list);
 
 static bool ser_loop;
-module_param(ser_loop, bool, S_IRUGO);
+module_param(ser_loop, bool, 0444);
 MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
 
 static bool ser_use_stx = true;
-module_param(ser_use_stx, bool, S_IRUGO);
+module_param(ser_use_stx, bool, 0444);
 MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
 
 static bool ser_use_fcs = true;
-module_param(ser_use_fcs, bool, S_IRUGO);
+module_param(ser_use_fcs, bool, 0444);
 MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
 
 static int ser_write_chunk = MAX_WRITE_CHUNK;
-module_param(ser_write_chunk, int, S_IRUGO);
+module_param(ser_write_chunk, int, 0444);
 MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");
@@ -97,21 +97,21 @@ static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
 	ser->debugfs_tty_dir = debugfs_create_dir(tty->name, debugfsdir);
 	if (!IS_ERR(ser->debugfs_tty_dir)) {
-		debugfs_create_blob("last_tx_msg", S_IRUSR,
-				    ser->debugfs_tty_dir,
-				    &ser->tx_blob);
+		debugfs_create_blob("last_tx_msg", 0400,
+				    ser->debugfs_tty_dir,
+				    &ser->tx_blob);
 
-		debugfs_create_blob("last_rx_msg", S_IRUSR,
-				    ser->debugfs_tty_dir,
-				    &ser->rx_blob);
+		debugfs_create_blob("last_rx_msg", 0400,
+				    ser->debugfs_tty_dir,
+				    &ser->rx_blob);
 
-		debugfs_create_x32("ser_state", S_IRUSR,
-				   ser->debugfs_tty_dir,
-				   (u32 *)&ser->state);
+		debugfs_create_x32("ser_state", 0400,
+				   ser->debugfs_tty_dir,
+				   (u32 *)&ser->state);
 
-		debugfs_create_x8("tty_status", S_IRUSR,
-				  ser->debugfs_tty_dir,
-				  &ser->tty_status);
+		debugfs_create_x8("tty_status", 0400,
+				  ser->debugfs_tty_dir,
+				  &ser->tty_status);
 	}
 	ser->tx_blob.data = ser->tx_data;
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 980eace53d44..d28a1398c091 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -35,27 +35,27 @@ MODULE_DESCRIPTION("CAIF SPI driver");
 #define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
 
 static bool spi_loop;
-module_param(spi_loop, bool, S_IRUGO);
+module_param(spi_loop, bool, 0444);
 MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
 
 /* SPI frame alignment. */
-module_param(spi_frm_align, int, S_IRUGO);
+module_param(spi_frm_align, int, 0444);
 MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
 
 /*
  * SPI padding options.
  * Warning: must be a base of 2 (& operation used) and can not be zero !
  */
-module_param(spi_up_head_align, int, S_IRUGO);
+module_param(spi_up_head_align, int, 0444);
 MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
 
-module_param(spi_up_tail_align, int, S_IRUGO);
+module_param(spi_up_tail_align, int, 0444);
 MODULE_PARM_DESC(spi_up_tail_align, "SPI uplink tail alignment.");
 
-module_param(spi_down_head_align, int, S_IRUGO);
+module_param(spi_down_head_align, int, 0444);
 MODULE_PARM_DESC(spi_down_head_align, "SPI downlink head alignment.");
 
-module_param(spi_down_tail_align, int, S_IRUGO);
+module_param(spi_down_tail_align, int, 0444);
 MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment.");
 
 #ifdef CONFIG_ARM
@@ -250,10 +250,10 @@ static const struct file_operations dbgfs_frame_fops = {
 static inline void dev_debugfs_add(struct cfspi *cfspi)
 {
 	cfspi->dbgfs_dir = debugfs_create_dir(cfspi->pdev->name, dbgfs_root);
-	cfspi->dbgfs_state = debugfs_create_file("state", S_IRUGO,
+	cfspi->dbgfs_state = debugfs_create_file("state", 0444,
 						 cfspi->dbgfs_dir, cfspi,
 						 &dbgfs_state_fops);
-	cfspi->dbgfs_frame = debugfs_create_file("frame", S_IRUGO,
+	cfspi->dbgfs_frame = debugfs_create_file("frame", 0444,
 						 cfspi->dbgfs_dir, cfspi,
 						 &dbgfs_frame_fops);
 }
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index c3d104feee13..2814e0dee4bb 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -629,21 +629,21 @@ static inline void debugfs_init(struct cfv_info *cfv)
 	if (IS_ERR(cfv->debugfs))
 		return;
 
-	debugfs_create_u32("rx-napi-complete", S_IRUSR, cfv->debugfs,
+	debugfs_create_u32("rx-napi-complete", 0400, cfv->debugfs,
 			   &cfv->stats.rx_napi_complete);
-	debugfs_create_u32("rx-napi-resched", S_IRUSR, cfv->debugfs,
+	debugfs_create_u32("rx-napi-resched", 0400, cfv->debugfs,
 			   &cfv->stats.rx_napi_resched);
-	debugfs_create_u32("rx-nomem", S_IRUSR, cfv->debugfs,
+	debugfs_create_u32("rx-nomem", 0400, cfv->debugfs,
 			   &cfv->stats.rx_nomem);
-	debugfs_create_u32("rx-kicks", S_IRUSR, cfv->debugfs,
+	debugfs_create_u32("rx-kicks", 0400, cfv->debugfs,
 			   &cfv->stats.rx_kicks);
-	debugfs_create_u32("tx-full-ring", S_IRUSR, cfv->debugfs,
+	debugfs_create_u32("tx-full-ring", 0400, cfv->debugfs,
 			   &cfv->stats.tx_full_ring);
-	debugfs_create_u32("tx-no-mem", S_IRUSR, cfv->debugfs,
+	debugfs_create_u32("tx-no-mem", 0400, cfv->debugfs,
 			   &cfv->stats.tx_no_mem);
-	debugfs_create_u32("tx-kicks", S_IRUSR, cfv->debugfs,
+	debugfs_create_u32("tx-kicks", 0400, cfv->debugfs,
 			   &cfv->stats.tx_kicks);
-	debugfs_create_u32("tx-flow-on", S_IRUSR, cfv->debugfs,
+	debugfs_create_u32("tx-flow-on", 0400, cfv->debugfs,
 			   &cfv->stats.tx_flow_on);
 }
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index ac4ff394bc56..2cb75988b328 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -88,15 +88,6 @@ config CAN_AT91
 	  This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
 	  and AT91SAM9X5 processors.
 
-config CAN_BFIN
-	depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x
-	tristate "Analog Devices Blackfin on-chip CAN"
-	---help---
-	  Driver for the Analog Devices Blackfin on-chip CAN controllers
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called bfin_can.
-
 config CAN_FLEXCAN
 	tristate "Support for Freescale FLEXCAN based chips"
 	depends on ARM || PPC
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 02b8ed794564..44922bf29b6a 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -19,7 +19,6 @@ obj-y				+= usb/
 obj-y				+= softing/
 
 obj-$(CONFIG_CAN_AT91)		+= at91_can.o
-obj-$(CONFIG_CAN_BFIN)		+= bfin_can.o
 obj-$(CONFIG_CAN_CC770)		+= cc770/
 obj-$(CONFIG_CAN_C_CAN)		+= c_can/
 obj-$(CONFIG_CAN_FLEXCAN)	+= flexcan.o
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f37ce0e1b603..d98c69045b17 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1224,8 +1224,7 @@ static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
 	return ret;
 }
 
-static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
-	at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
+static DEVICE_ATTR(mb0_id, 0644, at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
 
 static struct attribute *at91_sysfs_attrs[] = {
 	&dev_attr_mb0_id.attr,
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
deleted file mode 100644
index 1deb8ff90a89..000000000000
--- a/drivers/net/can/bfin_can.c
+++ /dev/null
@@ -1,784 +0,0 @@
-/*
- * Blackfin On-Chip CAN Driver
- *
- * Copyright 2004-2009 Analog Devices Inc.
- *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/platform_device.h>
-
-#include <linux/can/dev.h>
-#include <linux/can/error.h>
-
-#include <asm/portmux.h>
-
-#define DRV_NAME "bfin_can"
-#define BFIN_CAN_TIMEOUT 100
-#define TX_ECHO_SKB_MAX 1
-
-/* transmit and receive channels */
-#define TRANSMIT_CHL 24
-#define RECEIVE_STD_CHL 0
-#define RECEIVE_EXT_CHL 4
-#define RECEIVE_RTR_CHL 8
-#define RECEIVE_EXT_RTR_CHL 12
-#define MAX_CHL_NUMBER 32
-
-/* All Blackfin system MMRs are padded to 32bits even if the register
- * itself is only 16bits.  So use a helper macro to streamline this
- */
-#define __BFP(m) u16 m; u16 __pad_##m
-
-/* bfin can registers layout */
-struct bfin_can_mask_regs {
-	__BFP(aml);
-	__BFP(amh);
-};
-
-struct bfin_can_channel_regs {
-	/* data[0,2,4,6] -> data{0,1,2,3} while data[1,3,5,7] is padding */
-	u16 data[8];
-	__BFP(dlc);
-	__BFP(tsv);
-	__BFP(id0);
-	__BFP(id1);
-};
-
-struct bfin_can_regs {
-	/* global control and status registers */
-	__BFP(mc1);		/* offset 0x00 */
-	__BFP(md1);		/* offset 0x04 */
-	__BFP(trs1);		/* offset 0x08 */
-	__BFP(trr1);		/* offset 0x0c */
-	__BFP(ta1);		/* offset 0x10 */
-	__BFP(aa1);		/* offset 0x14 */
-	__BFP(rmp1);		/* offset 0x18 */
-	__BFP(rml1);		/* offset 0x1c */
-	__BFP(mbtif1);		/* offset 0x20 */
-	__BFP(mbrif1);		/* offset 0x24 */
-	__BFP(mbim1);		/* offset 0x28 */
-	__BFP(rfh1);		/* offset 0x2c */
-	__BFP(opss1);		/* offset 0x30 */
-	u32 __pad1[3];
-	__BFP(mc2);		/* offset 0x40 */
-	__BFP(md2);		/* offset 0x44 */
-	__BFP(trs2);		/* offset 0x48 */
-	__BFP(trr2);		/* offset 0x4c */
-	__BFP(ta2);		/* offset 0x50 */
-	__BFP(aa2);		/* offset 0x54 */
-	__BFP(rmp2);		/* offset 0x58 */
-	__BFP(rml2);		/* offset 0x5c */
-	__BFP(mbtif2);		/* offset 0x60 */
-	__BFP(mbrif2);		/* offset 0x64 */
-	__BFP(mbim2);		/* offset 0x68 */
-	__BFP(rfh2);		/* offset 0x6c */
-	__BFP(opss2);		/* offset 0x70 */
-	u32 __pad2[3];
-	__BFP(clock);		/* offset 0x80 */
-	__BFP(timing);		/* offset 0x84 */
-	__BFP(debug);		/* offset 0x88 */
-	__BFP(status);		/* offset 0x8c */
-	__BFP(cec);		/* offset 0x90 */
-	__BFP(gis);		/* offset 0x94 */
-	__BFP(gim);		/* offset 0x98 */
-	__BFP(gif);		/* offset 0x9c */
-	__BFP(control);		/* offset 0xa0 */
-	__BFP(intr);		/* offset 0xa4 */
-	__BFP(version);		/* offset 0xa8 */
-	__BFP(mbtd);		/* offset 0xac */
-	__BFP(ewr);		/* offset 0xb0 */
-	__BFP(esr);		/* offset 0xb4 */
-	u32 __pad3[2];
-	__BFP(ucreg);		/* offset 0xc0 */
-	__BFP(uccnt);		/* offset 0xc4 */
-	__BFP(ucrc);		/* offset 0xc8 */
-	__BFP(uccnf);		/* offset 0xcc */
-	u32 __pad4[1];
-	__BFP(version2);	/* offset 0xd4 */
-	u32 __pad5[10];
-
-	/* channel(mailbox) mask and message registers */
-	struct bfin_can_mask_regs msk[MAX_CHL_NUMBER];		/* offset 0x100 */
-	struct bfin_can_channel_regs chl[MAX_CHL_NUMBER];	/* offset 0x200 */
-};
-
-#undef __BFP
-
-#define SRS 0x0001		/* Software Reset */
-#define SER 0x0008		/* Stuff Error */
-#define BOIM 0x0008		/* Enable Bus Off Interrupt */
-#define CCR 0x0080		/* CAN Configuration Mode Request */
-#define CCA 0x0080		/* Configuration Mode Acknowledge */
-#define SAM 0x0080		/* Sampling */
-#define AME 0x8000		/* Acceptance Mask Enable */
-#define RMLIM 0x0080		/* Enable RX Message Lost Interrupt */
-#define RMLIS 0x0080		/* RX Message Lost IRQ Status */
-#define RTR 0x4000		/* Remote Frame Transmission Request */
-#define BOIS 0x0008		/* Bus Off IRQ Status */
-#define IDE 0x2000		/* Identifier Extension */
-#define EPIS 0x0004		/* Error-Passive Mode IRQ Status */
-#define EPIM 0x0004		/* Enable Error-Passive Mode Interrupt */
-#define EWTIS 0x0001		/* TX Error Count IRQ Status */
-#define EWRIS 0x0002		/* RX Error Count IRQ Status */
-#define BEF 0x0040		/* Bit Error Flag */
-#define FER 0x0080		/* Form Error Flag */
-#define SMR 0x0020		/* Sleep Mode Request */
-#define SMACK 0x0008		/* Sleep Mode Acknowledge */
-
-/*
- * bfin can private data
- */
-struct bfin_can_priv {
-	struct can_priv can;	/* must be the first member */
-	struct net_device *dev;
-	void __iomem *membase;
-	int rx_irq;
-	int tx_irq;
-	int err_irq;
-	unsigned short *pin_list;
-};
-
-/*
- * bfin can timing parameters
- */
-static const struct can_bittiming_const bfin_can_bittiming_const = {
-	.name = DRV_NAME,
-	.tseg1_min = 1,
-	.tseg1_max = 16,
-	.tseg2_min = 1,
-	.tseg2_max = 8,
-	.sjw_max = 4,
-	/*
-	 * Although the BRP field can be set to any value, it is recommended
-	 * that the value be greater than or equal to 4, as restrictions
-	 * apply to the bit timing configuration when BRP is less than 4.
-	 */
-	.brp_min = 4,
-	.brp_max = 1024,
-	.brp_inc = 1,
-};
-
-static int bfin_can_set_bittiming(struct net_device *dev)
-{
-	struct bfin_can_priv *priv = netdev_priv(dev);
-	struct bfin_can_regs __iomem *reg = priv->membase;
-	struct can_bittiming *bt = &priv->can.bittiming;
-	u16 clk, timing;
-
-	clk = bt->brp - 1;
-	timing = ((bt->sjw - 1) << 8) | (bt->prop_seg + bt->phase_seg1 - 1) |
-		((bt->phase_seg2 - 1) << 4);
-
-	/*
-	 * If the SAM bit is set, the input signal is oversampled three times
-	 * at the SCLK rate.
-	 */
-	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
-		timing |= SAM;
-
-	writew(clk, &reg->clock);
-	writew(timing, &reg->timing);
-
-	netdev_info(dev, "setting CLOCK=0x%04x TIMING=0x%04x\n", clk, timing);
-
-	return 0;
-}
-
-static void bfin_can_set_reset_mode(struct net_device *dev)
-{
-	struct bfin_can_priv *priv = netdev_priv(dev);
-	struct bfin_can_regs __iomem *reg = priv->membase;
-	int timeout = BFIN_CAN_TIMEOUT;
-	int i;
-
-	/* disable interrupts */
-	writew(0, &reg->mbim1);
-	writew(0, &reg->mbim2);
-	writew(0, &reg->gim);
-
-	/* reset can and enter configuration mode */
-	writew(SRS | CCR, &reg->control);
-	writew(CCR, &reg->control);
-	while (!(readw(&reg->control) & CCA)) {
-		udelay(10);
-		if (--timeout == 0) {
-			netdev_err(dev, "fail to enter configuration mode\n");
-			BUG();
-		}
-	}
-
-	/*
-	 * All mailbox configurations are marked as inactive
-	 * by writing to CAN Mailbox Configuration Registers 1 and 2
-	 * For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled
-	 */
-	writew(0, &reg->mc1);
-	writew(0, &reg->mc2);
-
-	/* Set Mailbox Direction */
-	writew(0xFFFF, &reg->md1);	/* mailbox 1-16 are RX */
-	writew(0, &reg->md2);	/* mailbox 17-32 are TX */
-
-	/* RECEIVE_STD_CHL */
-	for (i = 0; i < 2; i++) {
-		writew(0, &reg->chl[RECEIVE_STD_CHL + i].id0);
-		writew(AME, &reg->chl[RECEIVE_STD_CHL + i].id1);
-		writew(0, &reg->chl[RECEIVE_STD_CHL + i].dlc);
-		writew(0x1FFF, &reg->msk[RECEIVE_STD_CHL + i].amh);
-		writew(0xFFFF, &reg->msk[RECEIVE_STD_CHL + i].aml);
-	}
-
-	/* RECEIVE_EXT_CHL */
-	for (i = 0; i < 2; i++) {
-		writew(0, &reg->chl[RECEIVE_EXT_CHL + i].id0);
-		writew(AME | IDE, &reg->chl[RECEIVE_EXT_CHL + i].id1);
-		writew(0, &reg->chl[RECEIVE_EXT_CHL + i].dlc);
-		writew(0x1FFF, &reg->msk[RECEIVE_EXT_CHL + i].amh);
-		writew(0xFFFF, &reg->msk[RECEIVE_EXT_CHL + i].aml);
-	}
-
-	writew(BIT(TRANSMIT_CHL - 16), &reg->mc2);
-	writew(BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL), &reg->mc1);
-
-	priv->can.state = CAN_STATE_STOPPED;
-}
-
-static void bfin_can_set_normal_mode(struct net_device *dev)
-{
-	struct bfin_can_priv *priv = netdev_priv(dev);
-	struct bfin_can_regs __iomem *reg = priv->membase;
-	int timeout = BFIN_CAN_TIMEOUT;
-
-	/*
-	 * leave configuration mode
-	 */
-	writew(readw(&reg->control) & ~CCR, &reg->control);
-
-	while (readw(&reg->status) & CCA) {
-		udelay(10);
-		if (--timeout == 0) {
-			netdev_err(dev, "fail to leave configuration mode\n");
-			BUG();
-		}
-	}
-
-	/*
-	 * clear _All_ tx and rx interrupts
-	 */
-	writew(0xFFFF, &reg->mbtif1);
-	writew(0xFFFF, &reg->mbtif2);
-	writew(0xFFFF, &reg->mbrif1);
-	writew(0xFFFF, &reg->mbrif2);
-
-	/*
-	 * clear global interrupt status register
-	 */
-	writew(0x7FF, &reg->gis); /* overwrites with '1' */
-
-	/*
-	 * Initialize Interrupts
-	 * - set bits in the mailbox interrupt mask register
-	 * - global interrupt mask
-	 */
-	writew(BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL), &reg->mbim1);
-	writew(BIT(TRANSMIT_CHL - 16), &reg->mbim2);
-
-	writew(EPIM | BOIM | RMLIM, &reg->gim);
-}
-
-static void bfin_can_start(struct net_device *dev)
-{
-	struct bfin_can_priv *priv = netdev_priv(dev);
-
-	/* enter reset mode */
-	if (priv->can.state != CAN_STATE_STOPPED)
-		bfin_can_set_reset_mode(dev);
-
-	/* leave reset mode */
-	bfin_can_set_normal_mode(dev);
-}
-
-static int bfin_can_set_mode(struct net_device *dev, enum can_mode mode)
-{
-	switch (mode) {
-	case CAN_MODE_START:
-		bfin_can_start(dev);
-		if (netif_queue_stopped(dev))
-			netif_wake_queue(dev);
-		break;
-
-	default:
-		return -EOPNOTSUPP;
-	}
-
-	return 0;
-}
-
-static int bfin_can_get_berr_counter(const struct net_device *dev,
-				     struct can_berr_counter *bec)
-{
-	struct bfin_can_priv *priv = netdev_priv(dev);
-	struct bfin_can_regs __iomem *reg = priv->membase;
-
-	u16 cec = readw(&reg->cec);
-
-	bec->txerr = cec >> 8;
-	bec->rxerr = cec;
-
-	return 0;
-}
-
-static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	struct bfin_can_priv *priv = netdev_priv(dev);
-	struct bfin_can_regs __iomem *reg = priv->membase;
-	struct can_frame *cf = (struct can_frame *)skb->data;
-	u8 dlc = cf->can_dlc;
-	canid_t id = cf->can_id;
-	u8 *data = cf->data;
-	u16 val;
-	int i;
-
-	if (can_dropped_invalid_skb(dev, skb))
-		return NETDEV_TX_OK;
-
-	netif_stop_queue(dev);
-
-	/* fill id */
-	if (id & CAN_EFF_FLAG) {
-		writew(id, &reg->chl[TRANSMIT_CHL].id0);
-		val = ((id & 0x1FFF0000) >> 16) | IDE;
-	} else
-		val = (id << 2);
-	if (id & CAN_RTR_FLAG)
-		val |= RTR;
-	writew(val | AME, &reg->chl[TRANSMIT_CHL].id1);
-
-	/* fill payload */
-	for (i = 0; i < 8; i += 2) {
-		val = ((7 - i) < dlc ? (data[7 - i]) : 0) +
-			((6 - i) < dlc ? (data[6 - i] << 8) : 0);
-		writew(val, &reg->chl[TRANSMIT_CHL].data[i]);
-	}
-
-	/* fill data length code */
-	writew(dlc, &reg->chl[TRANSMIT_CHL].dlc);
-
-	can_put_echo_skb(skb, dev, 0);
-
-	/* set transmit request */
-	writew(BIT(TRANSMIT_CHL - 16), &reg->trs2);
-
-	return 0;
-}
-
-static void bfin_can_rx(struct net_device *dev, u16 isrc)
-{
-	struct bfin_can_priv *priv = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
-	struct bfin_can_regs __iomem *reg = priv->membase;
-	struct can_frame *cf;
-	struct sk_buff *skb;
-	int obj;
-	int i;
-	u16 val;
-
-	skb = alloc_can_skb(dev, &cf);
-	if (skb == NULL)
-		return;
-
-	/* get id */
-	if (isrc & BIT(RECEIVE_EXT_CHL)) {
-		/* extended frame format (EFF) */
-		cf->can_id = ((readw(&reg->chl[RECEIVE_EXT_CHL].id1)
-			     & 0x1FFF) << 16)
-			     + readw(&reg->chl[RECEIVE_EXT_CHL].id0);
-		cf->can_id |= CAN_EFF_FLAG;
-		obj = RECEIVE_EXT_CHL;
-	} else {
-		/* standard frame format (SFF) */
-		cf->can_id = (readw(&reg->chl[RECEIVE_STD_CHL].id1)
-			     & 0x1ffc) >> 2;
-		obj = RECEIVE_STD_CHL;
-	}
-	if (readw(&reg->chl[obj].id1) & RTR)
-		cf->can_id |= CAN_RTR_FLAG;
-
-	/* get data length code */
-	cf->can_dlc = get_can_dlc(readw(&reg->chl[obj].dlc) & 0xF);
-
-	/* get payload */
-	for (i = 0; i < 8; i += 2) {
-		val = readw(&reg->chl[obj].data[i]);
-		cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0;
-		cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
-	}
-
-	stats->rx_packets++;
-	stats->rx_bytes += cf->can_dlc;
-	netif_rx(skb);
-}
-
-static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
-{
-	struct bfin_can_priv *priv = netdev_priv(dev);
-	struct bfin_can_regs __iomem *reg = priv->membase;
-	struct net_device_stats *stats = &dev->stats;
-	struct can_frame *cf;
-	struct sk_buff *skb;
-	enum can_state state = priv->can.state;
-
-	skb = alloc_can_err_skb(dev, &cf);
-	if (skb == NULL)
-		return -ENOMEM;
-
-	if (isrc & RMLIS) {
-		/* data overrun interrupt */
-		netdev_dbg(dev, "data overrun interrupt\n");
-		cf->can_id |= CAN_ERR_CRTL;
-		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
-		stats->rx_over_errors++;
-		stats->rx_errors++;
-	}
-
-	if (isrc & BOIS) {
-		netdev_dbg(dev, "bus-off mode interrupt\n");
-		state = CAN_STATE_BUS_OFF;
-		cf->can_id |= CAN_ERR_BUSOFF;
-		priv->can.can_stats.bus_off++;
-		can_bus_off(dev);
-	}
-
-	if (isrc & EPIS) {
-		/* error passive interrupt */
-		netdev_dbg(dev, "error passive interrupt\n");
-		state = CAN_STATE_ERROR_PASSIVE;
-	}
-
-	if ((isrc & EWTIS) || (isrc & EWRIS)) {
-		netdev_dbg(dev, "Error Warning Transmit/Receive Interrupt\n");
-		state = CAN_STATE_ERROR_WARNING;
-	}
-
-	if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
-				state == CAN_STATE_ERROR_PASSIVE)) {
-		u16 cec = readw(&reg->cec);
-		u8 rxerr = cec;
-		u8 txerr = cec >> 8;
-
-		cf->can_id |= CAN_ERR_CRTL;
-		if (state == CAN_STATE_ERROR_WARNING) {
-			priv->can.can_stats.error_warning++;
-			cf->data[1] = (txerr > rxerr) ?
-				CAN_ERR_CRTL_TX_WARNING :
-				CAN_ERR_CRTL_RX_WARNING;
-		} else {
-			priv->can.can_stats.error_passive++;
-			cf->data[1] = (txerr > rxerr) ?
-				CAN_ERR_CRTL_TX_PASSIVE :
-				CAN_ERR_CRTL_RX_PASSIVE;
-		}
-	}
-
-	if (status) {
-		priv->can.can_stats.bus_error++;
-
-		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
-
-		if (status & BEF)
-			cf->data[2] |= CAN_ERR_PROT_BIT;
-		else if (status & FER)
-			cf->data[2] |= CAN_ERR_PROT_FORM;
-		else if (status & SER)
-			cf->data[2] |= CAN_ERR_PROT_STUFF;
-	}
-
-	priv->can.state = state;
-
-	stats->rx_packets++;
-	stats->rx_bytes += cf->can_dlc;
-	netif_rx(skb);
-
-	return 0;
-}
-
-static irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
-{
-	struct net_device *dev = dev_id;
-	struct bfin_can_priv *priv = netdev_priv(dev);
-	struct bfin_can_regs __iomem *reg = priv->membase;
-	struct net_device_stats *stats = &dev->stats;
-	u16 status, isrc;
-
-	if ((irq == priv->tx_irq) && readw(&reg->mbtif2)) {
-		/* transmission complete interrupt */
-		writew(0xFFFF, &reg->mbtif2);
-		stats->tx_packets++;
-		stats->tx_bytes += readw(&reg->chl[TRANSMIT_CHL].dlc);
-		can_get_echo_skb(dev, 0);
-		netif_wake_queue(dev);
-	} else if ((irq == priv->rx_irq) && readw(&reg->mbrif1)) {
-		/* receive interrupt */
-		isrc = readw(&reg->mbrif1);
-		writew(0xFFFF, &reg->mbrif1);
-		bfin_can_rx(dev, isrc);
-	} else if ((irq == priv->err_irq) && readw(&reg->gis)) {
-		/* error interrupt */
-		isrc = readw(&reg->gis);
-		status = readw(&reg->esr);
-		writew(0x7FF, &reg->gis);
-		bfin_can_err(dev, isrc, status);
-	} else {
-		return IRQ_NONE;
-	}
-
-	return IRQ_HANDLED;
-}
-
-static int bfin_can_open(struct net_device *dev)
-{
-	struct bfin_can_priv *priv = netdev_priv(dev);
-	int err;
-
-	/* set chip into reset mode */
-	bfin_can_set_reset_mode(dev);
-
-	/* common open */
-	err = open_candev(dev);
-	if (err)
-		goto exit_open;
-
-	/* register interrupt handler */
-	err = request_irq(priv->rx_irq, &bfin_can_interrupt, 0,
-			  "bfin-can-rx", dev);
-	if (err)
-		goto exit_rx_irq;
-	err = request_irq(priv->tx_irq, &bfin_can_interrupt, 0,
"bfin-can-tx", dev); - if (err) - goto exit_tx_irq; - err = request_irq(priv->err_irq, &bfin_can_interrupt, 0, - "bfin-can-err", dev); - if (err) - goto exit_err_irq; - - bfin_can_start(dev); - - netif_start_queue(dev); - - return 0; - -exit_err_irq: - free_irq(priv->tx_irq, dev); -exit_tx_irq: - free_irq(priv->rx_irq, dev); -exit_rx_irq: - close_candev(dev); -exit_open: - return err; -} - -static int bfin_can_close(struct net_device *dev) -{ - struct bfin_can_priv *priv = netdev_priv(dev); - - netif_stop_queue(dev); - bfin_can_set_reset_mode(dev); - - close_candev(dev); - - free_irq(priv->rx_irq, dev); - free_irq(priv->tx_irq, dev); - free_irq(priv->err_irq, dev); - - return 0; -} - -static struct net_device *alloc_bfin_candev(void) -{ - struct net_device *dev; - struct bfin_can_priv *priv; - - dev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX); - if (!dev) - return NULL; - - priv = netdev_priv(dev); - - priv->dev = dev; - priv->can.bittiming_const = &bfin_can_bittiming_const; - priv->can.do_set_bittiming = bfin_can_set_bittiming; - priv->can.do_set_mode = bfin_can_set_mode; - priv->can.do_get_berr_counter = bfin_can_get_berr_counter; - priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; - - return dev; -} - -static const struct net_device_ops bfin_can_netdev_ops = { - .ndo_open = bfin_can_open, - .ndo_stop = bfin_can_close, - .ndo_start_xmit = bfin_can_start_xmit, - .ndo_change_mtu = can_change_mtu, -}; - -static int bfin_can_probe(struct platform_device *pdev) -{ - int err; - struct net_device *dev; - struct bfin_can_priv *priv; - struct resource *res_mem, *rx_irq, *tx_irq, *err_irq; - unsigned short *pdata; - - pdata = dev_get_platdata(&pdev->dev); - if (!pdata) { - dev_err(&pdev->dev, "No platform data provided!\n"); - err = -EINVAL; - goto exit; - } - - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - tx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1); - err_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 2); - if (!res_mem || !rx_irq || !tx_irq || !err_irq) { - err = -EINVAL; - goto exit; - } - - /* request peripheral pins */ - err = peripheral_request_list(pdata, dev_name(&pdev->dev)); - if (err) - goto exit; - - dev = alloc_bfin_candev(); - if (!dev) { - err = -ENOMEM; - goto exit_peri_pin_free; - } - - priv = netdev_priv(dev); - - priv->membase = devm_ioremap_resource(&pdev->dev, res_mem); - if (IS_ERR(priv->membase)) { - err = PTR_ERR(priv->membase); - goto exit_peri_pin_free; - } - - priv->rx_irq = rx_irq->start; - priv->tx_irq = tx_irq->start; - priv->err_irq = err_irq->start; - priv->pin_list = pdata; - priv->can.clock.freq = get_sclk(); - - platform_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); - - dev->flags |= IFF_ECHO; /* we support local echo */ - dev->netdev_ops = &bfin_can_netdev_ops; - - bfin_can_set_reset_mode(dev); - - err = register_candev(dev); - if (err) { - dev_err(&pdev->dev, "registering failed (err=%d)\n", err); - goto exit_candev_free; - } - - dev_info(&pdev->dev, - "%s device registered" - "(®_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n", - DRV_NAME, priv->membase, priv->rx_irq, - priv->tx_irq, priv->err_irq, priv->can.clock.freq); - return 0; - -exit_candev_free: - free_candev(dev); -exit_peri_pin_free: - peripheral_free_list(pdata); -exit: - return err; -} - -static int bfin_can_remove(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); - struct bfin_can_priv *priv = netdev_priv(dev); - - bfin_can_set_reset_mode(dev); 
-
-	unregister_candev(dev);
-
-	peripheral_free_list(priv->pin_list);
-
-	free_candev(dev);
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
-	struct net_device *dev = platform_get_drvdata(pdev);
-	struct bfin_can_priv *priv = netdev_priv(dev);
-	struct bfin_can_regs __iomem *reg = priv->membase;
-	int timeout = BFIN_CAN_TIMEOUT;
-
-	if (netif_running(dev)) {
-		/* enter sleep mode */
-		writew(readw(&reg->control) | SMR, &reg->control);
-		while (!(readw(&reg->intr) & SMACK)) {
-			udelay(10);
-			if (--timeout == 0) {
-				netdev_err(dev, "fail to enter sleep mode\n");
-				BUG();
-			}
-		}
-	}
-
-	return 0;
-}
-
-static int bfin_can_resume(struct platform_device *pdev)
-{
-	struct net_device *dev = platform_get_drvdata(pdev);
-	struct bfin_can_priv *priv = netdev_priv(dev);
-	struct bfin_can_regs __iomem *reg = priv->membase;
-
-	if (netif_running(dev)) {
-		/* leave sleep mode */
-		writew(0, &reg->intr);
-	}
-
-	return 0;
-}
-#else
-#define bfin_can_suspend NULL
-#define bfin_can_resume NULL
-#endif	/* CONFIG_PM */
-
-static struct platform_driver bfin_can_driver = {
-	.probe = bfin_can_probe,
-	.remove = bfin_can_remove,
-	.suspend = bfin_can_suspend,
-	.resume = bfin_can_resume,
-	.driver = {
-		.name = DRV_NAME,
-	},
-};
-
-module_platform_driver(bfin_can_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 1e37313054f3..d4dd4da23997 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -67,12 +67,12 @@ MODULE_DESCRIPTION(KBUILD_MODNAME "CAN netdevice driver");
  * otherwise 11 bit SFF messages.
  */
 static int msgobj15_eff;
-module_param(msgobj15_eff, int, S_IRUGO);
+module_param(msgobj15_eff, int, 0444);
 MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
 		 "(default: 11-bit standard frames)");
 
 static int i82527_compat;
-module_param(i82527_compat, int, S_IRUGO);
+module_param(i82527_compat, int, 0444);
 MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 comptibility mode "
 		 "without using additional functions");
 
@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev,
 	return 0;
 }
 
-static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static void cc770_tx(struct net_device *dev, int mo)
 {
 	struct cc770_priv *priv = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
-	struct can_frame *cf = (struct can_frame *)skb->data;
-	unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+	struct can_frame *cf = (struct can_frame *)priv->tx_skb->data;
 	u8 dlc, rtr;
 	u32 id;
 	int i;
 
-	if (can_dropped_invalid_skb(dev, skb))
-		return NETDEV_TX_OK;
-
-	if ((cc770_read_reg(priv,
-			    msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
-		netdev_err(dev, "TX register is still occupied!\n");
-		return NETDEV_TX_BUSY;
-	}
-
-	netif_stop_queue(dev);
-
 	dlc = cf->can_dlc;
 	id = cf->can_id;
-	if (cf->can_id & CAN_RTR_FLAG)
-		rtr = 0;
-	else
-		rtr = MSGCFG_DIR;
+	rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR;
+
+	cc770_write_reg(priv, msgobj[mo].ctrl0,
+			MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
 	cc770_write_reg(priv, msgobj[mo].ctrl1,
 			RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
-	cc770_write_reg(priv, msgobj[mo].ctrl0,
-			MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
+
 	if (id & CAN_EFF_FLAG) {
 		id &= CAN_EFF_MASK;
 		cc770_write_reg(priv, msgobj[mo].config,
@@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	for (i = 0; i < dlc; i++)
 		cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
 
-	/* Store echo skb before starting the transfer */
-	can_put_echo_skb(skb, dev, 0);
-
 	cc770_write_reg(priv, msgobj[mo].ctrl1,
-			RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+			RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+	cc770_write_reg(priv, msgobj[mo].ctrl0,
+			MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC);
+}
 
-	stats->tx_bytes += dlc;
+static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct cc770_priv *priv = netdev_priv(dev);
+	unsigned int mo = obj2msgobj(CC770_OBJ_TX);
 
+	if (can_dropped_invalid_skb(dev, skb))
+		return NETDEV_TX_OK;
 
-	/*
-	 * HM: We had some cases of repeated IRQs so make sure the
-	 * INT is acknowledged I know it's already further up, but
-	 * doing again fixed the issue
-	 */
-	cc770_write_reg(priv, msgobj[mo].ctrl0,
-			MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+	netif_stop_queue(dev);
+
+	if ((cc770_read_reg(priv,
+			    msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+		netdev_err(dev, "TX register is still occupied!\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	priv->tx_skb = skb;
+	cc770_tx(dev, mo);
 
 	return NETDEV_TX_OK;
 }
@@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
 	struct cc770_priv *priv = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
 	unsigned int mo = obj2msgobj(o);
+	struct can_frame *cf;
+	u8 ctrl1;
+
+	ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
 
-	/* Nothing more to send, switch off interrupts */
 	cc770_write_reg(priv, msgobj[mo].ctrl0,
 			MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
-	/*
-	 * We had some cases of repeated IRQ so make sure the
-	 * INT is acknowledged
+	cc770_write_reg(priv, msgobj[mo].ctrl1,
+			RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES);
+
+	if (unlikely(!priv->tx_skb)) {
+		netdev_err(dev, "missing tx skb in tx interrupt\n");
+		return;
+	}
+
+	if (unlikely(ctrl1 & MSGLST_SET)) {
+		stats->rx_over_errors++;
+		stats->rx_errors++;
+	}
+
+	/* When the CC770 is sending an RTR message and it receives a regular
+	 * message that matches the id of the RTR message, it will overwrite the
+	 * outgoing message in the TX register. When this happens we must
+	 * process the received message and try to transmit the outgoing skb
+	 * again.
 	 */
-	cc770_write_reg(priv, msgobj[mo].ctrl0,
-			MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+	if (unlikely(ctrl1 & NEWDAT_SET)) {
+		cc770_rx(dev, mo, ctrl1);
+		cc770_tx(dev, mo);
+		return;
+	}
 
+	cf = (struct can_frame *)priv->tx_skb->data;
+	stats->tx_bytes += cf->can_dlc;
 	stats->tx_packets++;
+
+	can_put_echo_skb(priv->tx_skb, dev, 0);
 	can_get_echo_skb(dev, 0);
+	priv->tx_skb = NULL;
+
 	netif_wake_queue(dev);
 }
@@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv)
 	priv->can.do_set_bittiming = cc770_set_bittiming;
 	priv->can.do_set_mode = cc770_set_mode;
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+	priv->tx_skb = NULL;
 
 	memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
index a1739db98d91..95752e1d1283 100644
--- a/drivers/net/can/cc770/cc770.h
+++ b/drivers/net/can/cc770/cc770.h
@@ -193,6 +193,8 @@ struct cc770_priv {
 	u8 cpu_interface;	/* CPU interface register */
 	u8 clkout;		/* Clock out register */
 	u8 bus_config;		/* Bus conffiguration register */
+
+	struct sk_buff *tx_skb;
 };
 
 struct net_device *alloc_cc770dev(int sizeof_priv);
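The cc770 change replaces "echo the skb immediately at xmit time" with a deferred scheme: the skb is parked in priv->tx_skb, and only the TX interrupt decides whether the frame really went out (echo it) or was clobbered by a matching reply to an outgoing RTR frame (retransmit it). A stripped-down sketch of that pattern, with simplified placeholder names and error handling omitted, not the driver code itself:

/* xmit side: park the skb, start the hardware, stop the queue */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);		/* single TX object in flight */
	priv->tx_skb = skb;		/* defer echo until the TX irq */
	hw_start_tx(dev);		/* hypothetical helper */
	return NETDEV_TX_OK;
}

/* TX interrupt: only now do we know the frame was actually sent */
static void example_tx_irq(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	if (hw_tx_was_overwritten(dev)) {	/* hypothetical helper */
		hw_start_tx(dev);		/* try the same skb again */
		return;
	}
	can_put_echo_skb(priv->tx_skb, dev, 0);
	can_get_echo_skb(dev, 0);
	priv->tx_skb = NULL;
	netif_wake_queue(dev);
}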
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index 3a30fd3b4498..fcd34698074f 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -82,29 +82,29 @@ static u8 cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
 static u8 bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
 static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
 
-module_param_hw_array(port, ulong, ioport, NULL, S_IRUGO);
+module_param_hw_array(port, ulong, ioport, NULL, 0444);
 MODULE_PARM_DESC(port, "I/O port number");
 
-module_param_hw_array(mem, ulong, iomem, NULL, S_IRUGO);
+module_param_hw_array(mem, ulong, iomem, NULL, 0444);
 MODULE_PARM_DESC(mem, "I/O memory address");
 
-module_param_hw_array(indirect, int, ioport, NULL, S_IRUGO);
+module_param_hw_array(indirect, int, ioport, NULL, 0444);
 MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
 
-module_param_hw_array(irq, int, irq, NULL, S_IRUGO);
+module_param_hw_array(irq, int, irq, NULL, 0444);
 MODULE_PARM_DESC(irq, "IRQ number");
 
-module_param_array(clk, int, NULL, S_IRUGO);
+module_param_array(clk, int, NULL, 0444);
 MODULE_PARM_DESC(clk, "External oscillator clock frequency "
 		 "(default=16000000 [16 MHz])");
 
-module_param_array(cir, byte, NULL, S_IRUGO);
+module_param_array(cir, byte, NULL, 0444);
 MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])");
 
-module_param_array(cor, byte, NULL, S_IRUGO);
+module_param_array(cor, byte, NULL, 0444);
 MODULE_PARM_DESC(cor, "Clockout register (default=0x00)");
 
-module_param_array(bcr, byte, NULL, S_IRUGO);
+module_param_array(bcr, byte, NULL, 0444);
 MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])");
 
 #define CC770_IOSIZE          0x20
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 897c6b113d3f..2d3046afa80d 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1484,7 +1484,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
 		}						\
 	}							\
 	module_param_named(name, grcan_module_config.name,	\
-			   mtype, S_IRUGO);			\
+			   mtype, 0444);			\
 	MODULE_PARM_DESC(name, desc)
 
 #define GRCAN_CONFIG_ATTR(name, desc)				\
@@ -1513,7 +1513,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
 		struct grcan_priv *priv = netdev_priv(dev);	\
 		return sprintf(buf, "%d\n", priv->config.name);	\
 	}							\
-	static DEVICE_ATTR(name, S_IRUGO | S_IWUSR,		\
+	static DEVICE_ATTR(name, 0644,				\
 			   grcan_show_##name,			\
 			   grcan_store_##name);			\
 	GRCAN_MODULE_PARAM(name, ushort, GRCAN_NOT_BOOL, desc)
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 2772d05ff11c..fedd927ba6ed 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -30,6 +30,7 @@
 #define IFI_CANFD_STCMD_ERROR_ACTIVE		BIT(2)
 #define IFI_CANFD_STCMD_ERROR_PASSIVE		BIT(3)
 #define IFI_CANFD_STCMD_BUSOFF			BIT(4)
+#define IFI_CANFD_STCMD_ERROR_WARNING		BIT(5)
 #define IFI_CANFD_STCMD_BUSMONITOR		BIT(16)
 #define IFI_CANFD_STCMD_LOOPBACK		BIT(18)
 #define IFI_CANFD_STCMD_DISABLE_CANFD		BIT(24)
@@ -52,7 +53,10 @@
 #define IFI_CANFD_TXSTCMD_OVERFLOW		BIT(13)
 
 #define IFI_CANFD_INTERRUPT			0xc
+#define IFI_CANFD_INTERRUPT_ERROR_BUSOFF	BIT(0)
 #define IFI_CANFD_INTERRUPT_ERROR_WARNING	BIT(1)
+#define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG	BIT(2)
+#define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC	BIT(3)
 #define IFI_CANFD_INTERRUPT_ERROR_COUNTER	BIT(10)
 #define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY	BIT(16)
 #define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE	BIT(22)
@@ -61,6 +65,10 @@
 #define IFI_CANFD_INTERRUPT_SET_IRQ		((u32)BIT(31))
 
 #define IFI_CANFD_IRQMASK			0x10
+#define IFI_CANFD_IRQMASK_ERROR_BUSOFF		BIT(0)
+#define IFI_CANFD_IRQMASK_ERROR_WARNING		BIT(1)
+#define IFI_CANFD_IRQMASK_ERROR_STATE_CHG	BIT(2)
+#define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC	BIT(3)
 #define IFI_CANFD_IRQMASK_SET_ERR		BIT(7)
 #define IFI_CANFD_IRQMASK_SET_TS		BIT(15)
 #define IFI_CANFD_IRQMASK_TXFIFO_EMPTY		BIT(16)
@@ -136,6 +144,8 @@
 #define IFI_CANFD_SYSCLOCK			0x50
 
 #define IFI_CANFD_VER				0x54
+#define IFI_CANFD_VER_REV_MASK			0xff
+#define IFI_CANFD_VER_REV_MIN_SUPPORTED		0x15
 
 #define IFI_CANFD_IP_ID				0x58
 #define IFI_CANFD_IP_ID_VALUE			0xD073CAFD
@@ -220,7 +230,10 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable)
 
 	if (enable) {
 		enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
-			IFI_CANFD_IRQMASK_RXFIFO_NEMPTY;
+			IFI_CANFD_IRQMASK_RXFIFO_NEMPTY |
+			IFI_CANFD_IRQMASK_ERROR_STATE_CHG |
+			IFI_CANFD_IRQMASK_ERROR_WARNING |
+			IFI_CANFD_IRQMASK_ERROR_BUSOFF;
 		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
 			enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;
 	}
@@ -361,12 +374,13 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev)
 	return 1;
 }
 
-static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr)
+static int ifi_canfd_handle_lec_err(struct net_device *ndev)
 {
 	struct ifi_canfd_priv *priv = netdev_priv(ndev);
 	struct net_device_stats *stats = &ndev->stats;
 	struct can_frame *cf;
 	struct sk_buff *skb;
+	u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
 	const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |
 			    IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |
 			    IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST |
@@ -449,6 +463,11 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
 
 	switch (new_state) {
 	case CAN_STATE_ERROR_ACTIVE:
+		/* error active state */
+		priv->can.can_stats.error_warning++;
+		priv->can.state = CAN_STATE_ERROR_ACTIVE;
+		break;
+	case CAN_STATE_ERROR_WARNING:
 		/* error warning state */
 		priv->can.can_stats.error_warning++;
 		priv->can.state = CAN_STATE_ERROR_WARNING;
@@ -477,7 +496,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
 	ifi_canfd_get_berr_counter(ndev, &bec);
 
 	switch (new_state) {
-	case CAN_STATE_ERROR_ACTIVE:
+	case CAN_STATE_ERROR_WARNING:
 		/* error warning state */
 		cf->can_id |= CAN_ERR_CRTL;
 		cf->data[1] = (bec.txerr > bec.rxerr) ?
@@ -510,22 +529,21 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
 	return 1;
 }
 
-static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd)
+static int ifi_canfd_handle_state_errors(struct net_device *ndev)
 {
 	struct ifi_canfd_priv *priv = netdev_priv(ndev);
+	u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
 	int work_done = 0;
-	u32 isr;
 
-	/*
-	 * The ErrWarn condition is a little special, since the bit is
-	 * located in the INTERRUPT register instead of STCMD register.
-	 */
-	isr = readl(priv->base + IFI_CANFD_INTERRUPT);
-	if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) &&
+	if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) &&
+	    (priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
+		netdev_dbg(ndev, "Error, entered active state\n");
+		work_done += ifi_canfd_handle_state_change(ndev,
+						CAN_STATE_ERROR_ACTIVE);
+	}
+
+	if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) &&
 	    (priv->can.state != CAN_STATE_ERROR_WARNING)) {
-		/* Clear the interrupt */
-		writel(IFI_CANFD_INTERRUPT_ERROR_WARNING,
-		       priv->base + IFI_CANFD_INTERRUPT);
 		netdev_dbg(ndev, "Error, entered warning state\n");
 		work_done += ifi_canfd_handle_state_change(ndev,
 						CAN_STATE_ERROR_WARNING);
@@ -552,18 +570,11 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
 {
 	struct net_device *ndev = napi->dev;
 	struct ifi_canfd_priv *priv = netdev_priv(ndev);
-	const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE |
-				     IFI_CANFD_STCMD_BUSOFF;
-	int work_done = 0;
-
-	u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
 	u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
-	u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
+	int work_done = 0;
 
 	/* Handle bus state changes */
-	if ((stcmd & stcmd_state_mask) ||
-	    ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0))
-		work_done += ifi_canfd_handle_state_errors(ndev, stcmd);
+	work_done += ifi_canfd_handle_state_errors(ndev);
 
 	/* Handle lost messages on RX */
 	if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
@@ -571,7 +582,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
 
 	/* Handle lec errors on the bus */
 	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
-		work_done += ifi_canfd_handle_lec_err(ndev, errctr);
+		work_done += ifi_canfd_handle_lec_err(ndev);
 
 	/* Handle normal messages on RX */
 	if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
@@ -592,12 +603,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
 	struct net_device_stats *stats = &ndev->stats;
 	const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
 				IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER |
+				IFI_CANFD_INTERRUPT_ERROR_COUNTER |
+				IFI_CANFD_INTERRUPT_ERROR_STATE_CHG |
 				IFI_CANFD_INTERRUPT_ERROR_WARNING |
-				IFI_CANFD_INTERRUPT_ERROR_COUNTER;
+				IFI_CANFD_INTERRUPT_ERROR_BUSOFF;
 	const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
 				IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
-	const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ |
-					 IFI_CANFD_INTERRUPT_ERROR_WARNING));
+	const u32 clr_irq_mask = ~((u32)IFI_CANFD_INTERRUPT_SET_IRQ);
 	u32 isr;
 
 	isr = readl(priv->base + IFI_CANFD_INTERRUPT);
@@ -933,7 +945,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
 	struct resource *res;
 	void __iomem *addr;
 	int irq, ret;
-	u32 id;
+	u32 id, rev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	addr = devm_ioremap_resource(dev, res);
@@ -947,6 +959,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
+	rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK;
+	if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) {
+		dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n",
+			rev, IFI_CANFD_VER_REV_MIN_SUPPORTED);
+		return -EINVAL;
+	}
+
 	ndev = alloc_candev(sizeof(*priv), 1);
 	if (!ndev)
 		return -ENOMEM;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 12a53c8e8e1d..adfdb66a486e 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1865,9 +1865,9 @@ static ssize_t ican3_sysfs_show_fwinfo(struct device *dev,
 	return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo);
 }
 
-static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
-						   ican3_sysfs_set_term);
-static DEVICE_ATTR(fwinfo, S_IRUSR | S_IRUGO, ican3_sysfs_show_fwinfo, NULL);
+static DEVICE_ATTR(termination, 0644, ican3_sysfs_show_term,
+		   ican3_sysfs_set_term);
+static DEVICE_ATTR(fwinfo, 0444, ican3_sysfs_show_fwinfo, NULL);
 
 static struct attribute *ican3_sysfs_attrs[] = {
 	&dev_attr_termination.attr,
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2594f7779c6f..b397a33f3d32 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -26,6 +26,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/iopoll.h>
 #include <linux/can/dev.h>
+#include <linux/pinctrl/consumer.h>
 
 /* napi related */
 #define M_CAN_NAPI_WEIGHT	64
@@ -253,7 +254,7 @@ enum m_can_mram_cfg {
 
 /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
 #define RXFC_FWM_SHIFT	24
-#define RXFC_FWM_MASK	(0x7f < RXFC_FWM_SHIFT)
+#define RXFC_FWM_MASK	(0x7f << RXFC_FWM_SHIFT)
 #define RXFC_FS_SHIFT	16
 #define RXFC_FS_MASK	(0x7f << RXFC_FS_SHIFT)
@@ -1700,6 +1701,8 @@ static __maybe_unused int m_can_suspend(struct device *dev)
 		m_can_clk_stop(priv);
 	}
 
+	pinctrl_pm_select_sleep_state(dev);
+
 	priv->can.state = CAN_STATE_SLEEPING;
 
 	return 0;
@@ -1710,6 +1713,8 @@ static __maybe_unused int m_can_resume(struct device *dev)
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct m_can_priv *priv = netdev_priv(ndev);
 
+	pinctrl_pm_select_default_state(dev);
+
 	m_can_init_ram(priv);
 
 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
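The RXFC_FWM_MASK hunk in m_can.c fixes a one-character bug worth dwelling on: "(0x7f < RXFC_FWM_SHIFT)" is a comparison, and since 0x7f is indeed less than 24 it evaluates to 1, so the "mask" silently became bit 0 instead of covering bits 24 to 30. A small self-contained demonstration in plain userspace C (not kernel code):

#include <stdio.h>

#define FWM_SHIFT 24
#define MASK_BUGGY   (0x7f <  FWM_SHIFT)	/* comparison: yields 1 */
#define MASK_CORRECT (0x7f << FWM_SHIFT)	/* shift: 0x7f000000 */

int main(void)
{
	printf("buggy:   0x%08x\n", MASK_BUGGY);	/* 0x00000001 */
	printf("correct: 0x%08x\n", MASK_CORRECT);	/* 0x7f000000 */
	return 0;
}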
supported is rev %i\n", + rev, IFI_CANFD_VER_REV_MIN_SUPPORTED); + return -EINVAL; + } + ndev = alloc_candev(sizeof(*priv), 1); if (!ndev) return -ENOMEM; diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 12a53c8e8e1d..adfdb66a486e 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1865,9 +1865,9 @@ static ssize_t ican3_sysfs_show_fwinfo(struct device *dev, return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo); } -static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term, - ican3_sysfs_set_term); -static DEVICE_ATTR(fwinfo, S_IRUSR | S_IRUGO, ican3_sysfs_show_fwinfo, NULL); +static DEVICE_ATTR(termination, 0644, ican3_sysfs_show_term, + ican3_sysfs_set_term); +static DEVICE_ATTR(fwinfo, 0444, ican3_sysfs_show_fwinfo, NULL); static struct attribute *ican3_sysfs_attrs[] = { &dev_attr_termination.attr, diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 2594f7779c6f..b397a33f3d32 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -26,6 +26,7 @@ #include <linux/pm_runtime.h> #include <linux/iopoll.h> #include <linux/can/dev.h> +#include <linux/pinctrl/consumer.h> /* napi related */ #define M_CAN_NAPI_WEIGHT 64 @@ -253,7 +254,7 @@ enum m_can_mram_cfg { /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ #define RXFC_FWM_SHIFT 24 -#define RXFC_FWM_MASK (0x7f < RXFC_FWM_SHIFT) +#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT) #define RXFC_FS_SHIFT 16 #define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT) @@ -1700,6 +1701,8 @@ static __maybe_unused int m_can_suspend(struct device *dev) m_can_clk_stop(priv); } + pinctrl_pm_select_sleep_state(dev); + priv->can.state = CAN_STATE_SLEEPING; return 0; @@ -1710,6 +1713,8 @@ static __maybe_unused int m_can_resume(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct m_can_priv *priv = netdev_priv(ndev); + pinctrl_pm_select_default_state(dev); + m_can_init_ram(priv); priv->can.state = CAN_STATE_ERROR_ACTIVE; diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 55513411a82e..ed8561d4a90f 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -262,7 +262,6 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, spin_lock_irqsave(&priv->echo_lock, flags); can_get_echo_skb(priv->ndev, msg->client); - spin_unlock_irqrestore(&priv->echo_lock, flags); /* count bytes of the echo instead of skb */ stats->tx_bytes += cf_len; @@ -271,6 +270,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, /* restart tx queue (a slot is free) */ netif_wake_queue(priv->ndev); + spin_unlock_irqrestore(&priv->echo_lock, flags); return 0; } @@ -333,7 +333,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ if (pucan_status_is_rx_barrier(msg)) { - unsigned long flags; if (priv->enable_tx_path) { int err = priv->enable_tx_path(priv); @@ -342,16 +341,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, return err; } - /* restart network queue only if echo skb array is free */ - spin_lock_irqsave(&priv->echo_lock, flags); - - if (!priv->can.echo_skb[priv->echo_idx]) { - spin_unlock_irqrestore(&priv->echo_lock, flags); - - netif_wake_queue(ndev); - } else { - spin_unlock_irqrestore(&priv->echo_lock, flags); - } + /* start network queue (echo_skb array is empty) */ + netif_start_queue(ndev); return 0; } @@ -726,11 +717,6 @@ static netdev_tx_t 
peak_canfd_start_xmit(struct sk_buff *skb,
 	 */
 	should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]);

-	spin_unlock_irqrestore(&priv->echo_lock, flags);
-
-	/* write the skb on the interface */
-	priv->write_tx_msg(priv, msg);
-
 	/* stop network tx queue if not enough room to save one more msg too */
 	should_stop_tx_queue |= (room_left <
@@ -742,6 +728,11 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
 	if (should_stop_tx_queue)
 		netif_stop_queue(ndev);

+	spin_unlock_irqrestore(&priv->echo_lock, flags);
+
+	/* write the skb on the interface */
+	priv->write_tx_msg(priv, msg);
+
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 788c3464a3b0..3c51a884db87 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -349,8 +349,12 @@ static irqreturn_t pciefd_irq_handler(int irq, void *arg)
 		priv->tx_pages_free++;
 		spin_unlock_irqrestore(&priv->tx_lock, flags);

-		/* wake producer up */
-		netif_wake_queue(priv->ucan.ndev);
+		/* wake producer up (only if enough room in echo_skb array) */
+		spin_lock_irqsave(&priv->ucan.echo_lock, flags);
+		if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx])
+			netif_wake_queue(priv->ucan.ndev);
+
+		spin_unlock_irqrestore(&priv->ucan.echo_lock, flags);
 	}

 	/* re-enable Rx DMA transfer for this CAN */
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index a89c1e92554d..1a2ae6ce8d87 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -48,27 +48,27 @@ static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
 static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
 static spinlock_t indirect_lock[MAXDEV];	/* lock for indirect access mode */

-module_param_hw_array(port, ulong, ioport, NULL, S_IRUGO);
+module_param_hw_array(port, ulong, ioport, NULL, 0444);
 MODULE_PARM_DESC(port, "I/O port number");

-module_param_hw_array(mem, ulong, iomem, NULL, S_IRUGO);
+module_param_hw_array(mem, ulong, iomem, NULL, 0444);
 MODULE_PARM_DESC(mem, "I/O memory address");

-module_param_hw_array(indirect, int, ioport, NULL, S_IRUGO);
+module_param_hw_array(indirect, int, ioport, NULL, 0444);
 MODULE_PARM_DESC(indirect, "Indirect access via address and data port");

-module_param_hw_array(irq, int, irq, NULL, S_IRUGO);
+module_param_hw_array(irq, int, irq, NULL, 0444);
 MODULE_PARM_DESC(irq, "IRQ number");

-module_param_array(clk, int, NULL, S_IRUGO);
+module_param_array(clk, int, NULL, 0444);
 MODULE_PARM_DESC(clk, "External oscillator clock frequency "
 		 "(default=16000000 [16 MHz])");

-module_param_array(cdr, byte, NULL, S_IRUGO);
+module_param_array(cdr, byte, NULL, 0444);
 MODULE_PARM_DESC(cdr, "Clock divider register "
 		 "(default=0x48 [CDR_CBP | CDR_CLK_OFF])");

-module_param_array(ocr, byte, NULL, S_IRUGO);
+module_param_array(ocr, byte, NULL, 0444);
 MODULE_PARM_DESC(ocr, "Output control register "
 		 "(default=0x18 [OCR_TX0_PUSHPULL])");
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 5f64deec9f6c..e22696190583 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -601,8 +601,8 @@ static ssize_t store_output(struct device *dev, struct device_attribute *attr,
 	return count;
 }

-static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
-static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
+static const DEVICE_ATTR(chip, 0444, show_chip, NULL);
+static const DEVICE_ATTR(output, 0644, show_output, store_output);

 static const struct attribute *const netdev_sysfs_attrs[] = {
 	&dev_attr_chip.attr,
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 98d118b3aaf4..e90817608645 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -220,7 +220,7 @@
 #define DEVICE_NAME "mcp251x"

 static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
-module_param(mcp251x_enable_dma, int, S_IRUGO);
+module_param(mcp251x_enable_dma, int, 0444);
 MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. 
Default: 0 (Off)"); static const struct can_bittiming_const mcp251x_bittiming_const = { diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index c6dcf93675c0..5820fd5b69b5 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -496,7 +496,7 @@ static ssize_t show_firmware(struct device *d, (dev->version >> 8) & 0xf, dev->version & 0xff); } -static DEVICE_ATTR(firmware, S_IRUGO, show_firmware, NULL); +static DEVICE_ATTR(firmware, 0444, show_firmware, NULL); static ssize_t show_hardware(struct device *d, struct device_attribute *attr, char *buf) @@ -509,7 +509,7 @@ static ssize_t show_hardware(struct device *d, (dev->version >> 24) & 0xf, (dev->version >> 16) & 0xff); } -static DEVICE_ATTR(hardware, S_IRUGO, show_hardware, NULL); +static DEVICE_ATTR(hardware, 0444, show_hardware, NULL); static ssize_t show_nets(struct device *d, struct device_attribute *attr, char *buf) @@ -519,7 +519,7 @@ static ssize_t show_nets(struct device *d, return sprintf(buf, "%d", dev->net_count); } -static DEVICE_ATTR(nets, S_IRUGO, show_nets, NULL); +static DEVICE_ATTR(nets, 0444, show_nets, NULL); static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg) { diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index c2b04f505e16..d200a5b0651c 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -65,7 +65,7 @@ MODULE_ALIAS_RTNL_LINK(DRV_NAME); */ static bool echo; /* echo testing. Default: 0 (Off) */ -module_param(echo, bool, S_IRUGO); +module_param(echo, bool, 0444); MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)"); diff --git a/drivers/net/cris/Makefile b/drivers/net/cris/Makefile deleted file mode 100644 index b4e8932227b6..000000000000 --- a/drivers/net/cris/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_ETRAX_ARCH_V10) += eth_v10.o diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c deleted file mode 100644 index 8b1a859f5140..000000000000 --- a/drivers/net/cris/eth_v10.c +++ /dev/null @@ -1,1742 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * e100net.c: A network driver for the ETRAX 100LX network controller. - * - * Copyright (c) 1998-2002 Axis Communications AB. - * - * The outline of this driver comes from skeleton.c. - * - */ - -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ptrace.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/string.h> -#include <linux/spinlock.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/bitops.h> - -#include <linux/if.h> -#include <linux/mii.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/ethtool.h> - -#include <arch/svinto.h>/* DMA and register descriptions */ -#include <asm/io.h> /* CRIS_LED_* I/O functions */ -#include <asm/irq.h> -#include <asm/dma.h> -#include <asm/ethernet.h> -#include <asm/cache.h> -#include <arch/io_interface_mux.h> - -//#define ETHDEBUG -#define D(x) - -/* - * The name of the card. Is used for messages and in the requests for - * io regions, irqs and dma channels - */ - -static const char* cardname = "ETRAX 100LX built-in ethernet controller"; - -/* A default ethernet address. Highlevel SW will set the real one later */ - -static struct sockaddr default_mac = { - 0, - { 0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00 } -}; - -/* Information that need to be kept for each board. 
*/ -struct net_local { - struct mii_if_info mii_if; - - /* Tx control lock. This protects the transmit buffer ring - * state along with the "tx full" state of the driver. This - * means all netif_queue flow control actions are protected - * by this lock as well. - */ - spinlock_t lock; - - spinlock_t led_lock; /* Protect LED state */ - spinlock_t transceiver_lock; /* Protect transceiver state. */ -}; - -typedef struct etrax_eth_descr -{ - etrax_dma_descr descr; - struct sk_buff* skb; -} etrax_eth_descr; - -/* Some transceivers requires special handling */ -struct transceiver_ops -{ - unsigned int oui; - void (*check_speed)(struct net_device* dev); - void (*check_duplex)(struct net_device* dev); -}; - -/* Duplex settings */ -enum duplex -{ - half, - full, - autoneg -}; - -/* Dma descriptors etc. */ - -#define MAX_MEDIA_DATA_SIZE 1522 - -#define MIN_PACKET_LEN 46 -#define ETHER_HEAD_LEN 14 - -/* -** MDIO constants. -*/ -#define MDIO_START 0x1 -#define MDIO_READ 0x2 -#define MDIO_WRITE 0x1 -#define MDIO_PREAMBLE 0xfffffffful - -/* Broadcom specific */ -#define MDIO_AUX_CTRL_STATUS_REG 0x18 -#define MDIO_BC_FULL_DUPLEX_IND 0x1 -#define MDIO_BC_SPEED 0x2 - -/* TDK specific */ -#define MDIO_TDK_DIAGNOSTIC_REG 18 -#define MDIO_TDK_DIAGNOSTIC_RATE 0x400 -#define MDIO_TDK_DIAGNOSTIC_DPLX 0x800 - -/*Intel LXT972A specific*/ -#define MDIO_INT_STATUS_REG_2 0x0011 -#define MDIO_INT_FULL_DUPLEX_IND (1 << 9) -#define MDIO_INT_SPEED (1 << 14) - -/* Network flash constants */ -#define NET_FLASH_TIME (HZ/50) /* 20 ms */ -#define NET_FLASH_PAUSE (HZ/100) /* 10 ms */ -#define NET_LINK_UP_CHECK_INTERVAL (2*HZ) /* 2 s */ -#define NET_DUPLEX_CHECK_INTERVAL (2*HZ) /* 2 s */ - -#define NO_NETWORK_ACTIVITY 0 -#define NETWORK_ACTIVITY 1 - -#define NBR_OF_RX_DESC 32 -#define NBR_OF_TX_DESC 16 - -/* Large packets are sent directly to upper layers while small packets are */ -/* copied (to reduce memory waste). The following constant decides the breakpoint */ -#define RX_COPYBREAK 256 - -/* Due to a chip bug we need to flush the cache when descriptors are returned */ -/* to the DMA. To decrease performance impact we return descriptors in chunks. */ -/* The following constant determines the number of descriptors to return. 
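The comment above explains the scheme the driver being removed here used: because of the chip's cache bug, descriptors go back to the DMA engine in batches rather than one per received frame. A rough sketch of that batching pattern, with hypothetical names, assuming a singly linked ring marked by an end-of-list bit:

#define RX_QUEUE_THRESHOLD 16		/* half of a 32-entry ring */
#define D_EOL 0x1			/* end-of-list bit, illustrative value */

struct demo_desc { unsigned long ctrl; };
struct demo_ring { struct demo_desc *last; int pending; };

static void demo_flush_cache(void) { /* arch-specific flush, stubbed here */ }

static void demo_rx_return(struct demo_ring *r, struct demo_desc *d)
{
	if (++r->pending < RX_QUEUE_THRESHOLD)
		return;				/* keep accumulating */

	demo_flush_cache();			/* one flush per batch, not per frame */
	d->ctrl |= D_EOL;			/* this descriptor becomes the new tail */
	r->last->ctrl &= ~D_EOL;		/* the old tail rejoins the ring */
	r->last = d;
	r->pending = 0;
}

The trade-off is a little extra latency for the last few buffers in a batch against the cost of flushing the cache on every single receive.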
*/ -#define RX_QUEUE_THRESHOLD NBR_OF_RX_DESC/2 - -#define GET_BIT(bit,val) (((val) >> (bit)) & 0x01) - -/* Define some macros to access ETRAX 100 registers */ -#define SETF(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \ - IO_FIELD_(reg##_, field##_, val) -#define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \ - IO_STATE_(reg##_, field##_, _##val) - -static etrax_eth_descr *myNextRxDesc; /* Points to the next descriptor to - to be processed */ -static etrax_eth_descr *myLastRxDesc; /* The last processed descriptor */ - -static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32))); - -static etrax_eth_descr* myFirstTxDesc; /* First packet not yet sent */ -static etrax_eth_descr* myLastTxDesc; /* End of send queue */ -static etrax_eth_descr* myNextTxDesc; /* Next descriptor to use */ -static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32))); - -static unsigned int network_rec_config_shadow = 0; - -static unsigned int network_tr_ctrl_shadow = 0; - -/* Timers */ -static void e100_check_speed(struct timer_list *unused); -static void e100_clear_network_leds(struct timer_list *unused); -static void e100_check_duplex(struct timer_list *unused); -static DEFINE_TIMER(speed_timer, e100_check_speed); -static DEFINE_TIMER(clear_led_timer, e100_clear_network_leds); -static DEFINE_TIMER(duplex_timer, e100_check_duplex); -static struct net_device *timer_dev; - -/* Network speed indication. */ -static int current_speed; /* Speed read from transceiver */ -static int current_speed_selection; /* Speed selected by user */ -static unsigned long led_next_time; -static int led_active; -static int rx_queue_len; - -/* Duplex */ -static int full_duplex; -static enum duplex current_duplex; - -/* Index to functions, as function prototypes. 
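Backing up to the m_can hunk earlier in this section: the one-character change in RXFC_FWM_MASK deserves a note. (0x7f < RXFC_FWM_SHIFT) is a comparison, not a shift, so the old "mask" evaluated to 1 and any read-modify-write through it manipulated the wrong bit. Mask/shift pairs like this are cheap to pin down at compile time; a sketch in plain C11, reusing the constants from the hunk:

#include <assert.h>
#include <stdint.h>

#define RXFC_FWM_SHIFT	24
#define RXFC_FWM_MASK	(0x7fu << RXFC_FWM_SHIFT)	/* was: 0x7f < SHIFT, i.e. 1 */

/* The mask must be exactly the field width shifted into position. */
static_assert(RXFC_FWM_MASK == 0x7f000000u, "RXFC watermark mask is wrong");

static inline uint32_t rxfc_set_fwm(uint32_t reg, uint32_t fwm)
{
	return (reg & ~RXFC_FWM_MASK) |
	       ((fwm << RXFC_FWM_SHIFT) & RXFC_FWM_MASK);
}

In kernel code the same check would be a BUILD_BUG_ON(), or the mask would be consumed through FIELD_PREP()/FIELD_GET() from linux/bitfield.h, which derive the shift from the mask and make this class of typo impossible.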
*/ - -static int etrax_ethernet_init(void); - -static int e100_open(struct net_device *dev); -static int e100_set_mac_address(struct net_device *dev, void *addr); -static int e100_send_packet(struct sk_buff *skb, struct net_device *dev); -static irqreturn_t e100rxtx_interrupt(int irq, void *dev_id); -static irqreturn_t e100nw_interrupt(int irq, void *dev_id); -static void e100_rx(struct net_device *dev); -static int e100_close(struct net_device *dev); -static int e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); -static int e100_set_config(struct net_device* dev, struct ifmap* map); -static void e100_tx_timeout(struct net_device *dev); -static struct net_device_stats *e100_get_stats(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); -static void e100_hardware_send_packet(struct net_local* np, char *buf, int length); -static void update_rx_stats(struct net_device_stats *); -static void update_tx_stats(struct net_device_stats *); -static int e100_probe_transceiver(struct net_device* dev); - -static void e100_set_speed(struct net_device* dev, unsigned long speed); -static void e100_set_duplex(struct net_device* dev, enum duplex); -static void e100_negotiate(struct net_device* dev); - -static int e100_get_mdio_reg(struct net_device *dev, int phy_id, int location); -static void e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value); - -static void e100_send_mdio_cmd(unsigned short cmd, int write_cmd); -static void e100_send_mdio_bit(unsigned char bit); -static unsigned char e100_receive_mdio_bit(void); -static void e100_reset_transceiver(struct net_device* net); - -static void e100_set_network_leds(int active); - -static const struct ethtool_ops e100_ethtool_ops; -#if defined(CONFIG_ETRAX_NO_PHY) -static void dummy_check_speed(struct net_device* dev); -static void dummy_check_duplex(struct net_device* dev); -#else -static void broadcom_check_speed(struct net_device* dev); -static void broadcom_check_duplex(struct net_device* dev); -static void tdk_check_speed(struct net_device* dev); -static void tdk_check_duplex(struct net_device* dev); -static void intel_check_speed(struct net_device* dev); -static void intel_check_duplex(struct net_device* dev); -static void generic_check_speed(struct net_device* dev); -static void generic_check_duplex(struct net_device* dev); -#endif -#ifdef CONFIG_NET_POLL_CONTROLLER -static void e100_netpoll(struct net_device* dev); -#endif - -static int autoneg_normal = 1; - -struct transceiver_ops transceivers[] = -{ -#if defined(CONFIG_ETRAX_NO_PHY) - {0x0000, dummy_check_speed, dummy_check_duplex} /* Dummy */ -#else - {0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */ - {0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */ - {0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */ - {0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/ - {0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */ -#endif -}; - -struct transceiver_ops* transceiver = &transceivers[0]; - -static const struct net_device_ops e100_netdev_ops = { - .ndo_open = e100_open, - .ndo_stop = e100_close, - .ndo_start_xmit = e100_send_packet, - .ndo_tx_timeout = e100_tx_timeout, - .ndo_get_stats = e100_get_stats, - .ndo_set_rx_mode = set_multicast_list, - .ndo_do_ioctl = e100_ioctl, - .ndo_set_mac_address = e100_set_mac_address, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_config = e100_set_config, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = 
e100_netpoll, -#endif -}; - -#define tx_done(dev) (*R_DMA_CH0_CMD == 0) - -/* - * Check for a network adaptor of this type, and return '0' if one exists. - * If dev->base_addr == 0, probe all likely locations. - * If dev->base_addr == 1, always return failure. - * If dev->base_addr == 2, allocate space for the device and return success - * (detachable devices only). - */ - -static int __init -etrax_ethernet_init(void) -{ - struct net_device *dev; - struct net_local* np; - int i, err; - - printk(KERN_INFO - "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n"); - - if (cris_request_io_interface(if_eth, cardname)) { - printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n"); - return -EBUSY; - } - - dev = alloc_etherdev(sizeof(struct net_local)); - if (!dev) - return -ENOMEM; - - np = netdev_priv(dev); - - /* we do our own locking */ - dev->features |= NETIF_F_LLTX; - - dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */ - - /* now setup our etrax specific stuff */ - - dev->irq = NETWORK_DMA_RX_IRQ_NBR; /* we really use DMATX as well... */ - dev->dma = NETWORK_RX_DMA_NBR; - - /* fill in our handlers so the network layer can talk to us in the future */ - - dev->ethtool_ops = &e100_ethtool_ops; - dev->netdev_ops = &e100_netdev_ops; - - spin_lock_init(&np->lock); - spin_lock_init(&np->led_lock); - spin_lock_init(&np->transceiver_lock); - - /* Initialise the list of Etrax DMA-descriptors */ - - /* Initialise receive descriptors */ - - for (i = 0; i < NBR_OF_RX_DESC; i++) { - /* Allocate two extra cachelines to make sure that buffer used - * by DMA does not share cacheline with any other data (to - * avoid cache bug) - */ - RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES); - if (!RxDescList[i].skb) - return -ENOMEM; - RxDescList[i].descr.ctrl = 0; - RxDescList[i].descr.sw_len = MAX_MEDIA_DATA_SIZE; - RxDescList[i].descr.next = virt_to_phys(&RxDescList[i + 1]); - RxDescList[i].descr.buf = L1_CACHE_ALIGN(virt_to_phys(RxDescList[i].skb->data)); - RxDescList[i].descr.status = 0; - RxDescList[i].descr.hw_len = 0; - prepare_rx_descriptor(&RxDescList[i].descr); - } - - RxDescList[NBR_OF_RX_DESC - 1].descr.ctrl = d_eol; - RxDescList[NBR_OF_RX_DESC - 1].descr.next = virt_to_phys(&RxDescList[0]); - rx_queue_len = 0; - - /* Initialize transmit descriptors */ - for (i = 0; i < NBR_OF_TX_DESC; i++) { - TxDescList[i].descr.ctrl = 0; - TxDescList[i].descr.sw_len = 0; - TxDescList[i].descr.next = virt_to_phys(&TxDescList[i + 1].descr); - TxDescList[i].descr.buf = 0; - TxDescList[i].descr.status = 0; - TxDescList[i].descr.hw_len = 0; - TxDescList[i].skb = 0; - } - - TxDescList[NBR_OF_TX_DESC - 1].descr.ctrl = d_eol; - TxDescList[NBR_OF_TX_DESC - 1].descr.next = virt_to_phys(&TxDescList[0].descr); - - /* Initialise initial pointers */ - - myNextRxDesc = &RxDescList[0]; - myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1]; - myFirstTxDesc = &TxDescList[0]; - myNextTxDesc = &TxDescList[0]; - myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1]; - - /* Register device */ - err = register_netdev(dev); - if (err) { - free_netdev(dev); - return err; - } - - /* set the default MAC address */ - - e100_set_mac_address(dev, &default_mac); - - /* Initialize speed indicator stuff. 
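Another back-reference, to the peak_canfd and peak_pciefd hunks above: both sides of that fix are about lock scope. The transmit path decides whether to stop the queue, and the completion path decides whether to wake it, from the same echo_skb[] occupancy, so each decision now happens while echo_lock is held; deciding after the unlock left a window in which the other side changed the occupancy and the queue could stay stopped for good. A generic sketch of the pattern, hypothetical names throughout:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>

struct demo_priv {
	spinlock_t slot_lock;		/* plays the role of echo_lock */
	struct sk_buff *slot[8];	/* plays the role of can.echo_skb[] */
	unsigned int idx;
	struct net_device *ndev;
};

/* Producer: claim a slot and, still under the lock, stop the queue
 * if the ring just became full. */
static void demo_tx_claim(struct demo_priv *p, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&p->slot_lock, flags);
	p->slot[p->idx] = skb;
	p->idx = (p->idx + 1) % ARRAY_SIZE(p->slot);
	if (p->slot[p->idx])			/* next slot still busy: full */
		netif_stop_queue(p->ndev);
	spin_unlock_irqrestore(&p->slot_lock, flags);
}

/* Consumer (IRQ path): free a slot and wake the queue under the same lock. */
static void demo_tx_done(struct demo_priv *p, unsigned int done)
{
	unsigned long flags;

	spin_lock_irqsave(&p->slot_lock, flags);
	p->slot[done] = NULL;
	if (!p->slot[p->idx])			/* room again for the producer */
		netif_wake_queue(p->ndev);
	spin_unlock_irqrestore(&p->slot_lock, flags);
}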
*/ - - current_speed = 10; - current_speed_selection = 0; /* Auto */ - speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL; - - full_duplex = 0; - current_duplex = autoneg; - duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL; - - timer_dev = dev; - - /* Initialize mii interface */ - np->mii_if.phy_id_mask = 0x1f; - np->mii_if.reg_num_mask = 0x1f; - np->mii_if.dev = dev; - np->mii_if.mdio_read = e100_get_mdio_reg; - np->mii_if.mdio_write = e100_set_mdio_reg; - - /* Initialize group address registers to make sure that no */ - /* unwanted addresses are matched */ - *R_NETWORK_GA_0 = 0x00000000; - *R_NETWORK_GA_1 = 0x00000000; - - /* Initialize next time the led can flash */ - led_next_time = jiffies; - return 0; -} -device_initcall(etrax_ethernet_init) - -/* set MAC address of the interface. called from the core after a - * SIOCSIFADDR ioctl, and from the bootup above. - */ - -static int -e100_set_mac_address(struct net_device *dev, void *p) -{ - struct net_local *np = netdev_priv(dev); - struct sockaddr *addr = p; - - spin_lock(&np->lock); /* preemption protection */ - - /* remember it */ - - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - - /* Write it to the hardware. - * Note the way the address is wrapped: - * *R_NETWORK_SA_0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24); - * *R_NETWORK_SA_1 = a0_4 | (a0_5 << 8); - */ - - *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) | - (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24); - *R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8); - *R_NETWORK_SA_2 = 0; - - /* show it in the log as well */ - - printk(KERN_INFO "%s: changed MAC to %pM\n", dev->name, dev->dev_addr); - - spin_unlock(&np->lock); - - return 0; -} - -/* - * Open/initialize the board. This is called (in the current kernel) - * sometime after booting when the 'ifconfig' program is run. - * - * This routine should set everything up anew at each open, even - * registers that "should" only need to be set once at boot, so that - * there is non-reboot way to recover if something goes wrong. - */ - -static int -e100_open(struct net_device *dev) -{ - unsigned long flags; - - /* enable the MDIO output pin */ - - *R_NETWORK_MGM_CTRL = IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable); - - *R_IRQ_MASK0_CLR = - IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) | - IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) | - IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr); - - /* clear dma0 and 1 eop and descr irq masks */ - *R_IRQ_MASK2_CLR = - IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) | - IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) | - IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) | - IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr); - - /* Reset and wait for the DMA channels */ - - RESET_DMA(NETWORK_TX_DMA_NBR); - RESET_DMA(NETWORK_RX_DMA_NBR); - WAIT_DMA(NETWORK_TX_DMA_NBR); - WAIT_DMA(NETWORK_RX_DMA_NBR); - - /* Initialise the etrax network controller */ - - /* allocate the irq corresponding to the receiving DMA */ - - if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt, 0, cardname, - (void *)dev)) { - goto grace_exit0; - } - - /* allocate the irq corresponding to the transmitting DMA */ - - if (request_irq(NETWORK_DMA_TX_IRQ_NBR, e100rxtx_interrupt, 0, - cardname, (void *)dev)) { - goto grace_exit1; - } - - /* allocate the irq corresponding to the network errors etc */ - - if (request_irq(NETWORK_STATUS_IRQ_NBR, e100nw_interrupt, 0, - cardname, (void *)dev)) { - goto grace_exit2; - } - - /* - * Always allocate the DMA channels after the IRQ, - * and clean up on failure. 
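The "clean up on failure" comment above is the whole story of this open path: resources are acquired in a fixed order (IRQs, then DMA channels, then hardware start) and released in exactly the reverse order through a ladder of goto labels, one per acquired resource. Reduced to its shape, with stand-in functions:

#include <errno.h>

static int demo_acquire_irq(void) { return 0; }	/* stand-ins for request_irq(), */
static int demo_acquire_dma(void) { return 0; }	/* cris_request_dma() and friends */
static int demo_start_hw(void)    { return 0; }
static void demo_release_dma(void) { }
static void demo_release_irq(void) { }

static int demo_open(void)
{
	if (demo_acquire_irq())
		goto err;
	if (demo_acquire_dma())
		goto err_irq;
	if (demo_start_hw())
		goto err_dma;
	return 0;

err_dma:
	demo_release_dma();
err_irq:
	demo_release_irq();
err:
	return -EAGAIN;
}

Each label undoes everything acquired before the jump and nothing after it, which is why the labels read bottom-up in the same order the resources were taken.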
- */ - - if (cris_request_dma(NETWORK_TX_DMA_NBR, - cardname, - DMA_VERBOSE_ON_ERROR, - dma_eth)) { - goto grace_exit3; - } - - if (cris_request_dma(NETWORK_RX_DMA_NBR, - cardname, - DMA_VERBOSE_ON_ERROR, - dma_eth)) { - goto grace_exit4; - } - - /* give the HW an idea of what MAC address we want */ - - *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) | - (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24); - *R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8); - *R_NETWORK_SA_2 = 0; - -#if 0 - /* use promiscuous mode for testing */ - *R_NETWORK_GA_0 = 0xffffffff; - *R_NETWORK_GA_1 = 0xffffffff; - - *R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */ -#else - SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522); - SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive); - SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable); - SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex); - *R_NETWORK_REC_CONFIG = network_rec_config_shadow; -#endif - - *R_NETWORK_GEN_CONFIG = - IO_STATE(R_NETWORK_GEN_CONFIG, phy, mii_clk) | - IO_STATE(R_NETWORK_GEN_CONFIG, enable, on); - - SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); - SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, delay, none); - SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cancel, dont); - SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cd, enable); - SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, retry, enable); - SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, pad, enable); - SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable); - *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; - - local_irq_save(flags); - - /* enable the irq's for ethernet DMA */ - - *R_IRQ_MASK2_SET = - IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) | - IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set); - - *R_IRQ_MASK0_SET = - IO_STATE(R_IRQ_MASK0_SET, overrun, set) | - IO_STATE(R_IRQ_MASK0_SET, underrun, set) | - IO_STATE(R_IRQ_MASK0_SET, excessive_col, set); - - /* make sure the irqs are cleared */ - - *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do); - *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do); - - /* make sure the rec and transmit error counters are cleared */ - - (void)*R_REC_COUNTERS; /* dummy read */ - (void)*R_TR_COUNTERS; /* dummy read */ - - /* start the receiving DMA channel so we can receive packets from now on */ - - *R_DMA_CH1_FIRST = virt_to_phys(myNextRxDesc); - *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, start); - - /* Set up transmit DMA channel so it can be restarted later */ - - *R_DMA_CH0_FIRST = 0; - *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc); - netif_start_queue(dev); - - local_irq_restore(flags); - - /* Probe for transceiver */ - if (e100_probe_transceiver(dev)) - goto grace_exit5; - - /* Start duplex/speed timers */ - add_timer(&speed_timer); - add_timer(&duplex_timer); - - /* We are now ready to accept transmit requeusts from - * the queueing layer of the networking. 
- */ - netif_carrier_on(dev); - - return 0; - -grace_exit5: - cris_free_dma(NETWORK_RX_DMA_NBR, cardname); -grace_exit4: - cris_free_dma(NETWORK_TX_DMA_NBR, cardname); -grace_exit3: - free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev); -grace_exit2: - free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev); -grace_exit1: - free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev); -grace_exit0: - return -EAGAIN; -} - -#if defined(CONFIG_ETRAX_NO_PHY) -static void -dummy_check_speed(struct net_device* dev) -{ - current_speed = 100; -} -#else -static void -generic_check_speed(struct net_device* dev) -{ - unsigned long data; - struct net_local *np = netdev_priv(dev); - - data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE); - if ((data & ADVERTISE_100FULL) || - (data & ADVERTISE_100HALF)) - current_speed = 100; - else - current_speed = 10; -} - -static void -tdk_check_speed(struct net_device* dev) -{ - unsigned long data; - struct net_local *np = netdev_priv(dev); - - data = e100_get_mdio_reg(dev, np->mii_if.phy_id, - MDIO_TDK_DIAGNOSTIC_REG); - current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10); -} - -static void -broadcom_check_speed(struct net_device* dev) -{ - unsigned long data; - struct net_local *np = netdev_priv(dev); - - data = e100_get_mdio_reg(dev, np->mii_if.phy_id, - MDIO_AUX_CTRL_STATUS_REG); - current_speed = (data & MDIO_BC_SPEED ? 100 : 10); -} - -static void -intel_check_speed(struct net_device* dev) -{ - unsigned long data; - struct net_local *np = netdev_priv(dev); - - data = e100_get_mdio_reg(dev, np->mii_if.phy_id, - MDIO_INT_STATUS_REG_2); - current_speed = (data & MDIO_INT_SPEED ? 100 : 10); -} -#endif -static void -e100_check_speed(struct timer_list *unused) -{ - struct net_device* dev = timer_dev; - struct net_local *np = netdev_priv(dev); - static int led_initiated = 0; - unsigned long data; - int old_speed = current_speed; - - spin_lock(&np->transceiver_lock); - - data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR); - if (!(data & BMSR_LSTATUS)) { - current_speed = 0; - } else { - transceiver->check_speed(dev); - } - - spin_lock(&np->led_lock); - if ((old_speed != current_speed) || !led_initiated) { - led_initiated = 1; - e100_set_network_leds(NO_NETWORK_ACTIVITY); - if (current_speed) - netif_carrier_on(dev); - else - netif_carrier_off(dev); - } - spin_unlock(&np->led_lock); - - /* Reinitialize the timer. 
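The "Reinitialize the timer" steps just below show the old self-rearming idiom: set .expires by hand and call add_timer() again from inside the callback. On current kernels the same periodic check is usually written with mod_timer(), which arms or re-arms in one call; a sketch:

#include <linux/timer.h>
#include <linux/jiffies.h>

#define DEMO_CHECK_INTERVAL (2 * HZ)	/* matches NET_LINK_UP_CHECK_INTERVAL */

static void demo_check(struct timer_list *t);
static DEFINE_TIMER(demo_timer, demo_check);

static void demo_check(struct timer_list *t)
{
	/* ... poll link state here ... */
	mod_timer(&demo_timer, jiffies + DEMO_CHECK_INTERVAL);	/* re-arm */
}

mod_timer() also updates an already-pending timer safely, so there is no window where the timer is neither pending nor running.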
*/ - speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL; - add_timer(&speed_timer); - - spin_unlock(&np->transceiver_lock); -} - -static void -e100_negotiate(struct net_device* dev) -{ - struct net_local *np = netdev_priv(dev); - unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id, - MII_ADVERTISE); - - /* Discard old speed and duplex settings */ - data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL | - ADVERTISE_10HALF | ADVERTISE_10FULL); - - switch (current_speed_selection) { - case 10: - if (current_duplex == full) - data |= ADVERTISE_10FULL; - else if (current_duplex == half) - data |= ADVERTISE_10HALF; - else - data |= ADVERTISE_10HALF | ADVERTISE_10FULL; - break; - - case 100: - if (current_duplex == full) - data |= ADVERTISE_100FULL; - else if (current_duplex == half) - data |= ADVERTISE_100HALF; - else - data |= ADVERTISE_100HALF | ADVERTISE_100FULL; - break; - - case 0: /* Auto */ - if (current_duplex == full) - data |= ADVERTISE_100FULL | ADVERTISE_10FULL; - else if (current_duplex == half) - data |= ADVERTISE_100HALF | ADVERTISE_10HALF; - else - data |= ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_100HALF | ADVERTISE_100FULL; - break; - - default: /* assume autoneg speed and duplex */ - data |= ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_100HALF | ADVERTISE_100FULL; - break; - } - - e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data); - - data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR); - if (autoneg_normal) { - /* Renegotiate with link partner */ - data |= BMCR_ANENABLE | BMCR_ANRESTART; - } else { - /* Don't negotiate speed or duplex */ - data &= ~(BMCR_ANENABLE | BMCR_ANRESTART); - - /* Set speed and duplex static */ - if (current_speed_selection == 10) - data &= ~BMCR_SPEED100; - else - data |= BMCR_SPEED100; - - if (current_duplex != full) - data &= ~BMCR_FULLDPLX; - else - data |= BMCR_FULLDPLX; - } - e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data); -} - -static void -e100_set_speed(struct net_device* dev, unsigned long speed) -{ - struct net_local *np = netdev_priv(dev); - - spin_lock(&np->transceiver_lock); - if (speed != current_speed_selection) { - current_speed_selection = speed; - e100_negotiate(dev); - } - spin_unlock(&np->transceiver_lock); -} - -static void -e100_check_duplex(struct timer_list *unused) -{ - struct net_device *dev = timer_dev; - struct net_local *np = netdev_priv(dev); - int old_duplex; - - spin_lock(&np->transceiver_lock); - old_duplex = full_duplex; - transceiver->check_duplex(dev); - if (old_duplex != full_duplex) { - /* Duplex changed */ - SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex); - *R_NETWORK_REC_CONFIG = network_rec_config_shadow; - } - - /* Reinitialize the timer. 
*/ - duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL; - add_timer(&duplex_timer); - np->mii_if.full_duplex = full_duplex; - spin_unlock(&np->transceiver_lock); -} -#if defined(CONFIG_ETRAX_NO_PHY) -static void -dummy_check_duplex(struct net_device* dev) -{ - full_duplex = 1; -} -#else -static void -generic_check_duplex(struct net_device* dev) -{ - unsigned long data; - struct net_local *np = netdev_priv(dev); - - data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE); - if ((data & ADVERTISE_10FULL) || - (data & ADVERTISE_100FULL)) - full_duplex = 1; - else - full_duplex = 0; -} - -static void -tdk_check_duplex(struct net_device* dev) -{ - unsigned long data; - struct net_local *np = netdev_priv(dev); - - data = e100_get_mdio_reg(dev, np->mii_if.phy_id, - MDIO_TDK_DIAGNOSTIC_REG); - full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0; -} - -static void -broadcom_check_duplex(struct net_device* dev) -{ - unsigned long data; - struct net_local *np = netdev_priv(dev); - - data = e100_get_mdio_reg(dev, np->mii_if.phy_id, - MDIO_AUX_CTRL_STATUS_REG); - full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0; -} - -static void -intel_check_duplex(struct net_device* dev) -{ - unsigned long data; - struct net_local *np = netdev_priv(dev); - - data = e100_get_mdio_reg(dev, np->mii_if.phy_id, - MDIO_INT_STATUS_REG_2); - full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0; -} -#endif -static void -e100_set_duplex(struct net_device* dev, enum duplex new_duplex) -{ - struct net_local *np = netdev_priv(dev); - - spin_lock(&np->transceiver_lock); - if (new_duplex != current_duplex) { - current_duplex = new_duplex; - e100_negotiate(dev); - } - spin_unlock(&np->transceiver_lock); -} - -static int -e100_probe_transceiver(struct net_device* dev) -{ - int ret = 0; - -#if !defined(CONFIG_ETRAX_NO_PHY) - unsigned int phyid_high; - unsigned int phyid_low; - unsigned int oui; - struct transceiver_ops* ops = NULL; - struct net_local *np = netdev_priv(dev); - - spin_lock(&np->transceiver_lock); - - /* Probe MDIO physical address */ - for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31; - np->mii_if.phy_id++) { - if (e100_get_mdio_reg(dev, - np->mii_if.phy_id, MII_BMSR) != 0xffff) - break; - } - if (np->mii_if.phy_id == 32) { - ret = -ENODEV; - goto out; - } - - /* Get manufacturer */ - phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1); - phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2); - oui = (phyid_high << 6) | (phyid_low >> 10); - - for (ops = &transceivers[0]; ops->oui; ops++) { - if (ops->oui == oui) - break; - } - transceiver = ops; -out: - spin_unlock(&np->transceiver_lock); -#endif - return ret; -} - -static int -e100_get_mdio_reg(struct net_device *dev, int phy_id, int location) -{ - unsigned short cmd; /* Data to be sent on MDIO port */ - int data; /* Data read from MDIO */ - int bitCounter; - - /* Start of frame, OP Code, Physical Address, Register Address */ - cmd = (MDIO_START << 14) | (MDIO_READ << 12) | (phy_id << 7) | - (location << 2); - - e100_send_mdio_cmd(cmd, 0); - - data = 0; - - /* Data... */ - for (bitCounter=15; bitCounter>=0 ; bitCounter--) { - data |= (e100_receive_mdio_bit() << bitCounter); - } - - return data; -} - -static void -e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value) -{ - int bitCounter; - unsigned short cmd; - - cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (phy_id << 7) | - (location << 2); - - e100_send_mdio_cmd(cmd, 1); - - /* Data... 
*/ - for (bitCounter=15; bitCounter>=0 ; bitCounter--) { - e100_send_mdio_bit(GET_BIT(bitCounter, value)); - } - -} - -static void -e100_send_mdio_cmd(unsigned short cmd, int write_cmd) -{ - int bitCounter; - unsigned char data = 0x2; - - /* Preamble */ - for (bitCounter = 31; bitCounter>= 0; bitCounter--) - e100_send_mdio_bit(GET_BIT(bitCounter, MDIO_PREAMBLE)); - - for (bitCounter = 15; bitCounter >= 2; bitCounter--) - e100_send_mdio_bit(GET_BIT(bitCounter, cmd)); - - /* Turnaround */ - for (bitCounter = 1; bitCounter >= 0 ; bitCounter--) - if (write_cmd) - e100_send_mdio_bit(GET_BIT(bitCounter, data)); - else - e100_receive_mdio_bit(); -} - -static void -e100_send_mdio_bit(unsigned char bit) -{ - *R_NETWORK_MGM_CTRL = - IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) | - IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit); - udelay(1); - *R_NETWORK_MGM_CTRL = - IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) | - IO_MASK(R_NETWORK_MGM_CTRL, mdck) | - IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit); - udelay(1); -} - -static unsigned char -e100_receive_mdio_bit(void) -{ - unsigned char bit; - *R_NETWORK_MGM_CTRL = 0; - bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT); - udelay(1); - *R_NETWORK_MGM_CTRL = IO_MASK(R_NETWORK_MGM_CTRL, mdck); - udelay(1); - return bit; -} - -static void -e100_reset_transceiver(struct net_device* dev) -{ - struct net_local *np = netdev_priv(dev); - unsigned short cmd; - unsigned short data; - int bitCounter; - - data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR); - - cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (np->mii_if.phy_id << 7) | (MII_BMCR << 2); - - e100_send_mdio_cmd(cmd, 1); - - data |= 0x8000; - - for (bitCounter = 15; bitCounter >= 0 ; bitCounter--) { - e100_send_mdio_bit(GET_BIT(bitCounter, data)); - } -} - -/* Called by upper layers if they decide it took too long to complete - * sending a packet - we need to reset and stuff. - */ - -static void -e100_tx_timeout(struct net_device *dev) -{ - struct net_local *np = netdev_priv(dev); - unsigned long flags; - - spin_lock_irqsave(&np->lock, flags); - - printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, - tx_done(dev) ? "IRQ problem" : "network cable problem"); - - /* remember we got an error */ - - dev->stats.tx_errors++; - - /* reset the TX DMA in case it has hung on something */ - - RESET_DMA(NETWORK_TX_DMA_NBR); - WAIT_DMA(NETWORK_TX_DMA_NBR); - - /* Reset the transceiver. */ - - e100_reset_transceiver(dev); - - /* and get rid of the packets that never got an interrupt */ - while (myFirstTxDesc != myNextTxDesc) { - dev_kfree_skb(myFirstTxDesc->skb); - myFirstTxDesc->skb = 0; - myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next); - } - - /* Set up transmit DMA channel so it can be restarted later */ - *R_DMA_CH0_FIRST = 0; - *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc); - - /* tell the upper layers we're ok again */ - - netif_wake_queue(dev); - spin_unlock_irqrestore(&np->lock, flags); -} - - -/* This will only be invoked if the driver is _not_ in XOFF state. - * What this means is that we need not check it, and that this - * invariant will hold if we make sure that the netif_*_queue() - * calls are done at the proper times. 
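The MDIO helpers above bit-bang a standard clause-22 management frame: a 32-bit preamble of ones, then start bits, the opcode (10 for read, 01 for write), a 5-bit PHY address, a 5-bit register address, a turnaround, and 16 data bits, shifted out MSB first. The command word the driver builds is just those fields packed into 16 bits; the same composition in isolation, using the constants defined earlier in the listing:

#include <stdint.h>

#define MDIO_START	0x1
#define MDIO_READ	0x2
#define MDIO_WRITE	0x1

/* ST(2) | OP(2) | PHYAD(5) | REGAD(5) | TA(2); data bits are clocked
 * separately after the turnaround. */
static uint16_t mdio_cmd(unsigned int op, unsigned int phy, unsigned int reg)
{
	return (MDIO_START << 14) | (op << 12) |
	       ((phy & 0x1f) << 7) | ((reg & 0x1f) << 2);
}

/* Example: read the BMSR (register 1) of the PHY at address 0 */
/* uint16_t cmd = mdio_cmd(MDIO_READ, 0, 1); */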
- */ - -static int -e100_send_packet(struct sk_buff *skb, struct net_device *dev) -{ - struct net_local *np = netdev_priv(dev); - unsigned char *buf = skb->data; - unsigned long flags; - -#ifdef ETHDEBUG - printk("send packet len %d\n", length); -#endif - spin_lock_irqsave(&np->lock, flags); /* protect from tx_interrupt and ourself */ - - myNextTxDesc->skb = skb; - - netif_trans_update(dev); /* NETIF_F_LLTX driver :( */ - - e100_hardware_send_packet(np, buf, skb->len); - - myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next); - - /* Stop queue if full */ - if (myNextTxDesc == myFirstTxDesc) { - netif_stop_queue(dev); - } - - spin_unlock_irqrestore(&np->lock, flags); - - return NETDEV_TX_OK; -} - -/* - * The typical workload of the driver: - * Handle the network interface interrupts. - */ - -static irqreturn_t -e100rxtx_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *)dev_id; - unsigned long irqbits; - - /* - * Note that both rx and tx interrupts are blocked at this point, - * regardless of which got us here. - */ - - irqbits = *R_IRQ_MASK2_RD; - - /* Handle received packets */ - if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) { - /* acknowledge the eop interrupt */ - - *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do); - - /* check if one or more complete packets were indeed received */ - - while ((*R_DMA_CH1_FIRST != virt_to_phys(myNextRxDesc)) && - (myNextRxDesc != myLastRxDesc)) { - /* Take out the buffer and give it to the OS, then - * allocate a new buffer to put a packet in. - */ - e100_rx(dev); - dev->stats.rx_packets++; - /* restart/continue on the channel, for safety */ - *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart); - /* clear dma channel 1 eop/descr irq bits */ - *R_DMA_CH1_CLR_INTR = - IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do) | - IO_STATE(R_DMA_CH1_CLR_INTR, clr_descr, do); - - /* now, we might have gotten another packet - so we have to loop back and check if so */ - } - } - - /* Report any packets that have been sent */ - while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST && - (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) { - dev->stats.tx_bytes += myFirstTxDesc->skb->len; - dev->stats.tx_packets++; - - /* dma is ready with the transmission of the data in tx_skb, so now - we can release the skb memory */ - dev_kfree_skb_irq(myFirstTxDesc->skb); - myFirstTxDesc->skb = 0; - myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next); - /* Wake up queue. */ - netif_wake_queue(dev); - } - - if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) { - /* acknowledge the eop interrupt. 
*/ - *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do); - } - - return IRQ_HANDLED; -} - -static irqreturn_t -e100nw_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *)dev_id; - unsigned long irqbits = *R_IRQ_MASK0_RD; - - /* check for underrun irq */ - if (irqbits & IO_STATE(R_IRQ_MASK0_RD, underrun, active)) { - SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); - *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; - SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop); - dev->stats.tx_errors++; - D(printk("ethernet receiver underrun!\n")); - } - - /* check for overrun irq */ - if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) { - update_rx_stats(&dev->stats); /* this will ack the irq */ - D(printk("ethernet receiver overrun!\n")); - } - /* check for excessive collision irq */ - if (irqbits & IO_STATE(R_IRQ_MASK0_RD, excessive_col, active)) { - SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); - *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; - SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop); - dev->stats.tx_errors++; - D(printk("ethernet excessive collisions!\n")); - } - return IRQ_HANDLED; -} - -/* We have a good packet(s), get it/them out of the buffers. */ -static void -e100_rx(struct net_device *dev) -{ - struct sk_buff *skb; - int length = 0; - struct net_local *np = netdev_priv(dev); - unsigned char *skb_data_ptr; -#ifdef ETHDEBUG - int i; -#endif - etrax_eth_descr *prevRxDesc; /* The descriptor right before myNextRxDesc */ - spin_lock(&np->led_lock); - if (!led_active && time_after(jiffies, led_next_time)) { - /* light the network leds depending on the current speed. */ - e100_set_network_leds(NETWORK_ACTIVITY); - - /* Set the earliest time we may clear the LED */ - led_next_time = jiffies + NET_FLASH_TIME; - led_active = 1; - mod_timer(&clear_led_timer, jiffies + HZ/10); - } - spin_unlock(&np->led_lock); - - length = myNextRxDesc->descr.hw_len - 4; - dev->stats.rx_bytes += length; - -#ifdef ETHDEBUG - printk("Got a packet of length %d:\n", length); - /* dump the first bytes in the packet */ - skb_data_ptr = (unsigned char *)phys_to_virt(myNextRxDesc->descr.buf); - for (i = 0; i < 8; i++) { - printk("%d: %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", i * 8, - skb_data_ptr[0],skb_data_ptr[1],skb_data_ptr[2],skb_data_ptr[3], - skb_data_ptr[4],skb_data_ptr[5],skb_data_ptr[6],skb_data_ptr[7]); - skb_data_ptr += 8; - } -#endif - - if (length < RX_COPYBREAK) { - /* Small packet, copy data */ - skb = dev_alloc_skb(length - ETHER_HEAD_LEN); - if (!skb) { - dev->stats.rx_errors++; - printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); - goto update_nextrxdesc; - } - - skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */ - skb_data_ptr = skb_push(skb, ETHER_HEAD_LEN); /* allocate room for the header */ - -#ifdef ETHDEBUG - printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n", - skb->head, skb->data, skb_tail_pointer(skb), - skb_end_pointer(skb)); - printk("copying packet to 0x%x.\n", skb_data_ptr); -#endif - - memcpy(skb_data_ptr, phys_to_virt(myNextRxDesc->descr.buf), length); - } - else { - /* Large packet, send directly to upper layers and allocate new - * memory (aligned to cache line boundary to avoid bug). - * Before sending the skb to upper layers we must make sure - * that skb->data points to the aligned start of the packet. 
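RX_COPYBREAK above is the classic copybreak scheme: below the threshold it is cheaper to copy a short frame into a small fresh skb and recycle the DMA buffer; above it, the driver hands the full buffer up the stack and replenishes the ring slot instead. The decision in isolation, with hypothetical helpers and error handling elided:

#include <linux/skbuff.h>

#define RX_COPYBREAK	256		/* same breakpoint as the listing above */
#define DEMO_BUF_SIZE	1522

struct demo_slot { struct sk_buff *skb; void *buf; };

static struct sk_buff *demo_rx_one(struct demo_slot *s, unsigned int len)
{
	struct sk_buff *skb;

	if (len < RX_COPYBREAK) {
		skb = dev_alloc_skb(len);		/* small: copy and recycle */
		if (skb)
			skb_put_data(skb, s->buf, len);
		return skb;
	}

	skb = s->skb;					/* large: pass the DMA buffer up */
	skb_put(skb, len);
	s->skb = dev_alloc_skb(DEMO_BUF_SIZE);		/* replenish the ring slot */
	return skb;
}

The threshold is a tuning knob: too low wastes full-size buffers on ACK-sized frames, too high burns CPU on memcpy for frames the stack would have consumed immediately anyway.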
- */ - int align; - struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES); - if (!new_skb) { - dev->stats.rx_errors++; - printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); - goto update_nextrxdesc; - } - skb = myNextRxDesc->skb; - align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data; - skb_put(skb, length + align); - skb_pull(skb, align); /* Remove alignment bytes */ - myNextRxDesc->skb = new_skb; - myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data)); - } - - skb->protocol = eth_type_trans(skb, dev); - - /* Send the packet to the upper layers */ - netif_rx(skb); - - update_nextrxdesc: - /* Prepare for next packet */ - myNextRxDesc->descr.status = 0; - prevRxDesc = myNextRxDesc; - myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next); - - rx_queue_len++; - - /* Check if descriptors should be returned */ - if (rx_queue_len == RX_QUEUE_THRESHOLD) { - flush_etrax_cache(); - prevRxDesc->descr.ctrl |= d_eol; - myLastRxDesc->descr.ctrl &= ~d_eol; - myLastRxDesc = prevRxDesc; - rx_queue_len = 0; - } -} - -/* The inverse routine to net_open(). */ -static int -e100_close(struct net_device *dev) -{ - printk(KERN_INFO "Closing %s.\n", dev->name); - - netif_stop_queue(dev); - - *R_IRQ_MASK0_CLR = - IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) | - IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) | - IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr); - - *R_IRQ_MASK2_CLR = - IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) | - IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) | - IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) | - IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr); - - /* Stop the receiver and the transmitter */ - - RESET_DMA(NETWORK_TX_DMA_NBR); - RESET_DMA(NETWORK_RX_DMA_NBR); - - /* Flush the Tx and disable Rx here. */ - - free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev); - free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev); - free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev); - - cris_free_dma(NETWORK_TX_DMA_NBR, cardname); - cris_free_dma(NETWORK_RX_DMA_NBR, cardname); - - /* Update the statistics here. 
*/ - - update_rx_stats(&dev->stats); - update_tx_stats(&dev->stats); - - /* Stop speed/duplex timers */ - del_timer(&speed_timer); - del_timer(&duplex_timer); - - return 0; -} - -static int -e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct mii_ioctl_data *data = if_mii(ifr); - struct net_local *np = netdev_priv(dev); - int rc = 0; - int old_autoneg; - - spin_lock(&np->lock); /* Preempt protection */ - switch (cmd) { - /* The ioctls below should be considered obsolete but are */ - /* still present for compatibility with old scripts/apps */ - case SET_ETH_SPEED_10: /* 10 Mbps */ - e100_set_speed(dev, 10); - break; - case SET_ETH_SPEED_100: /* 100 Mbps */ - e100_set_speed(dev, 100); - break; - case SET_ETH_SPEED_AUTO: /* Auto-negotiate speed */ - e100_set_speed(dev, 0); - break; - case SET_ETH_DUPLEX_HALF: /* Half duplex */ - e100_set_duplex(dev, half); - break; - case SET_ETH_DUPLEX_FULL: /* Full duplex */ - e100_set_duplex(dev, full); - break; - case SET_ETH_DUPLEX_AUTO: /* Auto-negotiate duplex */ - e100_set_duplex(dev, autoneg); - break; - case SET_ETH_AUTONEG: - old_autoneg = autoneg_normal; - autoneg_normal = *(int*)data; - if (autoneg_normal != old_autoneg) - e100_negotiate(dev); - break; - default: - rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr), - cmd, NULL); - break; - } - spin_unlock(&np->lock); - return rc; -} - -static int e100_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct net_local *np = netdev_priv(dev); - u32 supported; - - spin_lock_irq(&np->lock); - mii_ethtool_get_link_ksettings(&np->mii_if, cmd); - spin_unlock_irq(&np->lock); - - /* The PHY may support 1000baseT, but the Etrax100 does not. */ - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); - - supported &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - - return 0; -} - -static int e100_set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *ecmd) -{ - if (ecmd->base.autoneg == AUTONEG_ENABLE) { - e100_set_duplex(dev, autoneg); - e100_set_speed(dev, 0); - } else { - e100_set_duplex(dev, ecmd->base.duplex == DUPLEX_HALF ? - half : full); - e100_set_speed(dev, ecmd->base.speed == SPEED_10 ? 
10 : 100); - } - - return 0; -} - -static void e100_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strlcpy(info->driver, "ETRAX 100LX", sizeof(info->driver)); - strlcpy(info->version, "$Revision: 1.31 $", sizeof(info->version)); - strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); - strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); -} - -static int e100_nway_reset(struct net_device *dev) -{ - if (current_duplex == autoneg && current_speed_selection == 0) - e100_negotiate(dev); - return 0; -} - -static const struct ethtool_ops e100_ethtool_ops = { - .get_drvinfo = e100_get_drvinfo, - .nway_reset = e100_nway_reset, - .get_link = ethtool_op_get_link, - .get_link_ksettings = e100_get_link_ksettings, - .set_link_ksettings = e100_set_link_ksettings, -}; - -static int -e100_set_config(struct net_device *dev, struct ifmap *map) -{ - struct net_local *np = netdev_priv(dev); - - spin_lock(&np->lock); /* Preempt protection */ - - switch(map->port) { - case IF_PORT_UNKNOWN: - /* Use autoneg */ - e100_set_speed(dev, 0); - e100_set_duplex(dev, autoneg); - break; - case IF_PORT_10BASET: - e100_set_speed(dev, 10); - e100_set_duplex(dev, autoneg); - break; - case IF_PORT_100BASET: - case IF_PORT_100BASETX: - e100_set_speed(dev, 100); - e100_set_duplex(dev, autoneg); - break; - case IF_PORT_100BASEFX: - case IF_PORT_10BASE2: - case IF_PORT_AUI: - spin_unlock(&np->lock); - return -EOPNOTSUPP; - default: - printk(KERN_ERR "%s: Invalid media selected", dev->name); - spin_unlock(&np->lock); - return -EINVAL; - } - spin_unlock(&np->lock); - return 0; -} - -static void -update_rx_stats(struct net_device_stats *es) -{ - unsigned long r = *R_REC_COUNTERS; - /* update stats relevant to reception errors */ - es->rx_fifo_errors += IO_EXTRACT(R_REC_COUNTERS, congestion, r); - es->rx_crc_errors += IO_EXTRACT(R_REC_COUNTERS, crc_error, r); - es->rx_frame_errors += IO_EXTRACT(R_REC_COUNTERS, alignment_error, r); - es->rx_length_errors += IO_EXTRACT(R_REC_COUNTERS, oversize, r); -} - -static void -update_tx_stats(struct net_device_stats *es) -{ - unsigned long r = *R_TR_COUNTERS; - /* update stats relevant to transmission errors */ - es->collisions += - IO_EXTRACT(R_TR_COUNTERS, single_col, r) + - IO_EXTRACT(R_TR_COUNTERS, multiple_col, r); -} - -/* - * Get the current statistics. - * This may be called with the card open or closed. - */ -static struct net_device_stats * -e100_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned long flags; - - spin_lock_irqsave(&lp->lock, flags); - - update_rx_stats(&dev->stats); - update_tx_stats(&dev->stats); - - spin_unlock_irqrestore(&lp->lock, flags); - return &dev->stats; -} - -/* - * Set or clear the multicast filter for this adaptor. - * num_addrs == -1 Promiscuous mode, receive all packets - * num_addrs == 0 Normal mode, clear multicast list - * num_addrs > 0 Multicast mode, receive normal and MC packets, - * and do best-effort filtering. 
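The get_link_ksettings handler above masks out 1000baseT by round-tripping the bitmap through a legacy u32, which was the available option for this driver's vintage. Later kernels can clear bits on the link-mode bitmap directly; the equivalent, assuming the linkmode helpers are available:

#include <linux/ethtool.h>
#include <linux/linkmode.h>

static void demo_mask_gige(struct ethtool_link_ksettings *cmd)
{
	/* the MAC tops out at 100 Mbit/s even if the PHY advertises more */
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
			   cmd->link_modes.supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			   cmd->link_modes.supported);
}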
- */ -static void -set_multicast_list(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int num_addr = netdev_mc_count(dev); - unsigned long int lo_bits; - unsigned long int hi_bits; - - spin_lock(&lp->lock); - if (dev->flags & IFF_PROMISC) { - /* promiscuous mode */ - lo_bits = 0xfffffffful; - hi_bits = 0xfffffffful; - - /* Enable individual receive */ - SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, receive); - *R_NETWORK_REC_CONFIG = network_rec_config_shadow; - } else if (dev->flags & IFF_ALLMULTI) { - /* enable all multicasts */ - lo_bits = 0xfffffffful; - hi_bits = 0xfffffffful; - - /* Disable individual receive */ - SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard); - *R_NETWORK_REC_CONFIG = network_rec_config_shadow; - } else if (num_addr == 0) { - /* Normal, clear the mc list */ - lo_bits = 0x00000000ul; - hi_bits = 0x00000000ul; - - /* Disable individual receive */ - SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard); - *R_NETWORK_REC_CONFIG = network_rec_config_shadow; - } else { - /* MC mode, receive normal and MC packets */ - char hash_ix; - struct netdev_hw_addr *ha; - char *baddr; - - lo_bits = 0x00000000ul; - hi_bits = 0x00000000ul; - netdev_for_each_mc_addr(ha, dev) { - /* Calculate the hash index for the GA registers */ - - hash_ix = 0; - baddr = ha->addr; - hash_ix ^= (*baddr) & 0x3f; - hash_ix ^= ((*baddr) >> 6) & 0x03; - ++baddr; - hash_ix ^= ((*baddr) << 2) & 0x03c; - hash_ix ^= ((*baddr) >> 4) & 0xf; - ++baddr; - hash_ix ^= ((*baddr) << 4) & 0x30; - hash_ix ^= ((*baddr) >> 2) & 0x3f; - ++baddr; - hash_ix ^= (*baddr) & 0x3f; - hash_ix ^= ((*baddr) >> 6) & 0x03; - ++baddr; - hash_ix ^= ((*baddr) << 2) & 0x03c; - hash_ix ^= ((*baddr) >> 4) & 0xf; - ++baddr; - hash_ix ^= ((*baddr) << 4) & 0x30; - hash_ix ^= ((*baddr) >> 2) & 0x3f; - - hash_ix &= 0x3f; - - if (hash_ix >= 32) { - hi_bits |= (1 << (hash_ix-32)); - } else { - lo_bits |= (1 << hash_ix); - } - } - /* Disable individual receive */ - SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard); - *R_NETWORK_REC_CONFIG = network_rec_config_shadow; - } - *R_NETWORK_GA_0 = lo_bits; - *R_NETWORK_GA_1 = hi_bits; - spin_unlock(&lp->lock); -} - -void -e100_hardware_send_packet(struct net_local *np, char *buf, int length) -{ - D(printk("e100 send pack, buf 0x%x len %d\n", buf, length)); - - spin_lock(&np->led_lock); - if (!led_active && time_after(jiffies, led_next_time)) { - /* light the network leds depending on the current speed. 
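Looking ahead to the DSA hunks that close this section: they all make the same mechanical change, threading a port argument into get_sset_count() so the ethtool statistics count can, in principle, differ per port. Every driver touched here (b53, dsa_loop, lan9303, ksz, mt7530) simply ignores the new argument for now; the callback shape after the change, sketched with hypothetical names:

#include <linux/kernel.h>
#include <net/dsa.h>

static const char * const demo_mib[] = { "rx_good", "tx_good" };

static int demo_get_sset_count(struct dsa_switch *ds, int port)
{
	return ARRAY_SIZE(demo_mib);	/* same set for every port, today */
}

The related b53_get_strings change from memcpy() to strlcpy() is a small hardening in the same area: counter names shorter than ETH_GSTRING_LEN now come out NUL-terminated instead of carrying whatever bytes trailed them in the source table.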
*/ - e100_set_network_leds(NETWORK_ACTIVITY); - - /* Set the earliest time we may clear the LED */ - led_next_time = jiffies + NET_FLASH_TIME; - led_active = 1; - mod_timer(&clear_led_timer, jiffies + HZ/10); - } - spin_unlock(&np->led_lock); - - /* configure the tx dma descriptor */ - myNextTxDesc->descr.sw_len = length; - myNextTxDesc->descr.ctrl = d_eop | d_eol | d_wait; - myNextTxDesc->descr.buf = virt_to_phys(buf); - - /* Move end of list */ - myLastTxDesc->descr.ctrl &= ~d_eol; - myLastTxDesc = myNextTxDesc; - - /* Restart DMA channel */ - *R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, restart); -} - -static void -e100_clear_network_leds(struct timer_list *unused) -{ - struct net_device *dev = timer_dev; - struct net_local *np = netdev_priv(dev); - - spin_lock(&np->led_lock); - - if (led_active && time_after(jiffies, led_next_time)) { - e100_set_network_leds(NO_NETWORK_ACTIVITY); - - /* Set the earliest time we may set the LED */ - led_next_time = jiffies + NET_FLASH_PAUSE; - led_active = 0; - } - - spin_unlock(&np->led_lock); -} - -static void -e100_set_network_leds(int active) -{ -#if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK) - int light_leds = (active == NO_NETWORK_ACTIVITY); -#elif defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY) - int light_leds = (active == NETWORK_ACTIVITY); -#else -#error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY" -#endif - - if (!current_speed) { - /* Make LED red, link is down */ - CRIS_LED_NETWORK_SET(CRIS_LED_OFF); - } else if (light_leds) { - if (current_speed == 10) { - CRIS_LED_NETWORK_SET(CRIS_LED_ORANGE); - } else { - CRIS_LED_NETWORK_SET(CRIS_LED_GREEN); - } - } else { - CRIS_LED_NETWORK_SET(CRIS_LED_OFF); - } -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void -e100_netpoll(struct net_device* netdev) -{ - e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev); -} -#endif - - -static int __init -e100_boot_setup(char* str) -{ - struct sockaddr sa = {0}; - int i; - - /* Parse the colon separated Ethernet station address */ - for (i = 0; i < ETH_ALEN; i++) { - unsigned int tmp; - if (sscanf(str + 3*i, "%2x", &tmp) != 1) { - printk(KERN_WARNING "Malformed station address"); - return 0; - } - sa.sa_data[i] = (char)tmp; - } - - default_mac = sa; - return 1; -} - -__setup("etrax100_eth=", e100_boot_setup); diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index d040aeb45172..15c2a831edf1 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -1,7 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o -obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o +obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o +ifdef CONFIG_NET_DSA_LOOP +obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o +endif obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index db830a1141d9..78616787f2a3 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -814,8 +814,8 @@ void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) unsigned int i; for (i = 0; i < mib_size; i++) - memcpy(data + i * ETH_GSTRING_LEN, - mibs[i].name, ETH_GSTRING_LEN); + strlcpy(data + i * ETH_GSTRING_LEN, + mibs[i].name, ETH_GSTRING_LEN); } EXPORT_SYMBOL(b53_get_strings); @@ -852,7 +852,7 @@ void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t 
*data) } EXPORT_SYMBOL(b53_get_ethtool_stats); -int b53_get_sset_count(struct dsa_switch *ds) +int b53_get_sset_count(struct dsa_switch *ds, int port) { struct b53_device *dev = ds->priv; diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index ef63d24fef81..c628d0980c0b 100644 --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c @@ -30,7 +30,8 @@ struct b53_mmap_priv { static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) { - u8 __iomem *regs = dev->priv; + struct b53_mmap_priv *priv = dev->priv; + void __iomem *regs = priv->regs; *val = readb(regs + (page << 8) + reg); @@ -39,7 +40,8 @@ static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) static int b53_mmap_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) { - u8 __iomem *regs = dev->priv; + struct b53_mmap_priv *priv = dev->priv; + void __iomem *regs = priv->regs; if (WARN_ON(reg % 2)) return -EINVAL; @@ -54,7 +56,8 @@ static int b53_mmap_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) { - u8 __iomem *regs = dev->priv; + struct b53_mmap_priv *priv = dev->priv; + void __iomem *regs = priv->regs; if (WARN_ON(reg % 4)) return -EINVAL; @@ -69,7 +72,8 @@ static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) { - u8 __iomem *regs = dev->priv; + struct b53_mmap_priv *priv = dev->priv; + void __iomem *regs = priv->regs; if (WARN_ON(reg % 2)) return -EINVAL; @@ -107,7 +111,8 @@ static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) { - u8 __iomem *regs = dev->priv; + struct b53_mmap_priv *priv = dev->priv; + void __iomem *regs = priv->regs; u32 hi, lo; if (WARN_ON(reg % 4)) @@ -128,7 +133,8 @@ static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) static int b53_mmap_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) { - u8 __iomem *regs = dev->priv; + struct b53_mmap_priv *priv = dev->priv; + void __iomem *regs = priv->regs; writeb(value, regs + (page << 8) + reg); @@ -138,7 +144,8 @@ static int b53_mmap_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) static int b53_mmap_write16(struct b53_device *dev, u8 page, u8 reg, u16 value) { - u8 __iomem *regs = dev->priv; + struct b53_mmap_priv *priv = dev->priv; + void __iomem *regs = priv->regs; if (WARN_ON(reg % 2)) return -EINVAL; @@ -154,7 +161,8 @@ static int b53_mmap_write16(struct b53_device *dev, u8 page, u8 reg, static int b53_mmap_write32(struct b53_device *dev, u8 page, u8 reg, u32 value) { - u8 __iomem *regs = dev->priv; + struct b53_mmap_priv *priv = dev->priv; + void __iomem *regs = priv->regs; if (WARN_ON(reg % 4)) return -EINVAL; @@ -223,12 +231,19 @@ static const struct b53_io_ops b53_mmap_ops = { static int b53_mmap_probe(struct platform_device *pdev) { struct b53_platform_data *pdata = pdev->dev.platform_data; + struct b53_mmap_priv *priv; struct b53_device *dev; if (!pdata) return -EINVAL; - dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, pdata->regs); + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->regs = pdata->regs; + + dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, priv); if (!dev) return -ENOMEM; diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 
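The b53_mmap.c conversion above stops storing the ioremapped register base directly in dev->priv and instead allocates a small b53_mmap_priv wrapper carrying the regs pointer, so additional per-device state can be added later without touching every accessor. The idiom reduced to a sketch (the foo_* names are placeholders):

    #include <linux/io.h>
    #include <linux/types.h>

    struct foo_device { void *priv; };              /* opaque handle */
    struct foo_priv   { void __iomem *regs; };      /* room to grow */

    static int foo_read8(struct foo_device *dev, unsigned int off, u8 *val)
    {
            struct foo_priv *priv = dev->priv;      /* unwrap once */

            *val = readb(priv->regs + off);
            return 0;
    }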
d954cf36ecd8..1187ebd79287 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -288,7 +288,7 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port); int b53_configure_vlan(struct dsa_switch *ds); void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data); void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); -int b53_get_sset_count(struct dsa_switch *ds); +int b53_get_sset_count(struct dsa_switch *ds, int port); int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 7aa84ee4e771..f77be9f85cb3 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -86,7 +86,7 @@ static int dsa_loop_setup(struct dsa_switch *ds) return 0; } -static int dsa_loop_get_sset_count(struct dsa_switch *ds) +static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port) { return __DSA_LOOP_CNT_MAX; } diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 6171c0853ff1..fefa454f3e56 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1007,7 +1007,7 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port, } } -static int lan9303_get_sset_count(struct dsa_switch *ds) +static int lan9303_get_sset_count(struct dsa_switch *ds, int port) { return ARRAY_SIZE(lan9303_mib); } diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 663b0d5b982b..bcb3e6c734f2 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -439,7 +439,7 @@ static void ksz_disable_port(struct dsa_switch *ds, int port, ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true); } -static int ksz_sset_count(struct dsa_switch *ds) +static int ksz_sset_count(struct dsa_switch *ds, int port) { return TOTAL_SWITCH_COUNTER_NUM; } diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 8a0bb000d056..80a4dbc3a499 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -604,7 +604,7 @@ mt7530_get_ethtool_stats(struct dsa_switch *ds, int port, } static int -mt7530_get_sset_count(struct dsa_switch *ds) +mt7530_get_sset_count(struct dsa_switch *ds, int port) { return ARRAY_SIZE(mt7530_mib); } @@ -917,7 +917,7 @@ mt7530_port_fdb_add(struct dsa_switch *ds, int port, mutex_lock(&priv->reg_mutex); mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); - ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); + ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); mutex_unlock(&priv->reg_mutex); return ret; @@ -933,7 +933,7 @@ mt7530_port_fdb_del(struct dsa_switch *ds, int port, mutex_lock(&priv->reg_mutex); mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP); - ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); + ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); mutex_unlock(&priv->reg_mutex); return ret; @@ -1293,7 +1293,7 @@ mt7530_setup(struct dsa_switch *ds) } /* Flush the FDB table */ - ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, 0); + ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); if (ret < 0) return ret; @@ -1409,6 +1409,7 @@ static const struct of_device_id mt7530_of_match[] = { { .compatible = "mediatek,mt7530" }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, mt7530_of_match); static struct mdio_driver 
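Every .get_sset_count implementation above gains a port argument, changing the DSA contract so a switch may report a different number of ethtool statistics per port; the mv88e6xxx changes later in this diff use exactly that to expose SERDES counters only on ports that have a SERDES lane. A hedged sketch of a per-port count (the constants and the port test are invented for illustration):

    #include <net/dsa.h>

    #define FOO_N_MIB       20      /* counters on every port */
    #define FOO_N_SERDES    2       /* extra counters on SERDES ports */

    static int foo_get_sset_count(struct dsa_switch *ds, int port)
    {
            bool has_serdes = (port == 4 || port == 5);     /* assumed */

            return FOO_N_MIB + (has_serdes ? FOO_N_SERDES : 0);
    }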
mt7530_mdio_driver = { .probe = mt7530_probe, @@ -1424,4 +1425,3 @@ mdio_module_driver(mt7530_mdio_driver); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:mediatek-mt7530"); diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig index 1aaa7a95ebc4..ae9e7f7cb31c 100644 --- a/drivers/net/dsa/mv88e6xxx/Kconfig +++ b/drivers/net/dsa/mv88e6xxx/Kconfig @@ -18,3 +18,13 @@ config NET_DSA_MV88E6XXX_GLOBAL2 It is required on most chips. If the chip you compile the support for doesn't have such registers set, say N here. In doubt, say Y. + +config NET_DSA_MV88E6XXX_PTP + bool "PTP support for Marvell 88E6xxx" + default n + depends on NET_DSA_MV88E6XXX_GLOBAL2 + imply NETWORK_PHY_TIMESTAMPING + imply PTP_1588_CLOCK + help + Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch + chips that support it. diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile index 58a4a0014e59..50de304abe2f 100644 --- a/drivers/net/dsa/mv88e6xxx/Makefile +++ b/drivers/net/dsa/mv88e6xxx/Makefile @@ -5,6 +5,10 @@ mv88e6xxx-objs += global1.o mv88e6xxx-objs += global1_atu.o mv88e6xxx-objs += global1_vtu.o mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o +mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2_avb.o +mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2_scratch.o +mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += hwtstamp.o mv88e6xxx-objs += phy.o mv88e6xxx-objs += port.o +mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o mv88e6xxx-objs += serdes.o diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index eb328bade225..3d2091099f7f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -36,8 +36,10 @@ #include "chip.h" #include "global1.h" #include "global2.h" +#include "hwtstamp.h" #include "phy.h" #include "port.h" +#include "ptp.h" #include "serdes.h" static void assert_reg_lock(struct mv88e6xxx_chip *chip) @@ -251,9 +253,8 @@ static void mv88e6xxx_g1_irq_unmask(struct irq_data *d) chip->g1_irq.masked &= ~(1 << n); } -static irqreturn_t mv88e6xxx_g1_irq_thread_fn(int irq, void *dev_id) +static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) { - struct mv88e6xxx_chip *chip = dev_id; unsigned int nhandled = 0; unsigned int sub_irq; unsigned int n; @@ -278,6 +279,13 @@ out: return (nhandled > 0 ? 
IRQ_HANDLED : IRQ_NONE); } +static irqreturn_t mv88e6xxx_g1_irq_thread_fn(int irq, void *dev_id) +{ + struct mv88e6xxx_chip *chip = dev_id; + + return mv88e6xxx_g1_irq_thread_work(chip); +} + static void mv88e6xxx_g1_irq_bus_lock(struct irq_data *d) { struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d); @@ -333,7 +341,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = { .xlate = irq_domain_xlate_twocell, }; -static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip) +static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip) { int irq, virq; u16 mask; @@ -342,8 +350,6 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip) mask &= ~GENMASK(chip->g1_irq.nirqs, 0); mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask); - free_irq(chip->irq, chip); - for (irq = 0; irq < chip->g1_irq.nirqs; irq++) { virq = irq_find_mapping(chip->g1_irq.domain, irq); irq_dispose_mapping(virq); @@ -352,7 +358,14 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip) irq_domain_remove(chip->g1_irq.domain); } -static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip) +static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip) +{ + mv88e6xxx_g1_irq_free_common(chip); + + free_irq(chip->irq, chip); +} + +static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip) { int err, irq, virq; u16 reg, mask; @@ -385,13 +398,6 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip) if (err) goto out_disable; - err = request_threaded_irq(chip->irq, NULL, - mv88e6xxx_g1_irq_thread_fn, - IRQF_ONESHOT | IRQF_TRIGGER_FALLING, - dev_name(chip->dev), chip); - if (err) - goto out_disable; - return 0; out_disable: @@ -409,6 +415,64 @@ out_mapping: return err; } +static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip) +{ + int err; + + err = mv88e6xxx_g1_irq_setup_common(chip); + if (err) + return err; + + err = request_threaded_irq(chip->irq, NULL, + mv88e6xxx_g1_irq_thread_fn, + IRQF_ONESHOT, + dev_name(chip->dev), chip); + if (err) + mv88e6xxx_g1_irq_free_common(chip); + + return err; +} + +static void mv88e6xxx_irq_poll(struct kthread_work *work) +{ + struct mv88e6xxx_chip *chip = container_of(work, + struct mv88e6xxx_chip, + irq_poll_work.work); + mv88e6xxx_g1_irq_thread_work(chip); + + kthread_queue_delayed_work(chip->kworker, &chip->irq_poll_work, + msecs_to_jiffies(100)); +} + +static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip) +{ + int err; + + err = mv88e6xxx_g1_irq_setup_common(chip); + if (err) + return err; + + kthread_init_delayed_work(&chip->irq_poll_work, + mv88e6xxx_irq_poll); + + chip->kworker = kthread_create_worker(0, dev_name(chip->dev)); + if (IS_ERR(chip->kworker)) + return PTR_ERR(chip->kworker); + + kthread_queue_delayed_work(chip->kworker, &chip->irq_poll_work, + msecs_to_jiffies(100)); + + return 0; +} + +static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip) +{ + mv88e6xxx_g1_irq_free_common(chip); + + kthread_cancel_delayed_work_sync(&chip->irq_poll_work); + kthread_destroy_worker(chip->kworker); +} + int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask) { int i; @@ -604,7 +668,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, return UINT64_MAX; low = reg; - if (s->sizeof_stat == 4) { + if (s->size == 4) { err = mv88e6xxx_port_read(chip, port, s->reg + 1, ®); if (err) return UINT64_MAX; @@ -617,7 +681,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, case STATS_TYPE_BANK0: reg |= s->reg | histogram; 
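The mv88e6xxx_irq_poll*() additions above give chips with no interrupt line wired up a fallback: the shared G1 handler body is driven from a kthread worker that re-arms itself every 100 ms instead of from a threaded IRQ. Stripped to the bare self-rearming pattern (poll_hw() stands in for mv88e6xxx_g1_irq_thread_work()):

    #include <linux/kthread.h>
    #include <linux/jiffies.h>

    static struct kthread_worker *worker;
    static struct kthread_delayed_work poll_work;

    static void poll_fn(struct kthread_work *work)
    {
            poll_hw();              /* hypothetical device-specific body */

            /* re-queue ourselves; one work item stays in flight */
            kthread_queue_delayed_work(worker, &poll_work,
                                       msecs_to_jiffies(100));
    }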
mv88e6xxx_g1_stats_read(chip, reg, &low); - if (s->sizeof_stat == 8) + if (s->size == 8) mv88e6xxx_g1_stats_read(chip, reg + 1, &high); break; default: @@ -627,8 +691,8 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, return value; } -static void mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip, - uint8_t *data, int types) +static int mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip, + uint8_t *data, int types) { struct mv88e6xxx_hw_stat *stat; int i, j; @@ -641,29 +705,62 @@ static void mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip, j++; } } + + return j; } -static void mv88e6095_stats_get_strings(struct mv88e6xxx_chip *chip, - uint8_t *data) +static int mv88e6095_stats_get_strings(struct mv88e6xxx_chip *chip, + uint8_t *data) { - mv88e6xxx_stats_get_strings(chip, data, - STATS_TYPE_BANK0 | STATS_TYPE_PORT); + return mv88e6xxx_stats_get_strings(chip, data, + STATS_TYPE_BANK0 | STATS_TYPE_PORT); } -static void mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip, - uint8_t *data) +static int mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip, + uint8_t *data) { - mv88e6xxx_stats_get_strings(chip, data, - STATS_TYPE_BANK0 | STATS_TYPE_BANK1); + return mv88e6xxx_stats_get_strings(chip, data, + STATS_TYPE_BANK0 | STATS_TYPE_BANK1); +} + +static const uint8_t *mv88e6xxx_atu_vtu_stats_strings[] = { + "atu_member_violation", + "atu_miss_violation", + "atu_full_violation", + "vtu_member_violation", + "vtu_miss_violation", +}; + +static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings); i++) + strlcpy(data + i * ETH_GSTRING_LEN, + mv88e6xxx_atu_vtu_stats_strings[i], + ETH_GSTRING_LEN); } static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data) { struct mv88e6xxx_chip *chip = ds->priv; + int count = 0; + + mutex_lock(&chip->reg_lock); if (chip->info->ops->stats_get_strings) - chip->info->ops->stats_get_strings(chip, data); + count = chip->info->ops->stats_get_strings(chip, data); + + if (chip->info->ops->serdes_get_strings) { + data += count * ETH_GSTRING_LEN; + count = chip->info->ops->serdes_get_strings(chip, port, data); + } + + data += count * ETH_GSTRING_LEN; + mv88e6xxx_atu_vtu_get_strings(data); + + mutex_unlock(&chip->reg_lock); } static int mv88e6xxx_stats_get_sset_count(struct mv88e6xxx_chip *chip, @@ -692,19 +789,37 @@ static int mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip) STATS_TYPE_BANK1); } -static int mv88e6xxx_get_sset_count(struct dsa_switch *ds) +static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port) { struct mv88e6xxx_chip *chip = ds->priv; + int serdes_count = 0; + int count = 0; + mutex_lock(&chip->reg_lock); if (chip->info->ops->stats_get_sset_count) - return chip->info->ops->stats_get_sset_count(chip); + count = chip->info->ops->stats_get_sset_count(chip); + if (count < 0) + goto out; - return 0; + if (chip->info->ops->serdes_get_sset_count) + serdes_count = chip->info->ops->serdes_get_sset_count(chip, + port); + if (serdes_count < 0) { + count = serdes_count; + goto out; + } + count += serdes_count; + count += ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings); + +out: + mutex_unlock(&chip->reg_lock); + + return count; } -static void mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port, - uint64_t *data, int types, - u16 bank1_select, u16 histogram) +static int mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port, + uint64_t *data, int types, + u16 bank1_select, 
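With the stats_get_strings()/stats_get_stats() callbacks above now returning how many entries they wrote, mv88e6xxx_get_strings() can append successive groups -- hardware MIBs, then SERDES counters, then the fixed ATU/VTU violation strings -- into a single ethtool buffer. The fill-and-advance idiom in isolation (the fill_* helpers are stand-ins for the ops callbacks):

    #include <linux/ethtool.h>      /* ETH_GSTRING_LEN */
    #include <linux/types.h>

    static void foo_get_strings(u8 *data)
    {
            int count;

            count = fill_mib_strings(data);         /* hypothetical */
            data += count * ETH_GSTRING_LEN;        /* skip that group */

            count = fill_serdes_strings(data);      /* hypothetical */
            data += count * ETH_GSTRING_LEN;

            fill_atu_vtu_strings(data);             /* fixed-size tail */
    }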
u16 histogram) { struct mv88e6xxx_hw_stat *stat; int i, j; @@ -712,24 +827,28 @@ static void mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port, for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { stat = &mv88e6xxx_hw_stats[i]; if (stat->type & types) { + mutex_lock(&chip->reg_lock); data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port, bank1_select, histogram); + mutex_unlock(&chip->reg_lock); + j++; } } + return j; } -static void mv88e6095_stats_get_stats(struct mv88e6xxx_chip *chip, int port, - uint64_t *data) +static int mv88e6095_stats_get_stats(struct mv88e6xxx_chip *chip, int port, + uint64_t *data) { return mv88e6xxx_stats_get_stats(chip, port, data, STATS_TYPE_BANK0 | STATS_TYPE_PORT, 0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX); } -static void mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port, - uint64_t *data) +static int mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port, + uint64_t *data) { return mv88e6xxx_stats_get_stats(chip, port, data, STATS_TYPE_BANK0 | STATS_TYPE_BANK1, @@ -737,8 +856,8 @@ static void mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port, MV88E6XXX_G1_STATS_OP_HIST_RX_TX); } -static void mv88e6390_stats_get_stats(struct mv88e6xxx_chip *chip, int port, - uint64_t *data) +static int mv88e6390_stats_get_stats(struct mv88e6xxx_chip *chip, int port, + uint64_t *data) { return mv88e6xxx_stats_get_stats(chip, port, data, STATS_TYPE_BANK0 | STATS_TYPE_BANK1, @@ -746,11 +865,32 @@ static void mv88e6390_stats_get_stats(struct mv88e6xxx_chip *chip, int port, 0); } +static void mv88e6xxx_atu_vtu_get_stats(struct mv88e6xxx_chip *chip, int port, + uint64_t *data) +{ + *data++ = chip->ports[port].atu_member_violation; + *data++ = chip->ports[port].atu_miss_violation; + *data++ = chip->ports[port].atu_full_violation; + *data++ = chip->ports[port].vtu_member_violation; + *data++ = chip->ports[port].vtu_miss_violation; +} + static void mv88e6xxx_get_stats(struct mv88e6xxx_chip *chip, int port, uint64_t *data) { + int count = 0; + if (chip->info->ops->stats_get_stats) - chip->info->ops->stats_get_stats(chip, port, data); + count = chip->info->ops->stats_get_stats(chip, port, data); + + mutex_lock(&chip->reg_lock); + if (chip->info->ops->serdes_get_stats) { + data += count; + count = chip->info->ops->serdes_get_stats(chip, port, data); + } + data += count; + mv88e6xxx_atu_vtu_get_stats(chip, port, data); + mutex_unlock(&chip->reg_lock); } static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, @@ -762,14 +902,13 @@ static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, mutex_lock(&chip->reg_lock); ret = mv88e6xxx_stats_snapshot(chip, port); - if (ret < 0) { - mutex_unlock(&chip->reg_lock); + mutex_unlock(&chip->reg_lock); + + if (ret < 0) return; - } mv88e6xxx_get_stats(chip, port, data); - mutex_unlock(&chip->reg_lock); } static int mv88e6xxx_stats_set_histogram(struct mv88e6xxx_chip *chip) @@ -1433,7 +1572,9 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, eth_broadcast_addr(addr.mac); do { + mutex_lock(&chip->reg_lock); err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr); + mutex_unlock(&chip->reg_lock); if (err) return err; @@ -1466,7 +1607,10 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, int err; /* Dump port's default Filtering Information Database (VLAN ID 0) */ + mutex_lock(&chip->reg_lock); err = mv88e6xxx_port_get_fid(chip, port, &fid); + mutex_unlock(&chip->reg_lock); + if (err) return err; @@ -1476,7 +1620,9 @@ static int 
mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, /* Dump VLANs' Filtering Information Databases */ do { + mutex_lock(&chip->reg_lock); err = mv88e6xxx_vtu_getnext(chip, &vlan); + mutex_unlock(&chip->reg_lock); if (err) return err; @@ -1496,13 +1642,8 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data) { struct mv88e6xxx_chip *chip = ds->priv; - int err; - - mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_db_dump(chip, port, cb, data); - mutex_unlock(&chip->reg_lock); - return err; + return mv88e6xxx_port_db_dump(chip, port, cb, data); } static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip, @@ -2092,6 +2233,17 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) if (err) goto unlock; + /* Setup PTP Hardware Clock and timestamping */ + if (chip->info->ptp_support) { + err = mv88e6xxx_ptp_setup(chip); + if (err) + goto unlock; + + err = mv88e6xxx_hwtstamp_setup(chip); + if (err) + goto unlock; + } + unlock: mutex_unlock(&chip->reg_lock); @@ -2148,6 +2300,15 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, struct mii_bus *bus; int err; + if (external) { + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true); + mutex_unlock(&chip->reg_lock); + + if (err) + return err; + } + bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus)); if (!bus) return -ENOMEM; @@ -2170,12 +2331,19 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, bus->write = mv88e6xxx_mdio_write; bus->parent = chip->dev; + if (!external) { + err = mv88e6xxx_g2_irq_mdio_setup(chip, bus); + if (err) + return err; + } + if (np) err = of_mdiobus_register(bus, np); else err = mdiobus_register(bus); if (err) { dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err); + mv88e6xxx_g2_irq_mdio_free(chip, bus); return err; } @@ -2202,6 +2370,9 @@ static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip) list_for_each_entry(mdio_bus, &chip->mdios, list) { bus = mdio_bus->bus; + if (!mdio_bus->external) + mv88e6xxx_g2_irq_mdio_free(chip, bus); + mdiobus_unregister(bus); } } @@ -2472,6 +2643,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .gpio_ops = &mv88e6352_gpio_ops, }; static const struct mv88e6xxx_ops mv88e6161_ops = { @@ -2602,6 +2774,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, + .gpio_ops = &mv88e6352_gpio_ops, }; static const struct mv88e6xxx_ops mv88e6175_ops = { @@ -2673,6 +2846,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, + .gpio_ops = &mv88e6352_gpio_ops, }; static const struct mv88e6xxx_ops mv88e6185_ops = { @@ -2736,6 +2910,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, + .gpio_ops = &mv88e6352_gpio_ops, }; static const struct mv88e6xxx_ops mv88e6190x_ops = { @@ -2771,6 +2946,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, + .gpio_ops = &mv88e6352_gpio_ops, }; static const struct mv88e6xxx_ops mv88e6191_ops = { 
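The db_dump changes above narrow reg_lock to each individual getnext access instead of holding it across the whole ATU/VTU walk, so the caller-supplied dump callback always runs unlocked. The per-iteration locking shape as a sketch (foo_*, hw_getnext() and the entry layout are placeholders):

    #include <linux/mutex.h>

    struct foo_chip  { struct mutex reg_lock; };
    struct foo_entry { bool last; };

    static int foo_walk(struct foo_chip *chip,
                        int (*cb)(struct foo_entry *e, void *priv), void *priv)
    {
            struct foo_entry entry = { 0 };
            int err;

            do {
                    mutex_lock(&chip->reg_lock);
                    err = hw_getnext(chip, &entry); /* hypothetical */
                    mutex_unlock(&chip->reg_lock);
                    if (err)
                            return err;

                    err = cb(&entry, priv);         /* runs unlocked */
                    if (err)
                            return err;
            } while (!entry.last);

            return 0;
    }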
@@ -2843,6 +3019,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, + .gpio_ops = &mv88e6352_gpio_ops, + .avb_ops = &mv88e6352_avb_ops, }; static const struct mv88e6xxx_ops mv88e6290_ops = { @@ -2879,6 +3057,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, + .gpio_ops = &mv88e6352_gpio_ops, + .avb_ops = &mv88e6390_avb_ops, }; static const struct mv88e6xxx_ops mv88e6320_ops = { @@ -2913,6 +3093,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, + .gpio_ops = &mv88e6352_gpio_ops, + .avb_ops = &mv88e6352_avb_ops, }; static const struct mv88e6xxx_ops mv88e6321_ops = { @@ -2945,6 +3127,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, + .gpio_ops = &mv88e6352_gpio_ops, + .avb_ops = &mv88e6352_avb_ops, }; static const struct mv88e6xxx_ops mv88e6341_ops = { @@ -2981,6 +3165,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .gpio_ops = &mv88e6352_gpio_ops, + .avb_ops = &mv88e6390_avb_ops, }; static const struct mv88e6xxx_ops mv88e6350_ops = { @@ -3049,6 +3235,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .avb_ops = &mv88e6352_avb_ops, }; static const struct mv88e6xxx_ops mv88e6352_ops = { @@ -3086,6 +3273,11 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, + .gpio_ops = &mv88e6352_gpio_ops, + .avb_ops = &mv88e6352_avb_ops, + .serdes_get_sset_count = mv88e6352_serdes_get_sset_count, + .serdes_get_strings = mv88e6352_serdes_get_strings, + .serdes_get_stats = mv88e6352_serdes_get_stats, }; static const struct mv88e6xxx_ops mv88e6390_ops = { @@ -3124,6 +3316,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, + .gpio_ops = &mv88e6352_gpio_ops, + .avb_ops = &mv88e6390_avb_ops, }; static const struct mv88e6xxx_ops mv88e6390x_ops = { @@ -3162,6 +3356,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, + .gpio_ops = &mv88e6352_gpio_ops, + .avb_ops = &mv88e6390_avb_ops, }; static const struct mv88e6xxx_info mv88e6xxx_table[] = { @@ -3171,6 +3367,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6085", .num_databases = 4096, .num_ports = 10, + .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3191,6 +3388,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6095/88E6095F", .num_databases = 256, .num_ports = 11, + .num_internal_phys = 0, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3209,6 +3407,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = 
{ .name = "Marvell 88E6097/88E6097F", .num_databases = 4096, .num_ports = 11, + .num_internal_phys = 8, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3229,6 +3428,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6123", .num_databases = 4096, .num_ports = 3, + .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3249,6 +3449,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6131", .num_databases = 256, .num_ports = 8, + .num_internal_phys = 0, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3264,15 +3465,18 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { [MV88E6141] = { .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141, .family = MV88E6XXX_FAMILY_6341, - .name = "Marvell 88E6341", + .name = "Marvell 88E6141", .num_databases = 4096, .num_ports = 6, + .num_internal_phys = 5, + .num_gpio = 11, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, .atu_move_port_mask = 0x1f, + .g1_irqs = 9, .g2_irqs = 10, .pvt = true, .multi_chip = true, @@ -3286,6 +3490,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6161", .num_databases = 4096, .num_ports = 6, + .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3306,6 +3511,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6165", .num_databases = 4096, .num_ports = 6, + .num_internal_phys = 0, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3326,6 +3532,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6171", .num_databases = 4096, .num_ports = 7, + .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3346,6 +3553,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6172", .num_databases = 4096, .num_ports = 7, + .num_internal_phys = 5, + .num_gpio = 15, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3366,6 +3575,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6175", .num_databases = 4096, .num_ports = 7, + .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3386,6 +3596,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6176", .num_databases = 4096, .num_ports = 7, + .num_internal_phys = 5, + .num_gpio = 15, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3406,6 +3618,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6185", .num_databases = 256, .num_ports = 10, + .num_internal_phys = 0, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3424,6 +3637,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6190", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ + .num_internal_phys = 11, + .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, @@ -3444,6 +3659,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6190X", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ + .num_internal_phys = 11, + .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, @@ -3464,6 +3681,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6191", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ + .num_internal_phys = 11, 
.max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, @@ -3475,6 +3693,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, .ops = &mv88e6191_ops, }, @@ -3484,6 +3703,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6240", .num_databases = 4096, .num_ports = 7, + .num_internal_phys = 5, + .num_gpio = 15, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3495,6 +3716,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, + .ptp_support = true, .ops = &mv88e6240_ops, }, @@ -3504,6 +3726,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6290", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ + .num_internal_phys = 11, + .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, @@ -3515,6 +3739,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, .ops = &mv88e6290_ops, }, @@ -3524,16 +3749,20 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6320", .num_databases = 4096, .num_ports = 7, + .num_internal_phys = 5, + .num_gpio = 15, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, + .ptp_support = true, .ops = &mv88e6320_ops, }, @@ -3543,15 +3772,19 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6321", .num_databases = 4096, .num_ports = 7, + .num_internal_phys = 5, + .num_gpio = 15, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, + .ptp_support = true, .ops = &mv88e6321_ops, }, @@ -3560,17 +3793,21 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6341, .name = "Marvell 88E6341", .num_databases = 4096, + .num_internal_phys = 5, .num_ports = 6, + .num_gpio = 11, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, .atu_move_port_mask = 0x1f, + .g1_irqs = 9, .g2_irqs = 10, .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, + .ptp_support = true, .ops = &mv88e6341_ops, }, @@ -3580,6 +3817,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6350", .num_databases = 4096, .num_ports = 7, + .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3600,6 +3838,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6351", .num_databases = 4096, .num_ports = 7, + .num_internal_phys = 5, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3620,6 +3859,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6352", .num_databases = 4096, .num_ports = 7, + .num_internal_phys = 5, + .num_gpio = 15, .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, @@ -3631,6 +3872,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, + .ptp_support = true, .ops = &mv88e6352_ops, }, [MV88E6390] = { 
@@ -3639,6 +3881,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6390", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ + .num_internal_phys = 11, + .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, @@ -3650,6 +3894,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, .ops = &mv88e6390_ops, }, [MV88E6390X] = { @@ -3658,6 +3903,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6390X", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ + .num_internal_phys = 11, + .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, @@ -3669,6 +3916,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, .ops = &mv88e6390x_ops, }, }; @@ -3880,6 +4128,11 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_mdb_del = mv88e6xxx_port_mdb_del, .crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join, .crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave, + .port_hwtstamp_set = mv88e6xxx_port_hwtstamp_set, + .port_hwtstamp_get = mv88e6xxx_port_hwtstamp_get, + .port_txtstamp = mv88e6xxx_port_txtstamp, + .port_rxtstamp = mv88e6xxx_port_rxtstamp, + .get_ts_info = mv88e6xxx_get_ts_info, }; static struct dsa_switch_driver mv88e6xxx_switch_drv = { @@ -3959,33 +4212,34 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) goto out; } - if (chip->irq > 0) { - /* Has to be performed before the MDIO bus is created, - * because the PHYs will link there interrupts to these - * interrupt controllers - */ - mutex_lock(&chip->reg_lock); + /* Has to be performed before the MDIO bus is created, because + * the PHYs will link their interrupts to these interrupt + * controllers + */ + mutex_lock(&chip->reg_lock); + if (chip->irq > 0) err = mv88e6xxx_g1_irq_setup(chip); - mutex_unlock(&chip->reg_lock); - - if (err) - goto out; - - if (chip->info->g2_irqs > 0) { - err = mv88e6xxx_g2_irq_setup(chip); - if (err) - goto out_g1_irq; - } + else + err = mv88e6xxx_irq_poll_setup(chip); + mutex_unlock(&chip->reg_lock); - err = mv88e6xxx_g1_atu_prob_irq_setup(chip); - if (err) - goto out_g2_irq; + if (err) + goto out; - err = mv88e6xxx_g1_vtu_prob_irq_setup(chip); + if (chip->info->g2_irqs > 0) { + err = mv88e6xxx_g2_irq_setup(chip); if (err) - goto out_g1_atu_prob_irq; + goto out_g1_irq; } + err = mv88e6xxx_g1_atu_prob_irq_setup(chip); + if (err) + goto out_g2_irq; + + err = mv88e6xxx_g1_vtu_prob_irq_setup(chip); + if (err) + goto out_g1_atu_prob_irq; + err = mv88e6xxx_mdios_register(chip, np); if (err) goto out_g1_vtu_prob_irq; @@ -3999,20 +4253,19 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) out_mdio: mv88e6xxx_mdios_unregister(chip); out_g1_vtu_prob_irq: - if (chip->irq > 0) - mv88e6xxx_g1_vtu_prob_irq_free(chip); + mv88e6xxx_g1_vtu_prob_irq_free(chip); out_g1_atu_prob_irq: - if (chip->irq > 0) - mv88e6xxx_g1_atu_prob_irq_free(chip); + mv88e6xxx_g1_atu_prob_irq_free(chip); out_g2_irq: - if (chip->info->g2_irqs > 0 && chip->irq > 0) + if (chip->info->g2_irqs > 0) mv88e6xxx_g2_irq_free(chip); out_g1_irq: - if (chip->irq > 0) { - mutex_lock(&chip->reg_lock); + mutex_lock(&chip->reg_lock); + if (chip->irq > 0) mv88e6xxx_g1_irq_free(chip); - mutex_unlock(&chip->reg_lock); - } + else + mv88e6xxx_irq_poll_free(chip); + mutex_unlock(&chip->reg_lock); out: return err; } @@ -4022,19 
+4275,27 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); struct mv88e6xxx_chip *chip = ds->priv; + if (chip->info->ptp_support) { + mv88e6xxx_hwtstamp_free(chip); + mv88e6xxx_ptp_free(chip); + } + mv88e6xxx_phy_destroy(chip); mv88e6xxx_unregister_switch(chip); mv88e6xxx_mdios_unregister(chip); - if (chip->irq > 0) { - mv88e6xxx_g1_vtu_prob_irq_free(chip); - mv88e6xxx_g1_atu_prob_irq_free(chip); - if (chip->info->g2_irqs > 0) - mv88e6xxx_g2_irq_free(chip); - mutex_lock(&chip->reg_lock); + mv88e6xxx_g1_vtu_prob_irq_free(chip); + mv88e6xxx_g1_atu_prob_irq_free(chip); + + if (chip->info->g2_irqs > 0) + mv88e6xxx_g2_irq_free(chip); + + mutex_lock(&chip->reg_lock); + if (chip->irq > 0) mv88e6xxx_g1_irq_free(chip); - mutex_unlock(&chip->reg_lock); - } + else + mv88e6xxx_irq_poll_free(chip); + mutex_unlock(&chip->reg_lock); } static const struct of_device_id mv88e6xxx_of_match[] = { diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 3dba6e90adcf..80490f66bc06 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -15,7 +15,10 @@ #include <linux/if_vlan.h> #include <linux/irq.h> #include <linux/gpio/consumer.h> +#include <linux/kthread.h> #include <linux/phy.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/timecounter.h> #include <net/dsa.h> #ifndef UINT64_MAX @@ -39,6 +42,8 @@ #define MV88E6XXX_MAX_PVT_SWITCHES 32 #define MV88E6XXX_MAX_PVT_PORTS 16 +#define MV88E6XXX_MAX_GPIO 16 + enum mv88e6xxx_egress_mode { MV88E6XXX_EGRESS_MODE_UNMODIFIED, MV88E6XXX_EGRESS_MODE_UNTAGGED, @@ -105,6 +110,8 @@ struct mv88e6xxx_info { const char *name; unsigned int num_databases; unsigned int num_ports; + unsigned int num_internal_phys; + unsigned int num_gpio; unsigned int max_vid; unsigned int port_base_addr; unsigned int global1_addr; @@ -126,6 +133,9 @@ struct mv88e6xxx_info { */ u8 atu_move_port_mask; const struct mv88e6xxx_ops *ops; + + /* Supports PTP */ + bool ptp_support; }; struct mv88e6xxx_atu_entry { @@ -146,6 +156,8 @@ struct mv88e6xxx_vtu_entry { struct mv88e6xxx_bus_ops; struct mv88e6xxx_irq_ops; +struct mv88e6xxx_gpio_ops; +struct mv88e6xxx_avb_ops; struct mv88e6xxx_irq { u16 masked; @@ -154,6 +166,41 @@ struct mv88e6xxx_irq { unsigned int nirqs; }; +/* state flags for mv88e6xxx_port_hwtstamp::state */ +enum { + MV88E6XXX_HWTSTAMP_ENABLED, + MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, +}; + +struct mv88e6xxx_port_hwtstamp { + /* Port index */ + int port_id; + + /* Timestamping state */ + unsigned long state; + + /* Resources for receive timestamping */ + struct sk_buff_head rx_queue; + struct sk_buff_head rx_queue2; + + /* Resources for transmit timestamping */ + unsigned long tx_tstamp_start; + struct sk_buff *tx_skb; + u16 tx_seq_id; + + /* Current timestamp configuration */ + struct hwtstamp_config tstamp_config; +}; + +struct mv88e6xxx_port { + u64 serdes_stats[2]; + u64 atu_member_violation; + u64 atu_miss_violation; + u64 atu_full_violation; + u64 vtu_member_violation; + u64 vtu_miss_violation; +}; + struct mv88e6xxx_chip { const struct mv88e6xxx_info *info; @@ -207,8 +254,34 @@ struct mv88e6xxx_chip { int irq; int device_irq; int watchdog_irq; + int atu_prob_irq; int vtu_prob_irq; + struct kthread_worker *kworker; + struct kthread_delayed_work irq_poll_work; + + /* GPIO resources */ + u8 gpio_data[2]; + + /* This cyclecounter abstracts the switch PTP time. + * reg_lock must be held for any operation that read()s. 
+ */ + struct cyclecounter tstamp_cc; + struct timecounter tstamp_tc; + struct delayed_work overflow_work; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct delayed_work tai_event_work; + struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO]; + u16 trig_config; + u16 evcap_config; + + /* Per-port timestamping resources. */ + struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS]; + + /* Array of port structures. */ + struct mv88e6xxx_port ports[DSA_MAX_PORTS]; }; struct mv88e6xxx_bus_ops { @@ -327,9 +400,9 @@ struct mv88e6xxx_ops { /* Return the number of strings describing statistics */ int (*stats_get_sset_count)(struct mv88e6xxx_chip *chip); - void (*stats_get_strings)(struct mv88e6xxx_chip *chip, uint8_t *data); - void (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port, - uint64_t *data); + int (*stats_get_strings)(struct mv88e6xxx_chip *chip, uint8_t *data); + int (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port, + uint64_t *data); int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port); int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port); const struct mv88e6xxx_irq_ops *watchdog_ops; @@ -339,11 +412,24 @@ struct mv88e6xxx_ops { /* Power on/off a SERDES interface */ int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, bool on); + /* Statistics from the SERDES interface */ + int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port); + int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port, + uint8_t *data); + int (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port, + uint64_t *data); + /* VLAN Translation Unit operations */ int (*vtu_getnext)(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_entry *entry); int (*vtu_loadpurge)(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_entry *entry); + + /* GPIO operations */ + const struct mv88e6xxx_gpio_ops *gpio_ops; + + /* Interface to the AVB/PTP registers */ + const struct mv88e6xxx_avb_ops *avb_ops; }; struct mv88e6xxx_irq_ops { @@ -355,13 +441,49 @@ struct mv88e6xxx_irq_ops { void (*irq_free)(struct mv88e6xxx_chip *chip); }; +struct mv88e6xxx_gpio_ops { + /* Get/set data on GPIO pin */ + int (*get_data)(struct mv88e6xxx_chip *chip, unsigned int pin); + int (*set_data)(struct mv88e6xxx_chip *chip, unsigned int pin, + int value); + + /* get/set GPIO direction */ + int (*get_dir)(struct mv88e6xxx_chip *chip, unsigned int pin); + int (*set_dir)(struct mv88e6xxx_chip *chip, unsigned int pin, + bool input); + + /* get/set GPIO pin control */ + int (*get_pctl)(struct mv88e6xxx_chip *chip, unsigned int pin, + int *func); + int (*set_pctl)(struct mv88e6xxx_chip *chip, unsigned int pin, + int func); +}; + +struct mv88e6xxx_avb_ops { + /* Access port-scoped Precision Time Protocol registers */ + int (*port_ptp_read)(struct mv88e6xxx_chip *chip, int port, int addr, + u16 *data, int len); + int (*port_ptp_write)(struct mv88e6xxx_chip *chip, int port, int addr, + u16 data); + + /* Access global Precision Time Protocol registers */ + int (*ptp_read)(struct mv88e6xxx_chip *chip, int addr, u16 *data, + int len); + int (*ptp_write)(struct mv88e6xxx_chip *chip, int addr, u16 data); + + /* Access global Time Application Interface registers */ + int (*tai_read)(struct mv88e6xxx_chip *chip, int addr, u16 *data, + int len); + int (*tai_write)(struct mv88e6xxx_chip *chip, int addr, u16 data); +}; + #define STATS_TYPE_PORT BIT(0) #define STATS_TYPE_BANK0 BIT(1) #define STATS_TYPE_BANK1 BIT(2) struct mv88e6xxx_hw_stat { char string[ETH_GSTRING_LEN]; - int sizeof_stat; + 
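chip.h above pairs a cyclecounter with a timecounter for PTP: the cyclecounter's read() samples the free-running switch counter over SMI (hence the note that reg_lock must be held), and the timecounter extends the wrapping readings into 64-bit nanoseconds. A minimal sketch against the kernel's timecounter API (counter width and scaling are example values, not the chip's):

    #include <linux/timecounter.h>

    static u64 foo_ptp_read(const struct cyclecounter *cc)
    {
            return foo_read_hw_counter();   /* hypothetical 32-bit read */
    }

    static struct cyclecounter foo_cc = {
            .read   = foo_ptp_read,
            .mask   = CYCLECOUNTER_MASK(32),
            /* ns = (cycles * mult) >> shift; 1 ns per tick here */
            .mult   = 1 << 28,
            .shift  = 28,
    };
    static struct timecounter foo_tc;

    /* once:       timecounter_init(&foo_tc, &foo_cc, start_ns);
     * thereafter: ns = timecounter_read(&foo_tc);
     *             (call often enough that the counter never wraps twice)
     */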
size_t size; int reg; int type; }; @@ -386,6 +508,11 @@ static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip) return GENMASK(mv88e6xxx_num_ports(chip) - 1, 0); } +static inline unsigned int mv88e6xxx_num_gpio(struct mv88e6xxx_chip *chip) +{ + return chip->info->num_gpio; +} + int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val); int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 20d941f4273b..307410898fc9 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -336,8 +336,6 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) if (err) goto out; - mutex_unlock(&chip->reg_lock); - if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { dev_err_ratelimited(chip->dev, "ATU age out violation for %pM\n", @@ -348,17 +346,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) dev_err_ratelimited(chip->dev, "ATU member violation for %pM portvec %x\n", entry.mac, entry.portvec); + chip->ports[entry.portvec].atu_member_violation++; } - if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) + if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { dev_err_ratelimited(chip->dev, "ATU miss violation for %pM portvec %x\n", entry.mac, entry.portvec); + chip->ports[entry.portvec].atu_miss_violation++; + } - if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) + if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { dev_err_ratelimited(chip->dev, "ATU full violation for %pM portvec %x\n", entry.mac, entry.portvec); + chip->ports[entry.portvec].atu_full_violation++; + } + mutex_unlock(&chip->reg_lock); return IRQ_HANDLED; diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c index 7997961647de..058326924f3e 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c @@ -539,18 +539,21 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id) if (err) goto out; - mutex_unlock(&chip->reg_lock); - spid = val & MV88E6XXX_G1_VTU_OP_SPID_MASK; if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) { dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n", entry.vid, spid); + chip->ports[spid].vtu_member_violation++; } - if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) - dev_err_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n", + if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) { + dev_dbg_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n", entry.vid, spid); + chip->ports[spid].vtu_miss_violation++; + } + + mutex_unlock(&chip->reg_lock); return IRQ_HANDLED; diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index af0727877825..0ce627fded48 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -20,22 +20,22 @@ #include "global1.h" /* for MV88E6XXX_G1_STS_IRQ_DEVICE */ #include "global2.h" -static int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val) +int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val) { return mv88e6xxx_read(chip, chip->info->global2_addr, reg, val); } -static int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val) +int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val) { return mv88e6xxx_write(chip, 
chip->info->global2_addr, reg, val); } -static int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update) +int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update) { return mv88e6xxx_update(chip, chip->info->global2_addr, reg, update); } -static int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask) +int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask) { return mv88e6xxx_wait(chip, chip->info->global2_addr, reg, mask); } @@ -798,6 +798,7 @@ int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus, val); } +/* Offset 0x1B: Watchdog Control */ static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq) { u16 reg; @@ -1089,7 +1090,7 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip) err = request_threaded_irq(chip->device_irq, NULL, mv88e6xxx_g2_irq_thread_fn, - IRQF_ONESHOT, "mv88e6xxx-g1", chip); + IRQF_ONESHOT, "mv88e6xxx-g2", chip); if (err) goto out; @@ -1106,6 +1107,38 @@ out: return err; } +int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip, + struct mii_bus *bus) +{ + int phy, irq, err, err_phy; + + for (phy = 0; phy < chip->info->num_internal_phys; phy++) { + irq = irq_find_mapping(chip->g2_irq.domain, phy); + if (irq < 0) { + err = irq; + goto out; + } + bus->irq[chip->info->port_base_addr + phy] = irq; + } + return 0; +out: + err_phy = phy; + + for (phy = 0; phy < err_phy; phy++) + irq_dispose_mapping(bus->irq[phy]); + + return err; +} + +void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip, + struct mii_bus *bus) +{ + int phy; + + for (phy = 0; phy < chip->info->num_internal_phys; phy++) + irq_dispose_mapping(bus->irq[phy]); +} + int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) { u16 reg; diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 669f59017b12..520ec70d32e8 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -149,7 +149,26 @@ #define MV88E6390_G2_EEPROM_ADDR_MASK 0xffff /* Offset 0x16: AVB Command Register */ -#define MV88E6352_G2_AVB_CMD 0x16 +#define MV88E6352_G2_AVB_CMD 0x16 +#define MV88E6352_G2_AVB_CMD_BUSY 0x8000 +#define MV88E6352_G2_AVB_CMD_OP_READ 0x4000 +#define MV88E6352_G2_AVB_CMD_OP_READ_INCR 0x6000 +#define MV88E6352_G2_AVB_CMD_OP_WRITE 0x3000 +#define MV88E6390_G2_AVB_CMD_OP_READ 0x0000 +#define MV88E6390_G2_AVB_CMD_OP_READ_INCR 0x4000 +#define MV88E6390_G2_AVB_CMD_OP_WRITE 0x6000 +#define MV88E6352_G2_AVB_CMD_PORT_MASK 0x0f00 +#define MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL 0xe +#define MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL 0xf +#define MV88E6390_G2_AVB_CMD_PORT_MASK 0x1f00 +#define MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL 0x1e +#define MV88E6390_G2_AVB_CMD_PORT_PTPGLOBAL 0x1f +#define MV88E6352_G2_AVB_CMD_BLOCK_PTP 0 +#define MV88E6352_G2_AVB_CMD_BLOCK_AVB 1 +#define MV88E6352_G2_AVB_CMD_BLOCK_QAV 2 +#define MV88E6352_G2_AVB_CMD_BLOCK_QVB 3 +#define MV88E6352_G2_AVB_CMD_BLOCK_MASK 0x00e0 +#define MV88E6352_G2_AVB_CMD_ADDR_MASK 0x001f /* Offset 0x17: AVB Data Register */ #define MV88E6352_G2_AVB_DATA 0x17 @@ -223,6 +242,40 @@ #define MV88E6352_G2_NOEGR_POLICY 0x2000 #define MV88E6390_G2_LAG_ID_4 0x2000 +/* Scratch/Misc registers accessed through MV88E6XXX_G2_SCRATCH_MISC */ +/* Offset 0x02: Misc Configuration */ +#define MV88E6352_G2_SCRATCH_MISC_CFG 0x02 +#define MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI 0x80 +/* Offset 0x60-0x61: GPIO Configuration */ +#define MV88E6352_G2_SCRATCH_GPIO_CFG0 0x60 +#define MV88E6352_G2_SCRATCH_GPIO_CFG1 0x61 +/* Offset 
0x62-0x63: GPIO Direction */ +#define MV88E6352_G2_SCRATCH_GPIO_DIR0 0x62 +#define MV88E6352_G2_SCRATCH_GPIO_DIR1 0x63 +#define MV88E6352_G2_SCRATCH_GPIO_DIR_OUT 0 +#define MV88E6352_G2_SCRATCH_GPIO_DIR_IN 1 +/* Offset 0x64-0x65: GPIO Data */ +#define MV88E6352_G2_SCRATCH_GPIO_DATA0 0x64 +#define MV88E6352_G2_SCRATCH_GPIO_DATA1 0x65 +/* Offset 0x68-0x6F: GPIO Pin Control */ +#define MV88E6352_G2_SCRATCH_GPIO_PCTL0 0x68 +#define MV88E6352_G2_SCRATCH_GPIO_PCTL1 0x69 +#define MV88E6352_G2_SCRATCH_GPIO_PCTL2 0x6A +#define MV88E6352_G2_SCRATCH_GPIO_PCTL3 0x6B +#define MV88E6352_G2_SCRATCH_GPIO_PCTL4 0x6C +#define MV88E6352_G2_SCRATCH_GPIO_PCTL5 0x6D +#define MV88E6352_G2_SCRATCH_GPIO_PCTL6 0x6E +#define MV88E6352_G2_SCRATCH_GPIO_PCTL7 0x6F +#define MV88E6352_G2_SCRATCH_CONFIG_DATA0 0x70 +#define MV88E6352_G2_SCRATCH_CONFIG_DATA1 0x71 +#define MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU BIT(2) +#define MV88E6352_G2_SCRATCH_CONFIG_DATA2 0x72 +#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0x3 + +#define MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO 0 +#define MV88E6352_G2_SCRATCH_GPIO_PCTL_TRIG 1 +#define MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ 2 + #ifdef CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) @@ -230,6 +283,11 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) return 0; } +int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val); +int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val); +int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update); +int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask); + int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port); @@ -259,6 +317,11 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); +int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip, + struct mii_bus *bus); +void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip, + struct mii_bus *bus); + int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); @@ -267,6 +330,14 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip); extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; +extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops; +extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops; + +extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops; + +int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip, + bool external); + #else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) @@ -279,6 +350,26 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) return 0; } +static inline int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask) +{ + return -EOPNOTSUPP; +} + static inline int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port) { @@ -364,6 +455,17 @@ static inline void 
mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip) { } +static inline int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip, + struct mii_bus *bus) +{ + return 0; +} + +static inline void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip, + struct mii_bus *bus) +{ +} + static inline int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) { return -EOPNOTSUPP; @@ -382,6 +484,17 @@ static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {}; static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {}; +static const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {}; +static const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {}; + +static const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {}; + +static inline int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip, + bool external) +{ + return -EOPNOTSUPP; +} + #endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */ #endif /* _MV88E6XXX_GLOBAL2_H */ diff --git a/drivers/net/dsa/mv88e6xxx/global2_avb.c b/drivers/net/dsa/mv88e6xxx/global2_avb.c new file mode 100644 index 000000000000..2e398ccb88ca --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/global2_avb.c @@ -0,0 +1,193 @@ +/* + * Marvell 88E6xxx Switch Global 2 Registers support + * + * Copyright (c) 2008 Marvell Semiconductor + * + * Copyright (c) 2016-2017 Savoir-faire Linux Inc. + * Vivien Didelot <vivien.didelot@savoirfairelinux.com> + * + * Copyright (c) 2017 National Instruments + * Brandon Streiff <brandon.streiff@ni.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include "global2.h" + +/* Offset 0x16: AVB Command Register + * Offset 0x17: AVB Data Register + * + * There are two different versions of this register interface: + * "6352": 3-bit "op" field, 4-bit "port" field. + * "6390": 2-bit "op" field, 5-bit "port" field. + * + * The "op" codes are different between the two, as well as the special + * port fields for global PTP and TAI configuration. + */ + +/* mv88e6xxx_g2_avb_read -- Read one or multiple 16-bit words. + * The hardware supports snapshotting up to four contiguous registers. + */ +static int mv88e6xxx_g2_avb_read(struct mv88e6xxx_chip *chip, u16 readop, + u16 *data, int len) +{ + int err; + int i; + + /* Hardware can only snapshot four words. */ + if (len > 4) + return -E2BIG; + + err = mv88e6xxx_g2_update(chip, MV88E6352_G2_AVB_CMD, readop); + if (err) + return err; + + for (i = 0; i < len; ++i) { + err = mv88e6xxx_g2_read(chip, MV88E6352_G2_AVB_DATA, + &data[i]); + if (err) + return err; + } + + return 0; +} + +/* mv88e6xxx_g2_avb_write -- Write one 16-bit word. */ +static int mv88e6xxx_g2_avb_write(struct mv88e6xxx_chip *chip, u16 writeop, + u16 data) +{ + int err; + + err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_DATA, data); + if (err) + return err; + + return mv88e6xxx_g2_update(chip, MV88E6352_G2_AVB_CMD, writeop); +} + +static int mv88e6352_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip, + int port, int addr, u16 *data, + int len) +{ + u16 readop = (len == 1 ? 
MV88E6352_G2_AVB_CMD_OP_READ : + MV88E6352_G2_AVB_CMD_OP_READ_INCR) | + (port << 8) | (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) | + addr; + + return mv88e6xxx_g2_avb_read(chip, readop, data, len); +} + +static int mv88e6352_g2_avb_port_ptp_write(struct mv88e6xxx_chip *chip, + int port, int addr, u16 data) +{ + u16 writeop = MV88E6352_G2_AVB_CMD_OP_WRITE | (port << 8) | + (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) | addr; + + return mv88e6xxx_g2_avb_write(chip, writeop, data); +} + +static int mv88e6352_g2_avb_ptp_read(struct mv88e6xxx_chip *chip, int addr, + u16 *data, int len) +{ + return mv88e6352_g2_avb_port_ptp_read(chip, + MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL, + addr, data, len); +} + +static int mv88e6352_g2_avb_ptp_write(struct mv88e6xxx_chip *chip, int addr, + u16 data) +{ + return mv88e6352_g2_avb_port_ptp_write(chip, + MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL, + addr, data); +} + +static int mv88e6352_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr, + u16 *data, int len) +{ + return mv88e6352_g2_avb_port_ptp_read(chip, + MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL, + addr, data, len); +} + +static int mv88e6352_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr, + u16 data) +{ + return mv88e6352_g2_avb_port_ptp_write(chip, + MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL, + addr, data); +} + +const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = { + .port_ptp_read = mv88e6352_g2_avb_port_ptp_read, + .port_ptp_write = mv88e6352_g2_avb_port_ptp_write, + .ptp_read = mv88e6352_g2_avb_ptp_read, + .ptp_write = mv88e6352_g2_avb_ptp_write, + .tai_read = mv88e6352_g2_avb_tai_read, + .tai_write = mv88e6352_g2_avb_tai_write, +}; + +static int mv88e6390_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip, + int port, int addr, u16 *data, + int len) +{ + u16 readop = (len == 1 ? MV88E6390_G2_AVB_CMD_OP_READ : + MV88E6390_G2_AVB_CMD_OP_READ_INCR) | + (port << 8) | (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) | + addr; + + return mv88e6xxx_g2_avb_read(chip, readop, data, len); +} + +static int mv88e6390_g2_avb_port_ptp_write(struct mv88e6xxx_chip *chip, + int port, int addr, u16 data) +{ + u16 writeop = MV88E6390_G2_AVB_CMD_OP_WRITE | (port << 8) | + (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) | addr; + + return mv88e6xxx_g2_avb_write(chip, writeop, data); +} + +static int mv88e6390_g2_avb_ptp_read(struct mv88e6xxx_chip *chip, int addr, + u16 *data, int len) +{ + return mv88e6390_g2_avb_port_ptp_read(chip, + MV88E6390_G2_AVB_CMD_PORT_PTPGLOBAL, + addr, data, len); +} + +static int mv88e6390_g2_avb_ptp_write(struct mv88e6xxx_chip *chip, int addr, + u16 data) +{ + return mv88e6390_g2_avb_port_ptp_write(chip, + MV88E6390_G2_AVB_CMD_PORT_PTPGLOBAL, + addr, data); +} + +static int mv88e6390_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr, + u16 *data, int len) +{ + return mv88e6390_g2_avb_port_ptp_read(chip, + MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL, + addr, data, len); +} + +static int mv88e6390_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr, + u16 data) +{ + return mv88e6390_g2_avb_port_ptp_write(chip, + MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL, + addr, data); +} + +const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = { + .port_ptp_read = mv88e6390_g2_avb_port_ptp_read, + .port_ptp_write = mv88e6390_g2_avb_port_ptp_write, + .ptp_read = mv88e6390_g2_avb_ptp_read, + .ptp_write = mv88e6390_g2_avb_ptp_write, + .tai_read = mv88e6390_g2_avb_tai_read, + .tai_write = mv88e6390_g2_avb_tai_write, +}; diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c new file mode 100644 index 
000000000000..3f92b8892dc7
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -0,0 +1,291 @@
+/*
+ * Marvell 88E6xxx Switch Global 2 Scratch & Misc Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 National Instruments
+ * Brandon Streiff <brandon.streiff@ni.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "chip.h"
+#include "global2.h"
+
+/* Offset 0x1A: Scratch and Misc. Register */
+static int mv88e6xxx_g2_scratch_read(struct mv88e6xxx_chip *chip, int reg,
+				     u8 *data)
+{
+	u16 value;
+	int err;
+
+	err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC,
+				 reg << 8);
+	if (err)
+		return err;
+
+	err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC, &value);
+	if (err)
+		return err;
+
+	*data = (value & MV88E6XXX_G2_SCRATCH_MISC_DATA_MASK);
+
+	return 0;
+}
+
+static int mv88e6xxx_g2_scratch_write(struct mv88e6xxx_chip *chip, int reg,
+				      u8 data)
+{
+	u16 value = (reg << 8) | data;
+
+	return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC, value);
+}
+
+/**
+ * mv88e6xxx_g2_scratch_get_bit - get a bit
+ * @chip: chip private data
+ * @base_reg: base of scratch bits
+ * @offset: bit index
+ * @set: is bit set?
+ */
+static int mv88e6xxx_g2_scratch_get_bit(struct mv88e6xxx_chip *chip,
+					int base_reg, unsigned int offset,
+					int *set)
+{
+	int reg = base_reg + (offset / 8);
+	u8 mask = (1 << (offset & 0x7));
+	u8 val;
+	int err;
+
+	err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
+	if (err)
+		return err;
+
+	*set = !!(mask & val);
+
+	return 0;
+}
+
+/**
+ * mv88e6xxx_g2_scratch_set_bit - set (or clear) a bit
+ * @chip: chip private data
+ * @base_reg: base of scratch bits
+ * @offset: bit index
+ * @set: set if true, clear if false
+ *
+ * Helper function for dealing with the direction and data registers.
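
Purely as illustration (not part of the patch): the helpers above address one GPIO per bit, eight pins per scratch register, and every access is funneled through the 8-bit Scratch & Misc window as a (pointer << 8) | data word. A self-contained user-space sketch of that arithmetic, with the register value copied from the defines earlier in this header and no hardware access at all:

#include <stdio.h>

#define SCRATCH_GPIO_DATA0	0x64	/* MV88E6352_G2_SCRATCH_GPIO_DATA0 */

int main(void)
{
	for (unsigned int pin = 0; pin < 15; pin++) {
		int reg = SCRATCH_GPIO_DATA0 + pin / 8;
		unsigned char mask = 1 << (pin & 0x7);
		/* Word written to the Scratch & Misc register to update
		 * this GPIO data register with the value 'mask'. */
		unsigned short word = (reg << 8) | mask;

		printf("pin %2u -> reg 0x%02x, bit mask 0x%02x, misc word 0x%04x\n",
		       pin, reg, mask, word);
	}
	return 0;
}
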
+ */ +static int mv88e6xxx_g2_scratch_set_bit(struct mv88e6xxx_chip *chip, + int base_reg, unsigned int offset, + int set) +{ + int reg = base_reg + (offset / 8); + u8 mask = (1 << (offset & 0x7)); + u8 val; + int err; + + err = mv88e6xxx_g2_scratch_read(chip, reg, &val); + if (err) + return err; + + if (set) + val |= mask; + else + val &= ~mask; + + return mv88e6xxx_g2_scratch_write(chip, reg, val); +} + +/** + * mv88e6352_g2_scratch_gpio_get_data - get data on gpio pin + * @chip: chip private data + * @pin: gpio index + * + * Return: 0 for low, 1 for high, negative error + */ +static int mv88e6352_g2_scratch_gpio_get_data(struct mv88e6xxx_chip *chip, + unsigned int pin) +{ + int val = 0; + int err; + + err = mv88e6xxx_g2_scratch_get_bit(chip, + MV88E6352_G2_SCRATCH_GPIO_DATA0, + pin, &val); + if (err) + return err; + + return val; +} + +/** + * mv88e6352_g2_scratch_gpio_set_data - set data on gpio pin + * @chip: chip private data + * @pin: gpio index + * @value: value to set + */ +static int mv88e6352_g2_scratch_gpio_set_data(struct mv88e6xxx_chip *chip, + unsigned int pin, int value) +{ + u8 mask = (1 << (pin & 0x7)); + int offset = (pin / 8); + int reg; + + reg = MV88E6352_G2_SCRATCH_GPIO_DATA0 + offset; + + if (value) + chip->gpio_data[offset] |= mask; + else + chip->gpio_data[offset] &= ~mask; + + return mv88e6xxx_g2_scratch_write(chip, reg, chip->gpio_data[offset]); +} + +/** + * mv88e6352_g2_scratch_gpio_get_dir - get direction of gpio pin + * @chip: chip private data + * @pin: gpio index + * + * Return: 0 for output, 1 for input (same as GPIOF_DIR_XXX). + */ +static int mv88e6352_g2_scratch_gpio_get_dir(struct mv88e6xxx_chip *chip, + unsigned int pin) +{ + int val = 0; + int err; + + err = mv88e6xxx_g2_scratch_get_bit(chip, + MV88E6352_G2_SCRATCH_GPIO_DIR0, + pin, &val); + if (err) + return err; + + return val; +} + +/** + * mv88e6352_g2_scratch_gpio_set_dir - set direction of gpio pin + * @chip: chip private data + * @pin: gpio index + */ +static int mv88e6352_g2_scratch_gpio_set_dir(struct mv88e6xxx_chip *chip, + unsigned int pin, bool input) +{ + int value = (input ? MV88E6352_G2_SCRATCH_GPIO_DIR_IN : + MV88E6352_G2_SCRATCH_GPIO_DIR_OUT); + + return mv88e6xxx_g2_scratch_set_bit(chip, + MV88E6352_G2_SCRATCH_GPIO_DIR0, + pin, value); +} + +/** + * mv88e6352_g2_scratch_gpio_get_pctl - get pin control setting + * @chip: chip private data + * @pin: gpio index + * @func: function number + * + * Note that the function numbers themselves may vary by chipset. + */ +static int mv88e6352_g2_scratch_gpio_get_pctl(struct mv88e6xxx_chip *chip, + unsigned int pin, int *func) +{ + int reg = MV88E6352_G2_SCRATCH_GPIO_PCTL0 + (pin / 2); + int offset = (pin & 0x1) ? 4 : 0; + u8 mask = (0x7 << offset); + int err; + u8 val; + + err = mv88e6xxx_g2_scratch_read(chip, reg, &val); + if (err) + return err; + + *func = (val & mask) >> offset; + + return 0; +} + +/** + * mv88e6352_g2_scratch_gpio_set_pctl - set pin control setting + * @chip: chip private data + * @pin: gpio index + * @func: function number + */ +static int mv88e6352_g2_scratch_gpio_set_pctl(struct mv88e6xxx_chip *chip, + unsigned int pin, int func) +{ + int reg = MV88E6352_G2_SCRATCH_GPIO_PCTL0 + (pin / 2); + int offset = (pin & 0x1) ? 
4 : 0;
+	u8 mask = (0x7 << offset);
+	int err;
+	u8 val;
+
+	err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
+	if (err)
+		return err;
+
+	val = (val & ~mask) | ((func << offset) & mask);
+
+	return mv88e6xxx_g2_scratch_write(chip, reg, val);
+}
+
+const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
+	.get_data = mv88e6352_g2_scratch_gpio_get_data,
+	.set_data = mv88e6352_g2_scratch_gpio_set_data,
+	.get_dir = mv88e6352_g2_scratch_gpio_get_dir,
+	.set_dir = mv88e6352_g2_scratch_gpio_set_dir,
+	.get_pctl = mv88e6352_g2_scratch_gpio_get_pctl,
+	.set_pctl = mv88e6352_g2_scratch_gpio_set_pctl,
+};
+
+/**
+ * mv88e6xxx_g2_scratch_gpio_set_smi - set gpio muxing for external smi
+ * @chip: chip private data
+ * @external: set mux for external smi, or free for gpio usage
+ *
+ * Some mv88e6xxx models have GPIO pins that may be configured as
+ * an external SMI interface, or they may be made free for other
+ * GPIO uses.
+ */
+int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+				      bool external)
+{
+	int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
+	int config_data1 = MV88E6352_G2_SCRATCH_CONFIG_DATA1;
+	int config_data2 = MV88E6352_G2_SCRATCH_CONFIG_DATA2;
+	bool no_cpu;
+	u8 p0_mode;
+	int err;
+	u8 val;
+
+	err = mv88e6xxx_g2_scratch_read(chip, config_data2, &val);
+	if (err)
+		return err;
+
+	p0_mode = val & MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK;
+
+	if (p0_mode == 0x01 || p0_mode == 0x02)
+		return -EBUSY;
+
+	err = mv88e6xxx_g2_scratch_read(chip, config_data1, &val);
+	if (err)
+		return err;
+
+	no_cpu = !!(val & MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU);
+
+	err = mv88e6xxx_g2_scratch_read(chip, misc_cfg, &val);
+	if (err)
+		return err;
+
+	/* NO_CPU being 0 inverts the meaning of the bit */
+	if (!no_cpu)
+		external = !external;
+
+	if (external)
+		val |= MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+	else
+		val &= ~MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+
+	return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
+}
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
new file mode 100644
index 000000000000..ac7694c71266
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -0,0 +1,576 @@
+/*
+ * Marvell 88E6xxx Switch hardware timestamping support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 National Instruments
+ * Erik Hons <erik.hons@ni.com>
+ * Brandon Streiff <brandon.streiff@ni.com>
+ * Dane Wagner <dane.wagner@ni.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "chip.h"
+#include "global2.h"
+#include "hwtstamp.h"
+#include "ptp.h"
+#include <linux/ptp_classify.h>
+
+#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
+
+static int mv88e6xxx_port_ptp_read(struct mv88e6xxx_chip *chip, int port,
+				   int addr, u16 *data, int len)
+{
+	if (!chip->info->ops->avb_ops->port_ptp_read)
+		return -EOPNOTSUPP;
+
+	return chip->info->ops->avb_ops->port_ptp_read(chip, port, addr,
+						       data, len);
+}
+
+static int mv88e6xxx_port_ptp_write(struct mv88e6xxx_chip *chip, int port,
+				    int addr, u16 data)
+{
+	if (!chip->info->ops->avb_ops->port_ptp_write)
+		return -EOPNOTSUPP;
+
+	return chip->info->ops->avb_ops->port_ptp_write(chip, port, addr,
+							data);
+}
+
+static int mv88e6xxx_ptp_write(struct mv88e6xxx_chip *chip, int addr,
+			       u16 data)
+{
+	if (!chip->info->ops->avb_ops->ptp_write)
+		return -EOPNOTSUPP;
+
+	return chip->info->ops->avb_ops->ptp_write(chip, addr, data);
+}
+
+/* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX
+ * timestamp. When working properly, hardware will produce a timestamp
+ * within 1ms. Software may encounter delays due to MDIO contention, so
+ * the timeout is set accordingly.
+ */
+#define TX_TSTAMP_TIMEOUT	msecs_to_jiffies(20)
+
+int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
+			  struct ethtool_ts_info *info)
+{
+	struct mv88e6xxx_chip *chip = ds->priv;
+
+	if (!chip->info->ptp_support)
+		return -EOPNOTSUPP;
+
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->phc_index = ptp_clock_index(chip->ptp_clock);
+	info->tx_types =
+		(1 << HWTSTAMP_TX_OFF) |
+		(1 << HWTSTAMP_TX_ON);
+	info->rx_filters =
+		(1 << HWTSTAMP_FILTER_NONE) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+
+	return 0;
+}
+
+static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
+					 struct hwtstamp_config *config)
+{
+	struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+	bool tstamp_enable = false;
+	u16 port_config0;
+	int err;
+
+	/* Prevent the TX/RX paths from trying to interact with the
+	 * timestamp hardware while we reconfigure it.
+	 */
+	clear_bit_unlock(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state);
+
+	/* reserved for future extensions */
+	if (config->flags)
+		return -EINVAL;
+
+	switch (config->tx_type) {
+	case HWTSTAMP_TX_OFF:
+		tstamp_enable = false;
+		break;
+	case HWTSTAMP_TX_ON:
+		tstamp_enable = true;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	/* The switch supports timestamping both L2 and L4; one cannot be
+	 * disabled independently of the other.
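
This handler implements the kernel's standard SIOCSHWTSTAMP semantics, so it can be exercised with the generic timestamping ioctl from user space. A minimal sketch for context (the interface name and error handling are illustrative, not from the patch):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Request TX timestamping plus PTP-event RX filtering on one port. */
int enable_hwtstamp(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr = { 0 };
	int fd, err;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* Ends up in mv88e6xxx_port_hwtstamp_set() for a DSA port. */
	err = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return err;
}
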
+	 */
+	switch (config->rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		tstamp_enable = false;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		break;
+	case HWTSTAMP_FILTER_ALL:
+	default:
+		config->rx_filter = HWTSTAMP_FILTER_NONE;
+		return -ERANGE;
+	}
+
+	if (tstamp_enable) {
+		/* Disable transportSpecific value matching, so that packets
+		 * with either 1588 (0) or 802.1AS (1) will be timestamped.
+		 */
+		port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH;
+	} else {
+		/* Disable PTP. This disables both RX and TX timestamping. */
+		port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP;
+	}
+
+	mutex_lock(&chip->reg_lock);
+	err = mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
+				       port_config0);
+	mutex_unlock(&chip->reg_lock);
+
+	if (err < 0)
+		return err;
+
+	/* Once hardware has been configured, enable timestamp checks
+	 * in the RX/TX paths.
+	 */
+	if (tstamp_enable)
+		set_bit(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state);
+
+	return 0;
+}
+
+int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
+				struct ifreq *ifr)
+{
+	struct mv88e6xxx_chip *chip = ds->priv;
+	struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+	struct hwtstamp_config config;
+	int err;
+
+	if (!chip->info->ptp_support)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	err = mv88e6xxx_set_hwtstamp_config(chip, port, &config);
+	if (err)
+		return err;
+
+	/* Save the chosen configuration to be returned later. */
+	memcpy(&ps->tstamp_config, &config, sizeof(config));
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
+
+int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
+				struct ifreq *ifr)
+{
+	struct mv88e6xxx_chip *chip = ds->priv;
+	struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+	struct hwtstamp_config *config = &ps->tstamp_config;
+
+	if (!chip->info->ptp_support)
+		return -EOPNOTSUPP;
+
+	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+		-EFAULT : 0;
+}
+
+/* Get the start of the PTP header in this skb */
+static u8 *parse_ptp_header(struct sk_buff *skb, unsigned int type)
+{
+	u8 *data = skb_mac_header(skb);
+	unsigned int offset = 0;
+
+	if (type & PTP_CLASS_VLAN)
+		offset += VLAN_HLEN;
+
+	switch (type & PTP_CLASS_PMASK) {
+	case PTP_CLASS_IPV4:
+		offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+		break;
+	case PTP_CLASS_IPV6:
+		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+		break;
+	case PTP_CLASS_L2:
+		offset += ETH_HLEN;
+		break;
+	default:
+		return NULL;
+	}
+
+	/* Ensure that the entire header is present in this packet. */
+	if (skb->len + ETH_HLEN < offset + 34)
+		return NULL;
+
+	return data + offset;
+}
+
+/* Returns a pointer to the PTP header if the caller should time stamp,
+ * or NULL if the caller should not.
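
The offsets computed by parse_ptp_header() are simple sums of fixed header sizes. A self-contained check of those sums, purely for illustration (a 20-byte IPv4 header is assumed here, whereas the driver reads the real IHL from the packet):

#include <stdio.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define IPV4_HLEN	20	/* assumed minimum; driver uses the actual IHL */
#define IP6_HLEN	40
#define UDP_HLEN	8
#define PTP_HLEN	34	/* the "offset + 34" bound in the check above */

int main(void)
{
	printf("L2:        PTP header at %d, packet needs >= %d bytes\n",
	       ETH_HLEN, ETH_HLEN + PTP_HLEN);
	printf("IPv4/UDP:  PTP header at %d\n",
	       ETH_HLEN + IPV4_HLEN + UDP_HLEN);
	printf("IPv6/UDP:  PTP header at %d\n",
	       ETH_HLEN + IP6_HLEN + UDP_HLEN);
	printf("VLAN+IPv4: PTP header at %d\n",
	       VLAN_HLEN + ETH_HLEN + IPV4_HLEN + UDP_HLEN);
	return 0;
}
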
+ */ +static u8 *mv88e6xxx_should_tstamp(struct mv88e6xxx_chip *chip, int port, + struct sk_buff *skb, unsigned int type) +{ + struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port]; + u8 *hdr; + + if (!chip->info->ptp_support) + return NULL; + + hdr = parse_ptp_header(skb, type); + if (!hdr) + return NULL; + + if (!test_bit(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state)) + return NULL; + + return hdr; +} + +static int mv88e6xxx_ts_valid(u16 status) +{ + if (!(status & MV88E6XXX_PTP_TS_VALID)) + return 0; + if (status & MV88E6XXX_PTP_TS_STATUS_MASK) + return 0; + return 1; +} + +static int seq_match(struct sk_buff *skb, u16 ts_seqid) +{ + unsigned int type = SKB_PTP_TYPE(skb); + u8 *hdr = parse_ptp_header(skb, type); + __be16 *seqid; + + seqid = (__be16 *)(hdr + OFF_PTP_SEQUENCE_ID); + + return ts_seqid == ntohs(*seqid); +} + +static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_port_hwtstamp *ps, + struct sk_buff *skb, u16 reg, + struct sk_buff_head *rxq) +{ + u16 buf[4] = { 0 }, status, seq_id; + u64 ns, timelo, timehi; + struct skb_shared_hwtstamps *shwt; + int err; + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_port_ptp_read(chip, ps->port_id, + reg, buf, ARRAY_SIZE(buf)); + mutex_unlock(&chip->reg_lock); + if (err) + pr_err("failed to get the receive time stamp\n"); + + status = buf[0]; + timelo = buf[1]; + timehi = buf[2]; + seq_id = buf[3]; + + if (status & MV88E6XXX_PTP_TS_VALID) { + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_port_ptp_write(chip, ps->port_id, reg, 0); + mutex_unlock(&chip->reg_lock); + if (err) + pr_err("failed to clear the receive status\n"); + } + /* Since the device can only handle one time stamp at a time, + * we purge any extra frames from the queue. + */ + for ( ; skb; skb = skb_dequeue(rxq)) { + if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) { + ns = timehi << 16 | timelo; + + mutex_lock(&chip->reg_lock); + ns = timecounter_cyc2time(&chip->tstamp_tc, ns); + mutex_unlock(&chip->reg_lock); + shwt = skb_hwtstamps(skb); + memset(shwt, 0, sizeof(*shwt)); + shwt->hwtstamp = ns_to_ktime(ns); + status &= ~MV88E6XXX_PTP_TS_VALID; + } + netif_rx_ni(skb); + } +} + +static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_port_hwtstamp *ps) +{ + struct sk_buff *skb; + + skb = skb_dequeue(&ps->rx_queue); + + if (skb) + mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR0_STS, + &ps->rx_queue); + + skb = skb_dequeue(&ps->rx_queue2); + if (skb) + mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR1_STS, + &ps->rx_queue2); +} + +static int is_pdelay_resp(u8 *msgtype) +{ + return (*msgtype & 0xf) == 3; +} + +bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb, unsigned int type) +{ + struct mv88e6xxx_port_hwtstamp *ps; + struct mv88e6xxx_chip *chip; + u8 *hdr; + + chip = ds->priv; + ps = &chip->port_hwtstamp[port]; + + if (ps->tstamp_config.rx_filter != HWTSTAMP_FILTER_PTP_V2_EVENT) + return false; + + hdr = mv88e6xxx_should_tstamp(chip, port, skb, type); + if (!hdr) + return false; + + SKB_PTP_TYPE(skb) = type; + + if (is_pdelay_resp(hdr)) + skb_queue_tail(&ps->rx_queue2, skb); + else + skb_queue_tail(&ps->rx_queue, skb); + + ptp_schedule_worker(chip->ptp_clock, 0); + + return true; +} + +static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_port_hwtstamp *ps) +{ + struct skb_shared_hwtstamps shhwtstamps; + u16 departure_block[4], status; + struct sk_buff *tmp_skb; + u32 time_raw; + int err; + u64 ns; + + if (!ps->tx_skb) + return 
0; + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_port_ptp_read(chip, ps->port_id, + MV88E6XXX_PORT_PTP_DEP_STS, + departure_block, + ARRAY_SIZE(departure_block)); + mutex_unlock(&chip->reg_lock); + + if (err) + goto free_and_clear_skb; + + if (!(departure_block[0] & MV88E6XXX_PTP_TS_VALID)) { + if (time_is_before_jiffies(ps->tx_tstamp_start + + TX_TSTAMP_TIMEOUT)) { + dev_warn(chip->dev, "p%d: clearing tx timestamp hang\n", + ps->port_id); + goto free_and_clear_skb; + } + /* The timestamp should be available quickly, while getting it + * is high priority and time bounded to only 10ms. A poll is + * warranted so restart the work. + */ + return 1; + } + + /* We have the timestamp; go ahead and clear valid now */ + mutex_lock(&chip->reg_lock); + mv88e6xxx_port_ptp_write(chip, ps->port_id, + MV88E6XXX_PORT_PTP_DEP_STS, 0); + mutex_unlock(&chip->reg_lock); + + status = departure_block[0] & MV88E6XXX_PTP_TS_STATUS_MASK; + if (status != MV88E6XXX_PTP_TS_STATUS_NORMAL) { + dev_warn(chip->dev, "p%d: tx timestamp overrun\n", ps->port_id); + goto free_and_clear_skb; + } + + if (departure_block[3] != ps->tx_seq_id) { + dev_warn(chip->dev, "p%d: unexpected seq. id\n", ps->port_id); + goto free_and_clear_skb; + } + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + time_raw = ((u32)departure_block[2] << 16) | departure_block[1]; + mutex_lock(&chip->reg_lock); + ns = timecounter_cyc2time(&chip->tstamp_tc, time_raw); + mutex_unlock(&chip->reg_lock); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + + dev_dbg(chip->dev, + "p%d: txtstamp %llx status 0x%04x skb ID 0x%04x hw ID 0x%04x\n", + ps->port_id, ktime_to_ns(shhwtstamps.hwtstamp), + departure_block[0], ps->tx_seq_id, departure_block[3]); + + /* skb_complete_tx_timestamp() will free up the client to make + * another timestamp-able transmit. We have to be ready for it + * -- by clearing the ps->tx_skb "flag" -- beforehand. + */ + + tmp_skb = ps->tx_skb; + ps->tx_skb = NULL; + clear_bit_unlock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state); + skb_complete_tx_timestamp(tmp_skb, &shhwtstamps); + + return 0; + +free_and_clear_skb: + dev_kfree_skb_any(ps->tx_skb); + ps->tx_skb = NULL; + clear_bit_unlock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state); + + return 0; +} + +long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp) +{ + struct mv88e6xxx_chip *chip = ptp_to_chip(ptp); + struct dsa_switch *ds = chip->ds; + struct mv88e6xxx_port_hwtstamp *ps; + int i, restart = 0; + + for (i = 0; i < ds->num_ports; i++) { + if (!dsa_is_user_port(ds, i)) + continue; + + ps = &chip->port_hwtstamp[i]; + if (test_bit(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state)) + restart |= mv88e6xxx_txtstamp_work(chip, ps); + + mv88e6xxx_rxtstamp_work(chip, ps); + } + + return restart ? 
1 : -1; +} + +bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *clone, unsigned int type) +{ + struct mv88e6xxx_chip *chip = ds->priv; + struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port]; + __be16 *seq_ptr; + u8 *hdr; + + if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP)) + return false; + + hdr = mv88e6xxx_should_tstamp(chip, port, clone, type); + if (!hdr) + return false; + + seq_ptr = (__be16 *)(hdr + OFF_PTP_SEQUENCE_ID); + + if (test_and_set_bit_lock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, + &ps->state)) + return false; + + ps->tx_skb = clone; + ps->tx_tstamp_start = jiffies; + ps->tx_seq_id = be16_to_cpup(seq_ptr); + + ptp_schedule_worker(chip->ptp_clock, 0); + return true; +} + +static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port) +{ + struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port]; + + ps->port_id = port; + + skb_queue_head_init(&ps->rx_queue); + skb_queue_head_init(&ps->rx_queue2); + + return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0, + MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP); +} + +int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip) +{ + int err; + int i; + + /* Disable timestamping on all ports. */ + for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { + err = mv88e6xxx_hwtstamp_port_setup(chip, i); + if (err) + return err; + } + + /* MV88E6XXX_PTP_MSG_TYPE is a mask of PTP message types to + * timestamp. This affects all ports that have timestamping enabled, + * but the timestamp config is per-port; thus we configure all events + * here and only support the HWTSTAMP_FILTER_*_EVENT filter types. + */ + err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_MSGTYPE, + MV88E6XXX_PTP_MSGTYPE_ALL_EVENT); + if (err) + return err; + + /* Use ARRIVAL1 for peer delay response messages. */ + err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_TS_ARRIVAL_PTR, + MV88E6XXX_PTP_MSGTYPE_PDLAY_RES); + if (err) + return err; + + /* 88E6341 devices default to timestamping at the PHY, but this has + * a hardware issue that results in unreliable timestamps. Force + * these devices to timestamp at the MAC. + */ + if (chip->info->family == MV88E6XXX_FAMILY_6341) { + u16 val = MV88E6341_PTP_CFG_UPDATE | + MV88E6341_PTP_CFG_MODE_IDX | + MV88E6341_PTP_CFG_MODE_TS_AT_MAC; + err = mv88e6xxx_ptp_write(chip, MV88E6341_PTP_CFG, val); + if (err) + return err; + } + + return 0; +} + +void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip) +{ +} diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h new file mode 100644 index 000000000000..bc71c9212a08 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h @@ -0,0 +1,172 @@ +/* + * Marvell 88E6xxx Switch hardware timestamping support + * + * Copyright (c) 2008 Marvell Semiconductor + * + * Copyright (c) 2017 National Instruments + * Erik Hons <erik.hons@ni.com> + * Brandon Streiff <brandon.streiff@ni.com> + * Dane Wagner <dane.wagner@ni.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */
+
+#ifndef _MV88E6XXX_HWTSTAMP_H
+#define _MV88E6XXX_HWTSTAMP_H
+
+#include "chip.h"
+
+/* Global PTP registers */
+/* Offset 0x00: PTP EtherType */
+#define MV88E6XXX_PTP_ETHERTYPE	0x00
+
+/* Offset 0x01: Message Type Timestamp Enables */
+#define MV88E6XXX_PTP_MSGTYPE	0x01
+#define MV88E6XXX_PTP_MSGTYPE_SYNC	0x0001
+#define MV88E6XXX_PTP_MSGTYPE_DELAY_REQ	0x0002
+#define MV88E6XXX_PTP_MSGTYPE_PDLAY_REQ	0x0004
+#define MV88E6XXX_PTP_MSGTYPE_PDLAY_RES	0x0008
+#define MV88E6XXX_PTP_MSGTYPE_ALL_EVENT	0x000f
+
+/* Offset 0x02: Timestamp Arrival Capture Pointers */
+#define MV88E6XXX_PTP_TS_ARRIVAL_PTR	0x02
+
+/* Offset 0x07: PTP Global Configuration */
+#define MV88E6341_PTP_CFG	0x07
+#define MV88E6341_PTP_CFG_UPDATE	0x8000
+#define MV88E6341_PTP_CFG_IDX_MASK	0x7f00
+#define MV88E6341_PTP_CFG_DATA_MASK	0x00ff
+#define MV88E6341_PTP_CFG_MODE_IDX	0x0
+#define MV88E6341_PTP_CFG_MODE_TS_AT_PHY	0x00
+#define MV88E6341_PTP_CFG_MODE_TS_AT_MAC	0x80
+
+/* Offset 0x08: PTP Interrupt Status */
+#define MV88E6XXX_PTP_IRQ_STATUS	0x08
+
+/* Per-Port PTP Registers */
+/* Offset 0x00: PTP Configuration 0 */
+#define MV88E6XXX_PORT_PTP_CFG0	0x00
+#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_SHIFT	12
+#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_MASK	0xf000
+#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_1588	0x0000
+#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_8021AS	0x1000
+#define MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH	0x0800
+#define MV88E6XXX_PORT_PTP_CFG0_DISABLE_OVERWRITE	0x0002
+#define MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP	0x0001
+
+/* Offset 0x01: PTP Configuration 1 */
+#define MV88E6XXX_PORT_PTP_CFG1	0x01
+
+/* Offset 0x02: PTP Configuration 2 */
+#define MV88E6XXX_PORT_PTP_CFG2	0x02
+#define MV88E6XXX_PORT_PTP_CFG2_EMBED_ARRIVAL	0x1000
+#define MV88E6XXX_PORT_PTP_CFG2_DEP_IRQ_EN	0x0002
+#define MV88E6XXX_PORT_PTP_CFG2_ARR_IRQ_EN	0x0001
+
+/* Offset 0x03: PTP LED Configuration */
+#define MV88E6XXX_PORT_PTP_LED_CFG	0x03
+
+/* Offset 0x08: PTP Arrival 0 Status */
+#define MV88E6XXX_PORT_PTP_ARR0_STS	0x08
+
+/* Offset 0x09/0x0A: PTP Arrival 0 Time */
+#define MV88E6XXX_PORT_PTP_ARR0_TIME_LO	0x09
+#define MV88E6XXX_PORT_PTP_ARR0_TIME_HI	0x0a
+
+/* Offset 0x0B: PTP Arrival 0 Sequence ID */
+#define MV88E6XXX_PORT_PTP_ARR0_SEQID	0x0b
+
+/* Offset 0x0C: PTP Arrival 1 Status */
+#define MV88E6XXX_PORT_PTP_ARR1_STS	0x0c
+
+/* Offset 0x0D/0x0E: PTP Arrival 1 Time */
+#define MV88E6XXX_PORT_PTP_ARR1_TIME_LO	0x0d
+#define MV88E6XXX_PORT_PTP_ARR1_TIME_HI	0x0e
+
+/* Offset 0x0F: PTP Arrival 1 Sequence ID */
+#define MV88E6XXX_PORT_PTP_ARR1_SEQID	0x0f
+
+/* Offset 0x10: PTP Departure Status */
+#define MV88E6XXX_PORT_PTP_DEP_STS	0x10
+
+/* Offset 0x11/0x12: PTP Departure Time */
+#define MV88E6XXX_PORT_PTP_DEP_TIME_LO	0x11
+#define MV88E6XXX_PORT_PTP_DEP_TIME_HI	0x12
+
+/* Offset 0x13: PTP Departure Sequence ID */
+#define MV88E6XXX_PORT_PTP_DEP_SEQID	0x13
+
+/* Status fields for arrival and departure timestamp status registers */
+#define MV88E6XXX_PTP_TS_STATUS_MASK	0x0006
+#define MV88E6XXX_PTP_TS_STATUS_NORMAL	0x0000
+#define MV88E6XXX_PTP_TS_STATUS_OVERWITTEN	0x0002
+#define MV88E6XXX_PTP_TS_STATUS_DISCARDED	0x0004
+#define MV88E6XXX_PTP_TS_VALID	0x0001
+
+#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
+
+int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
+				struct ifreq *ifr);
+int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
+				struct ifreq *ifr);
+
+bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
+			     struct sk_buff *clone, unsigned int type);
+bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int
port, + struct sk_buff *clone, unsigned int type); + +int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, + struct ethtool_ts_info *info); + +int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip); +void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip); + +#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ + +static inline int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, + int port, struct ifreq *ifr) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, + int port, struct ifreq *ifr) +{ + return -EOPNOTSUPP; +} + +static inline bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port, + struct sk_buff *clone, + unsigned int type) +{ + return false; +} + +static inline bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *clone, + unsigned int type) +{ + return false; +} + +static inline int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, + struct ethtool_ts_info *info) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip) +{ + return 0; +} + +static inline void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip) +{ +} + +#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */ + +#endif /* _MV88E6XXX_HWTSTAMP_H */ diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c new file mode 100644 index 000000000000..bd85e2c390e1 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -0,0 +1,381 @@ +/* + * Marvell 88E6xxx Switch PTP support + * + * Copyright (c) 2008 Marvell Semiconductor + * + * Copyright (c) 2017 National Instruments + * Erik Hons <erik.hons@ni.com> + * Brandon Streiff <brandon.streiff@ni.com> + * Dane Wagner <dane.wagner@ni.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include "chip.h" +#include "global2.h" +#include "ptp.h" + +/* Raw timestamps are in units of 8-ns clock periods. 
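
A quick, self-contained check of the fixed-point constants defined just below, in user-space C and purely illustrative: with a 125 MHz clock each tick is 8 ns, so mult = 8 << 28 with shift = 28 converts ticks to nanoseconds, and CC_MULT_NUM / CC_MULT_DEM scales a rate adjustment (where scaled_ppm = ppm * 2^16) into a mult correction:

#include <stdio.h>
#include <stdint.h>

#define CC_SHIFT	28
#define CC_MULT		(8ULL << CC_SHIFT)	/* 8 ns per tick, fixed point */
#define CC_MULT_NUM	(1 << 9)
#define CC_MULT_DEM	15625ULL

int main(void)
{
	uint64_t cycles = 1000000;	/* 1e6 ticks of the 125 MHz clock */
	uint64_t ns = (cycles * CC_MULT) >> CC_SHIFT;

	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)cycles, (unsigned long long)ns);

	/* mult correction for a 1 ppm rate change, as in adjfine below;
	 * it should match CC_MULT / 1e6. */
	uint64_t diff = ((uint64_t)1 << 16) * CC_MULT_NUM / CC_MULT_DEM;

	printf("1 ppm -> mult diff %llu (expect ~%.0f)\n",
	       (unsigned long long)diff, (double)CC_MULT / 1e6);
	return 0;
}
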
*/ +#define CC_SHIFT 28 +#define CC_MULT (8 << CC_SHIFT) +#define CC_MULT_NUM (1 << 9) +#define CC_MULT_DEM 15625ULL + +#define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100) + +#define cc_to_chip(cc) container_of(cc, struct mv88e6xxx_chip, tstamp_cc) +#define dw_overflow_to_chip(dw) container_of(dw, struct mv88e6xxx_chip, \ + overflow_work) +#define dw_tai_event_to_chip(dw) container_of(dw, struct mv88e6xxx_chip, \ + tai_event_work) + +static int mv88e6xxx_tai_read(struct mv88e6xxx_chip *chip, int addr, + u16 *data, int len) +{ + if (!chip->info->ops->avb_ops->tai_read) + return -EOPNOTSUPP; + + return chip->info->ops->avb_ops->tai_read(chip, addr, data, len); +} + +static int mv88e6xxx_tai_write(struct mv88e6xxx_chip *chip, int addr, u16 data) +{ + if (!chip->info->ops->avb_ops->tai_write) + return -EOPNOTSUPP; + + return chip->info->ops->avb_ops->tai_write(chip, addr, data); +} + +/* TODO: places where this are called should be using pinctrl */ +static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin, + int func, int input) +{ + int err; + + if (!chip->info->ops->gpio_ops) + return -EOPNOTSUPP; + + err = chip->info->ops->gpio_ops->set_dir(chip, pin, input); + if (err) + return err; + + return chip->info->ops->gpio_ops->set_pctl(chip, pin, func); +} + +static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) +{ + struct mv88e6xxx_chip *chip = cc_to_chip(cc); + u16 phc_time[2]; + int err; + + err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_TIME_LO, phc_time, + ARRAY_SIZE(phc_time)); + if (err) + return 0; + else + return ((u32)phc_time[1] << 16) | phc_time[0]; +} + +/* mv88e6xxx_config_eventcap - configure TAI event capture + * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external) + * @rising: zero for falling-edge trigger, else rising-edge trigger + * + * This will also reset the capture sequence counter. 
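
For context, the event capture configured here surfaces to user space through the standard PTP character device. A rough sketch of the consumer side (the device path is made up, and the pin must already be assigned PTP_PF_EXTTS, e.g. via the PTP_PIN_SETFUNC ioctl):

#include <linux/ptp_clock.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct ptp_extts_request req = {
		.index = 0,	/* this driver exposes a single channel */
		.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
	};
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);	/* hypothetical PHC node */

	if (fd < 0 || ioctl(fd, PTP_EXTTS_REQUEST, &req) < 0)
		return 1;

	/* Each captured edge is delivered as one ptp_extts_event. */
	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("extts[%u]: %lld.%09u\n", ev.index,
		       (long long)ev.t.sec, ev.t.nsec);

	close(fd);
	return 0;
}
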
+ */ +static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event, + int rising) +{ + u16 global_config; + u16 cap_config; + int err; + + chip->evcap_config = MV88E6XXX_TAI_CFG_CAP_OVERWRITE | + MV88E6XXX_TAI_CFG_CAP_CTR_START; + if (!rising) + chip->evcap_config |= MV88E6XXX_TAI_CFG_EVREQ_FALLING; + + global_config = (chip->evcap_config | chip->trig_config); + err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_CFG, global_config); + if (err) + return err; + + if (event == PTP_CLOCK_PPS) { + cap_config = MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG; + } else if (event == PTP_CLOCK_EXTTS) { + /* if STATUS_CAP_TRIG is unset we capture PTP_EVREQ events */ + cap_config = 0; + } else { + return -EINVAL; + } + + /* Write the capture config; this also clears the capture counter */ + err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS, + cap_config); + + return err; +} + +static void mv88e6xxx_tai_event_work(struct work_struct *ugly) +{ + struct delayed_work *dw = to_delayed_work(ugly); + struct mv88e6xxx_chip *chip = dw_tai_event_to_chip(dw); + struct ptp_clock_event ev; + u16 status[4]; + u32 raw_ts; + int err; + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_EVENT_STATUS, + status, ARRAY_SIZE(status)); + mutex_unlock(&chip->reg_lock); + + if (err) { + dev_err(chip->dev, "failed to read TAI status register\n"); + return; + } + if (status[0] & MV88E6XXX_TAI_EVENT_STATUS_ERROR) { + dev_warn(chip->dev, "missed event capture\n"); + return; + } + if (!(status[0] & MV88E6XXX_TAI_EVENT_STATUS_VALID)) + goto out; + + raw_ts = ((u32)status[2] << 16) | status[1]; + + /* Clear the valid bit so the next timestamp can come in */ + status[0] &= ~MV88E6XXX_TAI_EVENT_STATUS_VALID; + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS, status[0]); + mutex_unlock(&chip->reg_lock); + + /* This is an external timestamp */ + ev.type = PTP_CLOCK_EXTTS; + + /* We only have one timestamping channel. */ + ev.index = 0; + mutex_lock(&chip->reg_lock); + ev.timestamp = timecounter_cyc2time(&chip->tstamp_tc, raw_ts); + mutex_unlock(&chip->reg_lock); + + ptp_clock_event(chip->ptp_clock, &ev); +out: + schedule_delayed_work(&chip->tai_event_work, TAI_EVENT_WORK_INTERVAL); +} + +static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct mv88e6xxx_chip *chip = ptp_to_chip(ptp); + int neg_adj = 0; + u32 diff, mult; + u64 adj; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + mult = CC_MULT; + adj = CC_MULT_NUM; + adj *= scaled_ppm; + diff = div_u64(adj, CC_MULT_DEM); + + mutex_lock(&chip->reg_lock); + + timecounter_read(&chip->tstamp_tc); + chip->tstamp_cc.mult = neg_adj ? 
mult - diff : mult + diff; + + mutex_unlock(&chip->reg_lock); + + return 0; +} + +static int mv88e6xxx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct mv88e6xxx_chip *chip = ptp_to_chip(ptp); + + mutex_lock(&chip->reg_lock); + timecounter_adjtime(&chip->tstamp_tc, delta); + mutex_unlock(&chip->reg_lock); + + return 0; +} + +static int mv88e6xxx_ptp_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct mv88e6xxx_chip *chip = ptp_to_chip(ptp); + u64 ns; + + mutex_lock(&chip->reg_lock); + ns = timecounter_read(&chip->tstamp_tc); + mutex_unlock(&chip->reg_lock); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int mv88e6xxx_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct mv88e6xxx_chip *chip = ptp_to_chip(ptp); + u64 ns; + + ns = timespec64_to_ns(ts); + + mutex_lock(&chip->reg_lock); + timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc, ns); + mutex_unlock(&chip->reg_lock); + + return 0; +} + +static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip, + struct ptp_clock_request *rq, int on) +{ + int rising = (rq->extts.flags & PTP_RISING_EDGE); + int func; + int pin; + int err; + + pin = ptp_find_pin(chip->ptp_clock, PTP_PF_EXTTS, rq->extts.index); + + if (pin < 0) + return -EBUSY; + + mutex_lock(&chip->reg_lock); + + if (on) { + func = MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ; + + err = mv88e6xxx_set_gpio_func(chip, pin, func, true); + if (err) + goto out; + + schedule_delayed_work(&chip->tai_event_work, + TAI_EVENT_WORK_INTERVAL); + + err = mv88e6xxx_config_eventcap(chip, PTP_CLOCK_EXTTS, rising); + } else { + func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO; + + err = mv88e6xxx_set_gpio_func(chip, pin, func, true); + + cancel_delayed_work_sync(&chip->tai_event_work); + } + +out: + mutex_unlock(&chip->reg_lock); + + return err; +} + +static int mv88e6xxx_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct mv88e6xxx_chip *chip = ptp_to_chip(ptp); + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + return mv88e6xxx_ptp_enable_extts(chip, rq, on); + default: + return -EOPNOTSUPP; + } +} + +static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + break; + case PTP_PF_PEROUT: + case PTP_PF_PHYSYNC: + return -EOPNOTSUPP; + } + return 0; +} + +/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3 + * seconds; this task forces periodic reads so that we don't miss any. 
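
The ~34.3 s figure follows directly from the counter width and tick size; a trivial sanity check (illustrative only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 32-bit counter of 8 ns ticks, read every 16 s (HZ * 16). */
	double wrap_s = ((double)UINT32_MAX + 1) * 8e-9;

	printf("counter wraps every %.2f s\n", wrap_s);	/* ~34.36 */
	printf("margin with a 16 s poll: %.2f s\n", wrap_s - 16.0);
	return 0;
}
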
+ */ +#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 16) +static void mv88e6xxx_ptp_overflow_check(struct work_struct *work) +{ + struct delayed_work *dw = to_delayed_work(work); + struct mv88e6xxx_chip *chip = dw_overflow_to_chip(dw); + struct timespec64 ts; + + mv88e6xxx_ptp_gettime(&chip->ptp_clock_info, &ts); + + schedule_delayed_work(&chip->overflow_work, + MV88E6XXX_TAI_OVERFLOW_PERIOD); +} + +int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) +{ + int i; + + /* Set up the cycle counter */ + memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc)); + chip->tstamp_cc.read = mv88e6xxx_ptp_clock_read; + chip->tstamp_cc.mask = CYCLECOUNTER_MASK(32); + chip->tstamp_cc.mult = CC_MULT; + chip->tstamp_cc.shift = CC_SHIFT; + + timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc, + ktime_to_ns(ktime_get_real())); + + INIT_DELAYED_WORK(&chip->overflow_work, mv88e6xxx_ptp_overflow_check); + INIT_DELAYED_WORK(&chip->tai_event_work, mv88e6xxx_tai_event_work); + + chip->ptp_clock_info.owner = THIS_MODULE; + snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name), + dev_name(chip->dev)); + chip->ptp_clock_info.max_adj = 1000000; + + chip->ptp_clock_info.n_ext_ts = 1; + chip->ptp_clock_info.n_per_out = 0; + chip->ptp_clock_info.n_pins = mv88e6xxx_num_gpio(chip); + chip->ptp_clock_info.pps = 0; + + for (i = 0; i < chip->ptp_clock_info.n_pins; ++i) { + struct ptp_pin_desc *ppd = &chip->pin_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "mv88e6xxx_gpio%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } + chip->ptp_clock_info.pin_config = chip->pin_config; + + chip->ptp_clock_info.adjfine = mv88e6xxx_ptp_adjfine; + chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime; + chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime; + chip->ptp_clock_info.settime64 = mv88e6xxx_ptp_settime; + chip->ptp_clock_info.enable = mv88e6xxx_ptp_enable; + chip->ptp_clock_info.verify = mv88e6xxx_ptp_verify; + chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work; + + chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev); + if (IS_ERR(chip->ptp_clock)) + return PTR_ERR(chip->ptp_clock); + + schedule_delayed_work(&chip->overflow_work, + MV88E6XXX_TAI_OVERFLOW_PERIOD); + + return 0; +} + +void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip) +{ + if (chip->ptp_clock) { + cancel_delayed_work_sync(&chip->overflow_work); + cancel_delayed_work_sync(&chip->tai_event_work); + + ptp_clock_unregister(chip->ptp_clock); + chip->ptp_clock = NULL; + } +} diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h new file mode 100644 index 000000000000..10f271ab650d --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/ptp.h @@ -0,0 +1,108 @@ +/* + * Marvell 88E6xxx Switch PTP support + * + * Copyright (c) 2008 Marvell Semiconductor + * + * Copyright (c) 2017 National Instruments + * Erik Hons <erik.hons@ni.com> + * Brandon Streiff <brandon.streiff@ni.com> + * Dane Wagner <dane.wagner@ni.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _MV88E6XXX_PTP_H +#define _MV88E6XXX_PTP_H + +#include "chip.h" + +/* Offset 0x00: TAI Global Config */ +#define MV88E6XXX_TAI_CFG 0x00 +#define MV88E6XXX_TAI_CFG_CAP_OVERWRITE 0x8000 +#define MV88E6XXX_TAI_CFG_CAP_CTR_START 0x4000 +#define MV88E6XXX_TAI_CFG_EVREQ_FALLING 0x2000 +#define MV88E6XXX_TAI_CFG_TRIG_ACTIVE_LO 0x1000 +#define MV88E6XXX_TAI_CFG_IRL_ENABLE 0x0400 +#define MV88E6XXX_TAI_CFG_TRIG_IRQ_EN 0x0200 +#define MV88E6XXX_TAI_CFG_EVREQ_IRQ_EN 0x0100 +#define MV88E6XXX_TAI_CFG_TRIG_LOCK 0x0080 +#define MV88E6XXX_TAI_CFG_BLOCK_UPDATE 0x0008 +#define MV88E6XXX_TAI_CFG_MULTI_PTP 0x0004 +#define MV88E6XXX_TAI_CFG_TRIG_MODE_ONESHOT 0x0002 +#define MV88E6XXX_TAI_CFG_TRIG_ENABLE 0x0001 + +/* Offset 0x01: Timestamp Clock Period (ps) */ +#define MV88E6XXX_TAI_CLOCK_PERIOD 0x01 + +/* Offset 0x02/0x03: Trigger Generation Amount */ +#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_LO 0x02 +#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_HI 0x03 + +/* Offset 0x04: Clock Compensation */ +#define MV88E6XXX_TAI_TRIG_CLOCK_COMP 0x04 + +/* Offset 0x05: Trigger Configuration */ +#define MV88E6XXX_TAI_TRIG_CFG 0x05 + +/* Offset 0x06: Ingress Rate Limiter Clock Generation Amount */ +#define MV88E6XXX_TAI_IRL_AMOUNT 0x06 + +/* Offset 0x07: Ingress Rate Limiter Compensation */ +#define MV88E6XXX_TAI_IRL_COMP 0x07 + +/* Offset 0x08: Ingress Rate Limiter Compensation */ +#define MV88E6XXX_TAI_IRL_COMP_PS 0x08 + +/* Offset 0x09: Event Status */ +#define MV88E6XXX_TAI_EVENT_STATUS 0x09 +#define MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG 0x4000 +#define MV88E6XXX_TAI_EVENT_STATUS_ERROR 0x0200 +#define MV88E6XXX_TAI_EVENT_STATUS_VALID 0x0100 +#define MV88E6XXX_TAI_EVENT_STATUS_CTR_MASK 0x00ff + +/* Offset 0x0A/0x0B: Event Time */ +#define MV88E6XXX_TAI_EVENT_TIME_LO 0x0a +#define MV88E6XXX_TAI_EVENT_TYPE_HI 0x0b + +/* Offset 0x0E/0x0F: PTP Global Time */ +#define MV88E6XXX_TAI_TIME_LO 0x0e +#define MV88E6XXX_TAI_TIME_HI 0x0f + +/* Offset 0x10/0x11: Trig Generation Time */ +#define MV88E6XXX_TAI_TRIG_TIME_LO 0x10 +#define MV88E6XXX_TAI_TRIG_TIME_HI 0x11 + +/* Offset 0x12: Lock Status */ +#define MV88E6XXX_TAI_LOCK_STATUS 0x12 + +#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP + +long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp); +int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip); +void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip); + +#define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \ + ptp_clock_info) + +#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ + +static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp) +{ + return -1; +} + +static inline int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) +{ + return 0; +} + +static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip) +{ +} + +#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */ + +#endif /* _MV88E6XXX_PTP_H */ diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index f3c01119b3d1..fb058fd35c0d 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -55,18 +55,30 @@ static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on) return err; } -int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) +static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port) { - int err; u8 cmode; + int err; err = mv88e6xxx_port_get_cmode(chip, port, &cmode); - if (err) - return err; + if (err) { + dev_err(chip->dev, "failed to read cmode\n"); + return false; + } if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASE_X) || (cmode == 
MV88E6XXX_PORT_STS_CMODE_1000BASE_X) || - (cmode == MV88E6XXX_PORT_STS_CMODE_SGMII)) { + (cmode == MV88E6XXX_PORT_STS_CMODE_SGMII)) + return true; + + return false; +} + +int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) +{ + int err; + + if (mv88e6352_port_has_serdes(chip, port)) { err = mv88e6352_serdes_power_set(chip, on); if (err < 0) return err; @@ -75,6 +87,93 @@ int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) return 0; } +struct mv88e6352_serdes_hw_stat { + char string[ETH_GSTRING_LEN]; + int sizeof_stat; + int reg; +}; + +static struct mv88e6352_serdes_hw_stat mv88e6352_serdes_hw_stats[] = { + { "serdes_fibre_rx_error", 16, 21 }, + { "serdes_PRBS_error", 32, 24 }, +}; + +int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port) +{ + if (mv88e6352_port_has_serdes(chip, port)) + return ARRAY_SIZE(mv88e6352_serdes_hw_stats); + + return 0; +} + +int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, + int port, uint8_t *data) +{ + struct mv88e6352_serdes_hw_stat *stat; + int i; + + if (!mv88e6352_port_has_serdes(chip, port)) + return 0; + + for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) { + stat = &mv88e6352_serdes_hw_stats[i]; + memcpy(data + i * ETH_GSTRING_LEN, stat->string, + ETH_GSTRING_LEN); + } + return ARRAY_SIZE(mv88e6352_serdes_hw_stats); +} + +static uint64_t mv88e6352_serdes_get_stat(struct mv88e6xxx_chip *chip, + struct mv88e6352_serdes_hw_stat *stat) +{ + u64 val = 0; + u16 reg; + int err; + + err = mv88e6352_serdes_read(chip, stat->reg, ®); + if (err) { + dev_err(chip->dev, "failed to read statistic\n"); + return 0; + } + + val = reg; + + if (stat->sizeof_stat == 32) { + err = mv88e6352_serdes_read(chip, stat->reg + 1, ®); + if (err) { + dev_err(chip->dev, "failed to read statistic\n"); + return 0; + } + val = val << 16 | reg; + } + + return val; +} + +int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, + uint64_t *data) +{ + struct mv88e6xxx_port *mv88e6xxx_port = &chip->ports[port]; + struct mv88e6352_serdes_hw_stat *stat; + u64 value; + int i; + + if (!mv88e6352_port_has_serdes(chip, port)) + return 0; + + BUILD_BUG_ON(ARRAY_SIZE(mv88e6352_serdes_hw_stats) > + ARRAY_SIZE(mv88e6xxx_port->serdes_stats)); + + for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) { + stat = &mv88e6352_serdes_hw_stats[i]; + value = mv88e6352_serdes_get_stat(chip, stat); + mv88e6xxx_port->serdes_stats[i] += value; + data[i] = mv88e6xxx_port->serdes_stats[i]; + } + + return ARRAY_SIZE(mv88e6352_serdes_hw_stats); +} + /* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */ static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int addr, bool on) { diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index 5c1cd6d8e9a5..1897c01c6e19 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -44,5 +44,9 @@ int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); - +int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); +int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, + int port, uint8_t *data); +int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, + uint64_t *data); #endif diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 9df22ebee822..600d5ad1fbde 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -631,7 +631,7 @@ 
qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, } static int -qca8k_get_sset_count(struct dsa_switch *ds) +qca8k_get_sset_count(struct dsa_switch *ds, int port) { return ARRAY_SIZE(ar8327_mib); } diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 30b1c8512049..0d15a12a4560 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -219,6 +219,7 @@ static int __init dummy_init_module(void) { int i, err = 0; + down_write(&pernet_ops_rwsem); rtnl_lock(); err = __rtnl_link_register(&dummy_link_ops); if (err < 0) @@ -233,6 +234,7 @@ static int __init dummy_init_module(void) out: rtnl_unlock(); + up_write(&pernet_ops_rwsem); return err; } diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index 29c3075bfb05..9fee7c83ef9f 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -3,7 +3,7 @@ # config NET_VENDOR_8390 - bool "National Semi-conductor 8390 devices" + bool "National Semiconductor 8390 devices" default y depends on NET_VENDOR_NATSEMI ---help--- @@ -87,8 +87,7 @@ config MCF8390 config NE2000 tristate "NE2000/NE1000 support" - depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX || \ - ATARI_ETHERNEC) + depends on (ISA || (Q40 && m) || MACH_TX49XX || ATARI_ETHERNEC) select CRC32 ---help--- If you have a network (Ethernet) card of this type, say Y here. diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile index f975c2fc88a3..1d650e66cc6e 100644 --- a/drivers/net/ethernet/8390/Makefile +++ b/drivers/net/ethernet/8390/Makefile @@ -7,8 +7,8 @@ obj-$(CONFIG_MAC8390) += mac8390.o obj-$(CONFIG_APNE) += apne.o 8390.o obj-$(CONFIG_ARM_ETHERH) += etherh.o obj-$(CONFIG_AX88796) += ax88796.o -obj-$(CONFIG_HYDRA) += hydra.o 8390.o -obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o +obj-$(CONFIG_HYDRA) += hydra.o +obj-$(CONFIG_MCF8390) += mcf8390.o obj-$(CONFIG_NE2000) += ne.o 8390p.o obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o @@ -16,4 +16,4 @@ obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o obj-$(CONFIG_STNIC) += stnic.o 8390.o obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o obj-$(CONFIG_WD80x3) += wd.o 8390.o -obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o +obj-$(CONFIG_ZORRO8390) += zorro8390.o diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c index c56ac9ebc08f..fe6c834c422e 100644 --- a/drivers/net/ethernet/8390/apne.c +++ b/drivers/net/ethernet/8390/apne.c @@ -117,7 +117,7 @@ static const char version[] = static int apne_owned; /* signal if card already owned */ static u32 apne_msg_enable; -module_param_named(msg_enable, apne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +module_param_named(msg_enable, apne_msg_enable, uint, 0444); MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); struct net_device * __init apne_probe(int unit) diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 245554707163..da61cf3cb3a9 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -77,8 +77,6 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron #define AX_GPOC_PPDSET BIT(6) -static u32 ax_msg_enable; - /* device private data */ struct ax_device { @@ -747,7 +745,6 @@ static int ax_init_dev(struct net_device *dev) ei_local->block_output = &ax_block_output; ei_local->get_8390_hdr = &ax_get_8390_hdr; ei_local->priv = 0; - ei_local->msg_enable = ax_msg_enable; dev->netdev_ops = &ax_netdev_ops; 
dev->ethtool_ops = &ax_ethtool_ops; diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index 7bddb8efb6d5..d422a124cd7c 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -104,7 +104,6 @@ static void AX88190_init(struct net_device *dev, int startp); static int ax_open(struct net_device *dev); static int ax_close(struct net_device *dev); static irqreturn_t ax_interrupt(int irq, void *dev_id); -static u32 axnet_msg_enable; /*====================================================================*/ @@ -151,7 +150,6 @@ static int axnet_probe(struct pcmcia_device *link) return -ENOMEM; ei_local = netdev_priv(dev); - ei_local->msg_enable = axnet_msg_enable; spin_lock_init(&ei_local->page_lock); info = PRIV(dev); diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c index 11cbf22ad201..32e9627e3880 100644 --- a/drivers/net/ethernet/8390/etherh.c +++ b/drivers/net/ethernet/8390/etherh.c @@ -64,8 +64,6 @@ static char version[] = #include "lib8390.c" -static u32 etherh_msg_enable; - struct etherh_priv { void __iomem *ioc_fast; void __iomem *memc; @@ -502,18 +500,6 @@ etherh_close(struct net_device *dev) } /* - * Initialisation - */ - -static void __init etherh_banner(void) -{ - static int version_printed; - - if ((etherh_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0)) - pr_info("%s", version); -} - -/* * Read the ethernet address string from the on board rom. * This is an ascii string... */ @@ -671,8 +657,6 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id) struct etherh_priv *eh; int ret; - etherh_banner(); - ret = ecard_request_resources(ec); if (ret) goto out; @@ -757,7 +741,6 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id) ei_local->block_output = etherh_block_output; ei_local->get_8390_hdr = etherh_get_header; ei_local->interface_num = 0; - ei_local->msg_enable = etherh_msg_enable; etherh_reset(dev); __NS8390_init(dev, 0); diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c index 8ae249195301..941754ea78ec 100644 --- a/drivers/net/ethernet/8390/hydra.c +++ b/drivers/net/ethernet/8390/hydra.c @@ -66,7 +66,6 @@ static void hydra_block_input(struct net_device *dev, int count, static void hydra_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page); static void hydra_remove_one(struct zorro_dev *z); -static u32 hydra_msg_enable; static struct zorro_device_id hydra_zorro_tbl[] = { { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET }, @@ -119,7 +118,6 @@ static int hydra_init(struct zorro_dev *z) int start_page, stop_page; int j; int err; - struct ei_device *ei_local; static u32 hydra_offsets[16] = { 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, @@ -138,8 +136,6 @@ static int hydra_init(struct zorro_dev *z) start_page = NESM_START_PG; stop_page = NESM_STOP_PG; - ei_local = netdev_priv(dev); - ei_local->msg_enable = hydra_msg_enable; dev->base_addr = ioaddr; dev->irq = IRQ_AMIGA_PORTS; diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c index 60f8e2c8e726..c9c55c9eab9f 100644 --- a/drivers/net/ethernet/8390/lib8390.c +++ b/drivers/net/ethernet/8390/lib8390.c @@ -113,7 +113,7 @@ static void __NS8390_init(struct net_device *dev, int startp); static unsigned version_printed; static u32 msg_enable; -module_param(msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +module_param(msg_enable, uint, 0444); MODULE_PARM_DESC(msg_enable, "Debug message level (see 
linux/netdevice.h for bitmap)"); /* @@ -975,6 +975,8 @@ static void ethdev_setup(struct net_device *dev) ether_setup(dev); spin_lock_init(&ei_local->page_lock); + + ei_local->msg_enable = msg_enable; } /** diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c index 2f91ce8dc614..b6d735bf8011 100644 --- a/drivers/net/ethernet/8390/mac8390.c +++ b/drivers/net/ethernet/8390/mac8390.c @@ -123,8 +123,7 @@ enum mac8390_access { }; extern int mac8390_memtest(struct net_device *dev); -static int mac8390_initdev(struct net_device *dev, - struct nubus_rsrc *ndev, +static int mac8390_initdev(struct net_device *dev, struct nubus_board *board, enum mac8390_type type); static int mac8390_open(struct net_device *dev); @@ -168,9 +167,8 @@ static void slow_sane_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page); static void word_memcpy_tocard(unsigned long tp, const void *fp, int count); static void word_memcpy_fromcard(void *tp, unsigned long fp, int count); -static u32 mac8390_msg_enable; -static enum mac8390_type __init mac8390_ident(struct nubus_rsrc *fres) +static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres) { switch (fres->dr_sw) { case NUBUS_DRSW_3COM: @@ -236,7 +234,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_rsrc *fres) return MAC8390_NONE; } -static enum mac8390_access __init mac8390_testio(volatile unsigned long membase) +static enum mac8390_access mac8390_testio(unsigned long membase) { unsigned long outdata = 0xA5A0B5B0; unsigned long indata = 0x00000000; @@ -254,7 +252,7 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase) return ACCESS_UNKNOWN; } -static int __init mac8390_memsize(unsigned long membase) +static int mac8390_memsize(unsigned long membase) { unsigned long flags; int i, j; @@ -290,36 +288,34 @@ static int __init mac8390_memsize(unsigned long membase) return i * 0x1000; } -static bool __init mac8390_init(struct net_device *dev, - struct nubus_rsrc *ndev, - enum mac8390_type cardtype) +static bool mac8390_rsrc_init(struct net_device *dev, + struct nubus_rsrc *fres, + enum mac8390_type cardtype) { + struct nubus_board *board = fres->board; struct nubus_dir dir; struct nubus_dirent ent; int offset; volatile unsigned short *i; - printk_once(KERN_INFO pr_fmt("%s"), version); - - dev->irq = SLOT2IRQ(ndev->board->slot); + dev->irq = SLOT2IRQ(board->slot); /* This is getting to be a habit */ - dev->base_addr = (ndev->board->slot_addr | - ((ndev->board->slot & 0xf) << 20)); + dev->base_addr = board->slot_addr | ((board->slot & 0xf) << 20); /* * Get some Nubus info - we will trust the card's idea * of where its memory and registers are. 
*/ - if (nubus_get_func_dir(ndev, &dir) == -1) { - pr_err("%s: Unable to get Nubus functional directory for slot %X!\n", - dev->name, ndev->board->slot); + if (nubus_get_func_dir(fres, &dir) == -1) { + dev_err(&board->dev, + "Unable to get Nubus functional directory\n"); return false; } /* Get the MAC address */ if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) { - pr_info("%s: Couldn't get MAC address!\n", dev->name); + dev_info(&board->dev, "MAC address resource not found\n"); return false; } @@ -329,8 +325,8 @@ static bool __init mac8390_init(struct net_device *dev, nubus_rewinddir(&dir); if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS, &ent) == -1) { - pr_err("%s: Memory offset resource for slot %X not found!\n", - dev->name, ndev->board->slot); + dev_err(&board->dev, + "Memory offset resource not found\n"); return false; } nubus_get_rsrc_mem(&offset, &ent, 4); @@ -340,8 +336,8 @@ static bool __init mac8390_init(struct net_device *dev, nubus_rewinddir(&dir); if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH, &ent) == -1) { - pr_info("%s: Memory length resource for slot %X not found, probing\n", - dev->name, ndev->board->slot); + dev_info(&board->dev, + "Memory length resource not found, probing\n"); offset = mac8390_memsize(dev->mem_start); } else { nubus_get_rsrc_mem(&offset, &ent, 4); @@ -351,25 +347,25 @@ static bool __init mac8390_init(struct net_device *dev, switch (cardtype) { case MAC8390_KINETICS: case MAC8390_DAYNA: /* it's the same */ - dev->base_addr = (int)(ndev->board->slot_addr + + dev->base_addr = (int)(board->slot_addr + DAYNA_8390_BASE); - dev->mem_start = (int)(ndev->board->slot_addr + + dev->mem_start = (int)(board->slot_addr + DAYNA_8390_MEM); dev->mem_end = dev->mem_start + mac8390_memsize(dev->mem_start); break; case MAC8390_INTERLAN: - dev->base_addr = (int)(ndev->board->slot_addr + + dev->base_addr = (int)(board->slot_addr + INTERLAN_8390_BASE); - dev->mem_start = (int)(ndev->board->slot_addr + + dev->mem_start = (int)(board->slot_addr + INTERLAN_8390_MEM); dev->mem_end = dev->mem_start + mac8390_memsize(dev->mem_start); break; case MAC8390_CABLETRON: - dev->base_addr = (int)(ndev->board->slot_addr + + dev->base_addr = (int)(board->slot_addr + CABLETRON_8390_BASE); - dev->mem_start = (int)(ndev->board->slot_addr + + dev->mem_start = (int)(board->slot_addr + CABLETRON_8390_MEM); /* The base address is unreadable if 0x00 * has been written to the command register @@ -384,8 +380,8 @@ static bool __init mac8390_init(struct net_device *dev, break; default: - pr_err("Card type %s is unsupported, sorry\n", - ndev->board->name); + dev_err(&board->dev, + "No known base address for card type\n"); return false; } } @@ -393,91 +389,83 @@ static bool __init mac8390_init(struct net_device *dev, return true; } -struct net_device * __init mac8390_probe(int unit) +static int mac8390_device_probe(struct nubus_board *board) { struct net_device *dev; - struct nubus_rsrc *ndev = NULL; int err = -ENODEV; - struct ei_device *ei_local; - - static unsigned int slots; - - enum mac8390_type cardtype; - - /* probably should check for Nubus instead */ - - if (!MACH_IS_MAC) - return ERR_PTR(-ENODEV); + struct nubus_rsrc *fres; + enum mac8390_type cardtype = MAC8390_NONE; dev = ____alloc_ei_netdev(0); if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) - sprintf(dev->name, "eth%d", unit); + return -ENOMEM; - for_each_func_rsrc(ndev) { - if (ndev->category != NUBUS_CAT_NETWORK || - ndev->type != NUBUS_TYPE_ETHERNET) - continue; + SET_NETDEV_DEV(dev, &board->dev); - /* 
Have we seen it already? */ - if (slots & (1 << ndev->board->slot)) + for_each_board_func_rsrc(board, fres) { + if (fres->category != NUBUS_CAT_NETWORK || + fres->type != NUBUS_TYPE_ETHERNET) continue; - slots |= 1 << ndev->board->slot; - cardtype = mac8390_ident(ndev); + cardtype = mac8390_ident(fres); if (cardtype == MAC8390_NONE) continue; - if (!mac8390_init(dev, ndev, cardtype)) - continue; - - /* Do the nasty 8390 stuff */ - if (!mac8390_initdev(dev, ndev, cardtype)) + if (mac8390_rsrc_init(dev, fres, cardtype)) break; } - - if (!ndev) + if (!fres) goto out; - ei_local = netdev_priv(dev); - ei_local->msg_enable = mac8390_msg_enable; + err = mac8390_initdev(dev, board, cardtype); + if (err) + goto out; err = register_netdev(dev); if (err) goto out; - return dev; + + nubus_set_drvdata(board, dev); + return 0; out: free_netdev(dev); - return ERR_PTR(err); + return err; +} + +static int mac8390_device_remove(struct nubus_board *board) +{ + struct net_device *dev = nubus_get_drvdata(board); + + unregister_netdev(dev); + free_netdev(dev); + return 0; } -#ifdef MODULE +static struct nubus_driver mac8390_driver = { + .probe = mac8390_device_probe, + .remove = mac8390_device_remove, + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + } +}; + MODULE_AUTHOR("David Huggins-Daines <dhd@debian.org> and others"); MODULE_DESCRIPTION("Macintosh NS8390-based Nubus Ethernet driver"); MODULE_LICENSE("GPL"); -static struct net_device *dev_mac8390; - -int __init init_module(void) +static int __init mac8390_init(void) { - dev_mac8390 = mac8390_probe(-1); - if (IS_ERR(dev_mac8390)) { - pr_warn("mac8390: No card found\n"); - return PTR_ERR(dev_mac8390); - } - return 0; + return nubus_driver_register(&mac8390_driver); } +module_init(mac8390_init); -void __exit cleanup_module(void) +static void __exit mac8390_exit(void) { - unregister_netdev(dev_mac8390); - free_netdev(dev_mac8390); + nubus_driver_unregister(&mac8390_driver); } - -#endif /* MODULE */ +module_exit(mac8390_exit); static const struct net_device_ops mac8390_netdev_ops = { .ndo_open = mac8390_open, @@ -493,9 +481,8 @@ static const struct net_device_ops mac8390_netdev_ops = { #endif }; -static int __init mac8390_initdev(struct net_device *dev, - struct nubus_rsrc *ndev, - enum mac8390_type type) +static int mac8390_initdev(struct net_device *dev, struct nubus_board *board, + enum mac8390_type type) { static u32 fwrd4_offsets[16] = { 0, 4, 8, 12, @@ -546,7 +533,8 @@ static int __init mac8390_initdev(struct net_device *dev, case MAC8390_APPLE: switch (mac8390_testio(dev->mem_start)) { case ACCESS_UNKNOWN: - pr_err("Don't know how to access card memory!\n"); + dev_err(&board->dev, + "Don't know how to access card memory\n"); return -ENODEV; case ACCESS_16: @@ -612,21 +600,18 @@ static int __init mac8390_initdev(struct net_device *dev, break; default: - pr_err("Card type %s is unsupported, sorry\n", - ndev->board->name); + dev_err(&board->dev, "Unsupported card type\n"); return -ENODEV; } __NS8390_init(dev, 0); /* Good, done, now spit out some messages */ - pr_info("%s: %s in slot %X (type %s)\n", - dev->name, ndev->board->name, ndev->board->slot, - cardname[type]); - pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n", - dev->dev_addr, dev->irq, - (unsigned int)(dev->mem_end - dev->mem_start) >> 10, - dev->mem_start, access_bitmode ? 
32 : 16); + dev_info(&board->dev, "%s (type %s)\n", board->name, cardname[type]); + dev_info(&board->dev, "MAC %pM, IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n", + dev->dev_addr, dev->irq, + (unsigned int)(dev->mem_end - dev->mem_start) >> 10, + dev->mem_start, access_bitmode ? 32 : 16); return 0; } diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c index 4bb967bc879e..4ad8031ab669 100644 --- a/drivers/net/ethernet/8390/mcf8390.c +++ b/drivers/net/ethernet/8390/mcf8390.c @@ -38,7 +38,6 @@ static const char version[] = #define NESM_START_PG 0x40 /* First page of TX buffer */ #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ -static u32 mcf8390_msg_enable; #ifdef NE2000_ODDOFFSET /* @@ -407,7 +406,6 @@ static int mcf8390_init(struct net_device *dev) static int mcf8390_probe(struct platform_device *pdev) { struct net_device *dev; - struct ei_device *ei_local; struct resource *mem, *irq; resource_size_t msize; int ret; @@ -435,8 +433,6 @@ static int mcf8390_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); platform_set_drvdata(pdev, dev); - ei_local = netdev_priv(dev); - ei_local->msg_enable = mcf8390_msg_enable; dev->irq = irq->start; dev->base_addr = mem->start; diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 66f47987e2a2..ac99d089ac72 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -77,7 +77,7 @@ static u32 ne_msg_enable; module_param_hw_array(io, int, ioport, NULL, 0); module_param_hw_array(irq, int, irq, NULL, 0); module_param_array(bad, int, NULL, 0); -module_param_named(msg_enable, ne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +module_param_named(msg_enable, ne_msg_enable, uint, 0444); MODULE_PARM_DESC(io, "I/O base address(es),required"); MODULE_PARM_DESC(irq, "IRQ number(s)"); MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures"); @@ -99,7 +99,7 @@ MODULE_LICENSE("GPL"); that the ne2k probe is the last 8390 based probe to take place (as it is at boot) and so the probe will get confused by any other 8390 cards. ISA device autoprobes on a running machine are not recommended anyway. */ -#if !defined(MODULE) && (defined(CONFIG_ISA) || defined(CONFIG_M32R)) +#if !defined(MODULE) && defined(CONFIG_ISA) /* Do we need a portlist for the ISA auto-probe ? */ #define NEEDS_PORTLIST #endif @@ -164,12 +164,7 @@ bad_clone_list[] __initdata = { #define NESM_START_PG 0x40 /* First page of TX buffer */ #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ -#if defined(CONFIG_PLAT_MAPPI) -# define DCR_VAL 0x4b -#elif defined(CONFIG_PLAT_OAKS32R) || \ - defined(CONFIG_MACH_TX49XX) -# define DCR_VAL 0x48 /* 8-bit mode */ -#elif defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */ +#if defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */ # define DCR_VAL (MACH_IS_ATARI ? 
0x48 : 0x49) #else # define DCR_VAL 0x49 @@ -422,12 +417,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) stop_page = NE1SM_STOP_PG; } -#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R) - neX000 = ((SA_prom[14] == 0x57 && SA_prom[15] == 0x57) - || (SA_prom[14] == 0x42 && SA_prom[15] == 0x42)); -#else neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57); -#endif ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d); copam = (SA_prom[14] == 0x49 && SA_prom[15] == 0x00); @@ -485,7 +475,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) mdelay(10); /* wait 10ms for interrupt to propagate */ outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */ dev->irq = probe_irq_off(cookie); - if (netif_msg_probe(ei_local)) + if (ne_msg_enable & NETIF_MSG_PROBE) pr_cont(" autoirq is %d", dev->irq); } else if (dev->irq == 2) /* Fixup for users that don't know that IRQ 2 is really IRQ 9, @@ -508,18 +498,9 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) dev->base_addr = ioaddr; -#ifdef CONFIG_PLAT_MAPPI - outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, - ioaddr + E8390_CMD); /* 0x61 */ - for (i = 0; i < ETH_ALEN; i++) { - dev->dev_addr[i] = SA_prom[i] - = inb_p(ioaddr + EN1_PHYS_SHIFT(i)); - } -#else for (i = 0; i < ETH_ALEN; i++) { dev->dev_addr[i] = SA_prom[i]; } -#endif pr_cont("%pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index 1bdea746926c..42985a82321a 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -76,7 +76,7 @@ MODULE_AUTHOR("Donald Becker / Paul Gortmaker"); MODULE_DESCRIPTION("PCI NE2000 clone driver"); MODULE_LICENSE("GPL"); -module_param_named(msg_enable, ne2k_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +module_param_named(msg_enable, ne2k_msg_enable, uint, 0444); module_param_array(options, int, NULL, 0); module_param_array(full_duplex, int, NULL, 0); MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index bcad4a7fac9f..61e43802b9a5 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -66,7 +66,6 @@ #define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */ static const char *if_names[] = { "auto", "10baseT", "10base2"}; -static u32 pcnet_msg_enable; /*====================================================================*/ @@ -556,7 +555,6 @@ static int pcnet_config(struct pcmcia_device *link) int start_pg, stop_pg, cm_offset; int has_shmem = 0; struct hw_info *local_hw_info; - struct ei_device *ei_local; dev_dbg(&link->dev, "pcnet_config\n"); @@ -606,8 +604,6 @@ static int pcnet_config(struct pcmcia_device *link) mii_phy_probe(dev); SET_NETDEV_DEV(dev, &link->dev); - ei_local = netdev_priv(dev); - ei_local->msg_enable = pcnet_msg_enable; if (register_netdev(dev) != 0) { pr_notice("register_netdev() failed\n"); diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c index 4e02f6a23575..3fe3b4dfa7c5 100644 --- a/drivers/net/ethernet/8390/smc-ultra.c +++ b/drivers/net/ethernet/8390/smc-ultra.c @@ -563,7 +563,7 @@ static int irq[MAX_ULTRA_CARDS]; module_param_hw_array(io, int, ioport, NULL, 0); module_param_hw_array(irq, int, irq, NULL, 0); -module_param_named(msg_enable, ultra_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +module_param_named(msg_enable, 
ultra_msg_enable, uint, 0444); MODULE_PARM_DESC(io, "I/O base address(es)"); MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c index aca957d4e121..1f0670cd3ea3 100644 --- a/drivers/net/ethernet/8390/stnic.c +++ b/drivers/net/ethernet/8390/stnic.c @@ -71,7 +71,7 @@ static void stnic_init (struct net_device *dev); static u32 stnic_msg_enable; -module_param_named(msg_enable, stnic_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +module_param_named(msg_enable, stnic_msg_enable, uint, 0444); MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); /* SH7750 specific read/write io. */ diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c index 6efa2722f850..c834123560f1 100644 --- a/drivers/net/ethernet/8390/wd.c +++ b/drivers/net/ethernet/8390/wd.c @@ -299,7 +299,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr) outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */ - if (netif_msg_drv(ei_local)) + if (wd_msg_enable & NETIF_MSG_PROBE) pr_cont(" autoirq is %d", dev->irq); if (dev->irq < 2) dev->irq = word16 ? 10 : 5; @@ -507,7 +507,7 @@ module_param_hw_array(io, int, ioport, NULL, 0); module_param_hw_array(irq, int, irq, NULL, 0); module_param_hw_array(mem, int, iomem, NULL, 0); module_param_hw_array(mem_end, int, iomem, NULL, 0); -module_param_named(msg_enable, wd_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +module_param_named(msg_enable, wd_msg_enable, uint, 0444); MODULE_PARM_DESC(io, "I/O base address(es)"); MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)"); MODULE_PARM_DESC(mem, "memory base address(es)(ignored for PureData boards)"); diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c index 6d93956b293b..35a500a21521 100644 --- a/drivers/net/ethernet/8390/zorro8390.c +++ b/drivers/net/ethernet/8390/zorro8390.c @@ -44,8 +44,6 @@ static const char version[] = "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; -static u32 zorro8390_msg_enable; - #include "lib8390.c" #define DRV_NAME "zorro8390" @@ -296,7 +294,6 @@ static int zorro8390_init(struct net_device *dev, unsigned long board, int err; unsigned char SA_prom[32]; int start_page, stop_page; - struct ei_device *ei_local = netdev_priv(dev); static u32 zorro8390_offsets[16] = { 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, @@ -388,8 +385,6 @@ static int zorro8390_init(struct net_device *dev, unsigned long board, dev->netdev_ops = &zorro8390_netdev_ops; __NS8390_init(dev, 0); - ei_local->msg_enable = zorro8390_msg_enable; - err = register_netdev(dev); if (err) { free_irq(IRQ_AMIGA_PORTS, dev); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index b6cf4b6962f5..603a5704dab8 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -34,7 +34,6 @@ source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" source "drivers/net/ethernet/aurora/Kconfig" source "drivers/net/ethernet/cadence/Kconfig" -source "drivers/net/ethernet/adi/Kconfig" source "drivers/net/ethernet/broadcom/Kconfig" source "drivers/net/ethernet/brocade/Kconfig" source "drivers/net/ethernet/calxeda/Kconfig" @@ -129,6 +128,7 @@ config FEALNX source "drivers/net/ethernet/natsemi/Kconfig" source 
"drivers/net/ethernet/netronome/Kconfig" +source "drivers/net/ethernet/ni/Kconfig" source "drivers/net/ethernet/8390/Kconfig" config NET_NETX @@ -176,7 +176,6 @@ source "drivers/net/ethernet/stmicro/Kconfig" source "drivers/net/ethernet/sun/Kconfig" source "drivers/net/ethernet/tehuti/Kconfig" source "drivers/net/ethernet/ti/Kconfig" -source "drivers/net/ethernet/tile/Kconfig" source "drivers/net/ethernet/toshiba/Kconfig" source "drivers/net/ethernet/tundra/Kconfig" source "drivers/net/ethernet/via/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 3cdf01e96e0b..2bfd2eea50bf 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -21,7 +21,6 @@ obj-$(CONFIG_NET_VENDOR_ARC) += arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/ obj-$(CONFIG_NET_CADENCE) += cadence/ -obj-$(CONFIG_NET_BFIN) += adi/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ @@ -61,6 +60,7 @@ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/ +obj-$(CONFIG_NET_VENDOR_NI) += ni/ obj-$(CONFIG_NET_NETX) += netx-eth.o obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/ @@ -88,7 +88,6 @@ obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/ obj-$(CONFIG_NET_VENDOR_SUN) += sun/ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/ obj-$(CONFIG_NET_VENDOR_TI) += ti/ -obj-$(CONFIG_TILE_NET) += tile/ obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/ obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/ obj-$(CONFIG_NET_VENDOR_VIA) += via/ diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig deleted file mode 100644 index 98cc8f535021..000000000000 --- a/drivers/net/ethernet/adi/Kconfig +++ /dev/null @@ -1,66 +0,0 @@ -# -# Blackfin device configuration -# - -config NET_BFIN - bool "Blackfin devices" - depends on BF516 || BF518 || BF526 || BF527 || BF536 || BF537 - ---help--- - If you have a network (Ethernet) card belonging to this class, say Y. - - If unsure, say Y. - - Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the remaining Blackfin card questions. If you say Y, you will be - asked for your specific card in the following questions. - -if NET_BFIN - -config BFIN_MAC - tristate "Blackfin on-chip MAC support" - depends on (BF516 || BF518 || BF526 || BF527 || BF536 || BF537) - select CRC32 - select MII - select PHYLIB - select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE - ---help--- - This is the driver for Blackfin on-chip mac device. Say Y if you want - it compiled into the kernel. This driver is also available as a - module ( = code which can be inserted in and removed from the running - kernel whenever you want). The module will be called bfin_mac. - -config BFIN_MAC_USE_L1 - bool "Use L1 memory for rx/tx packets" - depends on BFIN_MAC && (BF527 || BF537) - default y - ---help--- - To get maximum network performance, you should use L1 memory as rx/tx - buffers. Say N here if you want to reserve L1 memory for other uses. - -config BFIN_TX_DESC_NUM - int "Number of transmit buffer packets" - depends on BFIN_MAC - range 6 10 if BFIN_MAC_USE_L1 - range 10 100 - default "10" - ---help--- - Set the number of buffer packets used in driver. 
- -config BFIN_RX_DESC_NUM - int "Number of receive buffer packets" - depends on BFIN_MAC - range 20 64 - default "20" - ---help--- - Set the number of buffer packets used in driver. - -config BFIN_MAC_USE_HWSTAMP - bool "Use IEEE 1588 hwstamp" - depends on BFIN_MAC && BF518 - imply PTP_1588_CLOCK - default y - ---help--- - To support the IEEE 1588 Precision Time Protocol (PTP), select y here - -endif # NET_BFIN diff --git a/drivers/net/ethernet/adi/Makefile b/drivers/net/ethernet/adi/Makefile deleted file mode 100644 index b1fbe195d0e8..000000000000 --- a/drivers/net/ethernet/adi/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the Blackfin device drivers. -# - -obj-$(CONFIG_BFIN_MAC) += bfin_mac.o diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c deleted file mode 100644 index 7120f2b9c6ef..000000000000 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ /dev/null @@ -1,1881 +0,0 @@ -/* - * Blackfin On-Chip MAC Driver - * - * Copyright 2004-2010 Analog Devices Inc. - * - * Enter bugs at http://blackfin.uclinux.org/ - * - * Licensed under the GPL-2 or later. - */ - -#define DRV_VERSION "1.1" -#define DRV_DESC "Blackfin on-chip Ethernet MAC driver" - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/delay.h> -#include <linux/timer.h> -#include <linux/errno.h> -#include <linux/irq.h> -#include <linux/io.h> -#include <linux/ioport.h> -#include <linux/crc32.h> -#include <linux/device.h> -#include <linux/spinlock.h> -#include <linux/mii.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/skbuff.h> -#include <linux/platform_device.h> - -#include <asm/dma.h> -#include <linux/dma-mapping.h> - -#include <asm/div64.h> -#include <asm/dpmc.h> -#include <asm/blackfin.h> -#include <asm/cacheflush.h> -#include <asm/portmux.h> -#include <mach/pll.h> - -#include "bfin_mac.h" - -MODULE_AUTHOR("Bryan Wu, Luke Yang"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION(DRV_DESC); -MODULE_ALIAS("platform:bfin_mac"); - -#if defined(CONFIG_BFIN_MAC_USE_L1) -# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num) -# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr) -#else -# define bfin_mac_alloc(dma_handle, size, num) \ - dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL) -# define bfin_mac_free(dma_handle, ptr, num) \ - dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle) -#endif - -#define PKT_BUF_SZ 1580 - -#define MAX_TIMEOUT_CNT 500 - -/* pointers to maintain transmit list */ -static struct net_dma_desc_tx *tx_list_head; -static struct net_dma_desc_tx *tx_list_tail; -static struct net_dma_desc_rx *rx_list_head; -static struct net_dma_desc_rx *rx_list_tail; -static struct net_dma_desc_rx *current_rx_ptr; -static struct net_dma_desc_tx *current_tx_ptr; -static struct net_dma_desc_tx *tx_desc; -static struct net_dma_desc_rx *rx_desc; - -static void desc_list_free(void) -{ - struct net_dma_desc_rx *r; - struct net_dma_desc_tx *t; - int i; -#if !defined(CONFIG_BFIN_MAC_USE_L1) - dma_addr_t dma_handle = 0; -#endif - - if (tx_desc) { - t = tx_list_head; - for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) { - if (t) { - if (t->skb) { - dev_kfree_skb(t->skb); - t->skb = NULL; - } - t = t->next; - } - } - bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM); - } - - if (rx_desc) { - r = rx_list_head; - for (i = 0; i < 
CONFIG_BFIN_RX_DESC_NUM; i++) { - if (r) { - if (r->skb) { - dev_kfree_skb(r->skb); - r->skb = NULL; - } - r = r->next; - } - } - bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM); - } -} - -static int desc_list_init(struct net_device *dev) -{ - int i; - struct sk_buff *new_skb; -#if !defined(CONFIG_BFIN_MAC_USE_L1) - /* - * This dma_handle is useless in Blackfin dma_alloc_coherent(). - * The real dma handler is the return value of dma_alloc_coherent(). - */ - dma_addr_t dma_handle; -#endif - - tx_desc = bfin_mac_alloc(&dma_handle, - sizeof(struct net_dma_desc_tx), - CONFIG_BFIN_TX_DESC_NUM); - if (tx_desc == NULL) - goto init_error; - - rx_desc = bfin_mac_alloc(&dma_handle, - sizeof(struct net_dma_desc_rx), - CONFIG_BFIN_RX_DESC_NUM); - if (rx_desc == NULL) - goto init_error; - - /* init tx_list */ - tx_list_head = tx_list_tail = tx_desc; - - for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) { - struct net_dma_desc_tx *t = tx_desc + i; - struct dma_descriptor *a = &(t->desc_a); - struct dma_descriptor *b = &(t->desc_b); - - /* - * disable DMA - * read from memory WNR = 0 - * wordsize is 32 bits - * 6 half words is desc size - * large desc flow - */ - a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; - a->start_addr = (unsigned long)t->packet; - a->x_count = 0; - a->next_dma_desc = b; - - /* - * enabled DMA - * write to memory WNR = 1 - * wordsize is 32 bits - * disable interrupt - * 6 half words is desc size - * large desc flow - */ - b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; - b->start_addr = (unsigned long)(&(t->status)); - b->x_count = 0; - - t->skb = NULL; - tx_list_tail->desc_b.next_dma_desc = a; - tx_list_tail->next = t; - tx_list_tail = t; - } - tx_list_tail->next = tx_list_head; /* tx_list is a circle */ - tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a); - current_tx_ptr = tx_list_head; - - /* init rx_list */ - rx_list_head = rx_list_tail = rx_desc; - - for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) { - struct net_dma_desc_rx *r = rx_desc + i; - struct dma_descriptor *a = &(r->desc_a); - struct dma_descriptor *b = &(r->desc_b); - - /* allocate a new skb for next time receive */ - new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); - if (!new_skb) - goto init_error; - - skb_reserve(new_skb, NET_IP_ALIGN); - /* Invalidate the data cache of skb->data range when it is write back - * cache. 
It will prevent overwriting the new data from DMA - */ - blackfin_dcache_invalidate_range((unsigned long)new_skb->head, - (unsigned long)new_skb->end); - r->skb = new_skb; - - /* - * enabled DMA - * write to memory WNR = 1 - * wordsize is 32 bits - * disable interrupt - * 6 half words is desc size - * large desc flow - */ - a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; - /* since RXDWA is enabled */ - a->start_addr = (unsigned long)new_skb->data - 2; - a->x_count = 0; - a->next_dma_desc = b; - - /* - * enabled DMA - * write to memory WNR = 1 - * wordsize is 32 bits - * enable interrupt - * 6 half words is desc size - * large desc flow - */ - b->config = DMAEN | WNR | WDSIZE_32 | DI_EN | - NDSIZE_6 | DMAFLOW_LARGE; - b->start_addr = (unsigned long)(&(r->status)); - b->x_count = 0; - - rx_list_tail->desc_b.next_dma_desc = a; - rx_list_tail->next = r; - rx_list_tail = r; - } - rx_list_tail->next = rx_list_head; /* rx_list is a circle */ - rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a); - current_rx_ptr = rx_list_head; - - return 0; - -init_error: - desc_list_free(); - pr_err("kmalloc failed\n"); - return -ENOMEM; -} - - -/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ - -/* - * MII operations - */ -/* Wait until the previous MDC/MDIO transaction has completed */ -static int bfin_mdio_poll(void) -{ - int timeout_cnt = MAX_TIMEOUT_CNT; - - /* poll the STABUSY bit */ - while ((bfin_read_EMAC_STAADD()) & STABUSY) { - udelay(1); - if (timeout_cnt-- < 0) { - pr_err("wait MDC/MDIO transaction to complete timeout\n"); - return -ETIMEDOUT; - } - } - - return 0; -} - -/* Read an off-chip register in a PHY through the MDC/MDIO port */ -static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) -{ - int ret; - - ret = bfin_mdio_poll(); - if (ret) - return ret; - - /* read mode */ - bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) | - SET_REGAD((u16) regnum) | - STABUSY); - - ret = bfin_mdio_poll(); - if (ret) - return ret; - - return (int) bfin_read_EMAC_STADAT(); -} - -/* Write an off-chip register in a PHY through the MDC/MDIO port */ -static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, - u16 value) -{ - int ret; - - ret = bfin_mdio_poll(); - if (ret) - return ret; - - bfin_write_EMAC_STADAT((u32) value); - - /* write mode */ - bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) | - SET_REGAD((u16) regnum) | - STAOP | - STABUSY); - - return bfin_mdio_poll(); -} - -static void bfin_mac_adjust_link(struct net_device *dev) -{ - struct bfin_mac_local *lp = netdev_priv(dev); - struct phy_device *phydev = dev->phydev; - unsigned long flags; - int new_state = 0; - - spin_lock_irqsave(&lp->lock, flags); - if (phydev->link) { - /* Now we make sure that we can be in full duplex mode. - * If not, we operate in half-duplex mode. */ - if (phydev->duplex != lp->old_duplex) { - u32 opmode = bfin_read_EMAC_OPMODE(); - new_state = 1; - - if (phydev->duplex) - opmode |= FDMODE; - else - opmode &= ~(FDMODE); - - bfin_write_EMAC_OPMODE(opmode); - lp->old_duplex = phydev->duplex; - } - - if (phydev->speed != lp->old_speed) { - if (phydev->interface == PHY_INTERFACE_MODE_RMII) { - u32 opmode = bfin_read_EMAC_OPMODE(); - switch (phydev->speed) { - case 10: - opmode |= RMII_10; - break; - case 100: - opmode &= ~RMII_10; - break; - default: - netdev_warn(dev, - "Ack! 
Speed (%d) is not 10/100!\n", - phydev->speed); - break; - } - bfin_write_EMAC_OPMODE(opmode); - } - - new_state = 1; - lp->old_speed = phydev->speed; - } - - if (!lp->old_link) { - new_state = 1; - lp->old_link = 1; - } - } else if (lp->old_link) { - new_state = 1; - lp->old_link = 0; - lp->old_speed = 0; - lp->old_duplex = -1; - } - - if (new_state) { - u32 opmode = bfin_read_EMAC_OPMODE(); - phy_print_status(phydev); - pr_debug("EMAC_OPMODE = 0x%08x\n", opmode); - } - - spin_unlock_irqrestore(&lp->lock, flags); -} - -/* MDC = 2.5 MHz */ -#define MDC_CLK 2500000 - -static int mii_probe(struct net_device *dev, int phy_mode) -{ - struct bfin_mac_local *lp = netdev_priv(dev); - struct phy_device *phydev; - unsigned short sysctl; - u32 sclk, mdc_div; - - /* Enable PHY output early */ - if (!(bfin_read_VR_CTL() & CLKBUFOE)) - bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE); - - sclk = get_sclk(); - mdc_div = ((sclk / MDC_CLK) / 2) - 1; - - sysctl = bfin_read_EMAC_SYSCTL(); - sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div); - bfin_write_EMAC_SYSCTL(sysctl); - - phydev = phy_find_first(lp->mii_bus); - if (!phydev) { - netdev_err(dev, "no phy device found\n"); - return -ENODEV; - } - - if (phy_mode != PHY_INTERFACE_MODE_RMII && - phy_mode != PHY_INTERFACE_MODE_MII) { - netdev_err(dev, "invalid phy interface mode\n"); - return -EINVAL; - } - - phydev = phy_connect(dev, phydev_name(phydev), - &bfin_mac_adjust_link, phy_mode); - - if (IS_ERR(phydev)) { - netdev_err(dev, "could not attach PHY\n"); - return PTR_ERR(phydev); - } - - /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_Autoneg - | SUPPORTED_Pause | SUPPORTED_Asym_Pause - | SUPPORTED_MII - | SUPPORTED_TP); - - phydev->advertising = phydev->supported; - - lp->old_link = 0; - lp->old_speed = 0; - lp->old_duplex = -1; - - phy_attached_print(phydev, "mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", - MDC_CLK, mdc_div, sclk / 1000000); - - return 0; -} - -/* - * Ethtool support - */ - -/* - * interrupt routine for magic packet wakeup - */ -static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id) -{ - return IRQ_HANDLED; -} - -static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); - strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info)); -} - -static void bfin_mac_ethtool_getwol(struct net_device *dev, - struct ethtool_wolinfo *wolinfo) -{ - struct bfin_mac_local *lp = netdev_priv(dev); - - wolinfo->supported = WAKE_MAGIC; - wolinfo->wolopts = lp->wol; -} - -static int bfin_mac_ethtool_setwol(struct net_device *dev, - struct ethtool_wolinfo *wolinfo) -{ - struct bfin_mac_local *lp = netdev_priv(dev); - int rc; - - if (wolinfo->wolopts & (WAKE_MAGICSECURE | - WAKE_UCAST | - WAKE_MCAST | - WAKE_BCAST | - WAKE_ARP)) - return -EOPNOTSUPP; - - lp->wol = wolinfo->wolopts; - - if (lp->wol && !lp->irq_wake_requested) { - /* register wake irq handler */ - rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt, - 0, "EMAC_WAKE", dev); - if (rc) - return rc; - lp->irq_wake_requested = true; - } - - if (!lp->wol && lp->irq_wake_requested) { - free_irq(IRQ_MAC_WAKEDET, dev); - lp->irq_wake_requested = false; - } - - /* Make sure the PHY driver doesn't suspend */ - 
device_init_wakeup(&dev->dev, lp->wol); - - return 0; -} - -#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP -static int bfin_mac_ethtool_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) -{ - struct bfin_mac_local *lp = netdev_priv(dev); - - info->so_timestamping = - SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = lp->phc_index; - info->tx_types = - (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); - info->rx_filters = - (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); - return 0; -} -#endif - -static const struct ethtool_ops bfin_mac_ethtool_ops = { - .get_link = ethtool_op_get_link, - .get_drvinfo = bfin_mac_ethtool_getdrvinfo, - .get_wol = bfin_mac_ethtool_getwol, - .set_wol = bfin_mac_ethtool_setwol, -#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP - .get_ts_info = bfin_mac_ethtool_get_ts_info, -#endif - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, -}; - -/**************************************************************************/ -static void setup_system_regs(struct net_device *dev) -{ - struct bfin_mac_local *lp = netdev_priv(dev); - int i; - unsigned short sysctl; - - /* - * Odd word alignment for Receive Frame DMA word - * Configure checksum support and rcve frame word alignment - */ - sysctl = bfin_read_EMAC_SYSCTL(); - /* - * check if interrupt is requested for any PHY, - * enable PHY interrupt only if needed - */ - for (i = 0; i < PHY_MAX_ADDR; ++i) - if (lp->mii_bus->irq[i] != PHY_POLL) - break; - if (i < PHY_MAX_ADDR) - sysctl |= PHYIE; - sysctl |= RXDWA; -#if defined(BFIN_MAC_CSUM_OFFLOAD) - sysctl |= RXCKS; -#else - sysctl &= ~RXCKS; -#endif - bfin_write_EMAC_SYSCTL(sysctl); - - bfin_write_EMAC_MMC_CTL(RSTC | CROLL); - - /* Set vlan regs to let 1522 bytes long packets pass through */ - bfin_write_EMAC_VLAN1(lp->vlan1_mask); - bfin_write_EMAC_VLAN2(lp->vlan2_mask); - - /* Initialize the TX DMA channel registers */ - bfin_write_DMA2_X_COUNT(0); - bfin_write_DMA2_X_MODIFY(4); - bfin_write_DMA2_Y_COUNT(0); - bfin_write_DMA2_Y_MODIFY(0); - - /* Initialize the RX DMA channel registers */ - bfin_write_DMA1_X_COUNT(0); - bfin_write_DMA1_X_MODIFY(4); - bfin_write_DMA1_Y_COUNT(0); - bfin_write_DMA1_Y_MODIFY(0); -} - -static void setup_mac_addr(u8 *mac_addr) -{ - u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]); - u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]); - - /* this depends on a little-endian machine */ - bfin_write_EMAC_ADDRLO(addr_low); - bfin_write_EMAC_ADDRHI(addr_hi); -} - -static int bfin_mac_set_mac_address(struct net_device *dev, void *p) -{ - struct sockaddr *addr = p; - if (netif_running(dev)) - return -EBUSY; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - setup_mac_addr(dev->dev_addr); - return 0; -} - -#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP -#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE) - -static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result) -{ - u32 ipn = 1000000000UL / input_clk; - u32 ppn = 1; - unsigned int shift = 0; - - while (ppn <= ipn) { - ppn <<= 1; - shift++; - } - *shift_result = shift; - return 1000000000UL / ppn; -} - -static int bfin_mac_hwtstamp_set(struct net_device *netdev, - struct ifreq *ifr) -{ - struct hwtstamp_config config; - struct bfin_mac_local *lp = netdev_priv(netdev); - u16 ptpctl; - u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff; - - if 
(copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n", - __func__, config.flags, config.tx_type, config.rx_filter); - - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - - if ((config.tx_type != HWTSTAMP_TX_OFF) && - (config.tx_type != HWTSTAMP_TX_ON)) - return -ERANGE; - - ptpctl = bfin_read_EMAC_PTP_CTL(); - - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - /* - * Dont allow any timestamping - */ - ptpfv3 = 0xFFFFFFFF; - bfin_write_EMAC_PTP_FV3(ptpfv3); - break; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - /* - * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL) - * to enable all the field matches. - */ - ptpctl &= ~0x1F00; - bfin_write_EMAC_PTP_CTL(ptpctl); - /* - * Keep the default values of the EMAC_PTP_FOFF register. - */ - ptpfoff = 0x4A24170C; - bfin_write_EMAC_PTP_FOFF(ptpfoff); - /* - * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2 - * registers. - */ - ptpfv1 = 0x11040800; - bfin_write_EMAC_PTP_FV1(ptpfv1); - ptpfv2 = 0x0140013F; - bfin_write_EMAC_PTP_FV2(ptpfv2); - /* - * The default value (0xFFFC) allows the timestamping of both - * received Sync messages and Delay_Req messages. - */ - ptpfv3 = 0xFFFFFFFC; - bfin_write_EMAC_PTP_FV3(ptpfv3); - - config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - break; - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - /* Clear all five comparison mask bits (bits[12:8]) in the - * EMAC_PTP_CTL register to enable all the field matches. - */ - ptpctl &= ~0x1F00; - bfin_write_EMAC_PTP_CTL(ptpctl); - /* - * Keep the default values of the EMAC_PTP_FOFF register, except set - * the PTPCOF field to 0x2A. - */ - ptpfoff = 0x2A24170C; - bfin_write_EMAC_PTP_FOFF(ptpfoff); - /* - * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2 - * registers. - */ - ptpfv1 = 0x11040800; - bfin_write_EMAC_PTP_FV1(ptpfv1); - ptpfv2 = 0x0140013F; - bfin_write_EMAC_PTP_FV2(ptpfv2); - /* - * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set - * the value to 0xFFF0. - */ - ptpfv3 = 0xFFFFFFF0; - bfin_write_EMAC_PTP_FV3(ptpfv3); - - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; - break; - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - /* - * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the - * EFTM and PTPCM field comparison. - */ - ptpctl &= ~0x1100; - bfin_write_EMAC_PTP_CTL(ptpctl); - /* - * Keep the default values of all the fields of the EMAC_PTP_FOFF - * register, except set the PTPCOF field to 0x0E. - */ - ptpfoff = 0x0E24170C; - bfin_write_EMAC_PTP_FOFF(ptpfoff); - /* - * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which - * corresponds to PTP messages on the MAC layer. - */ - ptpfv1 = 0x110488F7; - bfin_write_EMAC_PTP_FV1(ptpfv1); - ptpfv2 = 0x0140013F; - bfin_write_EMAC_PTP_FV2(ptpfv2); - /* - * To allow the timestamping of Pdelay_Req and Pdelay_Resp - * messages, set the value to 0xFFF0. 
- */ - ptpfv3 = 0xFFFFFFF0; - bfin_write_EMAC_PTP_FV3(ptpfv3); - - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; - break; - default: - return -ERANGE; - } - - if (config.tx_type == HWTSTAMP_TX_OFF && - bfin_mac_hwtstamp_is_none(config.rx_filter)) { - ptpctl &= ~PTP_EN; - bfin_write_EMAC_PTP_CTL(ptpctl); - - SSYNC(); - } else { - ptpctl |= PTP_EN; - bfin_write_EMAC_PTP_CTL(ptpctl); - - /* - * clear any existing timestamp - */ - bfin_read_EMAC_PTP_RXSNAPLO(); - bfin_read_EMAC_PTP_RXSNAPHI(); - - bfin_read_EMAC_PTP_TXSNAPLO(); - bfin_read_EMAC_PTP_TXSNAPHI(); - - SSYNC(); - } - - lp->stamp_cfg = config; - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} - -static int bfin_mac_hwtstamp_get(struct net_device *netdev, - struct ifreq *ifr) -{ - struct bfin_mac_local *lp = netdev_priv(netdev); - - return copy_to_user(ifr->ifr_data, &lp->stamp_cfg, - sizeof(lp->stamp_cfg)) ? - -EFAULT : 0; -} - -static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb) -{ - struct bfin_mac_local *lp = netdev_priv(netdev); - - if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { - int timeout_cnt = MAX_TIMEOUT_CNT; - - /* When doing time stamping, keep the connection to the socket - * a while longer - */ - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - - /* - * The timestamping is done at the EMAC module's MII/RMII interface - * when the module sees the Start of Frame of an event message packet. This - * interface is the closest possible place to the physical Ethernet transmission - * medium, providing the best timing accuracy. - */ - while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) - udelay(1); - if (timeout_cnt == 0) - netdev_err(netdev, "timestamp the TX packet failed\n"); - else { - struct skb_shared_hwtstamps shhwtstamps; - u64 ns; - u64 regval; - - regval = bfin_read_EMAC_PTP_TXSNAPLO(); - regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32; - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - ns = regval << lp->shift; - shhwtstamps.hwtstamp = ns_to_ktime(ns); - skb_tstamp_tx(skb, &shhwtstamps); - } - } -} - -static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb) -{ - struct bfin_mac_local *lp = netdev_priv(netdev); - u32 valid; - u64 regval, ns; - struct skb_shared_hwtstamps *shhwtstamps; - - if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter)) - return; - - valid = bfin_read_EMAC_PTP_ISTAT() & RXEL; - if (!valid) - return; - - shhwtstamps = skb_hwtstamps(skb); - - regval = bfin_read_EMAC_PTP_RXSNAPLO(); - regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32; - ns = regval << lp->shift; - memset(shhwtstamps, 0, sizeof(*shhwtstamps)); - shhwtstamps->hwtstamp = ns_to_ktime(ns); -} - -static void bfin_mac_hwtstamp_init(struct net_device *netdev) -{ - struct bfin_mac_local *lp = netdev_priv(netdev); - u64 addend, ppb; - u32 input_clk, phc_clk; - - /* Initialize hardware timer */ - input_clk = get_sclk(); - phc_clk = bfin_select_phc_clock(input_clk, &lp->shift); - addend = phc_clk * (1ULL << 32); - do_div(addend, input_clk); - bfin_write_EMAC_PTP_ADDEND((u32)addend); - - lp->addend = addend; - ppb = 1000000000ULL * input_clk; - do_div(ppb, phc_clk); - lp->max_ppb = ppb - 1000000000ULL - 1ULL; - - /* Initialize hwstamp config */ - lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE; - lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF; -} - -static u64 bfin_ptp_time_read(struct bfin_mac_local *lp) -{ - u64 ns; - u32 lo, hi; - - lo = bfin_read_EMAC_PTP_TIMELO(); - hi = bfin_read_EMAC_PTP_TIMEHI(); - - ns = ((u64) hi) << 32; - ns |= lo; - ns 
<<= lp->shift; - - return ns; -} - -static void bfin_ptp_time_write(struct bfin_mac_local *lp, u64 ns) -{ - u32 hi, lo; - - ns >>= lp->shift; - hi = ns >> 32; - lo = ns & 0xffffffff; - - bfin_write_EMAC_PTP_TIMELO(lo); - bfin_write_EMAC_PTP_TIMEHI(hi); -} - -/* PTP Hardware Clock operations */ - -static int bfin_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) -{ - u64 adj; - u32 diff, addend; - int neg_adj = 0; - struct bfin_mac_local *lp = - container_of(ptp, struct bfin_mac_local, caps); - - if (ppb < 0) { - neg_adj = 1; - ppb = -ppb; - } - addend = lp->addend; - adj = addend; - adj *= ppb; - diff = div_u64(adj, 1000000000ULL); - - addend = neg_adj ? addend - diff : addend + diff; - - bfin_write_EMAC_PTP_ADDEND(addend); - - return 0; -} - -static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) -{ - s64 now; - unsigned long flags; - struct bfin_mac_local *lp = - container_of(ptp, struct bfin_mac_local, caps); - - spin_lock_irqsave(&lp->phc_lock, flags); - - now = bfin_ptp_time_read(lp); - now += delta; - bfin_ptp_time_write(lp, now); - - spin_unlock_irqrestore(&lp->phc_lock, flags); - - return 0; -} - -static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) -{ - u64 ns; - unsigned long flags; - struct bfin_mac_local *lp = - container_of(ptp, struct bfin_mac_local, caps); - - spin_lock_irqsave(&lp->phc_lock, flags); - - ns = bfin_ptp_time_read(lp); - - spin_unlock_irqrestore(&lp->phc_lock, flags); - - *ts = ns_to_timespec64(ns); - - return 0; -} - -static int bfin_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) -{ - u64 ns; - unsigned long flags; - struct bfin_mac_local *lp = - container_of(ptp, struct bfin_mac_local, caps); - - ns = timespec64_to_ns(ts); - - spin_lock_irqsave(&lp->phc_lock, flags); - - bfin_ptp_time_write(lp, ns); - - spin_unlock_irqrestore(&lp->phc_lock, flags); - - return 0; -} - -static int bfin_ptp_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) -{ - return -EOPNOTSUPP; -} - -static const struct ptp_clock_info bfin_ptp_caps = { - .owner = THIS_MODULE, - .name = "BF518 clock", - .max_adj = 0, - .n_alarm = 0, - .n_ext_ts = 0, - .n_per_out = 0, - .n_pins = 0, - .pps = 0, - .adjfreq = bfin_ptp_adjfreq, - .adjtime = bfin_ptp_adjtime, - .gettime64 = bfin_ptp_gettime, - .settime64 = bfin_ptp_settime, - .enable = bfin_ptp_enable, -}; - -static int bfin_phc_init(struct net_device *netdev, struct device *dev) -{ - struct bfin_mac_local *lp = netdev_priv(netdev); - - lp->caps = bfin_ptp_caps; - lp->caps.max_adj = lp->max_ppb; - lp->clock = ptp_clock_register(&lp->caps, dev); - if (IS_ERR(lp->clock)) - return PTR_ERR(lp->clock); - - lp->phc_index = ptp_clock_index(lp->clock); - spin_lock_init(&lp->phc_lock); - - return 0; -} - -static void bfin_phc_release(struct bfin_mac_local *lp) -{ - ptp_clock_unregister(lp->clock); -} - -#else -# define bfin_mac_hwtstamp_is_none(cfg) 0 -# define bfin_mac_hwtstamp_init(dev) -# define bfin_mac_hwtstamp_set(dev, ifr) (-EOPNOTSUPP) -# define bfin_mac_hwtstamp_get(dev, ifr) (-EOPNOTSUPP) -# define bfin_rx_hwtstamp(dev, skb) -# define bfin_tx_hwtstamp(dev, skb) -# define bfin_phc_init(netdev, dev) 0 -# define bfin_phc_release(lp) -#endif - -static inline void _tx_reclaim_skb(void) -{ - do { - tx_list_head->desc_a.config &= ~DMAEN; - tx_list_head->status.status_word = 0; - if (tx_list_head->skb) { - dev_consume_skb_any(tx_list_head->skb); - tx_list_head->skb = NULL; - } - tx_list_head = tx_list_head->next; - - } while (tx_list_head->status.status_word != 0); -} - 
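
The bfin_ptp_adjfreq() callback above scales the hardware addend register by a signed parts-per-billion value. The same arithmetic distilled into a standalone sketch (function name hypothetical): diff = addend * |ppb| / 10^9, then added or subtracted depending on the sign.

#include <stdint.h>
#include <stdio.h>

static uint32_t scale_addend(uint32_t addend, int32_t ppb)
{
	int neg = ppb < 0;
	uint64_t adj = (uint64_t)addend * (uint32_t)(neg ? -ppb : ppb);
	uint32_t diff = (uint32_t)(adj / 1000000000ULL);

	return neg ? addend - diff : addend + diff;
}

int main(void)
{
	/* slow a nominal 2^31 addend down by 100 ppb */
	printf("0x%08x\n", scale_addend(0x80000000u, -100));	/* 0x7fffff2a */
	return 0;
}
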
-static void tx_reclaim_skb(struct bfin_mac_local *lp) -{ - int timeout_cnt = MAX_TIMEOUT_CNT; - - if (tx_list_head->status.status_word != 0) - _tx_reclaim_skb(); - - if (current_tx_ptr->next == tx_list_head) { - while (tx_list_head->status.status_word == 0) { - /* slow down polling to avoid too many queue stop. */ - udelay(10); - /* reclaim skb if DMA is not running. */ - if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) - break; - if (timeout_cnt-- < 0) - break; - } - - if (timeout_cnt >= 0) - _tx_reclaim_skb(); - else - netif_stop_queue(lp->ndev); - } - - if (current_tx_ptr->next != tx_list_head && - netif_queue_stopped(lp->ndev)) - netif_wake_queue(lp->ndev); - - if (tx_list_head != current_tx_ptr) { - /* shorten the timer interval if tx queue is stopped */ - if (netif_queue_stopped(lp->ndev)) - lp->tx_reclaim_timer.expires = - jiffies + (TX_RECLAIM_JIFFIES >> 4); - else - lp->tx_reclaim_timer.expires = - jiffies + TX_RECLAIM_JIFFIES; - - mod_timer(&lp->tx_reclaim_timer, - lp->tx_reclaim_timer.expires); - } - - return; -} - -static void tx_reclaim_skb_timeout(struct timer_list *t) -{ - struct bfin_mac_local *lp = from_timer(lp, t, tx_reclaim_timer); - - tx_reclaim_skb(lp); -} - -static int bfin_mac_hard_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct bfin_mac_local *lp = netdev_priv(dev); - u16 *data; - u32 data_align = (unsigned long)(skb->data) & 0x3; - - current_tx_ptr->skb = skb; - - if (data_align == 0x2) { - /* move skb->data to current_tx_ptr payload */ - data = (u16 *)(skb->data) - 1; - *data = (u16)(skb->len); - /* - * When transmitting an Ethernet packet, the PTP_TSYNC module requires - * a DMA_Length_Word field associated with the packet. The lower 12 bits - * of this field are the length of the packet payload in bytes and the higher - * 4 bits are the timestamping enable field. - */ - if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) - *data |= 0x1000; - - current_tx_ptr->desc_a.start_addr = (u32)data; - /* this is important! 
*/ - blackfin_dcache_flush_range((u32)data, - (u32)((u8 *)data + skb->len + 4)); - } else { - *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len); - /* enable timestamping for the sent packet */ - if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) - *((u16 *)(current_tx_ptr->packet)) |= 0x1000; - memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data, - skb->len); - current_tx_ptr->desc_a.start_addr = - (u32)current_tx_ptr->packet; - blackfin_dcache_flush_range( - (u32)current_tx_ptr->packet, - (u32)(current_tx_ptr->packet + skb->len + 2)); - } - - /* make sure the internal data buffers in the core are drained - * so that the DMA descriptors are completely written when the - * DMA engine goes to fetch them below - */ - SSYNC(); - - /* always clear status buffer before start tx dma */ - current_tx_ptr->status.status_word = 0; - - /* enable this packet's dma */ - current_tx_ptr->desc_a.config |= DMAEN; - - /* tx dma is running, just return */ - if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN) - goto out; - - /* tx dma is not running */ - bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a)); - /* dma enabled, read from memory, size is 6 */ - bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config); - /* Turn on the EMAC tx */ - bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE); - -out: - bfin_tx_hwtstamp(dev, skb); - - current_tx_ptr = current_tx_ptr->next; - dev->stats.tx_packets++; - dev->stats.tx_bytes += (skb->len); - - tx_reclaim_skb(lp); - - return NETDEV_TX_OK; -} - -#define IP_HEADER_OFF 0 -#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \ - RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE) - -static void bfin_mac_rx(struct bfin_mac_local *lp) -{ - struct net_device *dev = lp->ndev; - struct sk_buff *skb, *new_skb; - unsigned short len; -#if defined(BFIN_MAC_CSUM_OFFLOAD) - unsigned int i; - unsigned char fcs[ETH_FCS_LEN + 1]; -#endif - - /* check if frame status word reports an error condition - * we which case we simply drop the packet - */ - if (current_rx_ptr->status.status_word & RX_ERROR_MASK) { - netdev_notice(dev, "rx: receive error - packet dropped\n"); - dev->stats.rx_dropped++; - goto out; - } - - /* allocate a new skb for next time receive */ - skb = current_rx_ptr->skb; - - new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); - if (!new_skb) { - dev->stats.rx_dropped++; - goto out; - } - /* reserve 2 bytes for RXDWA padding */ - skb_reserve(new_skb, NET_IP_ALIGN); - /* Invalidate the data cache of skb->data range when it is write back - * cache. It will prevent overwriting the new data from DMA - */ - blackfin_dcache_invalidate_range((unsigned long)new_skb->head, - (unsigned long)new_skb->end); - - current_rx_ptr->skb = new_skb; - current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2; - - len = (unsigned short)(current_rx_ptr->status.status_word & RX_FRLEN); - /* Deduce Ethernet FCS length from Ethernet payload length */ - len -= ETH_FCS_LEN; - skb_put(skb, len); - - skb->protocol = eth_type_trans(skb, dev); - - bfin_rx_hwtstamp(dev, skb); - -#if defined(BFIN_MAC_CSUM_OFFLOAD) - /* Checksum offloading only works for IPv4 packets with the standard IP header - * length of 20 bytes, because the blackfin MAC checksum calculation is - * based on that assumption. We must NOT use the calculated checksum if our - * IP version or header break that assumption. - */ - if (skb->data[IP_HEADER_OFF] == 0x45) { - skb->csum = current_rx_ptr->status.ip_payload_csum; - /* - * Deduce Ethernet FCS from hardware generated IP payload checksum. 
- * IP checksum is based on 16-bit one's complement algorithm. - * To deduce a value from checksum is equal to add its inversion. - * If the IP payload len is odd, the inversed FCS should also - * begin from odd address and leave first byte zero. - */ - if (skb->len % 2) { - fcs[0] = 0; - for (i = 0; i < ETH_FCS_LEN; i++) - fcs[i + 1] = ~skb->data[skb->len + i]; - skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum); - } else { - for (i = 0; i < ETH_FCS_LEN; i++) - fcs[i] = ~skb->data[skb->len + i]; - skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum); - } - skb->ip_summed = CHECKSUM_COMPLETE; - } -#endif - - napi_gro_receive(&lp->napi, skb); - - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; -out: - current_rx_ptr->status.status_word = 0x00000000; - current_rx_ptr = current_rx_ptr->next; -} - -static int bfin_mac_poll(struct napi_struct *napi, int budget) -{ - int i = 0; - struct bfin_mac_local *lp = container_of(napi, - struct bfin_mac_local, - napi); - - while (current_rx_ptr->status.status_word != 0 && i < budget) { - bfin_mac_rx(lp); - i++; - } - - if (i < budget) { - napi_complete_done(napi, i); - if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags)) - enable_irq(IRQ_MAC_RX); - } - - return i; -} - -/* interrupt routine to handle rx and error signal */ -static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id) -{ - struct bfin_mac_local *lp = netdev_priv(dev_id); - u32 status; - - status = bfin_read_DMA1_IRQ_STATUS(); - - bfin_write_DMA1_IRQ_STATUS(status | DMA_DONE | DMA_ERR); - if (status & DMA_DONE) { - disable_irq_nosync(IRQ_MAC_RX); - set_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags); - napi_schedule(&lp->napi); - } - - return IRQ_HANDLED; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void bfin_mac_poll_controller(struct net_device *dev) -{ - struct bfin_mac_local *lp = netdev_priv(dev); - - bfin_mac_interrupt(IRQ_MAC_RX, dev); - tx_reclaim_skb(lp); -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - -static void bfin_mac_disable(void) -{ - unsigned int opmode; - - opmode = bfin_read_EMAC_OPMODE(); - opmode &= (~RE); - opmode &= (~TE); - /* Turn off the EMAC */ - bfin_write_EMAC_OPMODE(opmode); -} - -/* - * Enable Interrupts, Receive, and Transmit - */ -static int bfin_mac_enable(struct phy_device *phydev) -{ - int ret; - u32 opmode; - - pr_debug("%s\n", __func__); - - /* Set RX DMA */ - bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); - bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config); - - /* Wait MII done */ - ret = bfin_mdio_poll(); - if (ret) - return ret; - - /* We enable only RX here */ - /* ASTP : Enable Automatic Pad Stripping - PR : Promiscuous Mode for test - PSF : Receive frames with total length less than 64 bytes. - FDMODE : Full Duplex Mode - LB : Internal Loopback for test - RE : Receiver Enable */ - opmode = bfin_read_EMAC_OPMODE(); - if (opmode & FDMODE) - opmode |= PSF; - else - opmode |= DRO | DC | PSF; - opmode |= RE; - - if (phydev->interface == PHY_INTERFACE_MODE_RMII) { - opmode |= RMII; /* For Now only 100MBit are supported */ -#if defined(CONFIG_BF537) || defined(CONFIG_BF536) - if (__SILICON_REVISION__ < 3) { - /* - * This isn't publicly documented (fun times!), but in - * silicon <=0.2, the RX and TX pins are clocked together. - * So in order to recv, we must enable the transmit side - * as well. This will cause a spurious TX interrupt too, - * but we can easily consume that. - */ - opmode |= TE; - } -#endif - } - - /* Turn on the EMAC rx */ - bfin_write_EMAC_OPMODE(opmode); - - return 0; -} - -/* Our watchdog timed out. 
Called by the networking layer */ -static void bfin_mac_timeout(struct net_device *dev) -{ - struct bfin_mac_local *lp = netdev_priv(dev); - - pr_debug("%s: %s\n", dev->name, __func__); - - bfin_mac_disable(); - - del_timer(&lp->tx_reclaim_timer); - - /* reset tx queue and free skb */ - while (tx_list_head != current_tx_ptr) { - tx_list_head->desc_a.config &= ~DMAEN; - tx_list_head->status.status_word = 0; - if (tx_list_head->skb) { - dev_kfree_skb(tx_list_head->skb); - tx_list_head->skb = NULL; - } - tx_list_head = tx_list_head->next; - } - - if (netif_queue_stopped(dev)) - netif_wake_queue(dev); - - bfin_mac_enable(dev->phydev); - - /* We can accept TX packets again */ - netif_trans_update(dev); /* prevent tx timeout */ -} - -static void bfin_mac_multicast_hash(struct net_device *dev) -{ - u32 emac_hashhi, emac_hashlo; - struct netdev_hw_addr *ha; - u32 crc; - - emac_hashhi = emac_hashlo = 0; - - netdev_for_each_mc_addr(ha, dev) { - crc = ether_crc(ETH_ALEN, ha->addr); - crc >>= 26; - - if (crc & 0x20) - emac_hashhi |= 1 << (crc & 0x1f); - else - emac_hashlo |= 1 << (crc & 0x1f); - } - - bfin_write_EMAC_HASHHI(emac_hashhi); - bfin_write_EMAC_HASHLO(emac_hashlo); -} - -/* - * This routine will, depending on the values passed to it, - * either make it accept multicast packets, go into - * promiscuous mode (for TCPDUMP and cousins) or accept - * a select set of multicast packets - */ -static void bfin_mac_set_multicast_list(struct net_device *dev) -{ - u32 sysctl; - - if (dev->flags & IFF_PROMISC) { - netdev_info(dev, "set promisc mode\n"); - sysctl = bfin_read_EMAC_OPMODE(); - sysctl |= PR; - bfin_write_EMAC_OPMODE(sysctl); - } else if (dev->flags & IFF_ALLMULTI) { - /* accept all multicast */ - sysctl = bfin_read_EMAC_OPMODE(); - sysctl |= PAM; - bfin_write_EMAC_OPMODE(sysctl); - } else if (!netdev_mc_empty(dev)) { - /* set up multicast hash table */ - sysctl = bfin_read_EMAC_OPMODE(); - sysctl |= HM; - bfin_write_EMAC_OPMODE(sysctl); - bfin_mac_multicast_hash(dev); - } else { - /* clear promisc or multicast mode */ - sysctl = bfin_read_EMAC_OPMODE(); - sysctl &= ~(RAF | PAM); - bfin_write_EMAC_OPMODE(sysctl); - } -} - -static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - if (!netif_running(netdev)) - return -EINVAL; - - switch (cmd) { - case SIOCSHWTSTAMP: - return bfin_mac_hwtstamp_set(netdev, ifr); - case SIOCGHWTSTAMP: - return bfin_mac_hwtstamp_get(netdev, ifr); - default: - if (netdev->phydev) - return phy_mii_ioctl(netdev->phydev, ifr, cmd); - else - return -EOPNOTSUPP; - } -} - -/* - * this puts the device in an inactive state - */ -static void bfin_mac_shutdown(struct net_device *dev) -{ - /* Turn off the EMAC */ - bfin_write_EMAC_OPMODE(0x00000000); - /* Turn off the EMAC RX DMA */ - bfin_write_DMA1_CONFIG(0x0000); - bfin_write_DMA2_CONFIG(0x0000); -} - -/* - * Open and Initialize the interface - * - * Set up everything, reset the card, etc.. - */ -static int bfin_mac_open(struct net_device *dev) -{ - struct bfin_mac_local *lp = netdev_priv(dev); - int ret; - pr_debug("%s: %s\n", dev->name, __func__); - - /* - * Check that the address is valid. If its not, refuse - * to bring the device up. 
The user must specify an - * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx - */ - if (!is_valid_ether_addr(dev->dev_addr)) { - netdev_warn(dev, "no valid ethernet hw addr\n"); - return -EINVAL; - } - - /* initial rx and tx list */ - ret = desc_list_init(dev); - if (ret) - return ret; - - phy_start(dev->phydev); - setup_system_regs(dev); - setup_mac_addr(dev->dev_addr); - - bfin_mac_disable(); - ret = bfin_mac_enable(dev->phydev); - if (ret) - return ret; - pr_debug("hardware init finished\n"); - - napi_enable(&lp->napi); - netif_start_queue(dev); - netif_carrier_on(dev); - - return 0; -} - -/* - * this makes the board clean up everything that it can - * and not talk to the outside world. Caused by - * an 'ifconfig ethX down' - */ -static int bfin_mac_close(struct net_device *dev) -{ - struct bfin_mac_local *lp = netdev_priv(dev); - pr_debug("%s: %s\n", dev->name, __func__); - - netif_stop_queue(dev); - napi_disable(&lp->napi); - netif_carrier_off(dev); - - phy_stop(dev->phydev); - phy_write(dev->phydev, MII_BMCR, BMCR_PDOWN); - - /* clear everything */ - bfin_mac_shutdown(dev); - - /* free the rx/tx buffers */ - desc_list_free(); - - return 0; -} - -static const struct net_device_ops bfin_mac_netdev_ops = { - .ndo_open = bfin_mac_open, - .ndo_stop = bfin_mac_close, - .ndo_start_xmit = bfin_mac_hard_start_xmit, - .ndo_set_mac_address = bfin_mac_set_mac_address, - .ndo_tx_timeout = bfin_mac_timeout, - .ndo_set_rx_mode = bfin_mac_set_multicast_list, - .ndo_do_ioctl = bfin_mac_ioctl, - .ndo_validate_addr = eth_validate_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = bfin_mac_poll_controller, -#endif -}; - -static int bfin_mac_probe(struct platform_device *pdev) -{ - struct net_device *ndev; - struct bfin_mac_local *lp; - struct platform_device *pd; - struct bfin_mii_bus_platform_data *mii_bus_data; - int rc; - - ndev = alloc_etherdev(sizeof(struct bfin_mac_local)); - if (!ndev) - return -ENOMEM; - - SET_NETDEV_DEV(ndev, &pdev->dev); - platform_set_drvdata(pdev, ndev); - lp = netdev_priv(ndev); - lp->ndev = ndev; - - /* Grab the MAC address in the MAC */ - *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO()); - *(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI()); - - /* probe mac */ - /*todo: how to probe? which is revision_register */ - bfin_write_EMAC_ADDRLO(0x12345678); - if (bfin_read_EMAC_ADDRLO() != 0x12345678) { - dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n"); - rc = -ENODEV; - goto out_err_probe_mac; - } - - - /* - * Is it valid? (Did bootloader initialize it?) 
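
The probe above scratches 0x12345678 into EMAC_ADDRLO and reads it back to confirm the MAC block actually decodes the address. A minimal sketch of that write/read-back presence check (hypothetical accessors standing in for the bfin register helpers):

#include <linux/types.h>

u32 example_read_reg(void);             /* hypothetical MMIO accessors */
void example_write_reg(u32 val);

static bool example_block_present(void)
{
        const u32 pattern = 0x12345678;

        example_write_reg(pattern);
        /* if nothing decodes the address, the read-back won't match */
        return example_read_reg() == pattern;
}
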
- * Grab the MAC from the board somehow - * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c - */ - if (!is_valid_ether_addr(ndev->dev_addr)) { - if (bfin_get_ether_addr(ndev->dev_addr) || - !is_valid_ether_addr(ndev->dev_addr)) { - /* Still not valid, get a random one */ - netdev_warn(ndev, "Setting Ethernet MAC to a random one\n"); - eth_hw_addr_random(ndev); - } - } - - setup_mac_addr(ndev->dev_addr); - - if (!dev_get_platdata(&pdev->dev)) { - dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n"); - rc = -ENODEV; - goto out_err_probe_mac; - } - pd = dev_get_platdata(&pdev->dev); - lp->mii_bus = platform_get_drvdata(pd); - if (!lp->mii_bus) { - dev_err(&pdev->dev, "Cannot get mii_bus!\n"); - rc = -ENODEV; - goto out_err_probe_mac; - } - lp->mii_bus->priv = ndev; - mii_bus_data = dev_get_platdata(&pd->dev); - - rc = mii_probe(ndev, mii_bus_data->phy_mode); - if (rc) { - dev_err(&pdev->dev, "MII Probe failed!\n"); - goto out_err_mii_probe; - } - - lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask; - lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask; - - ndev->netdev_ops = &bfin_mac_netdev_ops; - ndev->ethtool_ops = &bfin_mac_ethtool_ops; - - timer_setup(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout, 0); - - lp->flags = 0; - netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM); - - spin_lock_init(&lp->lock); - - /* now, enable interrupts */ - /* register irq handler */ - rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt, - 0, "EMAC_RX", ndev); - if (rc) { - dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n"); - rc = -EBUSY; - goto out_err_request_irq; - } - - rc = register_netdev(ndev); - if (rc) { - dev_err(&pdev->dev, "Cannot register net device!\n"); - goto out_err_reg_ndev; - } - - bfin_mac_hwtstamp_init(ndev); - rc = bfin_phc_init(ndev, &pdev->dev); - if (rc) { - dev_err(&pdev->dev, "Cannot register PHC device!\n"); - goto out_err_phc; - } - - /* now, print out the card info, in a short format.. 
*/ - netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); - - return 0; - -out_err_phc: -out_err_reg_ndev: - free_irq(IRQ_MAC_RX, ndev); -out_err_request_irq: - netif_napi_del(&lp->napi); -out_err_mii_probe: - mdiobus_unregister(lp->mii_bus); - mdiobus_free(lp->mii_bus); -out_err_probe_mac: - free_netdev(ndev); - - return rc; -} - -static int bfin_mac_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct bfin_mac_local *lp = netdev_priv(ndev); - - bfin_phc_release(lp); - - lp->mii_bus->priv = NULL; - - unregister_netdev(ndev); - - netif_napi_del(&lp->napi); - - free_irq(IRQ_MAC_RX, ndev); - - free_netdev(ndev); - - return 0; -} - -#ifdef CONFIG_PM -static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg) -{ - struct net_device *net_dev = platform_get_drvdata(pdev); - struct bfin_mac_local *lp = netdev_priv(net_dev); - - if (lp->wol) { - bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE); - bfin_write_EMAC_WKUP_CTL(MPKE); - enable_irq_wake(IRQ_MAC_WAKEDET); - } else { - if (netif_running(net_dev)) - bfin_mac_close(net_dev); - } - - return 0; -} - -static int bfin_mac_resume(struct platform_device *pdev) -{ - struct net_device *net_dev = platform_get_drvdata(pdev); - struct bfin_mac_local *lp = netdev_priv(net_dev); - - if (lp->wol) { - bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE); - bfin_write_EMAC_WKUP_CTL(0); - disable_irq_wake(IRQ_MAC_WAKEDET); - } else { - if (netif_running(net_dev)) - bfin_mac_open(net_dev); - } - - return 0; -} -#else -#define bfin_mac_suspend NULL -#define bfin_mac_resume NULL -#endif /* CONFIG_PM */ - -static int bfin_mii_bus_probe(struct platform_device *pdev) -{ - struct mii_bus *miibus; - struct bfin_mii_bus_platform_data *mii_bus_pd; - const unsigned short *pin_req; - int rc, i; - - mii_bus_pd = dev_get_platdata(&pdev->dev); - if (!mii_bus_pd) { - dev_err(&pdev->dev, "No peripherals in platform data!\n"); - return -EINVAL; - } - - /* - * We are setting up a network card, - * so set the GPIO pins to Ethernet mode - */ - pin_req = mii_bus_pd->mac_peripherals; - rc = peripheral_request_list(pin_req, KBUILD_MODNAME); - if (rc) { - dev_err(&pdev->dev, "Requesting peripherals failed!\n"); - return rc; - } - - rc = -ENOMEM; - miibus = mdiobus_alloc(); - if (miibus == NULL) - goto out_err_alloc; - miibus->read = bfin_mdiobus_read; - miibus->write = bfin_mdiobus_write; - - miibus->parent = &pdev->dev; - miibus->name = "bfin_mii_bus"; - miibus->phy_mask = mii_bus_pd->phy_mask; - - snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, pdev->id); - - rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR); - if (rc != mii_bus_pd->phydev_number) - dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n", - mii_bus_pd->phydev_number); - for (i = 0; i < rc; ++i) { - unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr; - if (phyaddr < PHY_MAX_ADDR) - miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq; - else - dev_err(&pdev->dev, - "Invalid PHY address %i for phydev %i\n", - phyaddr, i); - } - - rc = mdiobus_register(miibus); - if (rc) { - dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); - goto out_err_irq_alloc; - } - - platform_set_drvdata(pdev, miibus); - return 0; - -out_err_irq_alloc: - mdiobus_free(miibus); -out_err_alloc: - peripheral_free_list(pin_req); - - return rc; -} - -static int bfin_mii_bus_remove(struct platform_device *pdev) -{ - struct mii_bus *miibus = platform_get_drvdata(pdev); - struct bfin_mii_bus_platform_data *mii_bus_pd = - 
dev_get_platdata(&pdev->dev); - - mdiobus_unregister(miibus); - mdiobus_free(miibus); - peripheral_free_list(mii_bus_pd->mac_peripherals); - - return 0; -} - -static struct platform_driver bfin_mii_bus_driver = { - .probe = bfin_mii_bus_probe, - .remove = bfin_mii_bus_remove, - .driver = { - .name = "bfin_mii_bus", - }, -}; - -static struct platform_driver bfin_mac_driver = { - .probe = bfin_mac_probe, - .remove = bfin_mac_remove, - .resume = bfin_mac_resume, - .suspend = bfin_mac_suspend, - .driver = { - .name = KBUILD_MODNAME, - }, -}; - -static struct platform_driver * const drivers[] = { - &bfin_mii_bus_driver, - &bfin_mac_driver, -}; - -static int __init bfin_mac_init(void) -{ - return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); -} - -module_init(bfin_mac_init); - -static void __exit bfin_mac_cleanup(void) -{ - platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); -} - -module_exit(bfin_mac_cleanup); - diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h deleted file mode 100644 index 4ad5b9be3f84..000000000000 --- a/drivers/net/ethernet/adi/bfin_mac.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Blackfin On-Chip MAC Driver - * - * Copyright 2004-2007 Analog Devices Inc. - * - * Enter bugs at http://blackfin.uclinux.org/ - * - * Licensed under the GPL-2 or later. - */ -#ifndef _BFIN_MAC_H_ -#define _BFIN_MAC_H_ - -#include <linux/net_tstamp.h> -#include <linux/ptp_clock_kernel.h> -#include <linux/timer.h> -#include <linux/etherdevice.h> -#include <linux/bfin_mac.h> - -/* - * Disable hardware checksum for bug #5600 if writeback cache is - * enabled. Otherwize, corrupted RX packet will be sent up stack - * without error mark. - */ -#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK -#define BFIN_MAC_CSUM_OFFLOAD -#endif - -#define TX_RECLAIM_JIFFIES (HZ / 5) -#define BFIN_MAC_RX_IRQ_DISABLED 1 - -struct dma_descriptor { - struct dma_descriptor *next_dma_desc; - unsigned long start_addr; - unsigned short config; - unsigned short x_count; -}; - -struct status_area_rx { -#if defined(BFIN_MAC_CSUM_OFFLOAD) - unsigned short ip_hdr_csum; /* ip header checksum */ - /* ip payload(udp or tcp or others) checksum */ - unsigned short ip_payload_csum; -#endif - unsigned long status_word; /* the frame status word */ -}; - -struct status_area_tx { - unsigned long status_word; /* the frame status word */ -}; - -/* use two descriptors for a packet */ -struct net_dma_desc_rx { - struct net_dma_desc_rx *next; - struct sk_buff *skb; - struct dma_descriptor desc_a; - struct dma_descriptor desc_b; - struct status_area_rx status; -}; - -/* use two descriptors for a packet */ -struct net_dma_desc_tx { - struct net_dma_desc_tx *next; - struct sk_buff *skb; - struct dma_descriptor desc_a; - struct dma_descriptor desc_b; - unsigned char packet[1560]; - struct status_area_tx status; -}; - -struct bfin_mac_local { - spinlock_t lock; - - int wol; /* Wake On Lan */ - int irq_wake_requested; - struct timer_list tx_reclaim_timer; - struct net_device *ndev; - struct napi_struct napi; - unsigned long flags; - - /* Data for EMAC_VLAN1 regs */ - u16 vlan1_mask, vlan2_mask; - - /* MII and PHY stuffs */ - int old_link; /* used by bf537_adjust_link */ - int old_speed; - int old_duplex; - - struct mii_bus *mii_bus; - -#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP) - u32 addend; - unsigned int shift; - s32 max_ppb; - struct hwtstamp_config stamp_cfg; - struct ptp_clock_info caps; - struct ptp_clock *clock; - int phc_index; - spinlock_t phc_lock; /* protects time lo/hi registers */ -#endif -}; - -int 
bfin_get_ether_addr(char *addr); - -#endif diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 527908c7e384..baca8f704a45 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -56,7 +56,7 @@ static atomic_t instance_count = ATOMIC_INIT(~0); /* Module parameters */ static int debug = -1; -module_param(debug, int, S_IRUGO | S_IWUSR); +module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | @@ -65,12 +65,12 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | #define RX_DESCRIPTORS 64 static int dma_rx_num = RX_DESCRIPTORS; -module_param(dma_rx_num, int, S_IRUGO | S_IWUSR); +module_param(dma_rx_num, int, 0644); MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list"); #define TX_DESCRIPTORS 64 static int dma_tx_num = TX_DESCRIPTORS; -module_param(dma_tx_num, int, S_IRUGO | S_IWUSR); +module_param(dma_tx_num, int, 0644); MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list"); diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index bf2de5298005..1b9d3130af4d 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -631,8 +631,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) */ wmb(); - writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); + writel_relaxed(mmio_read_reg, + ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); + mmiowb(); for (i = 0; i < timeout; i++) { if (read_resp->req_id == mmio_read->seq_num) break; @@ -1826,7 +1828,9 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) /* write the aenq doorbell after all AENQ descriptors were read */ mb(); - writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); + writel_relaxed((u32)aenq->head, + dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); + mmiowb(); } int ena_com_dev_reset(struct ena_com_dev *ena_dev, diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index 2f7657227cfe..6fdc753d9483 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -107,7 +107,8 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) return io_sq->q_depth - 1 - cnt; } -static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) +static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq, + bool relaxed) { u16 tail; @@ -116,7 +117,10 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) pr_debug("write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail); - writel(tail, io_sq->db_addr); + if (relaxed) + writel_relaxed(tail, io_sq->db_addr); + else + writel(tail, io_sq->db_addr); return 0; } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 6975150d144e..a822e70c2af3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -556,7 +556,8 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) * issue a doorbell */ wmb(); - ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); + ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true); + mmiowb(); } rx_ring->next_to_use = next_to_use; @@ -2151,7 
+2152,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) if (netif_xmit_stopped(txq) || !skb->xmit_more) { /* trigger the dma engine */ - ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); + ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false); u64_stats_update_begin(&tx_ring->syncp); tx_ring->tx_stats.doorbells++; u64_stats_update_end(&tx_ring->syncp); diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 358f7ab77c70..c99e3e845ac0 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -649,7 +649,7 @@ static void amd8111e_free_ring(struct amd8111e_priv *lp) static int amd8111e_tx(struct net_device *dev) { struct amd8111e_priv *lp = netdev_priv(dev); - int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK; + int tx_index; int status; /* Complete all the transmit packet */ while (lp->tx_complete_idx != lp->tx_idx){ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 100adee778df..7c204f05b418 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -137,21 +137,21 @@ static unsigned int ecc_ded_period = 600; #ifdef CONFIG_AMD_XGBE_HAVE_ECC /* Only expose the ECC parameters if supported */ -module_param(ecc_sec_info_threshold, uint, S_IWUSR | S_IRUGO); +module_param(ecc_sec_info_threshold, uint, 0644); MODULE_PARM_DESC(ecc_sec_info_threshold, " ECC corrected error informational threshold setting"); -module_param(ecc_sec_warn_threshold, uint, S_IWUSR | S_IRUGO); +module_param(ecc_sec_warn_threshold, uint, 0644); MODULE_PARM_DESC(ecc_sec_warn_threshold, " ECC corrected error warning threshold setting"); -module_param(ecc_sec_period, uint, S_IWUSR | S_IRUGO); +module_param(ecc_sec_period, uint, 0644); MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)"); -module_param(ecc_ded_threshold, uint, S_IWUSR | S_IRUGO); +module_param(ecc_ded_threshold, uint, 0644); MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting"); -module_param(ecc_ded_period, uint, S_IWUSR | S_IRUGO); +module_param(ecc_ded_period, uint, 0644); MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)"); #endif diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index d91fa595be98..795e556d4a3f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -131,7 +131,7 @@ MODULE_VERSION(XGBE_DRV_VERSION); MODULE_DESCRIPTION(XGBE_DRV_DESC); static int debug = -1; -module_param(debug, int, S_IWUSR | S_IRUGO); +module_param(debug, int, 0644); MODULE_PARM_DESC(debug, " Network interface message level setting"); static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 3e5833cf1fab..eb23f9ba1a9a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev) struct net_device *netdev = pdata->netdev; int ret = 0; + XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff); + pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER; XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl); diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c index f17a160dbff2..137cbb470af2 100644 --- a/drivers/net/ethernet/apple/macmace.c +++ 
b/drivers/net/ethernet/apple/macmace.c @@ -247,8 +247,8 @@ static int mace_probe(struct platform_device *pdev) dev->netdev_ops = &mace_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; - printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n", - dev->name, dev->dev_addr); + pr_info("Onboard MACE, hardware address %pM, chip revision 0x%04X\n", + dev->dev_addr, mp->chipid); err = register_netdev(dev); if (!err) @@ -589,7 +589,6 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) else if (fs & (UFLO|LCOL|RTRY)) { ++dev->stats.tx_aborted_errors; if (mb->xmtfs & UFLO) { - printk(KERN_ERR "%s: DMA underrun.\n", dev->name); dev->stats.tx_fifo_errors++; mace_txdma_reset(dev); } @@ -644,10 +643,8 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf) if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) { dev->stats.rx_errors++; - if (frame_status & RS_OFLO) { - printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name); + if (frame_status & RS_OFLO) dev->stats.rx_fifo_errors++; - } if (frame_status & RS_CLSN) dev->stats.collisions++; if (frame_status & RS_FRAMERR) @@ -770,18 +767,4 @@ static struct platform_driver mac_mace_driver = { }, }; -static int __init mac_mace_init_module(void) -{ - if (!MACH_IS_MAC) - return -ENODEV; - - return platform_driver_register(&mac_mace_driver); -} - -static void __exit mac_mace_cleanup_module(void) -{ - platform_driver_unregister(&mac_mace_driver); -} - -module_init(mac_mace_init_module); -module_exit(mac_mace_cleanup_module); +module_platform_driver(mac_mace_driver); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 0b49f1aeebd3..fc7383106946 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -36,6 +36,8 @@ #define AQ_CFG_TX_FRAME_MAX (16U * 1024U) #define AQ_CFG_RX_FRAME_MAX (4U * 1024U) +#define AQ_CFG_TX_CLEAN_BUDGET 256U + /* LRO */ #define AQ_CFG_IS_LRO_DEF 1U diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index ebbaf63eaf47..c96a92118b8b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -247,6 +247,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self) self->ndev->hw_features |= aq_hw_caps->hw_features; self->ndev->features = aq_hw_caps->hw_features; self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; + self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN; @@ -937,3 +939,23 @@ err_exit: out: return err; } + +void aq_nic_shutdown(struct aq_nic_s *self) +{ + int err = 0; + + if (!self->ndev) + return; + + rtnl_lock(); + + netif_device_detach(self->ndev); + + err = aq_nic_stop(self); + if (err < 0) + goto err_exit; + aq_nic_deinit(self); + +err_exit: + rtnl_unlock(); +}
\ No newline at end of file diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index d16b0f1a95aa..219b550d1665 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -118,5 +118,6 @@ struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); u32 aq_nic_get_fw_version(struct aq_nic_s *self); int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self); +void aq_nic_shutdown(struct aq_nic_s *self); #endif /* AQ_NIC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 22889fc158f2..ecc6306f940f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -226,6 +226,10 @@ static int aq_pci_probe(struct pci_dev *pdev, goto err_ioremap; self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL); + if (!self->aq_hw) { + err = -ENOMEM; + goto err_ioremap; + } self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self); for (bar = 0; bar < 4; ++bar) { @@ -235,19 +239,19 @@ static int aq_pci_probe(struct pci_dev *pdev, mmio_pa = pci_resource_start(pdev, bar); if (mmio_pa == 0U) { err = -EIO; - goto err_ioremap; + goto err_free_aq_hw; } reg_sz = pci_resource_len(pdev, bar); if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) { err = -EIO; - goto err_ioremap; + goto err_free_aq_hw; } self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz); if (!self->aq_hw->mmio) { err = -EIO; - goto err_ioremap; + goto err_free_aq_hw; } break; } @@ -255,7 +259,7 @@ static int aq_pci_probe(struct pci_dev *pdev, if (bar == 4) { err = -EIO; - goto err_ioremap; + goto err_free_aq_hw; } numvecs = min((u8)AQ_CFG_VECS_DEF, @@ -290,6 +294,8 @@ err_register: aq_pci_free_irq_vectors(self); err_hwinit: iounmap(self->aq_hw->mmio); +err_free_aq_hw: + kfree(self->aq_hw); err_ioremap: free_netdev(ndev); err_pci_func: @@ -317,6 +323,20 @@ static void aq_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static void aq_pci_shutdown(struct pci_dev *pdev) +{ + struct aq_nic_s *self = pci_get_drvdata(pdev); + + aq_nic_shutdown(self); + + pci_disable_device(pdev); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } +} + static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg) { struct aq_nic_s *self = pci_get_drvdata(pdev); @@ -339,6 +359,7 @@ static struct pci_driver aq_pci_ops = { .remove = aq_pci_remove, .suspend = aq_pci_suspend, .resume = aq_pci_resume, + .shutdown = aq_pci_shutdown, }; module_pci_driver(aq_pci_ops); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 0be6a11370bb..b5f1f62e8e25 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -136,11 +136,12 @@ void aq_ring_queue_stop(struct aq_ring_s *ring) netif_stop_subqueue(ndev, ring->idx); } -void aq_ring_tx_clean(struct aq_ring_s *self) +bool aq_ring_tx_clean(struct aq_ring_s *self) { struct device *dev = aq_nic_get_dev(self->aq_nic); + unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET; - for (; self->sw_head != self->hw_head; + for (; self->sw_head != self->hw_head && budget--; self->sw_head = aq_ring_next_dx(self, self->sw_head)) { struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; @@ -167,6 +168,8 @@ void 
aq_ring_tx_clean(struct aq_ring_s *self) buff->pa = 0U; buff->eop_index = 0xffffU; } + + return !!budget; } #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 965fae0fb6e0..ac1329f4051d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -153,7 +153,7 @@ void aq_ring_free(struct aq_ring_s *self); void aq_ring_update_queue_state(struct aq_ring_s *ring); void aq_ring_queue_wake(struct aq_ring_s *ring); void aq_ring_queue_stop(struct aq_ring_s *ring); -void aq_ring_tx_clean(struct aq_ring_s *self); +bool aq_ring_tx_clean(struct aq_ring_s *self); int aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, int *work_done, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index f890b8a5a862..d335c334fa56 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -35,12 +35,12 @@ struct aq_vec_s { static int aq_vec_poll(struct napi_struct *napi, int budget) { struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); + unsigned int sw_tail_old = 0U; struct aq_ring_s *ring = NULL; + bool was_tx_cleaned = true; + unsigned int i = 0U; int work_done = 0; int err = 0; - unsigned int i = 0U; - unsigned int sw_tail_old = 0U; - bool was_tx_cleaned = false; if (!self) { err = -EINVAL; @@ -57,9 +57,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) if (ring[AQ_VEC_TX_ID].sw_head != ring[AQ_VEC_TX_ID].hw_head) { - aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); + was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]); - was_tx_cleaned = true; } err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw, @@ -90,7 +89,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) } } - if (was_tx_cleaned) + if (!was_tx_cleaned) work_done = budget; if (work_done < budget) { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/Makefile b/drivers/net/ethernet/aquantia/atlantic/hw_atl/Makefile new file mode 100644 index 000000000000..805fa28f391a --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 967f0fd07fcf..84d7f4dd4ce1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -21,6 +21,10 @@ #define HW_ATL_UCP_0X370_REG 0x0370U +#define HW_ATL_MIF_CMD 0x0200U +#define HW_ATL_MIF_ADDR 0x0208U +#define HW_ATL_MIF_VAL 0x020CU + #define HW_ATL_FW_SM_RAM 0x2U #define HW_ATL_MPI_FW_VERSION 0x18 #define HW_ATL_MPI_CONTROL_ADR 0x0368U @@ -79,16 +83,15 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) { + u32 gsr, val; int k = 0; - u32 gsr; aq_hw_write_reg(self, 0x404, 0x40e1); AQ_HW_SLEEP(50); /* Cleanup SPI */ - aq_hw_write_reg(self, 0x534, 0xA0); - aq_hw_write_reg(self, 0x100, 0x9F); - aq_hw_write_reg(self, 0x100, 0x809F); + val = aq_hw_read_reg(self, 0x53C); + aq_hw_write_reg(self, 0x53C, val | 0x10); gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR); 
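
In the aq_vec_poll() hunk above, aq_ring_tx_clean() now takes a fixed reclaim budget (AQ_CFG_TX_CLEAN_BUDGET) and reports whether any budget was left; when it signals exhaustion, the poller reports the full NAPI budget so the instance stays scheduled instead of completing with TX work still pending. The general shape of that idiom, with hypothetical names throughout:

#include <linux/netdevice.h>

#define EXAMPLE_TX_CLEAN_BUDGET 256U

struct example_vec {
        struct napi_struct napi;
        /* ring state elided */
};

bool example_tx_clean(struct example_vec *vec, unsigned int budget);
int example_rx_clean(struct example_vec *vec, int budget);

static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_vec *vec = container_of(napi, struct example_vec,
                                               napi);
        bool tx_done = example_tx_clean(vec, EXAMPLE_TX_CLEAN_BUDGET);
        int work_done = example_rx_clean(vec, budget);

        if (!tx_done)                   /* TX reclaim hit its budget */
                work_done = budget;     /* stay scheduled for next pass */

        if (work_done < budget)
                napi_complete_done(napi, work_done);

        return work_done;
}
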
aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000); @@ -97,7 +100,14 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) aq_hw_write_reg(self, 0x404, 0x80e0); aq_hw_write_reg(self, 0x32a8, 0x0); aq_hw_write_reg(self, 0x520, 0x1); + + /* Reset SPI again because of possible interrupted SPI burst */ + val = aq_hw_read_reg(self, 0x53C); + aq_hw_write_reg(self, 0x53C, val | 0x10); AQ_HW_SLEEP(10); + /* Clear SPI reset state */ + aq_hw_write_reg(self, 0x53C, val & ~0x10); + aq_hw_write_reg(self, 0x404, 0x180e0); for (k = 0; k < 1000; k++) { @@ -141,13 +151,15 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) aq_pr_err("FW kickstart failed\n"); return -EIO; } + /* Old FW requires fixed delay after init */ + AQ_HW_SLEEP(15); return 0; } static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) { - u32 gsr, rbl_status; + u32 gsr, val, rbl_status; int k; aq_hw_write_reg(self, 0x404, 0x40e1); @@ -157,6 +169,10 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) /* Alter RBL status */ aq_hw_write_reg(self, 0x388, 0xDEAD); + /* Cleanup SPI */ + val = aq_hw_read_reg(self, 0x53C); + aq_hw_write_reg(self, 0x53C, val | 0x10); + /* Global software reset*/ hw_atl_rx_rx_reg_res_dis_set(self, 0U); hw_atl_tx_tx_reg_res_dis_set(self, 0U); @@ -204,6 +220,8 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) aq_pr_err("FW kickstart failed\n"); return -EIO; } + /* Old FW requires fixed delay after init */ + AQ_HW_SLEEP(15); return 0; } @@ -255,18 +273,22 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, } } - aq_hw_write_reg(self, 0x00000208U, a); - - for (++cnt; --cnt;) { - u32 i = 0U; + aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a); - aq_hw_write_reg(self, 0x00000200U, 0x00008000U); + for (++cnt; --cnt && !err;) { + aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U); - for (i = 1024U; - (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { - } + if (IS_CHIP_FEATURE(REVISION_B1)) + AQ_HW_WAIT_FOR(a != aq_hw_read_reg(self, + HW_ATL_MIF_ADDR), + 1, 1000U); + else + AQ_HW_WAIT_FOR(!(0x100 & aq_hw_read_reg(self, + HW_ATL_MIF_CMD)), + 1, 1000U); - *(p++) = aq_hw_read_reg(self, 0x0000020CU); + *(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL); + a += 4; } hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); @@ -483,7 +505,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, err_exit:; } -int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed) +static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed) { u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); @@ -662,14 +684,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p) u32 val = hw_atl_reg_glb_mif_id_get(self); u32 mif_rev = val & 0xFFU; - if ((3U & mif_rev) == 1U) { - chip_features |= - HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 | + if ((0xFU & mif_rev) == 1U) { + chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 | HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | HAL_ATLANTIC_UTILS_CHIP_MIPS; - } else if ((3U & mif_rev) == 2U) { - chip_features |= - HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | + } else if ((0xFU & mif_rev) == 2U) { + chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | + HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | + HAL_ATLANTIC_UTILS_CHIP_MIPS | + HAL_ATLANTIC_UTILS_CHIP_TPO2 | + HAL_ATLANTIC_UTILS_CHIP_RPF2; + } else if ((0xFU & mif_rev) == 0xAU) { + chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 | HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | HAL_ATLANTIC_UTILS_CHIP_MIPS | HAL_ATLANTIC_UTILS_CHIP_TPO2 | diff --git 
a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index 2c690947910a..cd8f18f39c61 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -161,6 +161,7 @@ struct __packed hw_aq_atl_utils_mbox { #define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U #define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U #define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U +#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 0x04000000U #define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \ self->chip_features) diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h index 5265b937677b..a445de6837a6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/ver.h +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -13,7 +13,7 @@ #define NIC_MAJOR_DRIVER_VERSION 2 #define NIC_MINOR_DRIVER_VERSION 0 #define NIC_BUILD_DRIVER_VERSION 2 -#define NIC_REVISION_DRIVER_VERSION 0 +#define NIC_REVISION_DRIVER_VERSION 1 #define AQ_CFG_DRV_VERSION_SUFFIX "-kern" diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index 16f9bee992fe..0f6576802607 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev) /* Optional regulator for PHY */ priv->regulator = devm_regulator_get_optional(dev, "phy"); if (IS_ERR(priv->regulator)) { - if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto out_clk_disable; + } dev_err(dev, "no regulator found\n"); priv->regulator = NULL; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f15a8fc6dfc9..f9a3c1a76d5d 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -574,6 +574,34 @@ static int bcm_sysport_set_wol(struct net_device *dev, return 0; } +static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv, + u32 usecs, u32 pkts) +{ + u32 reg; + + reg = rdma_readl(priv, RDMA_MBDONE_INTR); + reg &= ~(RDMA_INTR_THRESH_MASK | + RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT); + reg |= pkts; + reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT; + rdma_writel(priv, reg, RDMA_MBDONE_INTR); +} + +static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring, + struct ethtool_coalesce *ec) +{ + struct bcm_sysport_priv *priv = ring->priv; + u32 reg; + + reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index)); + reg &= ~(RING_INTR_THRESH_MASK | + RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT); + reg |= ec->tx_max_coalesced_frames; + reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) << + RING_TIMEOUT_SHIFT; + tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index)); +} + static int bcm_sysport_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { @@ -589,6 +617,7 @@ static int bcm_sysport_get_coalesce(struct net_device *dev, ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000; ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK; + ec->use_adaptive_rx_coalesce = priv->dim.use_dim; return 0; } @@ -597,8 +626,9 @@ static int bcm_sysport_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct bcm_sysport_priv *priv = netdev_priv(dev); + 
struct net_dim_cq_moder moder; + u32 usecs, pkts; unsigned int i; - u32 reg; /* Base system clock is 125Mhz, DMA timeout is this reference clock * divided by 1024, which yield roughly 8.192 us, our maximum value has @@ -611,26 +641,28 @@ static int bcm_sysport_set_coalesce(struct net_device *dev, return -EINVAL; if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) || - (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)) + (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) || + ec->use_adaptive_tx_coalesce) return -EINVAL; - for (i = 0; i < dev->num_tx_queues; i++) { - reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i)); - reg &= ~(RING_INTR_THRESH_MASK | - RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT); - reg |= ec->tx_max_coalesced_frames; - reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) << - RING_TIMEOUT_SHIFT; - tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i)); + for (i = 0; i < dev->num_tx_queues; i++) + bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec); + + priv->rx_coalesce_usecs = ec->rx_coalesce_usecs; + priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + usecs = priv->rx_coalesce_usecs; + pkts = priv->rx_max_coalesced_frames; + + if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) { + moder = net_dim_get_def_profile(priv->dim.dim.mode); + usecs = moder.usec; + pkts = moder.pkts; } - reg = rdma_readl(priv, RDMA_MBDONE_INTR); - reg &= ~(RDMA_INTR_THRESH_MASK | - RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT); - reg |= ec->rx_max_coalesced_frames; - reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) << - RDMA_TIMEOUT_SHIFT; - rdma_writel(priv, reg, RDMA_MBDONE_INTR); + priv->dim.use_dim = ec->use_adaptive_rx_coalesce; + + /* Apply desired coalescing parameters */ + bcm_sysport_set_rx_coalesce(priv, usecs, pkts); return 0; } @@ -709,6 +741,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, struct bcm_sysport_stats64 *stats64 = &priv->stats64; struct net_device *ndev = priv->netdev; unsigned int processed = 0, to_process; + unsigned int processed_bytes = 0; struct bcm_sysport_cb *cb; struct sk_buff *skb; unsigned int p_index; @@ -800,6 +833,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, */ skb_pull(skb, sizeof(*rsb) + 2); len -= (sizeof(*rsb) + 2); + processed_bytes += len; /* UniMAC may forward CRC */ if (priv->crc_fwd) { @@ -824,6 +858,9 @@ next: priv->rx_read_ptr = 0; } + priv->dim.packets = processed; + priv->dim.bytes = processed_bytes; + return processed; } @@ -855,10 +892,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, struct bcm_sysport_tx_ring *ring) { - unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; unsigned int pkts_compl = 0, bytes_compl = 0; struct net_device *ndev = priv->netdev; + unsigned int txbds_processed = 0; struct bcm_sysport_cb *cb; + unsigned int txbds_ready; + unsigned int c_index; u32 hw_ind; /* Clear status before servicing to reduce spurious interrupts */ @@ -871,29 +910,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, /* Compute how many descriptors have been processed since last call */ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; - ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); - - last_c_index = ring->c_index; - num_tx_cbs = ring->size; - - c_index &= (num_tx_cbs - 1); - - if (c_index 
>= last_c_index) - last_tx_cn = c_index - last_c_index; - else - last_tx_cn = num_tx_cbs - last_c_index + c_index; + txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK; netif_dbg(priv, tx_done, ndev, - "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", - ring->index, c_index, last_tx_cn, last_c_index); + "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + ring->index, ring->c_index, c_index, txbds_ready); - while (last_tx_cn-- > 0) { - cb = ring->cbs + last_c_index; + while (txbds_processed < txbds_ready) { + cb = &ring->cbs[ring->clean_index]; bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); ring->desc_count++; - last_c_index++; - last_c_index &= (num_tx_cbs - 1); + txbds_processed++; + + if (likely(ring->clean_index < ring->size - 1)) + ring->clean_index++; + else + ring->clean_index = 0; } u64_stats_update_begin(&priv->syncp); @@ -976,6 +1009,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) { struct bcm_sysport_priv *priv = container_of(napi, struct bcm_sysport_priv, napi); + struct net_dim_sample dim_sample; unsigned int work_done = 0; work_done = bcm_sysport_desc_rx(priv, budget); @@ -998,6 +1032,12 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE); } + if (priv->dim.use_dim) { + net_dim_sample(priv->dim.event_ctr, priv->dim.packets, + priv->dim.bytes, &dim_sample); + net_dim(&priv->dim.dim, dim_sample); + } + return work_done; } @@ -1016,6 +1056,20 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n"); } +static void bcm_sysport_dim_work(struct work_struct *work) +{ + struct net_dim *dim = container_of(work, struct net_dim, work); + struct bcm_sysport_net_dim *ndim = + container_of(dim, struct bcm_sysport_net_dim, dim); + struct bcm_sysport_priv *priv = + container_of(ndim, struct bcm_sysport_priv, dim); + struct net_dim_cq_moder cur_profile = + net_dim_get_profile(dim->mode, dim->profile_ix); + + bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts); + dim->state = NET_DIM_START_MEASURE; +} + /* RX and misc interrupt routine */ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) { @@ -1034,6 +1088,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) } if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) { + priv->dim.event_ctr++; if (likely(napi_schedule_prep(&priv->napi))) { /* disable RX interrupts */ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE); @@ -1137,7 +1192,7 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, u32 csum_info; u8 ip_proto; u16 csum_start; - u16 ip_ver; + __be16 ip_ver; /* Re-allocate SKB if needed */ if (unlikely(skb_headroom(skb) < sizeof(*tsb))) { @@ -1156,12 +1211,12 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, memset(tsb, 0, sizeof(*tsb)); if (skb->ip_summed == CHECKSUM_PARTIAL) { - ip_ver = htons(skb->protocol); + ip_ver = skb->protocol; switch (ip_ver) { - case ETH_P_IP: + case htons(ETH_P_IP): ip_proto = ip_hdr(skb)->protocol; break; - case ETH_P_IPV6: + case htons(ETH_P_IPV6): ip_proto = ipv6_hdr(skb)->nexthdr; break; default: @@ -1175,7 +1230,8 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { csum_info |= L4_LENGTH_VALID; - if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) + if (ip_proto == IPPROTO_UDP && + ip_ver == htons(ETH_P_IP)) csum_info |= L4_UDP; } else { csum_info = 0; @@ -1358,6 +1414,37 @@ 
out: phy_print_status(phydev); } +static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv, + void (*cb)(struct work_struct *work)) +{ + struct bcm_sysport_net_dim *dim = &priv->dim; + + INIT_WORK(&dim->dim.work, cb); + dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; + dim->event_ctr = 0; + dim->packets = 0; + dim->bytes = 0; +} + +static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv) +{ + struct bcm_sysport_net_dim *dim = &priv->dim; + struct net_dim_cq_moder moder; + u32 usecs, pkts; + + usecs = priv->rx_coalesce_usecs; + pkts = priv->rx_max_coalesced_frames; + + /* If DIM was enabled, re-apply default parameters */ + if (dim->use_dim) { + moder = net_dim_get_def_profile(dim->dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + bcm_sysport_set_rx_coalesce(priv, usecs, pkts); +} + static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, unsigned int index) { @@ -1394,6 +1481,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); ring->index = index; ring->size = size; + ring->clean_index = 0; ring->alloc_size = ring->size; ring->desc_cpu = p; ring->desc_count = ring->size; @@ -1597,8 +1685,6 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) rdma_writel(priv, 0, RDMA_END_ADDR_HI); rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO); - rdma_writel(priv, 1, RDMA_MBDONE_INTR); - netif_dbg(priv, hw, priv->netdev, "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n", priv->num_rx_bds, priv->rx_bds); @@ -1766,6 +1852,8 @@ static void bcm_sysport_netif_start(struct net_device *dev) struct bcm_sysport_priv *priv = netdev_priv(dev); /* Enable NAPI */ + bcm_sysport_init_dim(priv, bcm_sysport_dim_work); + bcm_sysport_init_rx_coalesce(priv); napi_enable(&priv->napi); /* Enable RX interrupt and TX ring full interrupt */ @@ -1951,6 +2039,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev) /* stop all software from updating hardware */ netif_tx_stop_all_queues(dev); napi_disable(&priv->napi); + cancel_work_sync(&priv->dim.dim.work); phy_stop(dev->phydev); /* mask all interrupts */ @@ -2270,6 +2359,7 @@ static int bcm_sysport_probe(struct platform_device *pdev) /* libphy will adjust the link state accordingly */ netif_carrier_off(dev); + priv->rx_max_coalesced_frames = 1; u64_stats_init(&priv->syncp); priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index f5a984c1c986..d6e5d0cbf3a3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -12,6 +12,7 @@ #define __BCM_SYSPORT_H #include <linux/if_vlan.h> +#include <linux/net_dim.h> /* Receive/transmit descriptor format */ #define DESC_ADDR_HI_STATUS_LEN 0x00 @@ -695,6 +696,14 @@ struct bcm_sysport_hw_params { unsigned int num_rx_desc_words; }; +struct bcm_sysport_net_dim { + u16 use_dim; + u16 event_ctr; + unsigned long packets; + unsigned long bytes; + struct net_dim dim; +}; + /* Software view of the TX ring */ struct bcm_sysport_tx_ring { spinlock_t lock; /* Ring lock for tx reclaim/xmit */ @@ -706,7 +715,7 @@ struct bcm_sysport_tx_ring { unsigned int desc_count; /* Number of descriptors */ unsigned int curr_desc; /* Current descriptor */ unsigned int c_index; /* Last consumer index */ - unsigned int p_index; /* Current producer index */ + unsigned int clean_index; /* Current clean index */ struct bcm_sysport_cb *cbs; /* 
Transmit control blocks */ struct dma_desc *desc_cpu; /* CPU view of the descriptor */ struct bcm_sysport_priv *priv; /* private context backpointer */ @@ -743,6 +752,10 @@ struct bcm_sysport_priv { unsigned int rx_read_ptr; unsigned int rx_c_index; + struct bcm_sysport_net_dim dim; + u32 rx_max_coalesced_frames; + u32 rx_coalesce_usecs; + /* PHY device */ struct device_node *phy_dn; phy_interface_t phy_interface; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 8eef9fb6b1fe..e6ea8e61f96d 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -533,7 +533,8 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac, int i; for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) { - int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN; + u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1); + unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN; slot = &ring->slots[i]; dev_kfree_skb(slot->skb); @@ -1190,7 +1191,7 @@ static int bgmac_open(struct net_device *net_dev) bgmac_chip_init(bgmac); err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED, - KBUILD_MODNAME, net_dev); + net_dev->name, net_dev); if (err < 0) { dev_err(bgmac->dev, "IRQ request error: %d!\n", err); bgmac_dma_cleanup(bgmac); @@ -1492,6 +1493,8 @@ int bgmac_enet_probe(struct bgmac *bgmac) struct net_device *net_dev = bgmac->net_dev; int err; + bgmac_chip_intrs_off(bgmac); + net_dev->irq = bgmac->irq; SET_NETDEV_DEV(net_dev, bgmac->dev); dev_set_drvdata(bgmac->dev, bgmac); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 4040d846da8e..40d02fec2747 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -479,9 +479,9 @@ struct bgmac_rx_header { struct bgmac { union { struct { - void *base; - void *idm_base; - void *nicpm_base; + void __iomem *base; + void __iomem *idm_base; + void __iomem *nicpm_base; } plat; struct { struct bcma_device *core; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 5e34b34f7740..9ffc4a8c5fc7 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -87,7 +87,7 @@ MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax); static int disable_msi = 0; -module_param(disable_msi, int, S_IRUGO); +module_param(disable_msi, int, 0444); MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); typedef enum { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 352beff796ae..d847e1b9c37b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -166,6 +166,12 @@ do { \ #define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) #define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset)) +#define REG_WR_RELAXED(bp, offset, val) \ + writel_relaxed((u32)val, REG_ADDR(bp, offset)) + +#define REG_WR16_RELAXED(bp, offset, val) \ + writew_relaxed((u16)val, REG_ADDR(bp, offset)) + #define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) #define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) #define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset)) @@ -758,10 +764,8 @@ struct bnx2x_fastpath { #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) #error "Min DB doorbell stride is 8" #endif -#define DOORBELL(bp, cid, val) \ - do { \ - writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \ - } while (0) +#define DOORBELL_RELAXED(bp, cid, val) \ + 
writel_relaxed((u32)(val), (bp)->doorbells + ((bp)->db_size * (cid))) /* TX CSUM helpers */ #define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index d7c98e807ca8..95871576ab92 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4153,9 +4153,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) wmb(); txdata->tx_db.data.prod += nbd; - barrier(); + /* make sure descriptor update is observed by HW */ + wmb(); - DOORBELL(bp, txdata->cid, txdata->tx_db.raw); + DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw); mmiowb(); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index a5265e1344f1..a8ce5c55bbb0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -522,8 +522,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp, wmb(); for (i = 0; i < sizeof(rx_prods)/4; i++) - REG_WR(bp, fp->ustorm_rx_prods_offset + i*4, - ((u32 *)&rx_prods)[i]); + REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4, + ((u32 *)&rx_prods)[i]); mmiowb(); /* keep prod updates ordered */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 1e33abde4a3e..da18aa239acb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -2591,8 +2591,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) wmb(); txdata->tx_db.data.prod += 2; - barrier(); - DOORBELL(bp, txdata->cid, txdata->tx_db.raw); + /* make sure descriptor update is observed by the HW */ + wmb(); + DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw); mmiowb(); barrier(); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 74fc9af4aadb..c766ae23bc74 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -97,29 +97,29 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); int bnx2x_num_queues; -module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO); +module_param_named(num_queues, bnx2x_num_queues, int, 0444); MODULE_PARM_DESC(num_queues, " Set number of queues (default is as a number of CPUs)"); static int disable_tpa; -module_param(disable_tpa, int, S_IRUGO); +module_param(disable_tpa, int, 0444); MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); static int int_mode; -module_param(int_mode, int, S_IRUGO); +module_param(int_mode, int, 0444); MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " "(1 INT#x; 2 MSI)"); static int dropless_fc; -module_param(dropless_fc, int, S_IRUGO); +module_param(dropless_fc, int, 0444); MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring"); static int mrrs = -1; -module_param(mrrs, int, S_IRUGO); +module_param(mrrs, int, 0444); MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)"); static int debug; -module_param(debug, int, S_IRUGO); +module_param(debug, int, 0444); MODULE_PARM_DESC(debug, " Default debug msglevel"); static struct workqueue_struct *bnx2x_wq; @@ -3817,8 +3817,8 @@ static void bnx2x_sp_prod_update(struct bnx2x *bp) */ mb(); - REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), - bp->spq_prod_idx); + 
REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), + bp->spq_prod_idx); mmiowb(); } @@ -13913,7 +13913,7 @@ static void bnx2x_register_phc(struct bnx2x *bp) bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); if (IS_ERR(bp->ptp_clock)) { bp->ptp_clock = NULL; - BNX2X_ERR("PTP clock registeration failed\n"); + BNX2X_ERR("PTP clock registration failed\n"); } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 76a4668c50fe..8e0a317b31f7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -170,7 +170,9 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) wmb(); /* Trigger the PF FW */ - writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid); + writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid); + + mmiowb(); /* Wait for PF to complete */ while ((tout >= 0) && (!*done)) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1500243b9886..1991f0c7bc0e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1439,7 +1439,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { u16 vlan_proto = tpa_info->metadata >> RX_CMP_FLAGS2_METADATA_TPID_SFT; - u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; + u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); } @@ -1623,7 +1623,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); - u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; + u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); @@ -1922,7 +1922,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) /* Sync BD data before updating doorbell */ wmb(); - bnxt_db_write(bp, db, DB_KEY_TX | prod); + bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod); } cpr->cp_raw_cons = raw_cons; @@ -2317,6 +2317,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) if (rc) return rc; + ring->grp_idx = i; rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; mem_size = rxr->rx_agg_bmap_size / 8; rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); @@ -2389,6 +2390,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) if (rc) return rc; + ring->grp_idx = txr->bnapi->index; if (bp->tx_push_size) { dma_addr_t mapping; @@ -2442,8 +2444,10 @@ static void bnxt_free_cp_rings(struct bnxt *bp) static int bnxt_alloc_cp_rings(struct bnxt *bp) { - int i, rc; + int i, rc, ulp_base_vec, ulp_msix; + ulp_msix = bnxt_get_ulp_msix_num(bp); + ulp_base_vec = bnxt_get_ulp_msix_base(bp); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr; @@ -2458,6 +2462,11 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp) rc = bnxt_alloc_ring(bp, ring); if (rc) return rc; + + if (ulp_msix && i >= ulp_base_vec) + ring->map_idx = i + ulp_msix; + else + ring->map_idx = i; } return 0; } @@ -3059,12 +3068,21 @@ static void bnxt_free_stats(struct bnxt *bp) u32 size, i; struct pci_dev *pdev = bp->pdev; + bp->flags &= 
~BNXT_FLAG_PORT_STATS; + bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; + if (bp->hw_rx_port_stats) { dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, bp->hw_rx_port_stats, bp->hw_rx_port_stats_map); bp->hw_rx_port_stats = NULL; - bp->flags &= ~BNXT_FLAG_PORT_STATS; + } + + if (bp->hw_rx_port_stats_ext) { + dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), + bp->hw_rx_port_stats_ext, + bp->hw_rx_port_stats_ext_map); + bp->hw_rx_port_stats_ext = NULL; } if (!bp->bnapi) @@ -3120,6 +3138,21 @@ static int bnxt_alloc_stats(struct bnxt *bp) bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + sizeof(struct rx_port_stats) + 512; bp->flags |= BNXT_FLAG_PORT_STATS; + + /* Display extended statistics only if FW supports it */ + if (bp->hwrm_spec_code < 0x10804 || + bp->hwrm_spec_code == 0x10900) + return 0; + + bp->hw_rx_port_stats_ext = + dma_zalloc_coherent(&pdev->dev, + sizeof(struct rx_port_stats_ext), + &bp->hw_rx_port_stats_ext_map, + GFP_KERNEL); + if (!bp->hw_rx_port_stats_ext) + return 0; + + bp->flags |= BNXT_FLAG_PORT_STATS_EXT; } return 0; } @@ -3357,6 +3390,15 @@ static void bnxt_disable_int(struct bnxt *bp) } } +static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) +{ + struct bnxt_napi *bnapi = bp->bnapi[n]; + struct bnxt_cp_ring_info *cpr; + + cpr = &bnapi->cp_ring; + return cpr->cp_ring_struct.map_idx; +} + static void bnxt_disable_int_sync(struct bnxt *bp) { int i; @@ -3364,8 +3406,11 @@ static void bnxt_disable_int_sync(struct bnxt *bp) atomic_inc(&bp->intr_sem); bnxt_disable_int(bp); - for (i = 0; i < bp->cp_nr_rings; i++) - synchronize_irq(bp->irq_tbl[i].vector); + for (i = 0; i < bp->cp_nr_rings; i++) { + int map_idx = bnxt_cp_num_to_irq_num(bp, i); + + synchronize_irq(bp->irq_tbl[map_idx].vector); + } } static void bnxt_enable_int(struct bnxt *bp) @@ -3398,7 +3443,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, int i, intr_process, rc, tmo_count; struct input *req = msg; u32 *data = msg; - __le32 *resp_len, *valid; + __le32 *resp_len; + u8 *valid; u16 cp_ring_id, len = 0; struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; @@ -3450,6 +3496,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, i = 0; tmo_count = timeout * 40; + resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; if (intr_process) { /* Wait until hwrm response cmpl interrupt is processed */ while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && @@ -3462,9 +3509,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, le16_to_cpu(req->req_type)); return -1; } + len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> + HWRM_RESP_LEN_SFT; + valid = bp->hwrm_cmd_resp_addr + len - 1; } else { /* Check if response len is updated */ - resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; for (i = 0; i < tmo_count; i++) { len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> HWRM_RESP_LEN_SFT; @@ -3480,10 +3529,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, return -1; } - /* Last word of resp contains valid bit */ - valid = bp->hwrm_cmd_resp_addr + len - 4; + /* Last byte of resp contains valid bit */ + valid = bp->hwrm_cmd_resp_addr + len - 1; for (i = 0; i < 5; i++) { - if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK) + /* make sure we read from updated DMA memory */ + dma_rmb(); + if (*valid) break; udelay(1); } @@ -3496,6 +3547,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, } } + /* Zero valid bit for 
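Two details of the hwrm_do_send_msg() rework above are worth noting: the valid indicator shrinks from the last 32-bit word to the last byte of the response, and each poll iteration now issues dma_rmb() so the CPU re-reads the buffer the device DMAs into instead of a value fetched before the write landed. A minimal sketch of the polling idiom, assuming invented names:

#include <linux/delay.h>

/* Poll the device-written valid byte at the tail of a response buffer;
 * the retry count and delay mirror the hunk above.
 */
static bool demo_resp_valid(const u8 *resp, u16 resp_len)
{
	const u8 *valid = resp + resp_len - 1;
	int i;

	for (i = 0; i < 5; i++) {
		dma_rmb();	/* order this read against the DMA write */
		if (*valid)
			return true;
		udelay(1);
	}
	return false;
}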
compatibility. Valid bit in an older spec + * may become a new field in a newer spec. We must make sure that + * a new field not implemented by old spec will read zero. + */ + *valid = 0; rc = le16_to_cpu(resp->error_code); if (rc && !silent) netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", @@ -3577,9 +3633,13 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) FUNC_DRV_RGTR_REQ_ENABLES_VER); req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); - req.ver_maj = DRV_VER_MAJ; - req.ver_min = DRV_VER_MIN; - req.ver_upd = DRV_VER_UPD; + req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE); + req.ver_maj_8b = DRV_VER_MAJ; + req.ver_min_8b = DRV_VER_MIN; + req.ver_upd_8b = DRV_VER_UPD; + req.ver_maj = cpu_to_le16(DRV_VER_MAJ); + req.ver_min = cpu_to_le16(DRV_VER_MIN); + req.ver_upd = cpu_to_le16(DRV_VER_UPD); if (BNXT_PF(bp)) { u32 data[8]; @@ -3847,6 +3907,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_tpa_cfg_input req = {0}; + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); if (tpa_flags) { @@ -3995,6 +4058,13 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) return rc; } +static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) +{ + if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) + return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; + return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; +} + int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) { unsigned int ring = 0, grp_idx; @@ -4050,8 +4120,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) - req.flags |= - cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE); + req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } @@ -4132,9 +4201,13 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { - if (resp->flags & - cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) + u32 flags = le32_to_cpu(resp->flags); + + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP) bp->flags |= BNXT_FLAG_NEW_RSS_CAP; + if (flags & + VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) + bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; } mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -4201,12 +4274,12 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) static int hwrm_ring_alloc_send_msg(struct bnxt *bp, struct bnxt_ring_struct *ring, - u32 ring_type, u32 map_index, - u32 stats_ctx_id) + u32 ring_type, u32 map_index) { int rc = 0, err = 0; struct hwrm_ring_alloc_input req = {0}; struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_ring_grp_info *grp_info; u16 ring_id; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); @@ -4228,10 +4301,10 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, case HWRM_RING_ALLOC_TX: req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; /* Association of transmit ring with completion ring */ - req.cmpl_ring_id = - cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id); + grp_info = &bp->grp_info[ring->grp_idx]; + req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); req.length = cpu_to_le32(bp->tx_ring_mask + 1); - 
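The *valid = 0 added above is a forward-compatibility idiom: the response buffer is reused across commands, and the offset holding the valid byte under one spec revision may be a real field under a newer one, so the driver scrubs it after consuming a response to guarantee that a field the old firmware never wrote reads back as zero. Sketched, with invented names:

/* Consume a response, then clear the trailing valid byte so later
 * spec growth cannot expose stale data on the next command.
 */
static void demo_resp_consume(u8 *resp, u16 resp_len)
{
	/* ... copy out the fields this driver understands ... */

	resp[resp_len - 1] = 0;
}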
req.stat_ctx_id = cpu_to_le32(stats_ctx_id); + req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); req.queue_id = cpu_to_le16(ring->queue_id); break; case HWRM_RING_ALLOC_RX: @@ -4318,10 +4391,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + u32 map_idx = ring->map_idx; - cpr->cp_doorbell = bp->bar1 + i * 0x80; - rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, - INVALID_STATS_CTX_ID); + cpr->cp_doorbell = bp->bar1 + map_idx * 0x80; + rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, + map_idx); if (rc) goto err_out; BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); @@ -4337,11 +4411,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) for (i = 0; i < bp->tx_nr_rings; i++) { struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring = &txr->tx_ring_struct; - u32 map_idx = txr->bnapi->index; - u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx; + u32 map_idx = i; rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, - map_idx, fw_stats_ctx); + map_idx); if (rc) goto err_out; txr->tx_doorbell = bp->bar1 + map_idx * 0x80; @@ -4353,7 +4426,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) u32 map_idx = rxr->bnapi->index; rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, - map_idx, INVALID_STATS_CTX_ID); + map_idx); if (rc) goto err_out; rxr->rx_doorbell = bp->bar1 + map_idx * 0x80; @@ -4366,13 +4439,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; - u32 grp_idx = rxr->bnapi->index; + u32 grp_idx = ring->grp_idx; u32 map_idx = grp_idx + bp->rx_nr_rings; rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_AGG, - map_idx, - INVALID_STATS_CTX_ID); + map_idx); if (rc) goto err_out; @@ -4558,18 +4630,17 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) return rc; } -static int -bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) +static void +__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, + int tx_rings, int rx_rings, int ring_grps, + int cp_rings, int vnics) { - struct hwrm_func_cfg_input req = {0}; u32 enables = 0; - int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); + bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); + req->fid = cpu_to_le16(0xffff); enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; - req.num_tx_rings = cpu_to_le16(tx_rings); + req->num_tx_rings = cpu_to_le16(tx_rings); if (bp->flags & BNXT_FLAG_NEW_RM) { enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | @@ -4578,16 +4649,53 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; - req.num_rx_rings = cpu_to_le16(rx_rings); - req.num_hw_ring_grps = cpu_to_le16(ring_grps); - req.num_cmpl_rings = cpu_to_le16(cp_rings); - req.num_stat_ctxs = req.num_cmpl_rings; - req.num_vnics = cpu_to_le16(vnics); + req->num_rx_rings = cpu_to_le16(rx_rings); + req->num_hw_ring_grps = cpu_to_le16(ring_grps); + req->num_cmpl_rings = cpu_to_le16(cp_rings); + req->num_stat_ctxs = req->num_cmpl_rings; + req->num_vnics = cpu_to_le16(vnics); } - if (!enables) + req->enables = cpu_to_le32(enables); +} + +static void +__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, + struct hwrm_func_vf_cfg_input *req, int tx_rings, + int rx_rings, int ring_grps, int cp_rings, + int vnics) +{ + u32 enables = 0; + + bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); + enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; + enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; + enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; + + req->num_tx_rings = cpu_to_le16(tx_rings); + req->num_rx_rings = cpu_to_le16(rx_rings); + req->num_hw_ring_grps = cpu_to_le16(ring_grps); + req->num_cmpl_rings = cpu_to_le16(cp_rings); + req->num_stat_ctxs = req->num_cmpl_rings; + req->num_vnics = cpu_to_le16(vnics); + + req->enables = cpu_to_le32(enables); +} + +static int +bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings, int vnics) +{ + struct hwrm_func_cfg_input req = {0}; + int rc; + + __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, + cp_rings, vnics); + if (!req.enables) return 0; - req.enables = cpu_to_le32(enables); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; @@ -4604,7 +4712,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int vnics) { struct hwrm_func_vf_cfg_input req = {0}; - u32 enables = 0; int rc; if (!(bp->flags & BNXT_FLAG_NEW_RM)) { @@ -4612,22 +4719,8 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return 0; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); - enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; - enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; - enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; - enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; - enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; - - req.num_tx_rings = cpu_to_le16(tx_rings); - req.num_rx_rings = cpu_to_le16(rx_rings); - req.num_hw_ring_grps = cpu_to_le16(ring_grps); - req.num_cmpl_rings = cpu_to_le16(cp_rings); - req.num_stat_ctxs = req.num_cmpl_rings; - req.num_vnics = cpu_to_le16(vnics); - - req.enables = cpu_to_le32(enables); + __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, + cp_rings, vnics); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; @@ -4645,20 +4738,59 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic); } +static int bnxt_cp_rings_in_use(struct bnxt *bp) +{ + int cp = bp->cp_nr_rings; + int ulp_msix, ulp_base; + + ulp_msix = bnxt_get_ulp_msix_num(bp); + if (ulp_msix) { + ulp_base = bnxt_get_ulp_msix_base(bp); + cp += ulp_msix; + if ((ulp_base + ulp_msix) > cp) + cp = ulp_base + ulp_msix; + } + return cp; +} + +static bool bnxt_need_reserve_rings(struct bnxt *bp) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + int cp = bnxt_cp_rings_in_use(bp); + int rx = bp->rx_nr_rings; + int vnic = 1, grp = rx; + + if (bp->hwrm_spec_code < 0x10601) + return false; + + if (hw_resc->resv_tx_rings != bp->tx_nr_rings) + return true; + + if (bp->flags & BNXT_FLAG_RFS) + vnic = rx + 1; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx <<= 1; + if ((bp->flags & BNXT_FLAG_NEW_RM) && + (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || + hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic)) + return true; + return false; +} + static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, bool shared); static int __bnxt_reserve_rings(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + int cp = bnxt_cp_rings_in_use(bp); int tx = bp->tx_nr_rings; int rx = bp->rx_nr_rings; - int cp = bp->cp_nr_rings; int grp, rx_rings, rc; bool sh = false; int vnic = 1; - if (bp->hwrm_spec_code < 0x10601) + if (!bnxt_need_reserve_rings(bp)) return 0; if (bp->flags & BNXT_FLAG_SHARED_RINGS) @@ -4667,14 +4799,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp) vnic = rx + 1; if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; - grp = bp->rx_nr_rings; - if (tx == hw_resc->resv_tx_rings && - (!(bp->flags & BNXT_FLAG_NEW_RM) || - (rx == hw_resc->resv_rx_rings && - grp == hw_resc->resv_hw_ring_grps && - cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics))) - return 0; rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic); if (rc) @@ -4718,64 +4843,26 @@ static int __bnxt_reserve_rings(struct bnxt *bp) return rc; } -static bool bnxt_need_reserve_rings(struct bnxt *bp) -{ - struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - int rx = bp->rx_nr_rings; - int vnic = 1; - - if (bp->hwrm_spec_code < 0x10601) - return false; - - if (hw_resc->resv_tx_rings != bp->tx_nr_rings) - return true; - - if (bp->flags & BNXT_FLAG_RFS) - vnic = rx + 1; - if (bp->flags & BNXT_FLAG_AGG_RINGS) - rx <<= 1; - if ((bp->flags & BNXT_FLAG_NEW_RM) && - (hw_resc->resv_rx_rings != rx || - hw_resc->resv_cp_rings != bp->cp_nr_rings || - hw_resc->resv_vnics != vnic)) - return true; - return false; -} - static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings) + int ring_grps, int cp_rings, int vnics) { struct hwrm_func_vf_cfg_input req = {0}; - u32 flags, enables; + u32 flags; int rc; if (!(bp->flags & BNXT_FLAG_NEW_RM)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); + 
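bnxt_cp_rings_in_use() above folds the ULP MSI-X carve-out into the completion-ring accounting that bnxt_need_reserve_rings() and __bnxt_reserve_rings() now share, while the new __bnxt_hwrm_reserve_{pf,vf}_rings() builders let the reserve and check paths fill one request structure instead of duplicating it. The accounting, condensed with descriptive names:

/* L2 completion rings plus the ULP vector block; if the block starts
 * beyond the combined range, the top of the block sets the count.
 */
static int demo_cp_rings_in_use(int l2_cp, int ulp_base, int ulp_msix)
{
	int cp = l2_cp;

	if (ulp_msix) {
		cp += ulp_msix;
		if (ulp_base + ulp_msix > cp)
			cp = ulp_base + ulp_msix;
	}
	return cp;
}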
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, + cp_rings, vnics); flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; - enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | - FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS | - FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS; req.flags = cpu_to_le32(flags); - req.enables = cpu_to_le32(enables); - req.num_tx_rings = cpu_to_le16(tx_rings); - req.num_rx_rings = cpu_to_le16(rx_rings); - req.num_cmpl_rings = cpu_to_le16(cp_rings); - req.num_hw_ring_grps = cpu_to_le16(ring_grps); - req.num_stat_ctxs = cpu_to_le16(cp_rings); - req.num_vnics = cpu_to_le16(1); - if (bp->flags & BNXT_FLAG_RFS) - req.num_vnics = cpu_to_le16(rx_rings + 1); rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; @@ -4783,38 +4870,23 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, } static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings) + int ring_grps, int cp_rings, int vnics) { struct hwrm_func_cfg_input req = {0}; - u32 flags, enables; + u32 flags; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); + __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, + cp_rings, vnics); flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; - enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS; - req.num_tx_rings = cpu_to_le16(tx_rings); - if (bp->flags & BNXT_FLAG_NEW_RM) { + if (bp->flags & BNXT_FLAG_NEW_RM) flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; - enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | - FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | - FUNC_CFG_REQ_ENABLES_NUM_VNICS; - req.num_rx_rings = cpu_to_le16(rx_rings); - req.num_cmpl_rings = cpu_to_le16(cp_rings); - req.num_hw_ring_grps = cpu_to_le16(ring_grps); - req.num_stat_ctxs = cpu_to_le16(cp_rings); - req.num_vnics = cpu_to_le16(1); - if (bp->flags & BNXT_FLAG_RFS) - req.num_vnics = cpu_to_le16(rx_rings + 1); - } + req.flags = cpu_to_le32(flags); - req.enables = cpu_to_le32(enables); rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; @@ -4822,17 +4894,17 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, } static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings) + int ring_grps, int cp_rings, int vnics) { if (bp->hwrm_spec_code < 0x10801) return 0; if (BNXT_PF(bp)) return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, - ring_grps, cp_rings); + ring_grps, cp_rings, vnics); return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings); + cp_rings, vnics); } static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, @@ -5060,7 +5132,7 @@ func_qcfg_exit: return rc; } -static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp) +int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) { struct hwrm_func_resource_qcaps_output *resp = 
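The check helpers above now reuse the same request builders as the reserve path, adding only the *_ASSETS_TEST flags, which ask the firmware to verify that the requested counts could be satisfied without committing them, effectively a dry run performed before ring-count changes. An excerpt-style sketch of the test-only request (flag names are the real ones from the hunk; surrounding code is elided):

__bnxt_hwrm_reserve_vf_rings(bp, &req, tx, rx, grps, cp, vnics);
req.flags = cpu_to_le32(FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
			FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
			FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
	return -ENOMEM;	/* firmware says the counts would not fit */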
bp->hwrm_cmd_resp_addr; struct hwrm_func_resource_qcaps_input req = {0}; @@ -5077,6 +5149,10 @@ static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp) goto hwrm_func_resc_qcaps_exit; } + hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); + if (!all) + goto hwrm_func_resc_qcaps_exit; + hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); @@ -5183,7 +5259,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) if (rc) return rc; if (bp->hwrm_spec_code >= 0x10803) { - rc = bnxt_hwrm_func_resc_qcaps(bp); + rc = bnxt_hwrm_func_resc_qcaps(bp, true); if (!rc) bp->flags |= BNXT_FLAG_NEW_RM; } @@ -5331,6 +5407,21 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp) return rc; } +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) +{ + struct hwrm_port_qstats_ext_input req = {0}; + struct bnxt_pf_info *pf = &bp->pf; + + if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); + req.port_id = cpu_to_le16(pf->port_id); + req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); + req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) { if (bp->vxlan_port_cnt) { @@ -5423,10 +5514,9 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(0xffff); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); - req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64; + req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; if (size == 128) - req.cache_linesize = - FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128; + req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) @@ -5745,6 +5835,7 @@ static void bnxt_setup_msix(struct bnxt *bp) } for (i = 0; i < bp->cp_nr_rings; i++) { + int map_idx = bnxt_cp_num_to_irq_num(bp, i); char *attr; if (bp->flags & BNXT_FLAG_SHARED_RINGS) @@ -5754,9 +5845,9 @@ static void bnxt_setup_msix(struct bnxt *bp) else attr = "tx"; - snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr, - i); - bp->irq_tbl[i].handler = bnxt_msix; + snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, + attr, i); + bp->irq_tbl[map_idx].handler = bnxt_msix; } } @@ -5817,7 +5908,7 @@ void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) bp->hw_resc.max_cp_rings = max; } -static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) +unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; @@ -5829,12 +5920,44 @@ void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) bp->hw_resc.max_irqs = max_irqs; } +int bnxt_get_avail_msix(struct bnxt *bp, int num) +{ + int max_cp = bnxt_get_max_func_cp_rings(bp); + int max_irq = bnxt_get_max_func_irqs(bp); + int total_req = bp->cp_nr_rings + num; + int max_idx, avail_msix; + + max_idx = min_t(int, bp->total_irqs, max_cp); + avail_msix = max_idx - bp->cp_nr_rings; + if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num) + return avail_msix; + + if (max_irq < total_req) { + num = max_irq - bp->cp_nr_rings; + if (num <= 0) + return 0; + } + return num; +} + +static int bnxt_get_num_msix(struct bnxt *bp) +{ + if (!(bp->flags & 
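bnxt_get_avail_msix() above computes how many spare vectors can be offered to the ULP: the slack between vectors actually allocated and the completion rings in use, further clamped by the per-function IRQ cap when the new resource manager is active. Condensed with descriptive parameter names:

static int demo_avail_msix(int total_irqs, int max_cp, int max_irq,
			   int cp_in_use, int want, bool new_rm)
{
	int avail = min(total_irqs, max_cp) - cp_in_use;

	if (!new_rm || avail >= want)
		return avail;
	if (max_irq < cp_in_use + want) {
		want = max_irq - cp_in_use;
		if (want <= 0)
			return 0;
	}
	return want;
}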
BNXT_FLAG_NEW_RM)) + return bnxt_get_max_func_irqs(bp); + + return bnxt_cp_rings_in_use(bp); +} + static int bnxt_init_msix(struct bnxt *bp) { - int i, total_vecs, rc = 0, min = 1; + int i, total_vecs, max, rc = 0, min = 1, ulp_msix; struct msix_entry *msix_ent; - total_vecs = bnxt_get_max_func_irqs(bp); + total_vecs = bnxt_get_num_msix(bp); + max = bnxt_get_max_func_irqs(bp); + if (total_vecs > max) + total_vecs = max; + msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); if (!msix_ent) return -ENOMEM; @@ -5848,7 +5971,8 @@ static int bnxt_init_msix(struct bnxt *bp) min = 2; total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); - if (total_vecs < 0) { + ulp_msix = bnxt_get_ulp_msix_num(bp); + if (total_vecs < 0 || total_vecs < ulp_msix) { rc = -ENODEV; goto msix_setup_exit; } @@ -5861,11 +5985,10 @@ static int bnxt_init_msix(struct bnxt *bp) bp->total_irqs = total_vecs; /* Trim rings based upon num of vectors allocated */ rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, - total_vecs, min == 1); + total_vecs - ulp_msix, min == 1); if (rc) goto msix_setup_exit; - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; bp->cp_nr_rings = (min == 1) ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; @@ -5897,7 +6020,6 @@ static int bnxt_init_inta(struct bnxt *bp) bp->rx_nr_rings = 1; bp->tx_nr_rings = 1; bp->cp_nr_rings = 1; - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; bp->flags |= BNXT_FLAG_SHARED_RINGS; bp->irq_tbl[0].vector = bp->pdev->irq; return 0; @@ -5927,9 +6049,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp) bp->flags &= ~BNXT_FLAG_USING_MSIX; } -static int bnxt_reserve_rings(struct bnxt *bp) +int bnxt_reserve_rings(struct bnxt *bp) { - int orig_cp = bp->hw_resc.resv_cp_rings; int tcs = netdev_get_num_tc(bp->dev); int rc; @@ -5941,9 +6062,12 @@ static int bnxt_reserve_rings(struct bnxt *bp) netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc); return rc; } - if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) { + if ((bp->flags & BNXT_FLAG_NEW_RM) && + (bnxt_get_num_msix(bp) != bp->total_irqs)) { + bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); rc = bnxt_init_int_mode(bp); + bnxt_ulp_irq_restart(bp, rc); if (rc) return rc; } @@ -5970,7 +6094,9 @@ static void bnxt_free_irq(struct bnxt *bp) return; for (i = 0; i < bp->cp_nr_rings; i++) { - irq = &bp->irq_tbl[i]; + int map_idx = bnxt_cp_num_to_irq_num(bp, i); + + irq = &bp->irq_tbl[map_idx]; if (irq->requested) { if (irq->have_cpumask) { irq_set_affinity_hint(irq->vector, NULL); @@ -5989,14 +6115,25 @@ static int bnxt_request_irq(struct bnxt *bp) int i, j, rc = 0; unsigned long flags = 0; #ifdef CONFIG_RFS_ACCEL - struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; + struct cpu_rmap *rmap; #endif + rc = bnxt_setup_int_mode(bp); + if (rc) { + netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", + rc); + return rc; + } +#ifdef CONFIG_RFS_ACCEL + rmap = bp->dev->rx_cpu_rmap; +#endif if (!(bp->flags & BNXT_FLAG_USING_MSIX)) flags = IRQF_SHARED; for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_irq *irq = &bp->irq_tbl[i]; + int map_idx = bnxt_cp_num_to_irq_num(bp, i); + struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; + #ifdef CONFIG_RFS_ACCEL if (rmap && bp->bnapi[i]->rx_ring) { rc = irq_cpu_rmap_add(rmap, irq->vector); @@ -6716,13 +6853,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) rc = bnxt_reserve_rings(bp); if (rc) return rc; - - rc = bnxt_setup_int_mode(bp); - if (rc) { - netdev_err(bp->dev, 
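Whenever the MSI-X allocation can change size, the hunks above now bracket the interrupt-mode re-init with bnxt_ulp_irq_stop()/bnxt_ulp_irq_restart(), so the RDMA driver releases its vectors before the teardown and redistributes them afterwards (and learns whether the re-init succeeded). The sequence, digested into an illustrative wrapper around the driver's real calls:

static int demo_reinit_int_mode(struct bnxt *bp)
{
	int rc;

	bnxt_ulp_irq_stop(bp);		/* ULP gives back its vectors */
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);	/* ULP re-acquires, told of rc */
	return rc;
}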
"bnxt_setup_int_mode err: %x\n", - rc); - return rc; - } } if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_USING_MSIX)) { @@ -7485,8 +7615,10 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); } - if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) + if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { bnxt_hwrm_port_qstats(bp); + bnxt_hwrm_port_qstats_ext(bp); + } if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { int rc; @@ -7531,7 +7663,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int max_rx, max_tx, tx_sets = 1; int tx_rings_needed; int rx_rings = rx; - int cp, rc; + int cp, vnics, rc; if (tcs) tx_sets = tcs; @@ -7547,10 +7679,15 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, if (max_tx < tx_rings_needed) return -ENOMEM; + vnics = 1; + if (bp->flags & BNXT_FLAG_RFS) + vnics += rx_rings; + if (bp->flags & BNXT_FLAG_AGG_RINGS) rx_rings <<= 1; cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; - return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp); + return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, + vnics); } static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) @@ -8195,6 +8332,7 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_set_vf_rate = bnxt_set_vf_bw, .ndo_set_vf_link_state = bnxt_set_vf_link_state, .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, + .ndo_set_vf_trust = bnxt_set_vf_trust, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = bnxt_poll_controller, @@ -8392,9 +8530,15 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) if (sh) bp->flags |= BNXT_FLAG_SHARED_RINGS; dflt_rings = netif_get_num_default_rss_queues(); - /* Reduce default rings to reduce memory usage on multi-port cards */ - if (bp->port_count > 1) - dflt_rings = min_t(int, dflt_rings, 4); + /* Reduce default rings on multi-port cards so that total default + * rings do not exceed CPU count. + */ + if (bp->port_count > 1) { + int max_rings = + max_t(int, num_online_cpus() / bp->port_count, 1); + + dflt_rings = min_t(int, dflt_rings, max_rings); + } rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); if (rc) return rc; @@ -8433,17 +8577,23 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp) int rc; ASSERT_RTNL(); - if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) - return 0; - bnxt_hwrm_func_qcaps(bp); - __bnxt_close_nic(bp, true, false); + + if (netif_running(bp->dev)) + __bnxt_close_nic(bp, true, false); + + bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); rc = bnxt_init_int_mode(bp); - if (rc) - dev_close(bp->dev); - else - rc = bnxt_open_nic(bp, true, false); + bnxt_ulp_irq_restart(bp, rc); + + if (netif_running(bp->dev)) { + if (rc) + dev_close(bp->dev); + else + rc = bnxt_open_nic(bp, true, false); + } + return rc; } @@ -8664,6 +8814,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; + /* No TC has been set yet and rings may have been trimmed due to + * limited MSIX, so we re-initialize the TX rings per TC. 
+ */ + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bnxt_get_wol_settings(bp); if (bp->flags & BNXT_FLAG_WOL_CAP) device_set_wakeup_enable(&pdev->dev, bp->wol); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 1989c470172c..3d55d3b56865 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -12,11 +12,11 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.9.0" +#define DRV_MODULE_VERSION "1.9.1" #define DRV_VER_MAJ 1 #define DRV_VER_MIN 9 -#define DRV_VER_UPD 0 +#define DRV_VER_UPD 1 #include <linux/interrupt.h> #include <linux/rhashtable.h> @@ -189,6 +189,7 @@ struct rx_cmp_ext { #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4) __le32 rx_cmp_meta_data; + #define RX_CMP_FLAGS2_METADATA_TCI_MASK 0xffff #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000 #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16 @@ -572,6 +573,10 @@ struct bnxt_ring_struct { void **vmem; u16 fw_ring_id; /* Ring id filled by Chimp FW */ + union { + u16 grp_idx; + u16 map_idx; /* Used by cmpl rings */ + }; u8 queue_id; }; @@ -785,6 +790,7 @@ struct bnxt_hw_resc { u16 min_tx_rings; u16 max_tx_rings; u16 resv_tx_rings; + u16 max_tx_sch_inputs; u16 min_rx_rings; u16 max_rx_rings; u16 resv_rx_rings; @@ -814,6 +820,7 @@ struct bnxt_vf_info { #define BNXT_VF_SPOOFCHK 0x2 #define BNXT_VF_LINK_FORCED 0x4 #define BNXT_VF_LINK_UP 0x8 +#define BNXT_VF_TRUST 0x10 u32 func_flags; /* func cfg flags */ u32 min_tx_rate; u32 max_tx_rate; @@ -1150,7 +1157,9 @@ struct bnxt { #define BNXT_FLAG_FW_DCBX_AGENT 0x800000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_DIM 0x2000000 + #define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000 #define BNXT_FLAG_NEW_RM 0x8000000 + #define BNXT_FLAG_PORT_STATS_EXT 0x10000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ @@ -1270,8 +1279,10 @@ struct bnxt { struct rx_port_stats *hw_rx_port_stats; struct tx_port_stats *hw_tx_port_stats; + struct rx_port_stats_ext *hw_rx_port_stats_ext; dma_addr_t hw_rx_port_stats_map; dma_addr_t hw_tx_port_stats_map; + dma_addr_t hw_rx_port_stats_ext_map; int hw_port_stats_size; u16 hwrm_max_req_len; @@ -1382,6 +1393,9 @@ struct bnxt { ((offsetof(struct tx_port_stats, counter) + \ sizeof(struct rx_port_stats) + 512) / 8) +#define BNXT_RX_STATS_EXT_OFFSET(counter) \ + (offsetof(struct rx_port_stats_ext, counter) / 8) + #define I2C_DEV_ADDR_A0 0xa0 #define I2C_DEV_ADDR_A2 0xa2 #define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e @@ -1401,6 +1415,15 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); } +/* For TX and RX ring doorbells with no ordering guarantee*/ +static inline void bnxt_db_write_relaxed(struct bnxt *bp, void __iomem *db, + u32 val) +{ + writel_relaxed(val, db); + if (bp->flags & BNXT_FLAG_DOUBLE_DB) + writel_relaxed(val, db); +} + /* For TX and RX ring doorbells */ static inline void bnxt_db_write(struct bnxt *bp, void __iomem *db, u32 val) { @@ -1431,13 +1454,17 @@ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); +unsigned int bnxt_get_max_func_irqs(struct bnxt *bp); void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max); +int 
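The new RX_CMP_FLAGS2_METADATA_TCI_MASK above pairs with the earlier bnxt.c hunks that switched bnxt_tpa_end() and bnxt_rx_pkt() from the 12-bit VID mask to the full 16-bit TCI, so the PCP priority and DEI bits now survive into the accelerated VLAN tag instead of being masked off. For reference, the standard 802.1Q TCI layout this assumes (not restated by the patch):

/* TCI: | 15..13 PCP | 12 DEI | 11..0 VID | */
u16 tci = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;	/* 0xffff */
u16 vid = tci & RX_CMP_FLAGS2_METADATA_VID_MASK;	/* 0x0fff */
u8 pcp = tci >> 13;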
bnxt_get_avail_msix(struct bnxt *bp, int num); +int bnxt_reserve_rings(struct bnxt *bp); void bnxt_tx_disable(struct bnxt *bp); void bnxt_tx_enable(struct bnxt *bp); int bnxt_hwrm_set_pause(struct bnxt *); int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp); int bnxt_hwrm_free_wol_fltr(struct bnxt *bp); +int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all); int bnxt_hwrm_fw_set_time(struct bnxt *); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h index d2e0af960bf5..69efde785f23 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2017 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -34,7 +34,8 @@ struct bnxt_cos2bw_cfg { }; #define BNXT_LLQ(q_profile) \ - ((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS) + ((q_profile) == \ + QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE) #define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1801582076be..8d8ccd67e0e2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -137,6 +137,9 @@ reset_coalesce: #define BNXT_TX_STATS_ENTRY(counter) \ { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) } +#define BNXT_RX_STATS_EXT_ENTRY(counter) \ + { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) } + static const struct { long offset; char string[ETH_GSTRING_LEN]; @@ -181,6 +184,8 @@ static const struct { BNXT_RX_STATS_ENTRY(rx_bytes), BNXT_RX_STATS_ENTRY(rx_runt_bytes), BNXT_RX_STATS_ENTRY(rx_runt_frames), + BNXT_RX_STATS_ENTRY(rx_stat_discard), + BNXT_RX_STATS_ENTRY(rx_stat_err), BNXT_TX_STATS_ENTRY(tx_64b_frames), BNXT_TX_STATS_ENTRY(tx_65b_127b_frames), @@ -216,9 +221,24 @@ static const struct { BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration), BNXT_TX_STATS_ENTRY(tx_total_collisions), BNXT_TX_STATS_ENTRY(tx_bytes), + BNXT_TX_STATS_ENTRY(tx_xthol_frames), + BNXT_TX_STATS_ENTRY(tx_stat_discard), + BNXT_TX_STATS_ENTRY(tx_stat_error), +}; + +static const struct { + long offset; + char string[ETH_GSTRING_LEN]; +} bnxt_port_stats_ext_arr[] = { + BNXT_RX_STATS_EXT_ENTRY(link_down_events), + BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events), + BNXT_RX_STATS_EXT_ENTRY(resume_pause_events), + BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events), + BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), }; #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) +#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr) static int bnxt_get_num_stats(struct bnxt *bp) { @@ -227,6 +247,9 @@ static int bnxt_get_num_stats(struct bnxt *bp) if (bp->flags & BNXT_FLAG_PORT_STATS) num_stats += BNXT_NUM_PORT_STATS; + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) + num_stats += BNXT_NUM_PORT_STATS_EXT; + return num_stats; } @@ -274,6 +297,14 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, bnxt_port_stats_arr[i].offset)); } } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + __le64 *port_stats_ext = 
(__le64 *)bp->hw_rx_port_stats_ext; + + for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++, j++) { + buf[j] = le64_to_cpu(*(port_stats_ext + + bnxt_port_stats_ext_arr[i].offset)); + } + } } static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) @@ -334,6 +365,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) buf += ETH_GSTRING_LEN; } } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++) { + strcpy(buf, bnxt_port_stats_ext_arr[i].string); + buf += ETH_GSTRING_LEN; + } + } break; case ETH_SS_TEST: if (bp->num_tests) @@ -388,15 +425,26 @@ static void bnxt_get_channels(struct net_device *dev, struct ethtool_channels *channel) { struct bnxt *bp = netdev_priv(dev); + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int max_rx_rings, max_tx_rings, tcs; + int max_tx_sch_inputs; + + /* Get the most up-to-date max_tx_sch_inputs. */ + if (bp->flags & BNXT_FLAG_NEW_RM) + bnxt_hwrm_func_resc_qcaps(bp, false); + max_tx_sch_inputs = hw_resc->max_tx_sch_inputs; bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true); + if (max_tx_sch_inputs) + max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); channel->max_combined = min_t(int, max_rx_rings, max_tx_rings); if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) { max_rx_rings = 0; max_tx_rings = 0; } + if (max_tx_sch_inputs) + max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); tcs = netdev_get_num_tc(dev); if (tcs > 1) @@ -2535,16 +2583,20 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) return -EOPNOTSUPP; rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP); - if (!rc) + if (!rc) { netdev_info(dev, "Reset request successful. Reload driver to complete reset\n"); + *flags = 0; + } } else if (*flags == ETH_RESET_AP) { /* This feature is not supported in older firmware versions */ if (bp->hwrm_spec_code < 0x10803) return -EOPNOTSUPP; rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP); - if (!rc) + if (!rc) { netdev_info(dev, "Reset Application Processor request successful.\n"); + *flags = 0; + } } else { rc = -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 82d17f8cc0db..0fe0ea8dce6c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -188,6 +188,7 @@ struct cmd_nums { #define HWRM_STAT_CTX_FREE 0xb1UL #define HWRM_STAT_CTX_QUERY 0xb2UL #define HWRM_STAT_CTX_CLR_STATS 0xb3UL + #define HWRM_PORT_QSTATS_EXT 0xb4UL #define HWRM_FW_RESET 0xc0UL #define HWRM_FW_QSTATUS 0xc1UL #define HWRM_FW_SET_TIME 0xc8UL @@ -199,6 +200,7 @@ struct cmd_nums { #define HWRM_REJECT_FWD_RESP 0xd1UL #define HWRM_FWD_RESP 0xd2UL #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL + #define HWRM_OEM_CMD 0xd4UL #define HWRM_TEMP_MONITOR_QUERY 0xe0UL #define HWRM_WOL_FILTER_ALLOC 0xf0UL #define HWRM_WOL_FILTER_FREE 0xf1UL @@ -271,6 +273,7 @@ struct cmd_nums { #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL + #define HWRM_PCIE_QSTATS 0x204UL #define HWRM_DBG_READ_DIRECT 0xff10UL #define HWRM_DBG_READ_INDIRECT 0xff11UL #define HWRM_DBG_WRITE_DIRECT 0xff12UL @@ -341,9 +344,9 @@ struct hwrm_err_output { #define HWRM_RESP_VALID_KEY 1 #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 9 -#define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 0 -#define HWRM_VERSION_STR "1.9.0.0" +#define HWRM_VERSION_UPDATE 1 +#define HWRM_VERSION_RSVD 15 +#define HWRM_VERSION_STR 
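Clearing *flags on success in bnxt_reset() above follows the ethtool ->reset() contract: the driver clears the bits for the reset types it actually performed, and any bits still set on return are reported as not done. A sketch against an assumed minimal driver:

static int demo_reset(struct net_device *dev, u32 *flags)
{
	if (*flags == ETH_RESET_ALL) {
		/* ... issue the chip-level firmware reset ... */
		*flags = 0;	/* everything requested was handled */
		return 0;
	}
	return -EOPNOTSUPP;	/* leave unhandled bits set */
}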
"1.9.1.15" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -616,30 +619,6 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change { #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL }; -/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */ -struct hwrm_async_event_cmpl_pf_drvr_unload { - __le16 type; - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT - __le16 event_id; - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16 -}; - /* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ struct hwrm_async_event_cmpl_vf_cfg_change { __le16 type; @@ -854,6 +833,7 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL + #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -966,10 +946,14 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA - u8 cache_linesize; - #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL - #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL - #define FUNC_QCFG_RESP_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 + u8 options; + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xfcUL + #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 2 __le16 alloc_vfs; __le32 alloc_mcast_filters; __le32 alloc_hw_ring_grps; @@ -1124,10 +1108,14 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA - u8 cache_linesize; - #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL - #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL - #define FUNC_CFG_REQ_CACHE_LINESIZE_LAST FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 + u8 options; + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + 
#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xfcUL + #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 2 __le16 num_mcast_filters; }; @@ -1248,7 +1236,7 @@ struct hwrm_func_vf_vnic_ids_query_output { u8 valid; }; -/* hwrm_func_drv_rgtr_input (size:832b/104B) */ +/* hwrm_func_drv_rgtr_input (size:896b/112B) */ struct hwrm_func_drv_rgtr_input { __le16 req_type; __le16 cmpl_ring; @@ -1256,8 +1244,9 @@ struct hwrm_func_drv_rgtr_input { __le16 target_id; __le64 resp_addr; __le32 flags; - #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL - #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL + #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL @@ -1277,14 +1266,18 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI - u8 ver_maj; - u8 ver_min; - u8 ver_upd; + u8 ver_maj_8b; + u8 ver_min_8b; + u8 ver_upd_8b; u8 unused_0[3]; __le32 timestamp; u8 unused_1[4]; __le32 vf_req_fwd[8]; __le32 async_event_fwd[8]; + __le16 ver_maj; + __le16 ver_min; + __le16 ver_upd; + __le16 ver_patch; }; /* hwrm_func_drv_rgtr_output (size:128b/16B) */ @@ -1379,7 +1372,7 @@ struct hwrm_func_drv_qver_input { u8 unused_0[2]; }; -/* hwrm_func_drv_qver_output (size:128b/16B) */ +/* hwrm_func_drv_qver_output (size:192b/24B) */ struct hwrm_func_drv_qver_output { __le16 error_code; __le16 req_type; @@ -1398,11 +1391,15 @@ struct hwrm_func_drv_qver_output { #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI - u8 ver_maj; - u8 ver_min; - u8 ver_upd; + u8 ver_maj_8b; + u8 ver_min_8b; + u8 ver_upd_8b; u8 unused_0[2]; u8 valid; + __le16 ver_maj; + __le16 ver_min; + __le16 ver_upd; + __le16 ver_patch; }; /* hwrm_func_resource_qcaps_input (size:192b/24B) */ @@ -1416,7 +1413,7 @@ struct hwrm_func_resource_qcaps_input { u8 unused_0[6]; }; -/* hwrm_func_resource_qcaps_output (size:384b/48B) */ +/* hwrm_func_resource_qcaps_output (size:448b/56B) */ struct hwrm_func_resource_qcaps_output { __le16 error_code; __le16 req_type; @@ -1425,9 +1422,10 @@ struct hwrm_func_resource_qcaps_output { __le16 max_vfs; __le16 max_msix; __le16 vf_reservation_strategy; - #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL - #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL - #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC __le16 min_rsscos_ctx; __le16 max_rsscos_ctx; __le16 min_cmpl_rings; @@ -1444,7 +1442,8 @@ struct hwrm_func_resource_qcaps_output { __le16 max_stat_ctx; __le16 min_hw_ring_grps; __le16 max_hw_ring_grps; - u8 unused_0; + __le16 max_tx_scheduler_inputs; + u8 unused_0[7]; u8 valid; }; @@ -1627,6 +1626,16 @@ struct 
hwrm_port_phy_cfg_output { u8 valid; }; +/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */ +struct hwrm_port_phy_cfg_cmd_err { + u8 code; + #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL + #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL + #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY + u8 unused_0[7]; +}; + /* hwrm_port_phy_qcfg_input (size:192b/24B) */ struct hwrm_port_phy_qcfg_input { __le16 req_type; @@ -2030,6 +2039,33 @@ struct hwrm_port_qstats_output { u8 valid; }; +/* hwrm_port_qstats_ext_input (size:320b/40B) */ +struct hwrm_port_qstats_ext_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 tx_stat_size; + __le16 rx_stat_size; + u8 unused_0[2]; + __le64 tx_stat_host_addr; + __le64 rx_stat_host_addr; +}; + +/* hwrm_port_qstats_ext_output (size:128b/16B) */ +struct hwrm_port_qstats_ext_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tx_stat_size; + __le16 rx_stat_size; + u8 unused_0[3]; + u8 valid; +}; + /* hwrm_port_lpbk_qstats_input (size:128b/16B) */ struct hwrm_port_lpbk_qstats_input { __le16 req_type; @@ -2552,7 +2588,11 @@ struct hwrm_queue_qportcfg_input { #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX __le16 port_id; - u8 unused_0[2]; + u8 drv_qmap_cap; + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED + u8 unused_0; }; /* hwrm_queue_qportcfg_output (size:256b/32B) */ @@ -2571,52 +2611,68 @@ struct hwrm_queue_qportcfg_output { u8 queue_cos2bw_cfg_allowed; u8 queue_id0; u8 queue_id0_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN u8 queue_id1; u8 queue_id1_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN u8 queue_id2; u8 
queue_id2_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN u8 queue_id3; u8 queue_id3_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN u8 queue_id4; u8 queue_id4_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN u8 queue_id5; u8 queue_id5_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN u8 queue_id6; u8 queue_id6_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL - #define 
QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN u8 queue_id7; u8 queue_id7_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN u8 valid; }; @@ -5180,6 +5236,29 @@ struct hwrm_stat_ctx_clr_stats_output { u8 valid; }; +/* hwrm_pcie_qstats_input (size:256b/32B) */ +struct hwrm_pcie_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 pcie_stat_size; + u8 unused_0[6]; + __le64 pcie_stat_host_addr; +}; + +/* hwrm_pcie_qstats_output (size:128b/16B) */ +struct hwrm_pcie_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 pcie_stat_size; + u8 unused_0[5]; + u8 valid; +}; + /* tx_port_stats (size:3264b/408B) */ struct tx_port_stats { __le64 tx_64b_frames; @@ -5305,6 +5384,30 @@ struct rx_port_stats { __le64 rx_stat_err; }; +/* rx_port_stats_ext (size:320b/40B) */ +struct rx_port_stats_ext { + __le64 link_down_events; + __le64 continuous_pause_events; + __le64 resume_pause_events; + __le64 continuous_roce_pause_events; + __le64 resume_roce_pause_events; +}; + +/* pcie_ctx_hw_stats (size:768b/96B) */ +struct pcie_ctx_hw_stats { + __le64 pcie_pl_signal_integrity; + __le64 pcie_dl_signal_integrity; + __le64 pcie_tl_signal_integrity; + __le64 pcie_link_integrity; + __le64 pcie_tx_traffic_rate; + __le64 pcie_rx_traffic_rate; + __le64 pcie_tx_dllp_statistics; + __le64 pcie_rx_dllp_statistics; + __le64 pcie_equalization_time; + __le32 pcie_ltssm_histogram[4]; + __le64 pcie_recovery_histogram; +}; + /* hwrm_fw_reset_input (size:192b/24B) */ struct hwrm_fw_reset_input { __le16 req_type; @@ -5313,14 +5416,15 @@ struct hwrm_fw_reset_input { __le16 target_id; __le64 resp_addr; u8 embedded_proc_type; - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL - 
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT u8 selfrst_status; #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL @@ -6253,8 +6357,7 @@ struct hwrm_selftest_exec_input { #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL - u8 pcie_lane_num; - u8 unused_0[6]; + u8 unused_0[7]; }; /* hwrm_selftest_exec_output (size:128b/16B) */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index d87faad901fe..f952963d594e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2017 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -121,6 +121,23 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) return rc; } +int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + + if (bnxt_vf_ndo_prep(bp, vf_id)) + return -EINVAL; + + vf = &bp->pf.vf[vf_id]; + if (trusted) + vf->flags |= BNXT_VF_TRUST; + else + vf->flags &= ~BNXT_VF_TRUST; + + return 0; +} + int bnxt_get_vf_config(struct net_device *dev, int vf_id, struct ifla_vf_info *ivi) { @@ -147,6 +164,7 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id, else ivi->qos = 0; ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK); + ivi->trusted = !!(vf->flags & BNXT_VF_TRUST); if (!(vf->flags & BNXT_VF_LINK_FORCED)) ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; else if (vf->flags & BNXT_VF_LINK_UP) @@ -492,18 +510,16 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) } mutex_unlock(&bp->hwrm_cmd_lock); if (pf->active_vfs) { - u16 n = 1; + u16 n = pf->active_vfs; - if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL) - n = pf->active_vfs; - - hw_resc->max_tx_rings -= vf_tx_rings * n; - hw_resc->max_rx_rings -= vf_rx_rings * n; - hw_resc->max_hw_ring_grps -= vf_ring_grps * n; - hw_resc->max_cp_rings -= vf_cp_rings * n; + hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n; + hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n; + hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) * + n; + hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n; hw_resc->max_rsscos_ctxs -= pf->active_vfs; - hw_resc->max_stat_ctxs -= vf_stat_ctx * n; - hw_resc->max_vnics -= vf_vnics * n; + hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n; + hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n; rc = pf->active_vfs; } @@ -886,18 +902,19 @@ exec_fwd_resp_exit: return rc; } 
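The hunks above introduce a per-VF "trusted" flag for the bnxt PF driver: bnxt_set_vf_trust() records BNXT_VF_TRUST in the VF's flags and bnxt_get_vf_config() reports it back, presumably wired to the netdev's ndo_set_vf_trust hook elsewhere in this series (from userspace this is typically toggled with `ip link set <pf> vf <n> trust on`). The hunks that follow use the flag to relax the VF MAC-address policy. As a rough, self-contained userspace model of that policy — the struct and function names here are illustrative, not the driver's — a MAC change requested by a VF is honored only when the VF is trusted or when the PF never assigned it a MAC:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the driver's per-VF state (cf. bnxt_vf_info). */
struct vf_state {
	bool trusted;            /* set/cleared by the PF, cf. BNXT_VF_TRUST */
	unsigned char mac[6];    /* PF-assigned MAC; all zeros if unassigned */
};

static bool mac_is_unassigned(const unsigned char *mac)
{
	static const unsigned char zero[6];

	return memcmp(mac, zero, sizeof(zero)) == 0;
}

/* Mirrors the policy in bnxt_vf_configure_mac()/bnxt_vf_validate_set_mac():
 * a trusted VF may always pick its own MAC; an untrusted VF only when the
 * PF has not assigned one.
 */
static bool vf_may_change_mac(const struct vf_state *vf)
{
	return vf->trusted || mac_is_unassigned(vf->mac);
}

int main(void)
{
	struct vf_state vf = {
		.trusted = false,
		.mac = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	printf("untrusted, PF MAC assigned: %d\n", vf_may_change_mac(&vf));
	vf.trusted = true;
	printf("trusted,   PF MAC assigned: %d\n", vf_may_change_mac(&vf));
	return 0;
}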
-static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf) +static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf) { u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input); struct hwrm_func_vf_cfg_input *req = (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr; - /* Only allow VF to set a valid MAC address if the PF assigned MAC - * address is zero + /* Allow VF to set a valid MAC address, if trust is set to on or + * if the PF assigned MAC address is zero */ if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) { if (is_valid_ether_addr(req->dflt_mac_addr) && - !is_valid_ether_addr(vf->mac_addr)) { + ((vf->flags & BNXT_VF_TRUST) || + (!is_valid_ether_addr(vf->mac_addr)))) { ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr); return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); } @@ -913,11 +930,17 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr; bool mac_ok = false; - /* VF MAC address must first match PF MAC address, if it is valid. + if (!is_valid_ether_addr((const u8 *)req->l2_addr)) + return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); + + /* Allow VF to set a valid MAC address, if trust is set to on. + * Or VF MAC address must first match MAC address in PF's context. * Otherwise, it must match the VF MAC address if firmware spec >= * 1.2.2 */ - if (is_valid_ether_addr(vf->mac_addr)) { + if (vf->flags & BNXT_VF_TRUST) { + mac_ok = true; + } else if (is_valid_ether_addr(vf->mac_addr)) { if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) mac_ok = true; } else if (is_valid_ether_addr(vf->vf_mac_addr)) { @@ -951,7 +974,9 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, sizeof(phy_qcfg_resp)); mutex_unlock(&bp->hwrm_cmd_lock); + phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp)); phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; + phy_qcfg_resp.valid = 1; if (vf->flags & BNXT_VF_LINK_UP) { /* if physical link is down, force link up on VF */ @@ -993,7 +1018,7 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) switch (req_type) { case HWRM_FUNC_VF_CFG: - rc = bnxt_vf_store_mac(bp, vf); + rc = bnxt_vf_configure_mac(bp, vf); break; case HWRM_CFA_L2_FILTER_ALLOC: rc = bnxt_vf_validate_set_mac(bp, vf); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index dbc8d977fc5a..d10f6f6c7860 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. 
* * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2017 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -17,6 +17,7 @@ int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16); int bnxt_set_vf_bw(struct net_device *, int, int, int); int bnxt_set_vf_link_state(struct net_device *, int, int); int bnxt_set_vf_spoofchk(struct net_device *, int, bool); +int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust); int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); void bnxt_sriov_disable(struct bnxt *); void bnxt_hwrm_exec_fwd_req(struct bnxt *); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index fbe6e208e17b..65c2cee35766 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -349,6 +349,9 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle) if (rc) netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d", __func__, flow_handle, rc); + + if (rc) + rc = -EIO; return rc; } @@ -484,13 +487,15 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, req.action_flags = cpu_to_le16(action_flags); mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) *flow_handle = resp->flow_handle; - mutex_unlock(&bp->hwrm_cmd_lock); + if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) + rc = -ENOSPC; + else if (rc) + rc = -EIO; return rc; } @@ -561,6 +566,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); mutex_unlock(&bp->hwrm_cmd_lock); + if (rc) + rc = -EIO; return rc; } @@ -576,6 +583,9 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp, rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + + if (rc) + rc = -EIO; return rc; } @@ -624,6 +634,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); mutex_unlock(&bp->hwrm_cmd_lock); + if (rc) + rc = -EIO; return rc; } @@ -639,6 +651,9 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp, rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + + if (rc) + rc = -EIO; return rc; } @@ -1269,11 +1284,8 @@ static int bnxt_tc_del_flow(struct bnxt *bp, flow_node = rhashtable_lookup_fast(&tc_info->flow_table, &tc_flow_cmd->cookie, tc_info->flow_ht_params); - if (!flow_node) { - netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx", - tc_flow_cmd->cookie); + if (!flow_node) return -EINVAL; - } return __bnxt_tc_del_flow(bp, flow_node); } @@ -1290,11 +1302,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, flow_node = rhashtable_lookup_fast(&tc_info->flow_table, &tc_flow_cmd->cookie, tc_info->flow_ht_params); - if (!flow_node) { - netdev_info(bp->dev, "Error: no flow_node for cookie %lx", - tc_flow_cmd->cookie); + if (!flow_node) return -1; - } flow = &flow_node->flow; curr_stats = &flow->stats; @@ -1344,8 +1353,10 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, } else { netdev_info(bp->dev, "error rc=%d", rc); } - mutex_unlock(&bp->hwrm_cmd_lock); + + if (rc) + rc = -EIO; return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 997e10e8b863..347e4f946eb2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. * - * Copyright (c) 2016 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -101,13 +101,28 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id) return 0; } +static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent) +{ + struct bnxt_en_dev *edev = bp->edev; + int num_msix, idx, i; + + num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested; + idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base; + for (i = 0; i < num_msix; i++) { + ent[i].vector = bp->irq_tbl[idx + i].vector; + ent[i].ring_idx = idx + i; + ent[i].db_offset = (idx + i) * 0x80; + } +} + static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, struct bnxt_msix_entry *ent, int num_msix) { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); int max_idx, max_cp_rings; - int avail_msix, i, idx; + int avail_msix, idx; + int rc = 0; ASSERT_RTNL(); if (ulp_id != BNXT_ROCE_ULP) @@ -116,23 +131,47 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, if (!(bp->flags & BNXT_FLAG_USING_MSIX)) return -ENODEV; + if (edev->ulp_tbl[ulp_id].msix_requested) + return -EAGAIN; + max_cp_rings = bnxt_get_max_func_cp_rings(bp); - max_idx = min_t(int, bp->total_irqs, max_cp_rings); - avail_msix = max_idx - bp->cp_nr_rings; + avail_msix = bnxt_get_avail_msix(bp, num_msix); if (!avail_msix) return -ENOMEM; if (avail_msix > num_msix) avail_msix = num_msix; - idx = max_idx - avail_msix; - for (i = 0; i < avail_msix; i++) { - ent[i].vector = bp->irq_tbl[idx + i].vector; - ent[i].ring_idx = idx + i; - ent[i].db_offset = (idx + i) * 0x80; + if (bp->flags & BNXT_FLAG_NEW_RM) { + idx = bp->cp_nr_rings; + } else { + max_idx = min_t(int, bp->total_irqs, max_cp_rings); + idx = max_idx - avail_msix; } - bnxt_set_max_func_irqs(bp, max_idx - avail_msix); - bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); + edev->ulp_tbl[ulp_id].msix_base = idx; edev->ulp_tbl[ulp_id].msix_requested = avail_msix; + if (bp->total_irqs < (idx + avail_msix)) { + if (netif_running(dev)) { + bnxt_close_nic(bp, true, false); + rc = bnxt_open_nic(bp, true, false); + } else { + rc = bnxt_reserve_rings(bp); + } + } + if (rc) { + edev->ulp_tbl[ulp_id].msix_requested = 0; + return -EAGAIN; + } + + if (bp->flags & BNXT_FLAG_NEW_RM) { + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings; + edev->ulp_tbl[ulp_id].msix_requested = avail_msix; + } + bnxt_fill_msix_vecs(bp, ent); + bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix); + bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); + edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; return avail_msix; } @@ -146,11 +185,40 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) if (ulp_id != BNXT_ROCE_ULP) return -EINVAL; + if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) + return 0; + max_cp_rings = bnxt_get_max_func_cp_rings(bp); msix_requested = edev->ulp_tbl[ulp_id].msix_requested; bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested); edev->ulp_tbl[ulp_id].msix_requested = 0; - bnxt_set_max_func_irqs(bp, bp->total_irqs); + bnxt_set_max_func_irqs(bp, 
bnxt_get_max_func_irqs(bp) + msix_requested); + edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; + if (netif_running(dev)) { + bnxt_close_nic(bp, true, false); + bnxt_open_nic(bp, true, false); + } + return 0; +} + +int bnxt_get_ulp_msix_num(struct bnxt *bp) +{ + if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) { + struct bnxt_en_dev *edev = bp->edev; + + return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested; + } + return 0; +} + +int bnxt_get_ulp_msix_base(struct bnxt *bp) +{ + if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) { + struct bnxt_en_dev *edev = bp->edev; + + if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested) + return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base; + } return 0; } @@ -287,6 +355,58 @@ void bnxt_ulp_shutdown(struct bnxt *bp) } } +void bnxt_ulp_irq_stop(struct bnxt *bp) +{ + struct bnxt_en_dev *edev = bp->edev; + struct bnxt_ulp_ops *ops; + + if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) + return; + + if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) { + struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP]; + + if (!ulp->msix_requested) + return; + + ops = rtnl_dereference(ulp->ulp_ops); + if (!ops || !ops->ulp_irq_stop) + return; + ops->ulp_irq_stop(ulp->handle); + } +} + +void bnxt_ulp_irq_restart(struct bnxt *bp, int err) +{ + struct bnxt_en_dev *edev = bp->edev; + struct bnxt_ulp_ops *ops; + + if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) + return; + + if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) { + struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP]; + struct bnxt_msix_entry *ent = NULL; + + if (!ulp->msix_requested) + return; + + ops = rtnl_dereference(ulp->ulp_ops); + if (!ops || !ops->ulp_irq_restart) + return; + + if (!err) { + ent = kcalloc(ulp->msix_requested, sizeof(*ent), + GFP_KERNEL); + if (!ent) + return; + bnxt_fill_msix_vecs(bp, ent); + } + ops->ulp_irq_restart(ulp->handle, ent); + kfree(ent); + } +} + void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl) { u16 event_id = le16_to_cpu(cmpl->event_id); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index d2471067dc37..df48ac71729f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. 
* - * Copyright (c) 2016 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -20,6 +20,12 @@ struct hwrm_async_event_cmpl; struct bnxt; +struct bnxt_msix_entry { + u32 vector; + u32 ring_idx; + u32 db_offset; +}; + struct bnxt_ulp_ops { /* async_notifier() cannot sleep (in BH context) */ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *); @@ -27,12 +33,8 @@ struct bnxt_ulp_ops { void (*ulp_start)(void *); void (*ulp_sriov_config)(void *, int); void (*ulp_shutdown)(void *); -}; - -struct bnxt_msix_entry { - u32 vector; - u32 ring_idx; - u32 db_offset; + void (*ulp_irq_stop)(void *); + void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *); }; struct bnxt_fw_msg { @@ -49,6 +51,7 @@ struct bnxt_ulp { unsigned long *async_events_bmap; u16 max_async_event_id; u16 msix_requested; + u16 msix_base; atomic_t ref_count; }; @@ -60,6 +63,7 @@ struct bnxt_en_dev { #define BNXT_EN_FLAG_ROCEV2_CAP 0x2 #define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \ BNXT_EN_FLAG_ROCEV2_CAP) + #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4 const struct bnxt_en_ops *en_ops; struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP]; }; @@ -84,11 +88,15 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id) return false; } +int bnxt_get_ulp_msix_num(struct bnxt *bp); +int bnxt_get_ulp_msix_base(struct bnxt *bp); void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id); void bnxt_ulp_stop(struct bnxt *bp); void bnxt_ulp_start(struct bnxt *bp); void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); void bnxt_ulp_shutdown(struct bnxt *bp); +void bnxt_ulp_irq_stop(struct bnxt *bp); +void bnxt_ulp_irq_restart(struct bnxt *bp, int err); void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl); struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index b1e35a9accf1..0445f2c0c629 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -603,6 +603,8 @@ static int bcmgenet_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rx_ring *ring; + unsigned int i; ec->tx_max_coalesced_frames = bcmgenet_tdma_ring_readl(priv, DESC_INDEX, @@ -613,15 +615,57 @@ static int bcmgenet_get_coalesce(struct net_device *dev, ec->rx_coalesce_usecs = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000; + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ring = &priv->rx_rings[i]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + } + ring = &priv->rx_rings[DESC_INDEX]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + return 0; } +static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring, + u32 usecs, u32 pkts) +{ + struct bcmgenet_priv *priv = ring->priv; + unsigned int i = ring->index; + u32 reg; + + bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH); + + reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i); + reg &= ~DMA_TIMEOUT_MASK; + reg |= DIV_ROUND_UP(usecs * 1000, 8192); + bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i); +} + +static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring, + struct ethtool_coalesce *ec) +{ + struct net_dim_cq_moder moder; + u32 usecs, pkts; + + ring->rx_coalesce_usecs = ec->rx_coalesce_usecs; + 
ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) { + moder = net_dim_get_def_profile(ring->dim.dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + ring->dim.use_dim = ec->use_adaptive_rx_coalesce; + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + static int bcmgenet_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct bcmgenet_priv *priv = netdev_priv(dev); unsigned int i; - u32 reg; /* Base system clock is 125Mhz, DMA timeout is this reference clock * divided by 1024, which yields roughly 8.192us, our maximum value @@ -641,7 +685,8 @@ static int bcmgenet_set_coalesce(struct net_device *dev, * transmitted, or when the ring is empty. */ if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high || - ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low) + ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low || + ec->use_adaptive_tx_coalesce) return -EOPNOTSUPP; /* Program all TX queues with the same values, as there is no @@ -655,25 +700,9 @@ static int bcmgenet_set_coalesce(struct net_device *dev, ec->tx_max_coalesced_frames, DMA_MBUF_DONE_THRESH); - for (i = 0; i < priv->hw_params->rx_queues; i++) { - bcmgenet_rdma_ring_writel(priv, i, - ec->rx_max_coalesced_frames, - DMA_MBUF_DONE_THRESH); - - reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i); - reg &= ~DMA_TIMEOUT_MASK; - reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192); - bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i); - } - - bcmgenet_rdma_ring_writel(priv, DESC_INDEX, - ec->rx_max_coalesced_frames, - DMA_MBUF_DONE_THRESH); - - reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT); - reg &= ~DMA_TIMEOUT_MASK; - reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192); - bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT); + for (i = 0; i < priv->hw_params->rx_queues; i++) + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec); + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec); return 0; } @@ -1321,7 +1350,7 @@ static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev, dma_unmap_addr_set(cb, dma_addr, 0); } - return 0; + return NULL; } /* Simple helper to free a receive control block's resources */ @@ -1460,7 +1489,7 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *new_skb; u16 offset; u8 ip_proto; - u16 ip_ver; + __be16 ip_ver; u32 tx_csum_info; if (unlikely(skb_headroom(skb) < sizeof(*status))) { @@ -1480,12 +1509,12 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, status = (struct status_64 *)skb->data; if (skb->ip_summed == CHECKSUM_PARTIAL) { - ip_ver = htons(skb->protocol); + ip_ver = skb->protocol; switch (ip_ver) { - case ETH_P_IP: + case htons(ETH_P_IP): ip_proto = ip_hdr(skb)->protocol; break; - case ETH_P_IPV6: + case htons(ETH_P_IPV6): ip_proto = ipv6_hdr(skb)->nexthdr; break; default: @@ -1501,7 +1530,8 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, */ if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { tx_csum_info |= STATUS_TX_CSUM_LV; - if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) + if (ip_proto == IPPROTO_UDP && + ip_ver == htons(ETH_P_IP)) tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; } else { tx_csum_info = 0; @@ -1713,6 +1743,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, unsigned long dma_flag; int len; unsigned int rxpktprocessed = 0, rxpkttoprocess; + unsigned int bytes_processed = 
0; unsigned int p_index, mask; unsigned int discards; unsigned int chksum_ok = 0; @@ -1832,6 +1863,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, len -= ETH_FCS_LEN; } + bytes_processed += len; + /*Finish setting up the received SKB and send it to the kernel*/ skb->protocol = eth_type_trans(skb, priv->dev); ring->packets++; @@ -1854,6 +1887,9 @@ next: bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); } + ring->dim.bytes = bytes_processed; + ring->dim.packets = rxpktprocessed; + return rxpktprocessed; } @@ -1862,6 +1898,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) { struct bcmgenet_rx_ring *ring = container_of(napi, struct bcmgenet_rx_ring, napi); + struct net_dim_sample dim_sample; unsigned int work_done; work_done = bcmgenet_desc_rx(ring, budget); @@ -1871,9 +1908,29 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) ring->int_enable(ring); } + if (ring->dim.use_dim) { + net_dim_sample(ring->dim.event_ctr, ring->dim.packets, + ring->dim.bytes, &dim_sample); + net_dim(&ring->dim.dim, dim_sample); + } + return work_done; } +static void bcmgenet_dim_work(struct work_struct *work) +{ + struct net_dim *dim = container_of(work, struct net_dim, work); + struct bcmgenet_net_dim *ndim = + container_of(dim, struct bcmgenet_net_dim, dim); + struct bcmgenet_rx_ring *ring = + container_of(ndim, struct bcmgenet_rx_ring, dim); + struct net_dim_cq_moder cur_profile = + net_dim_get_profile(dim->mode, dim->profile_ix); + + bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts); + dim->state = NET_DIM_START_MEASURE; +} + /* Assign skb to RX DMA descriptor. */ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, struct bcmgenet_rx_ring *ring) @@ -2022,6 +2079,37 @@ static void init_umac(struct bcmgenet_priv *priv) dev_dbg(kdev, "done init umac\n"); } +static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring, + void (*cb)(struct work_struct *work)) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + + INIT_WORK(&dim->dim.work, cb); + dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; + dim->event_ctr = 0; + dim->packets = 0; + dim->bytes = 0; +} + +static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + struct net_dim_cq_moder moder; + u32 usecs, pkts; + + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + /* If DIM was enabled, re-apply default parameters */ + if (dim->use_dim) { + moder = net_dim_get_def_profile(dim->dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + /* Initialize a Tx ring along with corresponding hardware registers */ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, unsigned int index, unsigned int size, @@ -2111,13 +2199,15 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, if (ret) return ret; + bcmgenet_init_dim(ring, bcmgenet_dim_work); + bcmgenet_init_rx_coalesce(ring); + /* Initialize Rx NAPI */ netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, NAPI_POLL_WEIGHT); bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); - bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); bcmgenet_rdma_ring_writel(priv, index, ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); @@ -2276,10 +2366,12 @@ static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) for (i = 0; i < 
priv->hw_params->rx_queues; ++i) { ring = &priv->rx_rings[i]; napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); } ring = &priv->rx_rings[DESC_INDEX]; napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); } static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) @@ -2557,6 +2649,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) continue; rx_ring = &priv->rx_rings[index]; + rx_ring->dim.event_ctr++; if (likely(napi_schedule_prep(&rx_ring->napi))) { rx_ring->int_disable(rx_ring); @@ -2601,6 +2694,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) if (status & UMAC_IRQ_RXDMA_DONE) { rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_ring->dim.event_ctr++; if (likely(napi_schedule_prep(&rx_ring->napi))) { rx_ring->int_disable(rx_ring); @@ -3351,6 +3445,7 @@ static int bcmgenet_probe(struct platform_device *pdev) struct net_device *dev; const void *macaddr; struct resource *r; + unsigned int i; int err = -EIO; const char *phy_mode_str; @@ -3479,6 +3574,11 @@ static int bcmgenet_probe(struct platform_device *pdev) netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); + /* Set default coalescing parameters */ + for (i = 0; i < priv->hw_params->rx_queues; i++) + priv->rx_rings[i].rx_max_coalesced_frames = 1; + priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1; + /* libphy will determine the link state */ netif_carrier_off(dev); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 3c50431ccd2a..b773bc07edf7 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -16,6 +16,7 @@ #include <linux/mii.h> #include <linux/if_vlan.h> #include <linux/phy.h> +#include <linux/net_dim.h> /* total number of Buffer Descriptors, same for Rx/Tx */ #define TOTAL_DESC 256 @@ -572,6 +573,14 @@ struct bcmgenet_tx_ring { struct bcmgenet_priv *priv; }; +struct bcmgenet_net_dim { + u16 use_dim; + u16 event_ctr; + unsigned long packets; + unsigned long bytes; + struct net_dim dim; +}; + struct bcmgenet_rx_ring { struct napi_struct napi; /* Rx NAPI struct */ unsigned long bytes; @@ -586,6 +595,9 @@ struct bcmgenet_rx_ring { unsigned int cb_ptr; /* Rx ring initial CB ptr */ unsigned int end_ptr; /* Rx ring end CB ptr */ unsigned int old_discards; + struct bcmgenet_net_dim dim; + u32 rx_max_coalesced_frames; + u32 rx_coalesce_usecs; void (*int_enable)(struct bcmgenet_rx_ring *); void (*int_disable)(struct bcmgenet_rx_ring *); struct bcmgenet_priv *priv; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index ecdef42f0ae6..ef4a0c326736 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -63,24 +63,24 @@ MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver"); /* 1 normal messages, 0 quiet .. 7 verbose. 
*/ static int debug = 1; -module_param(debug, int, S_IRUGO); +module_param(debug, int, 0444); MODULE_PARM_DESC(debug, "Debug messages"); #ifdef CONFIG_SBMAC_COALESCE static int int_pktcnt_tx = 255; -module_param(int_pktcnt_tx, int, S_IRUGO); +module_param(int_pktcnt_tx, int, 0444); MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count"); static int int_timeout_tx = 255; -module_param(int_timeout_tx, int, S_IRUGO); +module_param(int_timeout_tx, int, 0444); MODULE_PARM_DESC(int_timeout_tx, "TX timeout value"); static int int_pktcnt_rx = 64; -module_param(int_pktcnt_rx, int, S_IRUGO); +module_param(int_pktcnt_rx, int, 0444); MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count"); static int int_timeout_rx = 64; -module_param(int_timeout_rx, int, S_IRUGO); +module_param(int_timeout_rx, int, 0444); MODULE_PARM_DESC(int_timeout_rx, "RX timeout value"); #endif diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index a77ee2f8fb8d..08bbb639be1a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -922,8 +922,8 @@ static int tg3_ape_send_event(struct tg3 *tp, u32 event) if (!(apedata & APE_FW_STATUS_READY)) return -EAGAIN; - /* Wait for up to 1 millisecond for APE to service previous event. */ - err = tg3_ape_event_lock(tp, 1000); + /* Wait for up to 20 milliseconds for APE to service previous event. */ + err = tg3_ape_event_lock(tp, 20000); if (err) return err; @@ -946,6 +946,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) switch (kind) { case RESET_KIND_INIT: + tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++); tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, APE_HOST_SEG_SIG_MAGIC); tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, @@ -962,13 +963,6 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) event = APE_EVENT_STATUS_STATE_START; break; case RESET_KIND_SHUTDOWN: - /* With the interface we are currently using, - * APE does not track driver state. Wiping - * out the HOST SEGMENT SIGNATURE forces - * the APE to assume OS absent status. - */ - tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); - if (device_may_wakeup(&tp->pdev->dev) && tg3_flag(tp, WOL_ENABLE)) { tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, @@ -990,6 +984,18 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) tg3_ape_send_event(tp, event); } +static void tg3_send_ape_heartbeat(struct tg3 *tp, + unsigned long interval) +{ + /* Check if the heartbeat interval has been exceeded */ + if (!tg3_flag(tp, ENABLE_APE) || + time_before(jiffies, tp->ape_hb_jiffies + interval)) + return; + + tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++); + tp->ape_hb_jiffies = jiffies; +} + static void tg3_disable_ints(struct tg3 *tp) { int i; @@ -7262,6 +7268,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) } } + tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); return work_done; tx_recovery: @@ -7344,6 +7351,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) } } + tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); return work_done; tx_recovery: @@ -10732,7 +10740,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) if (tg3_flag(tp, ENABLE_APE)) /* Write our heartbeat update interval to APE. 
*/ tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, - APE_HOST_HEARTBEAT_INT_DISABLE); + APE_HOST_HEARTBEAT_INT_5SEC); tg3_write_sig_post_reset(tp, RESET_KIND_INIT); @@ -10791,11 +10799,11 @@ static ssize_t tg3_show_temp(struct device *dev, } -static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL, +static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL, TG3_TEMP_SENSOR_OFFSET); -static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL, +static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL, TG3_TEMP_CAUTION_OFFSET); -static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL, +static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL, TG3_TEMP_MAX_OFFSET); static struct attribute *tg3_attrs[] = { @@ -11077,6 +11085,9 @@ static void tg3_timer(struct timer_list *t) tp->asf_counter = tp->asf_multiplier; } + /* Update the APE heartbeat every 5 seconds.*/ + tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); + spin_unlock(&tp->lock); restart_timer: @@ -16653,6 +16664,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) pci_state_reg); tg3_ape_lock_init(tp); + tp->ape_hb_interval = + msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC); } /* Set up tp->grc_local_ctrl before calling diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 47f51cc0566d..1d61aa3efda1 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -2508,6 +2508,7 @@ #define TG3_APE_LOCK_PHY3 5 #define TG3_APE_LOCK_GPIO 7 +#define TG3_APE_HB_INTERVAL (tp->ape_hb_interval) #define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 @@ -3423,6 +3424,10 @@ struct tg3 { struct device *hwmon_dev; bool link_up; bool pcierr_recovery; + + u32 ape_hb; + unsigned long ape_hb_interval; + unsigned long ape_hb_jiffies; }; /* Accessor macros for chip and asic attributes diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index a843076597ec..69cc3e0119d6 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -46,7 +46,7 @@ module_param(bnad_ioc_auto_recover, uint, 0444); MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery"); static uint bna_debugfs_enable = 1; -module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR); +module_param(bna_debugfs_enable, uint, 0644); MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1," " Range[false:0|true:1]"); diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index cebfe3bd086e..933799be0471 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -486,11 +486,11 @@ struct bnad_debugfs_entry { }; static const struct bnad_debugfs_entry bnad_debugfs_files[] = { - { "fwtrc", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, }, - { "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, }, - { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, }, - { "regwr", S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, }, - { "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, }, + { "fwtrc", S_IFREG | 0444, &bnad_debugfs_op_fwtrc, }, + { "fwsave", S_IFREG | 0444, &bnad_debugfs_op_fwsave, }, + { "regrd", S_IFREG | 0644, &bnad_debugfs_op_regrd, }, + { "regwr", S_IFREG | 0200, &bnad_debugfs_op_regwr, }, + { "drvinfo", S_IFREG | 0444, &bnad_debugfs_op_drvinfo, }, }; static struct dentry *bna_debugfs_root; diff --git 
a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index e84afcf1ecb5..b4c9268100bb 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -472,8 +472,44 @@ static int macb_mii_probe(struct net_device *dev) struct macb *bp = netdev_priv(dev); struct macb_platform_data *pdata; struct phy_device *phydev; - int phy_irq; - int ret; + struct device_node *np; + int phy_irq, ret, i; + + pdata = dev_get_platdata(&bp->pdev->dev); + np = bp->pdev->dev.of_node; + ret = 0; + + if (np) { + if (of_phy_is_fixed_link(np)) { + if (of_phy_register_fixed_link(np) < 0) { + dev_err(&bp->pdev->dev, + "broken fixed-link specification\n"); + return -ENODEV; + } + bp->phy_node = of_node_get(np); + } else { + bp->phy_node = of_parse_phandle(np, "phy-handle", 0); + /* fallback to standard phy registration if no + * phy-handle was found nor any phy found during + * dt phy registration + */ + if (!bp->phy_node && !phy_find_first(bp->mii_bus)) { + for (i = 0; i < PHY_MAX_ADDR; i++) { + struct phy_device *phydev; + + phydev = mdiobus_scan(bp->mii_bus, i); + if (IS_ERR(phydev) && + PTR_ERR(phydev) != -ENODEV) { + ret = PTR_ERR(phydev); + break; + } + } + + if (ret) + return -ENODEV; + } + } + } if (bp->phy_node) { phydev = of_phy_connect(dev, bp->phy_node, @@ -488,7 +524,6 @@ static int macb_mii_probe(struct net_device *dev) return -ENXIO; } - pdata = dev_get_platdata(&bp->pdev->dev); if (pdata) { if (gpio_is_valid(pdata->phy_irq_pin)) { ret = devm_gpio_request(&bp->pdev->dev, @@ -533,7 +568,7 @@ static int macb_mii_init(struct macb *bp) { struct macb_platform_data *pdata; struct device_node *np; - int err = -ENXIO, i; + int err; /* Enable management port */ macb_writel(bp, NCR, MACB_BIT(MPE)); @@ -556,43 +591,10 @@ static int macb_mii_init(struct macb *bp) dev_set_drvdata(&bp->dev->dev, bp->mii_bus); np = bp->pdev->dev.of_node; - if (np) { - if (of_phy_is_fixed_link(np)) { - if (of_phy_register_fixed_link(np) < 0) { - dev_err(&bp->pdev->dev, - "broken fixed-link specification\n"); - goto err_out_unregister_bus; - } - bp->phy_node = of_node_get(np); - - err = mdiobus_register(bp->mii_bus); - } else { - /* try dt phy registration */ - err = of_mdiobus_register(bp->mii_bus, np); - - /* fallback to standard phy registration if no phy were - * found during dt phy registration - */ - if (!err && !phy_find_first(bp->mii_bus)) { - for (i = 0; i < PHY_MAX_ADDR; i++) { - struct phy_device *phydev; - phydev = mdiobus_scan(bp->mii_bus, i); - if (IS_ERR(phydev) && - PTR_ERR(phydev) != -ENODEV) { - err = PTR_ERR(phydev); - break; - } - } - - if (err) - goto err_out_unregister_bus; - } - } + if (np) { + err = of_mdiobus_register(bp->mii_bus, np); } else { - for (i = 0; i < PHY_MAX_ADDR; i++) - bp->mii_bus->irq[i] = PHY_POLL; - if (pdata) bp->mii_bus->phy_mask = pdata->phy_mask; @@ -610,10 +612,10 @@ static int macb_mii_init(struct macb *bp) err_out_unregister_bus: mdiobus_unregister(bp->mii_bus); -err_out_free_mdiobus: - of_node_put(bp->phy_node); if (np && of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); +err_out_free_mdiobus: + of_node_put(bp->phy_node); mdiobus_free(bp->mii_bus); err_out: return err; @@ -3950,10 +3952,16 @@ static int macb_probe(struct platform_device *pdev) dev->max_mtu = ETH_DATA_LEN; mac = of_get_mac_address(np); - if (mac) + if (mac) { ether_addr_copy(bp->dev->dev_addr, mac); - else - macb_get_hwaddr(bp); + } else { + err = of_get_nvmem_mac_address(np, bp->dev->dev_addr); + if (err) { + if (err == -EPROBE_DEFER) + 
goto err_out_free_netdev; + macb_get_hwaddr(bp); + } + } err = of_get_phy_mode(np); if (err < 0) { diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c index c87c9c684a33..6aeb1045c302 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.c +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c @@ -75,6 +75,8 @@ EXPORT_SYMBOL(cavium_ptp_get); void cavium_ptp_put(struct cavium_ptp *ptp) { + if (!ptp) + return; pci_dev_put(ptp->pdev); } EXPORT_SYMBOL(cavium_ptp_put); @@ -334,18 +336,7 @@ static struct pci_driver cavium_ptp_driver = { .remove = cavium_ptp_remove, }; -static int __init cavium_ptp_init_module(void) -{ - return pci_register_driver(&cavium_ptp_driver); -} - -static void __exit cavium_ptp_cleanup_module(void) -{ - pci_unregister_driver(&cavium_ptp_driver); -} - -module_init(cavium_ptp_init_module); -module_exit(cavium_ptp_cleanup_module); +module_pci_driver(cavium_ptp_driver); MODULE_DESCRIPTION(DRV_NAME); MODULE_AUTHOR("Cavium Networks <support@cavium.com>"); diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile index e3fc4645cd8a..bc9937502043 100644 --- a/drivers/net/ethernet/cavium/liquidio/Makefile +++ b/drivers/net/ethernet/cavium/liquidio/Makefile @@ -2,42 +2,23 @@ # # Cavium Liquidio ethernet device driver # -obj-$(CONFIG_LIQUIDIO) += liquidio.o -liquidio-$(CONFIG_LIQUIDIO) += lio_ethtool.o \ - lio_core.o \ - request_manager.o \ - response_manager.o \ - octeon_device.o \ - cn66xx_device.o \ - cn68xx_device.o \ - cn23xx_pf_device.o \ - cn23xx_vf_device.o \ - octeon_mailbox.o \ - octeon_mem_ops.o \ - octeon_droq.o \ - octeon_nic.o +common-objs := lio_ethtool.o \ + lio_core.o \ + request_manager.o \ + response_manager.o \ + octeon_device.o \ + cn66xx_device.o \ + cn68xx_device.o \ + cn23xx_pf_device.o \ + cn23xx_vf_device.o \ + octeon_mailbox.o \ + octeon_mem_ops.o \ + octeon_droq.o \ + octeon_nic.o -liquidio-objs := lio_main.o octeon_console.o lio_vf_rep.o $(liquidio-y) +obj-$(CONFIG_LIQUIDIO) += liquidio.o +liquidio-y := lio_main.o octeon_console.o lio_vf_rep.o $(common-objs) obj-$(CONFIG_LIQUIDIO_VF) += liquidio_vf.o - -ifeq ($(CONFIG_LIQUIDIO)$(CONFIG_LIQUIDIO_VF), yy) - liquidio_vf-objs := lio_vf_main.o -else -liquidio_vf-$(CONFIG_LIQUIDIO_VF) += lio_ethtool.o \ - lio_core.o \ - request_manager.o \ - response_manager.o \ - octeon_device.o \ - cn66xx_device.o \ - cn68xx_device.o \ - cn23xx_pf_device.o \ - cn23xx_vf_device.o \ - octeon_mailbox.o \ - octeon_mem_ops.o \ - octeon_droq.o \ - octeon_nic.o - -liquidio_vf-objs := lio_vf_main.o $(liquidio_vf-y) -endif +liquidio_vf-y := lio_vf_main.o $(common-objs) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 32ae63b6f20e..2a94eee943b2 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -164,15 +164,6 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) } break; - case OCTNET_CMD_CHANGE_MTU: - /* If command is successful, change the MTU. 
*/ - netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n", - netdev->mtu, nctrl->ncmd.s.param1); - netdev->mtu = nctrl->ncmd.s.param1; - queue_delayed_work(lio->link_status_wq.wq, - &lio->link_status_wq.wk.work, 0); - break; - case OCTNET_CMD_GPIO_ACCESS: netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n"); @@ -386,20 +377,12 @@ static void lio_update_txq_status(struct octeon_device *oct, int iq_num) return; lio = GET_LIO(netdev); - if (netif_is_multiqueue(netdev)) { - if (__netif_subqueue_stopped(netdev, iq->q_index) && - lio->linfo.link.s.link_up && - (!octnet_iq_is_full(oct, iq_num))) { - netif_wake_subqueue(netdev, iq->q_index); - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, - tx_restart, 1); - } - } else if (netif_queue_stopped(netdev) && - lio->linfo.link.s.link_up && - (!octnet_iq_is_full(oct, lio->txq))) { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, + if (__netif_subqueue_stopped(netdev, iq->q_index) && + lio->linfo.link.s.link_up && + (!octnet_iq_is_full(oct, iq_num))) { + netif_wake_subqueue(netdev, iq->q_index); + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, tx_restart, 1); - netif_wake_queue(netdev); } } @@ -571,7 +554,8 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), napi_gro_receive(napi, skb); - droq->stats.rx_bytes_received += len; + droq->stats.rx_bytes_received += len - + rh->r_dh.len * BYTES_PER_DHLEN_UNIT; droq->stats.rx_pkts_received++; } else { recv_buffer_free(skb); @@ -635,9 +619,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) iq_no = droq->q_no; /* Handle Droq descriptors */ - work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, - POLL_EVENT_PROCESS_PKTS, - budget); + work_done = octeon_droq_process_poll_pkts(oct, droq, budget); /* Flush the instruction queue */ iq = oct->instr_queue[iq_no]; @@ -668,8 +650,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) tx_done = 1; napi_complete_done(napi, work_done); - octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, - POLL_EVENT_ENABLE_INTR, 0); + octeon_enable_irq(droq->oct_dev, droq->q_no); return 0; } @@ -1080,3 +1061,111 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs) } return 0; } + +static void liquidio_change_mtu_completion(struct octeon_device *oct, + u32 status, void *buf) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; + struct liquidio_if_cfg_context *ctx; + + ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; + + if (status) { + dev_err(&oct->pci_dev->dev, "MTU change failed. 
Status: %llx\n", + CVM_CAST64(status)); + WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_FAIL); + } else { + WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_SUCCESS); + } + + /* This barrier is required to be sure that the response has been + * written fully before waking up the handler + */ + wmb(); + + wake_up_interruptible(&ctx->wc); +} + +/** + * \brief Net device change_mtu + * @param netdev network device + */ +int liquidio_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct liquidio_if_cfg_context *ctx; + struct octeon_soft_command *sc; + union octnet_cmd *ncmd; + int ctx_size; + int ret = 0; + + ctx_size = sizeof(struct liquidio_if_cfg_context); + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, ctx_size); + + ncmd = (union octnet_cmd *)sc->virtdptr; + ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; + + WRITE_ONCE(ctx->cond, 0); + ctx->octeon_id = lio_get_device_id(oct); + init_waitqueue_head(&ctx->wc); + + ncmd->u64 = 0; + ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU; + ncmd->s.param1 = new_mtu; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_CMD, 0, 0, 0); + + sc->callback = liquidio_change_mtu_completion; + sc->callback_arg = sc; + sc->wait_time = 100; + + ret = octeon_send_soft_command(oct, sc); + if (ret == IQ_SEND_FAILED) { + netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n"); + return -EINVAL; + } + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. + */ + if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR || + ctx->cond == LIO_CHANGE_MTU_FAIL) { + octeon_free_soft_command(oct, sc); + return -EINVAL; + } + + netdev->mtu = new_mtu; + lio->mtu = new_mtu; + + octeon_free_soft_command(oct, sc); + return 0; +} + +int lio_wait_for_clean_oq(struct octeon_device *oct) +{ + int retry = 100, pending_pkts = 0; + int idx; + + do { + pending_pkts = 0; + + for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) { + if (!(oct->io_qmask.oq & BIT_ULL(idx))) + continue; + pending_pkts += + atomic_read(&oct->droq[idx]->pkts_pending); + } + + if (pending_pkts > 0) + schedule_timeout_uninterruptible(1); + + } while (retry-- && pending_pkts); + + return pending_pkts; +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index a63ddf07f168..550ac29682a5 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -232,10 +232,16 @@ static int lio_get_link_ksettings(struct net_device *netdev, linfo = &lio->linfo; - if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || - linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || - linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || - linfo->link.s.if_mode == INTERFACE_MODE_XFI) { + switch (linfo->link.s.phy_type) { + case LIO_PHY_PORT_TP: + ecmd->base.port = PORT_TP; + supported = (SUPPORTED_10000baseT_Full | + SUPPORTED_TP | SUPPORTED_Pause); + advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause); + ecmd->base.autoneg = AUTONEG_DISABLE; + break; + + case LIO_PHY_PORT_FIBRE: ecmd->base.port = PORT_FIBRE; if (linfo->link.s.speed == SPEED_10000) { @@ -245,12 +251,18 @@ static int lio_get_link_ksettings(struct net_device *netdev, supported |= SUPPORTED_FIBRE | SUPPORTED_Pause; advertising |= ADVERTISED_Pause; + ecmd->base.autoneg = AUTONEG_DISABLE; 
+ break; + } + + if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || + linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XFI) { ethtool_convert_legacy_u32_to_link_mode( ecmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode( ecmd->link_modes.advertising, advertising); - ecmd->base.autoneg = AUTONEG_DISABLE; - } else { dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n", linfo->link.s.if_mode); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index a5eecd895a82..603a144d3d9c 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -91,18 +91,9 @@ static int octeon_console_debug_enabled(u32 console) */ #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 -struct liquidio_if_cfg_context { - int octeon_id; - - wait_queue_head_t wc; - - int cond; -}; - -struct liquidio_if_cfg_resp { - u64 rh; - struct liquidio_if_cfg_info cfg_info; - u64 status; +struct lio_trusted_vf_ctx { + struct completion complete; + int status; }; struct liquidio_rx_ctl_context { @@ -523,148 +514,30 @@ static void liquidio_deinit_pci(void) } /** - * \brief Stop Tx queues - * @param netdev network device - */ -static inline void txqs_stop(struct net_device *netdev) -{ - if (netif_is_multiqueue(netdev)) { - int i; - - for (i = 0; i < netdev->num_tx_queues; i++) - netif_stop_subqueue(netdev, i); - } else { - netif_stop_queue(netdev); - } -} - -/** - * \brief Start Tx queues - * @param netdev network device - */ -static inline void txqs_start(struct net_device *netdev) -{ - if (netif_is_multiqueue(netdev)) { - int i; - - for (i = 0; i < netdev->num_tx_queues; i++) - netif_start_subqueue(netdev, i); - } else { - netif_start_queue(netdev); - } -} - -/** - * \brief Wake Tx queues - * @param netdev network device - */ -static inline void txqs_wake(struct net_device *netdev) -{ - struct lio *lio = GET_LIO(netdev); - - if (netif_is_multiqueue(netdev)) { - int i; - - for (i = 0; i < netdev->num_tx_queues; i++) { - int qno = lio->linfo.txpciq[i % - lio->oct_dev->num_iqs].s.q_no; - - if (__netif_subqueue_stopped(netdev, i)) { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, - tx_restart, 1); - netif_wake_subqueue(netdev, i); - } - } - } else { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, - tx_restart, 1); - netif_wake_queue(netdev); - } -} - -/** - * \brief Stop Tx queue - * @param netdev network device - */ -static void stop_txq(struct net_device *netdev) -{ - txqs_stop(netdev); -} - -/** - * \brief Start Tx queue - * @param netdev network device - */ -static void start_txq(struct net_device *netdev) -{ - struct lio *lio = GET_LIO(netdev); - - if (lio->linfo.link.s.link_up) { - txqs_start(netdev); - return; - } -} - -/** - * \brief Wake a queue - * @param netdev network device - * @param q which queue to wake - */ -static inline void wake_q(struct net_device *netdev, int q) -{ - if (netif_is_multiqueue(netdev)) - netif_wake_subqueue(netdev, q); - else - netif_wake_queue(netdev); -} - -/** - * \brief Stop a queue - * @param netdev network device - * @param q which queue to stop - */ -static inline void stop_q(struct net_device *netdev, int q) -{ - if (netif_is_multiqueue(netdev)) - netif_stop_subqueue(netdev, q); - else - netif_stop_queue(netdev); -} - -/** * \brief Check Tx queue status, and take appropriate action * @param lio per-network private data * @returns 0 if full, number of 
queues woken up otherwise */ static inline int check_txq_status(struct lio *lio) { + int numqs = lio->netdev->num_tx_queues; int ret_val = 0; + int q, iq; - if (netif_is_multiqueue(lio->netdev)) { - int numqs = lio->netdev->num_tx_queues; - int q, iq = 0; - - /* check each sub-queue state */ - for (q = 0; q < numqs; q++) { - iq = lio->linfo.txpciq[q % - lio->oct_dev->num_iqs].s.q_no; - if (octnet_iq_is_full(lio->oct_dev, iq)) - continue; - if (__netif_subqueue_stopped(lio->netdev, q)) { - wake_q(lio->netdev, q); - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, - tx_restart, 1); - ret_val++; - } + /* check each sub-queue state */ + for (q = 0; q < numqs; q++) { + iq = lio->linfo.txpciq[q % + lio->oct_dev->num_iqs].s.q_no; + if (octnet_iq_is_full(lio->oct_dev, iq)) + continue; + if (__netif_subqueue_stopped(lio->netdev, q)) { + netif_wake_subqueue(lio->netdev, q); + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, + tx_restart, 1); + ret_val++; } - } else { - if (octnet_iq_is_full(lio->oct_dev, lio->txq)) - return 0; - wake_q(lio->netdev, lio->txq); - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, - tx_restart, 1); - ret_val = 1; } + return ret_val; } @@ -841,8 +714,12 @@ static void octnet_link_status_change(struct work_struct *work) struct cavium_wk *wk = (struct cavium_wk *)work; struct lio *lio = (struct lio *)wk->ctxptr; + /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. + * this API is invoked only when new max-MTU of the interface is + * less than current MTU. + */ rtnl_lock(); - call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev); + dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); rtnl_unlock(); } @@ -891,7 +768,11 @@ static inline void update_link_status(struct net_device *netdev, { struct lio *lio = GET_LIO(netdev); int changed = (lio->linfo.link.u64 != ls->u64); + int current_max_mtu = lio->linfo.link.s.mtu; + struct octeon_device *oct = lio->oct_dev; + dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n", + __func__, lio->linfo.link.u64, ls->u64); lio->linfo.link.u64 = ls->u64; if ((lio->intf_open) && (changed)) { @@ -899,11 +780,25 @@ static inline void update_link_status(struct net_device *netdev, lio->link_changes++; if (lio->linfo.link.s.link_up) { + dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__); netif_carrier_on(netdev); - txqs_wake(netdev); + wake_txqs(netdev); } else { + dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__); netif_carrier_off(netdev); - stop_txq(netdev); + stop_txqs(netdev); + } + if (lio->linfo.link.s.mtu != current_max_mtu) { + netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n", + current_max_mtu, lio->linfo.link.s.mtu); + netdev->max_mtu = lio->linfo.link.s.mtu; + } + if (lio->linfo.link.s.mtu < netdev->mtu) { + dev_warn(&oct->pci_dev->dev, + "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n", + netdev->mtu, lio->linfo.link.s.mtu); + queue_delayed_work(lio->link_status_wq.wq, + &lio->link_status_wq.wk.work, 0); } } } @@ -1739,43 +1634,6 @@ static int octeon_pci_os_setup(struct octeon_device *oct) return 0; } -static inline int skb_iq(struct lio *lio, struct sk_buff *skb) -{ - int q = 0; - - if (netif_is_multiqueue(lio->netdev)) - q = skb->queue_mapping % lio->linfo.num_txpciq; - - return q; -} - -/** - * \brief Check Tx queue state for a given network buffer - * @param lio per-network private data - * @param skb network buffer - */ -static inline int check_txq_state(struct lio *lio, struct sk_buff *skb) -{ - int q = 0, iq = 0; - - if 
(netif_is_multiqueue(lio->netdev)) { - q = skb->queue_mapping; - iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no; - } else { - iq = lio->txq; - q = iq; - } - - if (octnet_iq_is_full(lio->oct_dev, iq)) - return 0; - - if (__netif_subqueue_stopped(lio->netdev, q)) { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); - wake_q(lio->netdev, q); - } - return 1; -} - /** * \brief Unmap and free network buffer * @param buf buffer @@ -1793,8 +1651,6 @@ static void free_netbuf(void *buf) dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, DMA_TO_DEVICE); - check_txq_state(lio, skb); - tx_buffer_free(skb); } @@ -1835,8 +1691,6 @@ static void free_netsgbuf(void *buf) list_add_tail(&g->list, &lio->glist[iq]); spin_unlock(&lio->glist_lock[iq]); - check_txq_state(lio, skb); /* mq support: sub-queue state check */ - tx_buffer_free(skb); } @@ -1882,8 +1736,6 @@ static void free_netsgbuf_with_resp(void *buf) spin_unlock(&lio->glist_lock[iq]); /* Don't free the skb yet */ - - check_txq_state(lio, skb); } /** @@ -2211,7 +2063,7 @@ static int liquidio_open(struct net_device *netdev) return -1; } - start_txq(netdev); + start_txqs(netdev); /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); @@ -2232,16 +2084,6 @@ static int liquidio_stop(struct net_device *netdev) struct octeon_device *oct = lio->oct_dev; struct napi_struct *napi, *n; - if (oct->props[lio->ifidx].napi_enabled) { - list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) - napi_disable(napi); - - oct->props[lio->ifidx].napi_enabled = 0; - - if (OCTEON_CN23XX_PF(oct)) - oct->droq[0]->ops.poll_mode = 0; - } - ifstate_reset(lio, LIO_IFSTATE_RUNNING); netif_tx_disable(netdev); @@ -2267,6 +2109,21 @@ static int liquidio_stop(struct net_device *netdev) lio->ptp_clock = NULL; } + /* Wait for any pending Rx descriptors */ + if (lio_wait_for_clean_oq(oct)) + netif_info(lio, rx_err, lio->netdev, + "Proceeding with stop interface after partial RX desc processing\n"); + + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + if (OCTEON_CN23XX_PF(oct)) + oct->droq[0]->ops.poll_mode = 0; + } + dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); return 0; @@ -2449,38 +2306,6 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) } /** - * \brief Net device change_mtu - * @param netdev network device - */ -static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct lio *lio = GET_LIO(netdev); - struct octeon_device *oct = lio->oct_dev; - struct octnic_ctrl_pkt nctrl; - int ret = 0; - - memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); - - nctrl.ncmd.u64 = 0; - nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU; - nctrl.ncmd.s.param1 = new_mtu; - nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; - nctrl.netpndev = (u64)netdev; - nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Failed to set MTU\n"); - return -1; - } - - lio->mtu = new_mtu; - - return 0; -} - -/** * \brief Handler for SIOCSHWTSTAMP ioctl * @param netdev network device * @param ifr interface request @@ -2685,14 +2510,9 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) lio = GET_LIO(netdev); oct = lio->oct_dev; - if (netif_is_multiqueue(netdev)) { - q_idx = skb->queue_mapping; - q_idx = 
(q_idx % (lio->linfo.num_txpciq)); - tag = q_idx; - iq_no = lio->linfo.txpciq[q_idx].s.q_no; - } else { - iq_no = lio->txq; - } + q_idx = skb_iq(lio, skb); + tag = q_idx; + iq_no = lio->linfo.txpciq[q_idx].s.q_no; stats = &oct->instr_queue[iq_no]->stats; @@ -2723,23 +2543,14 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) ndata.q_no = iq_no; - if (netif_is_multiqueue(netdev)) { - if (octnet_iq_is_full(oct, ndata.q_no)) { - /* defer sending if queue is full */ - netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", - ndata.q_no); - stats->tx_iq_busy++; - return NETDEV_TX_BUSY; - } - } else { - if (octnet_iq_is_full(oct, lio->txq)) { - /* defer sending if queue is full */ - stats->tx_iq_busy++; - netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", - lio->txq); - return NETDEV_TX_BUSY; - } + if (octnet_iq_is_full(oct, ndata.q_no)) { + /* defer sending if queue is full */ + netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", + ndata.q_no); + stats->tx_iq_busy++; + return NETDEV_TX_BUSY; } + /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); */ @@ -2895,7 +2706,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); if (status == IQ_SEND_STOP) - stop_q(netdev, q_idx); + netif_stop_subqueue(netdev, q_idx); netif_trans_update(netdev); @@ -2934,7 +2745,7 @@ static void liquidio_tx_timeout(struct net_device *netdev) "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", netdev->stats.tx_dropped); netif_trans_update(netdev); - txqs_wake(netdev); + wake_txqs(netdev); } static int liquidio_vlan_rx_add_vid(struct net_device *netdev, @@ -3289,10 +3100,120 @@ static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, ether_addr_copy(&ivi->mac[0], macaddr); ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK; ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT; + if (oct->sriov_info.trusted_vf.active && + oct->sriov_info.trusted_vf.id == vfidx) + ivi->trusted = true; + else + ivi->trusted = false; ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; return 0; } +static void trusted_vf_callback(struct octeon_device *oct_dev, + u32 status, void *ptr) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; + struct lio_trusted_vf_ctx *ctx; + + ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr; + ctx->status = status; + + complete(&ctx->complete); +} + +static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) +{ + struct octeon_device *oct = lio->oct_dev; + struct lio_trusted_vf_ctx *ctx; + struct octeon_soft_command *sc; + int ctx_size, retval; + + ctx_size = sizeof(struct lio_trusted_vf_ctx); + sc = octeon_alloc_soft_command(oct, 0, 0, ctx_size); + + ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr; + init_completion(&ctx->complete); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + /* vfidx is 0 based, but vf_num (param1) is 1 based */ + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1, + trusted); + + sc->callback = trusted_vf_callback; + sc->callback_arg = sc; + sc->wait_time = 1000; + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + retval = -1; + } else { + /* Wait for response or timeout */ + if (wait_for_completion_timeout(&ctx->complete, + msecs_to_jiffies(2000))) + retval = ctx->status; + else + retval = -1; + } 
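+
+	/* By now trusted_vf_callback() has either run and filled in
+	 * ctx->status before firing the completion, or the 2000 ms wait
+	 * above timed out; either way we stop waiting and release the
+	 * soft command below.
+	 */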
+ + octeon_free_soft_command(oct, sc); + + return retval; +} + +static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx, + bool setting) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) { + /* trusted vf is not supported by firmware older than 1.7.1 */ + return -EOPNOTSUPP; + } + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { + netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); + return -EINVAL; + } + + if (setting) { + /* Set */ + + if (oct->sriov_info.trusted_vf.active && + oct->sriov_info.trusted_vf.id == vfidx) + return 0; + + if (oct->sriov_info.trusted_vf.active) { + netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n"); + return -EPERM; + } + } else { + /* Clear */ + + if (!oct->sriov_info.trusted_vf.active) + return 0; + } + + if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) { + if (setting) { + oct->sriov_info.trusted_vf.id = vfidx; + oct->sriov_info.trusted_vf.active = true; + } else { + oct->sriov_info.trusted_vf.active = false; + } + + netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx, + setting ? "" : "not "); + } else { + netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n"); + return -1; + } + + return 0; +} + static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, int linkstate) { @@ -3423,6 +3344,7 @@ static const struct net_device_ops lionetdevops = { .ndo_set_vf_mac = liquidio_set_vf_mac, .ndo_set_vf_vlan = liquidio_set_vf_vlan, .ndo_get_vf_config = liquidio_get_vf_config, + .ndo_set_vf_trust = liquidio_set_vf_trust, .ndo_set_vf_link_state = liquidio_set_vf_link_state, }; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index fd70a4844e2d..f92dfa411de6 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -40,20 +40,6 @@ MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) -struct liquidio_if_cfg_context { - int octeon_id; - - wait_queue_head_t wc; - - int cond; -}; - -struct liquidio_if_cfg_resp { - u64 rh; - struct liquidio_if_cfg_info cfg_info; - u64 status; -}; - struct liquidio_rx_ctl_context { int octeon_id; @@ -299,105 +285,6 @@ static struct pci_driver liquidio_vf_pci_driver = { }; /** - * \brief Stop Tx queues - * @param netdev network device - */ -static void txqs_stop(struct net_device *netdev) -{ - if (netif_is_multiqueue(netdev)) { - int i; - - for (i = 0; i < netdev->num_tx_queues; i++) - netif_stop_subqueue(netdev, i); - } else { - netif_stop_queue(netdev); - } -} - -/** - * \brief Start Tx queues - * @param netdev network device - */ -static void txqs_start(struct net_device *netdev) -{ - if (netif_is_multiqueue(netdev)) { - int i; - - for (i = 0; i < netdev->num_tx_queues; i++) - netif_start_subqueue(netdev, i); - } else { - netif_start_queue(netdev); - } -} - -/** - * \brief Wake Tx queues - * @param netdev network device - */ -static void txqs_wake(struct net_device *netdev) -{ - struct lio *lio = GET_LIO(netdev); - - if (netif_is_multiqueue(netdev)) { - int i; - - for (i = 0; i < netdev->num_tx_queues; i++) { - int qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs] - .s.q_no; - if (__netif_subqueue_stopped(netdev, i)) { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, - tx_restart, 1); - netif_wake_subqueue(netdev, i); - } - } - 
} else { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, - tx_restart, 1); - netif_wake_queue(netdev); - } -} - -/** - * \brief Start Tx queue - * @param netdev network device - */ -static void start_txq(struct net_device *netdev) -{ - struct lio *lio = GET_LIO(netdev); - - if (lio->linfo.link.s.link_up) { - txqs_start(netdev); - return; - } -} - -/** - * \brief Wake a queue - * @param netdev network device - * @param q which queue to wake - */ -static void wake_q(struct net_device *netdev, int q) -{ - if (netif_is_multiqueue(netdev)) - netif_wake_subqueue(netdev, q); - else - netif_wake_queue(netdev); -} - -/** - * \brief Stop a queue - * @param netdev network device - * @param q which queue to stop - */ -static void stop_q(struct net_device *netdev, int q) -{ - if (netif_is_multiqueue(netdev)) - netif_stop_subqueue(netdev, q); - else - netif_stop_queue(netdev); -} - -/** * Remove the node at the head of the list. The list would be empty at * the end of this call if there are no more nodes in the list. */ @@ -564,8 +451,12 @@ static void octnet_link_status_change(struct work_struct *work) struct cavium_wk *wk = (struct cavium_wk *)work; struct lio *lio = (struct lio *)wk->ctxptr; + /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. + * this API is invoked only when new max-MTU of the interface is + * less than current MTU. + */ rtnl_lock(); - call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev); + dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); rtnl_unlock(); } @@ -613,6 +504,7 @@ static void update_link_status(struct net_device *netdev, union oct_link_status *ls) { struct lio *lio = GET_LIO(netdev); + int current_max_mtu = lio->linfo.link.s.mtu; struct octeon_device *oct = lio->oct_dev; if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { @@ -623,24 +515,23 @@ static void update_link_status(struct net_device *netdev, if (lio->linfo.link.s.link_up) { netif_carrier_on(netdev); - txqs_wake(netdev); + wake_txqs(netdev); } else { netif_carrier_off(netdev); - txqs_stop(netdev); + stop_txqs(netdev); } - if (lio->linfo.link.s.mtu != netdev->max_mtu) { - dev_info(&oct->pci_dev->dev, "Max MTU Changed from %d to %d\n", - netdev->max_mtu, lio->linfo.link.s.mtu); + if (lio->linfo.link.s.mtu != current_max_mtu) { + dev_info(&oct->pci_dev->dev, + "Max MTU Changed from %d to %d\n", + current_max_mtu, lio->linfo.link.s.mtu); netdev->max_mtu = lio->linfo.link.s.mtu; } if (lio->linfo.link.s.mtu < netdev->mtu) { dev_warn(&oct->pci_dev->dev, - "PF has changed the MTU for gmx port. 
Reducing the mtu from %d to %d\n", + "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n", netdev->mtu, lio->linfo.link.s.mtu); - lio->mtu = lio->linfo.link.s.mtu; - netdev->mtu = lio->linfo.link.s.mtu; queue_delayed_work(lio->link_status_wq.wq, &lio->link_status_wq.wk.work, 0); } @@ -1062,44 +953,6 @@ static int octeon_pci_os_setup(struct octeon_device *oct) return 0; } -static int skb_iq(struct lio *lio, struct sk_buff *skb) -{ - int q = 0; - - if (netif_is_multiqueue(lio->netdev)) - q = skb->queue_mapping % lio->linfo.num_txpciq; - - return q; -} - -/** - * \brief Check Tx queue state for a given network buffer - * @param lio per-network private data - * @param skb network buffer - */ -static int check_txq_state(struct lio *lio, struct sk_buff *skb) -{ - int q = 0, iq = 0; - - if (netif_is_multiqueue(lio->netdev)) { - q = skb->queue_mapping; - iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no; - } else { - iq = lio->txq; - q = iq; - } - - if (octnet_iq_is_full(lio->oct_dev, iq)) - return 0; - - if (__netif_subqueue_stopped(lio->netdev, q)) { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); - wake_q(lio->netdev, q); - } - - return 1; -} - /** * \brief Unmap and free network buffer * @param buf buffer @@ -1117,8 +970,6 @@ static void free_netbuf(void *buf) dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, DMA_TO_DEVICE); - check_txq_state(lio, skb); - tx_buffer_free(skb); } @@ -1160,8 +1011,6 @@ static void free_netsgbuf(void *buf) list_add_tail(&g->list, &lio->glist[iq]); spin_unlock(&lio->glist_lock[iq]); - check_txq_state(lio, skb); /* mq support: sub-queue state check */ - tx_buffer_free(skb); } @@ -1207,8 +1056,6 @@ static void free_netsgbuf_with_resp(void *buf) spin_unlock(&lio->glist_lock[iq]); /* Don't free the skb yet */ - - check_txq_state(lio, skb); } /** @@ -1268,7 +1115,7 @@ static int liquidio_open(struct net_device *netdev) lio->intf_open = 1; netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); - start_txq(netdev); + start_txqs(netdev); /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); @@ -1291,15 +1138,6 @@ static int liquidio_stop(struct net_device *netdev) /* tell Octeon to stop forwarding packets to host */ send_rx_ctrl_cmd(lio, 0); - if (oct->props[lio->ifidx].napi_enabled) { - list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) - napi_disable(napi); - - oct->props[lio->ifidx].napi_enabled = 0; - - oct->droq[0]->ops.poll_mode = 0; - } - netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); /* Inform that netif carrier is down */ lio->intf_open = 0; @@ -1310,7 +1148,21 @@ static int liquidio_stop(struct net_device *netdev) ifstate_reset(lio, LIO_IFSTATE_RUNNING); - txqs_stop(netdev); + stop_txqs(netdev); + + /* Wait for any pending Rx descriptors */ + if (lio_wait_for_clean_oq(oct)) + netif_info(lio, rx_err, lio->netdev, + "Proceeding with stop interface after partial RX desc processing\n"); + + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + oct->droq[0]->ops.poll_mode = 0; + } dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); @@ -1538,41 +1390,6 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) } /** - * \brief Net device change_mtu - * @param netdev network device - */ -static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) 
-{ - struct octnic_ctrl_pkt nctrl; - struct octeon_device *oct; - struct lio *lio; - int ret = 0; - - lio = GET_LIO(netdev); - oct = lio->oct_dev; - - memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); - - nctrl.ncmd.u64 = 0; - nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU; - nctrl.ncmd.s.param1 = new_mtu; - nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = LIO_CMD_WAIT_TM; - nctrl.netpndev = (u64)netdev; - nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Failed to set MTU\n"); - return -EIO; - } - - lio->mtu = new_mtu; - - return 0; -} - -/** * \brief Handler for SIOCSHWTSTAMP ioctl * @param netdev network device * @param ifr interface request @@ -1763,14 +1580,9 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) lio = GET_LIO(netdev); oct = lio->oct_dev; - if (netif_is_multiqueue(netdev)) { - q_idx = skb->queue_mapping; - q_idx = (q_idx % (lio->linfo.num_txpciq)); - tag = q_idx; - iq_no = lio->linfo.txpciq[q_idx].s.q_no; - } else { - iq_no = lio->txq; - } + q_idx = skb_iq(lio, skb); + tag = q_idx; + iq_no = lio->linfo.txpciq[q_idx].s.q_no; stats = &oct->instr_queue[iq_no]->stats; @@ -1799,22 +1611,12 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) ndata.q_no = iq_no; - if (netif_is_multiqueue(netdev)) { - if (octnet_iq_is_full(oct, ndata.q_no)) { - /* defer sending if queue is full */ - netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", - ndata.q_no); - stats->tx_iq_busy++; - return NETDEV_TX_BUSY; - } - } else { - if (octnet_iq_is_full(oct, lio->txq)) { - /* defer sending if queue is full */ - stats->tx_iq_busy++; - netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", - ndata.q_no); - return NETDEV_TX_BUSY; - } + if (octnet_iq_is_full(oct, ndata.q_no)) { + /* defer sending if queue is full */ + netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", + ndata.q_no); + stats->tx_iq_busy++; + return NETDEV_TX_BUSY; } ndata.datasize = skb->len; @@ -1956,7 +1758,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) if (status == IQ_SEND_STOP) { dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", iq_no); - stop_q(netdev, q_idx); + netif_stop_subqueue(netdev, q_idx); } netif_trans_update(netdev); @@ -1996,7 +1798,7 @@ static void liquidio_tx_timeout(struct net_device *netdev) "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", netdev->stats.tx_dropped); netif_trans_update(netdev); - txqs_wake(netdev); + wake_txqs(netdev); } static int diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 522dcc4dcff7..75eea83c7cc6 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -84,6 +84,7 @@ enum octeon_tag_type { #define OPCODE_NIC_IF_CFG 0x09 #define OPCODE_NIC_VF_DRV_NOTICE 0x0A #define OPCODE_NIC_INTRMOD_PARAMS 0x0B +#define OPCODE_NIC_SET_TRUSTED_VF 0x13 #define OPCODE_NIC_SYNC_OCTEON_TIME 0x14 #define VF_DRV_LOADED 1 #define VF_DRV_REMOVED -1 @@ -192,7 +193,8 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_MAX_FRM_SIZE (16000 + OCTNET_FRM_HEADER_SIZE) -#define OCTNET_DEFAULT_FRM_SIZE (1500 + OCTNET_FRM_HEADER_SIZE) +#define OCTNET_DEFAULT_MTU (1500) +#define OCTNET_DEFAULT_FRM_SIZE (OCTNET_DEFAULT_MTU + OCTNET_FRM_HEADER_SIZE) /** NIC Commands are 
sent using this Octeon Input Queue */ #define OCTNET_CMD_Q 0 @@ -675,9 +677,11 @@ union oct_link_status { u64 if_mode:5; u64 pause:1; u64 flashing:1; - u64 reserved:15; + u64 phy_type:5; + u64 reserved:10; #else - u64 reserved:15; + u64 reserved:10; + u64 phy_type:5; u64 flashing:1; u64 pause:1; u64 if_mode:5; @@ -690,6 +694,12 @@ union oct_link_status { } s; }; +enum lio_phy_type { + LIO_PHY_PORT_TP = 0x0, + LIO_PHY_PORT_FIBRE = 0x1, + LIO_PHY_PORT_UNKNOWN, +}; + /** The txpciq info passed to host from the firmware */ union oct_txpciq { @@ -702,9 +712,13 @@ union oct_txpciq { u64 pkind:6; u64 use_qpg:1; u64 qpg:11; - u64 reserved:30; + u64 reserved0:10; + u64 ctrl_qpg:11; + u64 reserved:9; #else - u64 reserved:30; + u64 reserved:9; + u64 ctrl_qpg:11; + u64 reserved0:10; u64 qpg:11; u64 use_qpg:1; u64 pkind:6; @@ -909,6 +923,12 @@ union oct_nic_if_cfg { } s; }; +struct lio_trusted_vf { + uint64_t active: 1; + uint64_t id : 8; + uint64_t reserved: 55; +}; + struct lio_time { s64 sec; /* seconds */ s64 nsec; /* nanoseconds */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 63b0c758a0a6..91937cc5c1d7 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -370,6 +370,8 @@ struct octeon_sriov_info { u32 sriov_enabled; + struct lio_trusted_vf trusted_vf; + /*lookup table that maps DPI ring number to VF pci_dev struct pointer*/ struct pci_dev *dpiring_to_vfpcidev_lut[MAX_POSSIBLE_VFS]; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 3461d65ff4eb..f044718cea52 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -788,7 +788,7 @@ octeon_droq_process_packets(struct octeon_device *oct, * called before calling this routine. 
*/ -static int +int octeon_droq_process_poll_pkts(struct octeon_device *oct, struct octeon_droq *droq, u32 budget) { @@ -835,71 +835,46 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct, return total_pkts_processed; } +/* Enable Pkt Interrupt */ int -octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd, - u32 arg) +octeon_enable_irq(struct octeon_device *oct, u32 q_no) { - struct octeon_droq *droq; - - droq = oct->droq[q_no]; + switch (oct->chip_id) { + case OCTEON_CN66XX: + case OCTEON_CN68XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + unsigned long flags; + u32 value; - if (cmd == POLL_EVENT_PROCESS_PKTS) - return octeon_droq_process_poll_pkts(oct, droq, arg); + spin_lock_irqsave + (&cn6xxx->lock_for_droq_int_enb_reg, flags); + value = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB); + value |= (1 << q_no); + octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, value); + value = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB); + value |= (1 << q_no); + octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, value); - if (cmd == POLL_EVENT_PENDING_PKTS) { - u32 pkt_cnt = atomic_read(&droq->pkts_pending); + /* don't bother flushing the enables */ - return octeon_droq_process_packets(oct, droq, pkt_cnt); + spin_unlock_irqrestore + (&cn6xxx->lock_for_droq_int_enb_reg, flags); } - - if (cmd == POLL_EVENT_ENABLE_INTR) { - u32 value; - unsigned long flags; - - /* Enable Pkt Interrupt */ - switch (oct->chip_id) { - case OCTEON_CN66XX: - case OCTEON_CN68XX: { - struct octeon_cn6xxx *cn6xxx = - (struct octeon_cn6xxx *)oct->chip; - spin_lock_irqsave - (&cn6xxx->lock_for_droq_int_enb_reg, flags); - value = - octeon_read_csr(oct, - CN6XXX_SLI_PKT_TIME_INT_ENB); - value |= (1 << q_no); - octeon_write_csr(oct, - CN6XXX_SLI_PKT_TIME_INT_ENB, - value); - value = - octeon_read_csr(oct, - CN6XXX_SLI_PKT_CNT_INT_ENB); - value |= (1 << q_no); - octeon_write_csr(oct, - CN6XXX_SLI_PKT_CNT_INT_ENB, - value); - - /* don't bother flushing the enables */ - - spin_unlock_irqrestore - (&cn6xxx->lock_for_droq_int_enb_reg, flags); - return 0; - } break; - case OCTEON_CN23XX_PF_VID: { - lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]); - } + case OCTEON_CN23XX_PF_VID: + lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]); break; - case OCTEON_CN23XX_VF_VID: - lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]); + case OCTEON_CN23XX_VF_VID: + lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]); break; - } - return 0; + default: + dev_err(&oct->pci_dev->dev, "%s Unknown Chip\n", __func__); + return 1; } - dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd); - return -EINVAL; + return 0; } int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h index 815a9f56fd59..f28f262d4ab6 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h @@ -123,11 +123,6 @@ struct oct_droq_stats { }; -#define POLL_EVENT_INTR_ARRIVED 1 -#define POLL_EVENT_PROCESS_PKTS 2 -#define POLL_EVENT_PENDING_PKTS 3 -#define POLL_EVENT_ENABLE_INTR 4 - /* The maximum number of buffers that can be dispatched from the * output/dma queue. Set to 64 assuming 1K buffers in DROQ and the fact that * max packet size from DROQ is 64K. 
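With the POLL_EVENT_* command multiplexer gone, the two helpers exported above are meant to be driven directly from the NAPI poll path: process packets against the budget, then re-arm the queue interrupt once the ring is drained. A minimal sketch of that flow, not part of this patch (the function name is hypothetical, and it assumes the napi_struct that this driver already embeds in struct octeon_droq):

static int lio_napi_poll_sketch(struct napi_struct *napi, int budget)
{
	/* struct octeon_droq embeds its napi_struct; recover the droq */
	struct octeon_droq *droq = container_of(napi, struct octeon_droq, napi);
	struct octeon_device *oct = droq->oct_dev;
	int work_done;

	/* process up to 'budget' Rx packets on this output queue */
	work_done = octeon_droq_process_poll_pkts(oct, droq, budget);

	if (work_done < budget) {
		/* ring drained: leave polling mode and re-arm interrupts */
		napi_complete_done(napi, work_done);
		octeon_enable_irq(oct, droq->q_no);
	}

	return work_done;
}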
@@ -414,8 +409,10 @@ int octeon_droq_process_packets(struct octeon_device *oct, struct octeon_droq *droq, u32 budget); -int octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, - int cmd, u32 arg); +int octeon_droq_process_poll_pkts(struct octeon_device *oct, + struct octeon_droq *droq, u32 budget); + +int octeon_enable_irq(struct octeon_device *oct, u32 q_no); void octeon_droq_check_oom(struct octeon_droq *droq); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c index 57af7df74ced..28e74ee23ff8 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c @@ -87,7 +87,7 @@ int octeon_mbox_read(struct octeon_mbox *mbox) } if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) { - if (mbox->mbox_req.recv_len < msg.s.len) { + if (mbox->mbox_req.recv_len < mbox->mbox_req.msg.s.len) { ret = 0; } else { mbox->state &= ~OCTEON_MBOX_STATE_REQUEST_RECEIVING; @@ -96,7 +96,8 @@ int octeon_mbox_read(struct octeon_mbox *mbox) } } else { if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) { - if (mbox->mbox_resp.recv_len < msg.s.len) { + if (mbox->mbox_resp.recv_len < + mbox->mbox_resp.msg.s.len) { ret = 0; } else { mbox->state &= diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index f2d1a076a038..4069710796a8 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -35,6 +35,18 @@ #define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 #define LIO_IFSTATE_RESETTING 0x10 +struct liquidio_if_cfg_context { + u32 octeon_id; + wait_queue_head_t wc; + int cond; +}; + +struct liquidio_if_cfg_resp { + u64 rh; + struct liquidio_if_cfg_info cfg_info; + u64 status; +}; + struct oct_nic_stats_resp { u64 rh; struct oct_link_stats stats; @@ -178,12 +190,21 @@ irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)), int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs); +int lio_wait_for_clean_oq(struct octeon_device *oct); /** * \brief Register ethtool operations * @param netdev pointer to network device */ void liquidio_set_ethtool_ops(struct net_device *netdev); +/** + * \brief Net device change_mtu + * @param netdev network device + */ +int liquidio_change_mtu(struct net_device *netdev, int new_mtu); +#define LIO_CHANGE_MTU_SUCCESS 1 +#define LIO_CHANGE_MTU_FAIL 2 + #define SKB_ADJ_MASK 0x3F #define SKB_ADJ (SKB_ADJ_MASK + 1) @@ -486,4 +507,56 @@ static inline int wait_for_pending_requests(struct octeon_device *oct) return 0; } +/** + * \brief Stop Tx queues + * @param netdev network device + */ +static inline void stop_txqs(struct net_device *netdev) +{ + int i; + + for (i = 0; i < netdev->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} + +/** + * \brief Wake Tx queues + * @param netdev network device + */ +static inline void wake_txqs(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + int i, qno; + + for (i = 0; i < netdev->num_tx_queues; i++) { + qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no; + + if (__netif_subqueue_stopped(netdev, i)) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, + tx_restart, 1); + netif_wake_subqueue(netdev, i); + } + } +} + +/** + * \brief Start Tx queues + * @param netdev network device + */ +static inline void start_txqs(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + int i; + + if 
(lio->linfo.link.s.link_up) { + for (i = 0; i < netdev->num_tx_queues; i++) + netif_start_subqueue(netdev, i); + } +} + +static inline int skb_iq(struct lio *lio, struct sk_buff *skb) +{ + return skb->queue_mapping % lio->linfo.num_txpciq; +} + #endif diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index e07d2093b971..b1270355b0b1 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -366,6 +366,7 @@ int lio_process_iq_request_list(struct octeon_device *oct, struct octeon_instr_queue *iq, u32 napi_budget) { + struct cavium_wq *cwq = &oct->dma_comp_wq; int reqtype; void *buf; u32 old = iq->flush_index; @@ -450,6 +451,10 @@ lio_process_iq_request_list(struct octeon_device *oct, bytes_compl); iq->flush_index = old; + if (atomic_read(&oct->response_list + [OCTEON_ORDERED_SC_LIST].pending_req_count)) + queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1)); + return inst_count; } @@ -623,7 +628,8 @@ octeon_prepare_soft_command(struct octeon_device *oct, pki_ih3->tag = LIO_CONTROL; pki_ih3->tagtype = ATOMIC_TAG; pki_ih3->qpg = - oct->instr_queue[sc->iq_no]->txpciq.s.qpg; + oct->instr_queue[sc->iq_no]->txpciq.s.ctrl_qpg; + pki_ih3->pm = 0x7; pki_ih3->sl = 8; diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c index 3d691c69f74d..fe5b53700576 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c @@ -49,7 +49,6 @@ int octeon_setup_response_list(struct octeon_device *oct) INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion); cwq->wk.ctxptr = oct; oct->cmd_resp_state = OCT_DRV_ONLINE; - queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50)); return ret; } @@ -164,5 +163,8 @@ static void oct_poll_req_completion(struct work_struct *work) struct cavium_wq *cwq = &oct->dma_comp_wq; lio_process_ordered_list(oct, 0); - queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50)); + + if (atomic_read(&oct->response_list + [OCTEON_ORDERED_SC_LIST].pending_req_count)) + queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1)); } diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 4cacce5d2b16..5fc46c5a4f36 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -265,6 +265,22 @@ struct nicvf_drv_stats { struct cavium_ptp; +struct xcast_addr { + struct list_head list; + u64 addr; +}; + +struct xcast_addr_list { + struct list_head list; + int count; +}; + +struct nicvf_work { + struct delayed_work work; + u8 mode; + struct xcast_addr_list *mc; +}; + struct nicvf { struct nicvf *pnicvf; struct net_device *netdev; @@ -313,6 +329,7 @@ struct nicvf { struct nicvf_pfc pfc; struct tasklet_struct qs_err_task; struct work_struct reset_task; + struct nicvf_work rx_mode_work; /* PTP timestamp */ struct cavium_ptp *ptp_clock; @@ -403,6 +420,9 @@ struct nicvf { #define NIC_MBOX_MSG_PTP_CFG 0x19 /* HW packet timestamp */ #define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */ #define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */ +#define NIC_MBOX_MSG_RESET_XCAST 0xF2 /* Reset DCAM filtering mode */ +#define NIC_MBOX_MSG_ADD_MCAST 0xF3 /* Add MAC to DCAM filters */ +#define NIC_MBOX_MSG_SET_XCAST 0xF4 /* Set MCAST/BCAST RX mode */ struct nic_cfg_msg { u8 msg; @@ -556,6 
+576,14 @@ struct set_ptp { bool enable; }; +struct xcast { + u8 msg; + union { + u8 mode; + u64 mac; + } data; +}; + /* 128 bit shared memory between PF and each VF */ union nic_mbx { struct { u8 msg; } msg; @@ -576,6 +604,7 @@ union nic_mbx { struct reset_stat_cfg reset_stat; struct pfc pfc; struct set_ptp ptp; + struct xcast xcast; }; #define NIC_NODE_ID_MASK 0x03 diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 7ff66a8194e2..55af04fa03a7 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -21,6 +21,8 @@ #define DRV_NAME "nicpf" #define DRV_VERSION "1.0" +#define NIC_VF_PER_MBX_REG 64 + struct hw_info { u8 bgx_cnt; u8 chans_per_lmac; @@ -1072,6 +1074,40 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) case NIC_MBOX_MSG_PTP_CFG: nic_config_timestamp(nic, vf, &mbx.ptp); break; + case NIC_MBOX_MSG_RESET_XCAST: + if (vf >= nic->num_vf_en) { + ret = -1; /* NACK */ + break; + } + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + bgx_reset_xcast_mode(nic->node, bgx, lmac, + vf < NIC_VF_PER_MBX_REG ? vf : + vf - NIC_VF_PER_MBX_REG); + break; + + case NIC_MBOX_MSG_ADD_MCAST: + if (vf >= nic->num_vf_en) { + ret = -1; /* NACK */ + break; + } + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + bgx_set_dmac_cam_filter(nic->node, bgx, lmac, + mbx.xcast.data.mac, + vf < NIC_VF_PER_MBX_REG ? vf : + vf - NIC_VF_PER_MBX_REG); + break; + + case NIC_MBOX_MSG_SET_XCAST: + if (vf >= nic->num_vf_en) { + ret = -1; /* NACK */ + break; + } + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode); + break; default: dev_err(&nic->pdev->dev, "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); @@ -1094,7 +1130,7 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) struct nicpf *nic = (struct nicpf *)nic_irq; int mbx; u64 intr; - u8 vf, vf_per_mbx_reg = 64; + u8 vf; if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0)) mbx = 0; @@ -1103,12 +1139,13 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3)); dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr); - for (vf = 0; vf < vf_per_mbx_reg; vf++) { + for (vf = 0; vf < NIC_VF_PER_MBX_REG; vf++) { if (intr & (1ULL << vf)) { dev_dbg(&nic->pdev->dev, "Intr from VF %d\n", - vf + (mbx * vf_per_mbx_reg)); + vf + (mbx * NIC_VF_PER_MBX_REG)); - nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg)); + nic_handle_mbx_intr(nic, vf + + (mbx * NIC_VF_PER_MBX_REG)); nic_clear_mbx_intr(nic, vf, mbx); } } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index b68cde9f17d2..1e9a31fef729 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -21,6 +21,7 @@ #include <linux/bpf_trace.h> #include <linux/filter.h> #include <linux/net_tstamp.h> +#include <linux/workqueue.h> #include "nic_reg.h" #include "nic.h" @@ -63,14 +64,12 @@ module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug message level bitmap"); static int cpi_alg = CPI_ALG_NONE; -module_param(cpi_alg, int, S_IRUGO); +module_param(cpi_alg, int, 0444); MODULE_PARM_DESC(cpi_alg, 
"PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); -struct nicvf_xdp_tx { - u64 dma_addr; - u8 qidx; -}; +/* workqueue for handling kernel ndo_set_rx_mode() calls */ +static struct workqueue_struct *nicvf_rx_mode_wq; static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) { @@ -507,29 +506,14 @@ static int nicvf_init_resources(struct nicvf *nic) return 0; } -static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr) -{ - /* Check if it's a recycled page, if not unmap the DMA mapping. - * Recycled page holds an extra reference. - */ - if (page_ref_count(page) == 1) { - dma_addr &= PAGE_MASK; - dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, - RCV_FRAG_LEN + XDP_HEADROOM, - DMA_FROM_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); - } -} - static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, struct cqe_rx_t *cqe_rx, struct snd_queue *sq, struct rcv_queue *rq, struct sk_buff **skb) { struct xdp_buff xdp; struct page *page; - struct nicvf_xdp_tx *xdp_tx = NULL; u32 action; - u16 len, err, offset = 0; + u16 len, offset = 0; u64 dma_addr, cpu_addr; void *orig_data; @@ -543,7 +527,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, cpu_addr = (u64)phys_to_virt(cpu_addr); page = virt_to_page((void *)cpu_addr); - xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM; + xdp.data_hard_start = page_address(page); xdp.data = (void *)cpu_addr; xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; @@ -563,7 +547,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, switch (action) { case XDP_PASS: - nicvf_unmap_page(nic, page, dma_addr); + /* Check if it's a recycled page, if not + * unmap the DMA mapping. + * + * Recycled page holds an extra reference. + */ + if (page_ref_count(page) == 1) { + dma_addr &= PAGE_MASK; + dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, + RCV_FRAG_LEN + XDP_PACKET_HEADROOM, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + } /* Build SKB and pass on packet to network stack */ *skb = build_skb(xdp.data, @@ -576,20 +571,6 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, case XDP_TX: nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len); return true; - case XDP_REDIRECT: - /* Save DMA address for use while transmitting */ - xdp_tx = (struct nicvf_xdp_tx *)page_address(page); - xdp_tx->dma_addr = dma_addr; - xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx); - - err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog); - if (!err) - return true; - - /* Free the page on error */ - nicvf_unmap_page(nic, page, dma_addr); - put_page(page); - break; default: bpf_warn_invalid_xdp_action(action); /* fall through */ @@ -597,7 +578,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, trace_xdp_exception(nic->netdev, prog, action); /* fall through */ case XDP_DROP: - nicvf_unmap_page(nic, page, dma_addr); + /* Check if it's a recycled page, if not + * unmap the DMA mapping. + * + * Recycled page holds an extra reference. 
+		 */
+		if (page_ref_count(page) == 1) {
+			dma_addr &= PAGE_MASK;
+			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+					     DMA_FROM_DEVICE,
+					     DMA_ATTR_SKIP_CPU_SYNC);
+		}
 		put_page(page);
 		return true;
 	}
@@ -1864,50 +1856,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
 	}
 }
 
-static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp)
-{
-	struct nicvf *nic = netdev_priv(netdev);
-	struct nicvf *snic = nic;
-	struct nicvf_xdp_tx *xdp_tx;
-	struct snd_queue *sq;
-	struct page *page;
-	int err, qidx;
-
-	if (!netif_running(netdev) || !nic->xdp_prog)
-		return -EINVAL;
-
-	page = virt_to_page(xdp->data);
-	xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
-	qidx = xdp_tx->qidx;
-
-	if (xdp_tx->qidx >= nic->xdp_tx_queues)
-		return -EINVAL;
-
-	/* Get secondary Qset's info */
-	if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) {
-		qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS;
-		snic = (struct nicvf *)nic->snicvf[qidx - 1];
-		if (!snic)
-			return -EINVAL;
-		qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS;
-	}
-
-	sq = &snic->qs->sq[qidx];
-	err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data,
-				      xdp_tx->dma_addr,
-				      xdp->data_end - xdp->data);
-	if (err)
-		return -ENOMEM;
-
-	nicvf_xdp_sq_doorbell(snic, sq, qidx);
-	return 0;
-}
-
-static void nicvf_xdp_flush(struct net_device *dev)
-{
-	return;
-}
-
 static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
 {
 	struct hwtstamp_config config;
@@ -1975,6 +1923,100 @@ static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 	}
 }
 
+static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
+{
+	struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
+						  work.work);
+	struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
+	union nic_mbx mbx = {};
+	struct xcast_addr *xaddr, *next;
+
+	if (!vf_work)
+		return;
+
+	/* From inside the VM code flow we have only 128 bits of mailbox
+	 * memory available per message to the host's PF, so send all mc
+	 * addrs one by one, starting with a flush command in case the
+	 * kernel requests specific MAC filtering.
+	 */
+
+	/* flush DMAC filters and reset RX mode */
+	mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
+	nicvf_send_msg_to_pf(nic, &mbx);
+
+	if (vf_work->mode & BGX_XCAST_MCAST_FILTER) {
+		/* once filtering is enabled, we need to signal the PF to add
+		 * its own LMAC to the filter to accept packets for it.
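+		 *
+		 * (A zero MAC is the agreed convention here: on the PF side,
+		 * bgx_set_dmac_cam_filter() substitutes the LMAC's own MAC
+		 * address for it.)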
+ */ + mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; + mbx.xcast.data.mac = 0; + nicvf_send_msg_to_pf(nic, &mbx); + } + + /* check if we have any specific MACs to be added to PF DMAC filter */ + if (vf_work->mc) { + /* now go through kernel list of MACs and add them one by one */ + list_for_each_entry_safe(xaddr, next, + &vf_work->mc->list, list) { + mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; + mbx.xcast.data.mac = xaddr->addr; + nicvf_send_msg_to_pf(nic, &mbx); + + /* after receiving ACK from PF release memory */ + list_del(&xaddr->list); + kfree(xaddr); + vf_work->mc->count--; + } + kfree(vf_work->mc); + } + + /* and finally set rx mode for PF accordingly */ + mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; + mbx.xcast.data.mode = vf_work->mode; + + nicvf_send_msg_to_pf(nic, &mbx); +} + +static void nicvf_set_rx_mode(struct net_device *netdev) +{ + struct nicvf *nic = netdev_priv(netdev); + struct netdev_hw_addr *ha; + struct xcast_addr_list *mc_list = NULL; + u8 mode = 0; + + if (netdev->flags & IFF_PROMISC) { + mode = BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT; + } else { + if (netdev->flags & IFF_BROADCAST) + mode |= BGX_XCAST_BCAST_ACCEPT; + + if (netdev->flags & IFF_ALLMULTI) { + mode |= BGX_XCAST_MCAST_ACCEPT; + } else if (netdev->flags & IFF_MULTICAST) { + mode |= BGX_XCAST_MCAST_FILTER; + /* here we need to copy mc addrs */ + if (netdev_mc_count(netdev)) { + struct xcast_addr *xaddr; + + mc_list = kmalloc(sizeof(*mc_list), GFP_ATOMIC); + INIT_LIST_HEAD(&mc_list->list); + netdev_hw_addr_list_for_each(ha, &netdev->mc) { + xaddr = kmalloc(sizeof(*xaddr), + GFP_ATOMIC); + xaddr->addr = + ether_addr_to_u64(ha->addr); + list_add_tail(&xaddr->list, + &mc_list->list); + mc_list->count++; + } + } + } + } + nic->rx_mode_work.mc = mc_list; + nic->rx_mode_work.mode = mode; + queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 2 * HZ); +} + static const struct net_device_ops nicvf_netdev_ops = { .ndo_open = nicvf_open, .ndo_stop = nicvf_stop, @@ -1986,9 +2028,8 @@ static const struct net_device_ops nicvf_netdev_ops = { .ndo_fix_features = nicvf_fix_features, .ndo_set_features = nicvf_set_features, .ndo_bpf = nicvf_xdp, - .ndo_xdp_xmit = nicvf_xdp_xmit, - .ndo_xdp_flush = nicvf_xdp_flush, .ndo_do_ioctl = nicvf_ioctl, + .ndo_set_rx_mode = nicvf_set_rx_mode, }; static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -2129,6 +2170,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&nic->reset_task, nicvf_reset_task); + INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); + err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); @@ -2167,6 +2210,8 @@ static void nicvf_remove(struct pci_dev *pdev) nic = netdev_priv(netdev); pnetdev = nic->pnicvf->netdev; + cancel_delayed_work_sync(&nic->rx_mode_work.work); + /* Check if this Qset is assigned to different VF. * If yes, clean primary and all secondary Qsets. 
 */
@@ -2198,12 +2243,17 @@ static struct pci_driver nicvf_driver = {
 static int __init nicvf_init_module(void)
 {
 	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
-
+	nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
+						   WQ_MEM_RECLAIM);
 	return pci_register_driver(&nicvf_driver);
 }
 
 static void __exit nicvf_cleanup_module(void)
 {
+	if (nicvf_rx_mode_wq) {
+		destroy_workqueue(nicvf_rx_mode_wq);
+		nicvf_rx_mode_wq = NULL;
+	}
 	pci_unregister_driver(&nicvf_driver);
 }
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 3eae9ff9b53a..d42704d07484 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
 
 	/* Reserve space for header modifications by BPF program */
 	if (rbdr->is_xdp)
-		buf_len += XDP_HEADROOM;
+		buf_len += XDP_PACKET_HEADROOM;
 
 	/* Check if it's recycled */
 	if (pgcache)
@@ -224,9 +224,8 @@ ret:
 			nic->rb_page = NULL;
 			return -ENOMEM;
 		}
-
 		if (pgcache)
-			pgcache->dma_addr = *rbuf + XDP_HEADROOM;
+			pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
 		nic->rb_page_offset += buf_len;
 	}
 
@@ -1244,7 +1243,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
 	int qentry;
 
 	if (subdesc_cnt > sq->xdp_free_cnt)
-		return -1;
+		return 0;
 
 	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
 
@@ -1255,7 +1254,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
 
 	sq->xdp_desc_cnt += subdesc_cnt;
 
-	return 0;
+	return 1;
 }
 
 /* Calculate no of SQ subdescriptors needed to transmit all
@@ -1656,7 +1655,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
 		if (page_ref_count(page) != 1)
 			return;
 
-		len += XDP_HEADROOM;
+		len += XDP_PACKET_HEADROOM;
 		/* Receive buffers in XDP mode are mapped from page start */
 		dma_addr &= PAGE_MASK;
 	}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index ce1eed7a6d63..5e9a03cf1b4d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -11,7 +11,6 @@
 
 #include <linux/netdevice.h>
 #include <linux/iommu.h>
-#include <linux/bpf.h>
 #include <net/xdp.h>
 #include "q_struct.h"
 
@@ -94,9 +93,6 @@
 #define RCV_FRAG_LEN	 (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
-#define RCV_BUF_HEADROOM	128 /* To store dma address for XDP redirect */
-#define XDP_HEADROOM		(XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)
-
 #define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
 				 MAX_CQE_PER_PKT_XMIT)
 
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 91d34ea40e2c..5d08d2aeb172 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -24,9 +24,31 @@
 #define DRV_NAME	"thunder_bgx"
 #define DRV_VERSION	"1.0"
 
+/* RX_DMAC_CTL configuration */
+enum MCAST_MODE {
+	MCAST_MODE_REJECT = 0x0,
+	MCAST_MODE_ACCEPT = 0x1,
+	MCAST_MODE_CAM_FILTER = 0x2,
+	RSVD = 0x3
+};
+
+#define BCAST_ACCEPT	BIT(0)
+#define CAM_ACCEPT	BIT(3)
+#define MCAST_MODE_MASK	0x3
+#define BGX_MCAST_MODE(x)	(x << 1)
+
+struct dmac_map {
+	u64 vf_map;
+	u64 dmac;
+};
+
 struct lmac {
 	struct bgx		*bgx;
-	int			dmac;
+	/* actual number of DMACs configured */
+	u8			dmacs_cfg;
+	/* overall number of DMACs that can be configured per LMAC */
+	u8			dmacs_count;
+	struct dmac_map		*dmacs; /* DMAC:VFs tracking filter array */
 	u8			mac[ETH_ALEN];
 	u8			lmac_type;
 	u8			lane_to_sds;
@@ -223,6 +245,163 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
 }
 EXPORT_SYMBOL(bgx_set_lmac_mac);
 
+static void bgx_flush_dmac_cam_filter(struct bgx *bgx, int lmacid)
+{
+	struct lmac *lmac = NULL;
+	u8 idx = 0;
+
+	lmac = &bgx->lmac[lmacid];
+	/* reset CAM filters */
+	for (idx = 0; idx < lmac->dmacs_count; idx++)
+		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
+			      ((lmacid * lmac->dmacs_count) + idx) *
+			      sizeof(u64), 0);
+}
+
+static void bgx_lmac_remove_filters(struct lmac *lmac, u8 vf_id)
+{
+	int i = 0;
+
+	if (!lmac)
+		return;
+
+	/* We got a reset-filters request from one of the attached VFs,
+	 * while the others might want to keep their configuration. So in
+	 * this case iterate over all configured filters and decrease their
+	 * reference counts; if an address drops to zero references, remove
+	 * it from the list.
+	 */
+	for (i = lmac->dmacs_cfg - 1; i >= 0; i--) {
+		lmac->dmacs[i].vf_map &= ~BIT_ULL(vf_id);
+		if (!lmac->dmacs[i].vf_map) {
+			lmac->dmacs_cfg--;
+			lmac->dmacs[i].dmac = 0;
+			lmac->dmacs[i].vf_map = 0;
+		}
+	}
+}
+
+static int bgx_lmac_save_filter(struct lmac *lmac, u64 dmac, u8 vf_id)
+{
+	u8 i = 0;
+
+	if (!lmac)
+		return -1;
+
+	/* Several VFs can be 'attached' to a particular LMAC at the same
+	 * time, and each VF is represented as a network interface to the
+	 * kernel. So from the user's perspective it should be possible to
+	 * manipulate each VF's receive mode. From the PF driver's
+	 * perspective, however, we need to keep track of the filter
+	 * configurations of the different VFs to prevent duplicate filter
+	 * values.
+	 */
+	for (i = 0; i < lmac->dmacs_cfg; i++) {
+		if (lmac->dmacs[i].dmac == dmac) {
+			lmac->dmacs[i].vf_map |= BIT_ULL(vf_id);
+			return -1;
+		}
+	}
+
+	if (!(lmac->dmacs_cfg < lmac->dmacs_count))
+		return -1;
+
+	/* keep it for further tracking */
+	lmac->dmacs[lmac->dmacs_cfg].dmac = dmac;
+	lmac->dmacs[lmac->dmacs_cfg].vf_map = BIT_ULL(vf_id);
+	lmac->dmacs_cfg++;
+	return 0;
+}
+
+static int bgx_set_dmac_cam_filter_mac(struct bgx *bgx, int lmacid,
+				       u64 cam_dmac, u8 idx)
+{
+	struct lmac *lmac = NULL;
+	u64 cfg = 0;
+
+	/* skip zero addresses as meaningless */
+	if (!cam_dmac || !bgx)
+		return -1;
+
+	lmac = &bgx->lmac[lmacid];
+
+	/* configure DCAM filtering for designated LMAC */
+	cfg = RX_DMACX_CAM_LMACID(lmacid & LMAC_ID_MASK) |
+		RX_DMACX_CAM_EN | cam_dmac;
+	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
+		      ((lmacid * lmac->dmacs_count) + idx) * sizeof(u64), cfg);
+	return 0;
+}
+
+void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid,
+			     u64 cam_dmac, u8 vf_id)
+{
+	struct bgx *bgx = get_bgx(node, bgx_idx);
+	struct lmac *lmac = NULL;
+
+	if (!bgx)
+		return;
+
+	lmac = &bgx->lmac[lmacid];
+
+	if (!cam_dmac)
+		cam_dmac = ether_addr_to_u64(lmac->mac);
+
+	/* since we might have several VFs attached to a particular LMAC
+	 * and the kernel could call mcast config for each of them with the
+	 * same MAC, check if the requested MAC is already in the filtering
+	 * list and update/prepare the list of MACs to be applied later to
+	 * the HW filters
+	 */
+	bgx_lmac_save_filter(lmac, cam_dmac, vf_id);
+}
+EXPORT_SYMBOL(bgx_set_dmac_cam_filter);
+
+void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode)
+{
+	struct bgx *bgx = get_bgx(node, bgx_idx);
+	struct lmac *lmac = NULL;
+	u64 cfg = 0;
+	u8 i = 0;
+
+	if (!bgx)
+		return;
+
+	lmac = &bgx->lmac[lmacid];
+
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL);
+	if (mode & BGX_XCAST_BCAST_ACCEPT)
+		cfg |= BCAST_ACCEPT;
+	else
+		cfg &= ~BCAST_ACCEPT;
+
+	/* disable all MCASTs and DMAC filtering */
+	cfg &= ~(CAM_ACCEPT | BGX_MCAST_MODE(MCAST_MODE_MASK));
+
+	/* check requested bits and set filtering mode appropriately */
+	if (mode & (BGX_XCAST_MCAST_ACCEPT)) {
+		cfg |= (BGX_MCAST_MODE(MCAST_MODE_ACCEPT));
+	} else if (mode & BGX_XCAST_MCAST_FILTER) {
+		cfg |= (BGX_MCAST_MODE(MCAST_MODE_CAM_FILTER) | CAM_ACCEPT);
+		for (i = 0; i < lmac->dmacs_cfg; i++)
+			bgx_set_dmac_cam_filter_mac(bgx, lmacid,
+						    lmac->dmacs[i].dmac, i);
+	}
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, cfg);
+}
+EXPORT_SYMBOL(bgx_set_xcast_mode);
+
+void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf_id)
+{
+	struct bgx *bgx = get_bgx(node, bgx_idx);
+
+	if (!bgx)
+		return;
+
+	bgx_lmac_remove_filters(&bgx->lmac[lmacid], vf_id);
+	bgx_flush_dmac_cam_filter(bgx, lmacid);
+	bgx_set_xcast_mode(node, bgx_idx, lmacid,
+			   (BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT));
+}
+EXPORT_SYMBOL(bgx_reset_xcast_mode);
+
 void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
 {
 	struct bgx *bgx = get_bgx(node, bgx_idx);
@@ -468,18 +647,6 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
 }
 EXPORT_SYMBOL(bgx_get_tx_stats);
 
-static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
-{
-	u64 offset;
-
-	while (bgx->lmac[lmac].dmac > 0) {
-		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
-			(lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
-		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
-		bgx->lmac[lmac].dmac--;
-	}
-}
-
 /* Configure BGX LMAC in internal loopback mode */
 void bgx_lmac_internal_loopback(int node, int bgx_idx,
 				int lmac_idx, bool enable)
@@ -912,6 +1079,11 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
 	}
 
+	/* actual number of filters available to each LMAC */
+	lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
+	lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
+			      GFP_KERNEL);
+
 	/* Enable lmac */
 	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
 
@@ -998,7 +1170,8 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
 	cfg &= ~CMR_EN;
 	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
 
-	bgx_flush_dmac_addrs(bgx, lmacid);
+	bgx_flush_dmac_cam_filter(bgx, lmacid);
+	kfree(lmac->dmacs);
 
 	if ((lmac->lmac_type != BGX_MODE_XFI) &&
 	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 5a7567d31138..cbdd20b9ee6f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -30,6 +30,7 @@
 #define DEFAULT_PAUSE_TIME		0xFFFF
 
 #define BGX_ID_MASK			0x3
+#define LMAC_ID_MASK			0x3
 
 #define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE	2
 
@@ -57,7 +58,7 @@
 #define BGX_CMRX_RX_FIFO_LEN		0x108
 #define BGX_CMR_RX_DMACX_CAM		0x200
 #define  RX_DMACX_CAM_EN		BIT_ULL(48)
-#define  RX_DMACX_CAM_LMACID(x)		(x << 49)
+#define  RX_DMACX_CAM_LMACID(x)		(((u64)x) << 49)
 #define  RX_DMAC_COUNT			32
 #define BGX_CMR_RX_STREERING		0x300
 #define  RX_TRAFFIC_STEER_RULE_COUNT	8
@@ -205,17 +206,13 @@
 #define LMAC_INTR_LINK_UP		BIT(0)
 #define LMAC_INTR_LINK_DOWN		BIT(1)
 
-/* RX_DMAC_CTL configuration*/
-enum MCAST_MODE {
-		MCAST_MODE_REJECT,
-		MCAST_MODE_ACCEPT,
-		MCAST_MODE_CAM_FILTER,
-		RSVD
-};
-
-#define BCAST_ACCEPT	1
-#define CAM_ACCEPT	1
+#define BGX_XCAST_BCAST_ACCEPT	BIT(0)
+#define BGX_XCAST_MCAST_ACCEPT	BIT(1)
+#define BGX_XCAST_MCAST_FILTER
BIT(2) +void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid, u64 mac, u8 vf); +void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf); +void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode); void octeon_mdiobus_force_mod_depencency(void); void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable); void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 185fe8df7628..2edfdbdaae48 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -776,11 +776,11 @@ static ssize_t store_nservers(struct device *d, struct device_attribute *attr, #define CXGB3_ATTR_R(name, val_expr) \ CXGB3_SHOW(name, val_expr) \ -static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) +static DEVICE_ATTR(name, 0444, show_##name, NULL) #define CXGB3_ATTR_RW(name, val_expr, store_method) \ CXGB3_SHOW(name, val_expr) \ -static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method) +static DEVICE_ATTR(name, 0644, show_##name, store_method) CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5)); CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters); @@ -859,7 +859,7 @@ static ssize_t store_##name(struct device *d, struct device_attribute *attr, \ { \ return tm_attr_store(d, buf, len, sched); \ } \ -static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name) +static DEVICE_ATTR(name, 0644, show_##name, store_##name) TM_ATTR(sched0, 0); TM_ATTR(sched1, 1); diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index a89721fad633..080918af773c 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -681,18 +681,18 @@ int t3_seeprom_wp(struct adapter *adapter, int enable) return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 
0xc : 0); } -static int vpdstrtouint(char *s, int len, unsigned int base, unsigned int *val) +static int vpdstrtouint(char *s, u8 len, unsigned int base, unsigned int *val) { - char tok[len + 1]; + char tok[256]; memcpy(tok, s, len); tok[len] = 0; return kstrtouint(strim(tok), base, val); } -static int vpdstrtou16(char *s, int len, unsigned int base, u16 *val) +static int vpdstrtou16(char *s, u8 len, unsigned int base, u16 *val) { - char tok[len + 1]; + char tok[256]; memcpy(tok, s, len); tok[len] = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index 53b6a02c778e..bea6a059a8f1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \ - cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \ + cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \ cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \ cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 557fd8bfd54e..9da6f57901a9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -472,7 +472,7 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init, if (is_t6(padap->params.chip)) { size = padap->params.cim_la_size / 10 + 1; - size *= 11 * sizeof(u32); + size *= 10 * sizeof(u32); } else { size = padap->params.cim_la_size / 8; size *= 8 * sizeof(u32); @@ -878,6 +878,86 @@ static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type, &payload->start, &payload->end); } +static int cudbg_memory_read(struct cudbg_init *pdbg_init, int win, + int mtype, u32 addr, u32 len, void *hbuf) +{ + u32 win_pf, memoffset, mem_aperture, mem_base; + struct adapter *adap = pdbg_init->adap; + u32 pos, offset, resid; + u32 *res_buf; + u64 *buf; + int ret; + + /* Argument sanity checks ... + */ + if (addr & 0x3 || (uintptr_t)hbuf & 0x3) + return -EINVAL; + + buf = (u64 *)hbuf; + + /* Try to do 64-bit reads. Residual will be handled later. */ + resid = len & 0x7; + len -= resid; + + ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base, + &mem_aperture); + if (ret) + return ret; + + addr = addr + memoffset; + win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf); + + pos = addr & ~(mem_aperture - 1); + offset = addr - pos; + + /* Set up initial PCI-E Memory Window to cover the start of our + * transfer. + */ + t4_memory_update_win(adap, win, pos | win_pf); + + /* Transfer data from the adapter */ + while (len > 0) { + *buf++ = le64_to_cpu((__force __le64) + t4_read_reg64(adap, mem_base + offset)); + offset += sizeof(u64); + len -= sizeof(u64); + + /* If we've reached the end of our current window aperture, + * move the PCI-E Memory Window on to the next. + */ + if (offset == mem_aperture) { + pos += mem_aperture; + offset = 0; + t4_memory_update_win(adap, win, pos | win_pf); + } + } + + res_buf = (u32 *)buf; + /* Read residual in 32-bit multiples */ + while (resid > sizeof(u32)) { + *res_buf++ = le32_to_cpu((__force __le32) + t4_read_reg(adap, mem_base + offset)); + offset += sizeof(u32); + resid -= sizeof(u32); + + /* If we've reached the end of our current window aperture, + * move the PCI-E Memory Window on to the next. 
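+	 * (Same window-advance logic as the 64-bit transfer loop above,
+	 * only with 32-bit reads for the residual bytes.)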
+ */ + if (offset == mem_aperture) { + pos += mem_aperture; + offset = 0; + t4_memory_update_win(adap, win, pos | win_pf); + } + } + + /* Transfer residual < 32-bits */ + if (resid) + t4_memory_rw_residual(adap, resid, mem_base + offset, + (u8 *)res_buf, T4_MEMORY_READ); + + return 0; +} + #define CUDBG_YIELD_ITERATION 256 static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init, @@ -937,10 +1017,8 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init, goto skip_read; spin_lock(&padap->win0_lock); - rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, - bytes_read, bytes, - (__be32 *)temp_buff.data, - 1); + rc = cudbg_memory_read(pdbg_init, MEMWIN_NIC, mem_type, + bytes_read, bytes, temp_buff.data); spin_unlock(&padap->win0_lock); if (rc) { cudbg_err->sys_err = rc; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 9040e13ce4b7..688f95440af2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -390,6 +390,8 @@ struct adapter_params { * used by the Port */ u8 mps_bg_map[MAX_NPORTS]; /* MPS Buffer Group Map */ + bool write_w_imm_support; /* FW supports WRITE_WITH_IMMEDIATE */ + bool write_cmpl_support; /* FW supports WRITE_CMPL */ }; /* State needed to monitor the forward progress of SGE Ingress DMA activities @@ -831,6 +833,16 @@ struct vf_info { u16 vlan; }; +enum { + HMA_DMA_MAPPED_FLAG = 1 +}; + +struct hma_data { + unsigned char flags; + struct sg_table *sgt; + dma_addr_t *phy_addr; /* physical address of the page */ +}; + struct mbox_list { struct list_head list; }; @@ -907,6 +919,7 @@ struct adapter { struct work_struct tid_release_task; struct work_struct db_full_task; struct work_struct db_drop_task; + struct work_struct fatal_err_notify_task; bool tid_release_task_busy; /* lock for mailbox cmd list */ @@ -946,6 +959,11 @@ struct adapter { /* Ethtool Dump */ struct ethtool_dump eth_dump; + + /* HMA */ + struct hma_data hma; + + struct srq_data *srq; }; /* Support for "sched-class" command to allow a TX Scheduling Class to be @@ -1488,6 +1506,11 @@ u32 t4_read_pcie_cfg4(struct adapter *adap, int reg); u32 t4_get_util_window(struct adapter *adap); void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window); +int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off, + u32 *mem_base, u32 *mem_aperture); +void t4_memory_update_win(struct adapter *adap, int win, u32 addr); +void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf, + int dir); #define T4_MEMORY_WRITE 0 #define T4_MEMORY_READ 1 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 30485f9a598f..143686c60234 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -102,7 +102,7 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) case CUDBG_CIM_LA: if (is_t6(adap->params.chip)) { len = adap->params.cim_la_size / 10 + 1; - len *= 11 * sizeof(u32); + len *= 10 * sizeof(u32); } else { len = adap->params.cim_la_size / 8; len *= 8 * sizeof(u32); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 2822bbff73e8..251d5bdc972f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2617,7 +2617,7 @@ int 
mem_open(struct inode *inode, struct file *file) file->private_data = inode->i_private; - mem = (uintptr_t)file->private_data & 0x3; + mem = (uintptr_t)file->private_data & 0x7; adap = file->private_data - mem; (void)t4_fwcache(adap, FW_PARAM_DEV_FWCACHE_FLUSH); @@ -2630,7 +2630,7 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count, { loff_t pos = *ppos; loff_t avail = file_inode(file)->i_size; - unsigned int mem = (uintptr_t)file->private_data & 3; + unsigned int mem = (uintptr_t)file->private_data & 0x7; struct adapter *adap = file->private_data - mem; __be32 *data; int ret; @@ -2752,7 +2752,7 @@ DEFINE_SIMPLE_DEBUGFS_FILE(tid_info); static void add_debugfs_mem(struct adapter *adap, const char *name, unsigned int idx, unsigned int size_mb) { - debugfs_create_file_size(name, S_IRUSR, adap->debugfs_root, + debugfs_create_file_size(name, 0400, adap->debugfs_root, (void *)adap + idx, &mem_debugfs_fops, size_mb << 20); } @@ -2947,65 +2947,65 @@ int t4_setup_debugfs(struct adapter *adap) struct dentry *de; static struct t4_debugfs_entry t4_debugfs_files[] = { - { "cim_la", &cim_la_fops, S_IRUSR, 0 }, - { "cim_pif_la", &cim_pif_la_fops, S_IRUSR, 0 }, - { "cim_ma_la", &cim_ma_la_fops, S_IRUSR, 0 }, - { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 }, - { "clk", &clk_debugfs_fops, S_IRUSR, 0 }, - { "devlog", &devlog_fops, S_IRUSR, 0 }, - { "mboxlog", &mboxlog_fops, S_IRUSR, 0 }, - { "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 }, - { "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 }, - { "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 }, - { "mbox3", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 3 }, - { "mbox4", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 4 }, - { "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 }, - { "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 }, - { "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 }, - { "trace0", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 0 }, - { "trace1", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 1 }, - { "trace2", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 2 }, - { "trace3", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 3 }, - { "l2t", &t4_l2t_fops, S_IRUSR, 0}, - { "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 }, - { "rss", &rss_debugfs_fops, S_IRUSR, 0 }, - { "rss_config", &rss_config_debugfs_fops, S_IRUSR, 0 }, - { "rss_key", &rss_key_debugfs_fops, S_IRUSR, 0 }, - { "rss_pf_config", &rss_pf_config_debugfs_fops, S_IRUSR, 0 }, - { "rss_vf_config", &rss_vf_config_debugfs_fops, S_IRUSR, 0 }, - { "sge_qinfo", &sge_qinfo_debugfs_fops, S_IRUSR, 0 }, - { "ibq_tp0", &cim_ibq_fops, S_IRUSR, 0 }, - { "ibq_tp1", &cim_ibq_fops, S_IRUSR, 1 }, - { "ibq_ulp", &cim_ibq_fops, S_IRUSR, 2 }, - { "ibq_sge0", &cim_ibq_fops, S_IRUSR, 3 }, - { "ibq_sge1", &cim_ibq_fops, S_IRUSR, 4 }, - { "ibq_ncsi", &cim_ibq_fops, S_IRUSR, 5 }, - { "obq_ulp0", &cim_obq_fops, S_IRUSR, 0 }, - { "obq_ulp1", &cim_obq_fops, S_IRUSR, 1 }, - { "obq_ulp2", &cim_obq_fops, S_IRUSR, 2 }, - { "obq_ulp3", &cim_obq_fops, S_IRUSR, 3 }, - { "obq_sge", &cim_obq_fops, S_IRUSR, 4 }, - { "obq_ncsi", &cim_obq_fops, S_IRUSR, 5 }, - { "tp_la", &tp_la_fops, S_IRUSR, 0 }, - { "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 }, - { "sensors", &sensors_debugfs_fops, S_IRUSR, 0 }, - { "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 }, - { "tx_rate", &tx_rate_debugfs_fops, S_IRUSR, 0 }, - { "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 }, + { "cim_la", &cim_la_fops, 0400, 0 }, + { "cim_pif_la", &cim_pif_la_fops, 0400, 0 }, + { "cim_ma_la", &cim_ma_la_fops, 0400, 0 }, + { "cim_qcfg", &cim_qcfg_fops, 0400, 0 }, + { 
"clk", &clk_debugfs_fops, 0400, 0 }, + { "devlog", &devlog_fops, 0400, 0 }, + { "mboxlog", &mboxlog_fops, 0400, 0 }, + { "mbox0", &mbox_debugfs_fops, 0600, 0 }, + { "mbox1", &mbox_debugfs_fops, 0600, 1 }, + { "mbox2", &mbox_debugfs_fops, 0600, 2 }, + { "mbox3", &mbox_debugfs_fops, 0600, 3 }, + { "mbox4", &mbox_debugfs_fops, 0600, 4 }, + { "mbox5", &mbox_debugfs_fops, 0600, 5 }, + { "mbox6", &mbox_debugfs_fops, 0600, 6 }, + { "mbox7", &mbox_debugfs_fops, 0600, 7 }, + { "trace0", &mps_trc_debugfs_fops, 0600, 0 }, + { "trace1", &mps_trc_debugfs_fops, 0600, 1 }, + { "trace2", &mps_trc_debugfs_fops, 0600, 2 }, + { "trace3", &mps_trc_debugfs_fops, 0600, 3 }, + { "l2t", &t4_l2t_fops, 0400, 0}, + { "mps_tcam", &mps_tcam_debugfs_fops, 0400, 0 }, + { "rss", &rss_debugfs_fops, 0400, 0 }, + { "rss_config", &rss_config_debugfs_fops, 0400, 0 }, + { "rss_key", &rss_key_debugfs_fops, 0400, 0 }, + { "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 }, + { "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 }, + { "sge_qinfo", &sge_qinfo_debugfs_fops, 0400, 0 }, + { "ibq_tp0", &cim_ibq_fops, 0400, 0 }, + { "ibq_tp1", &cim_ibq_fops, 0400, 1 }, + { "ibq_ulp", &cim_ibq_fops, 0400, 2 }, + { "ibq_sge0", &cim_ibq_fops, 0400, 3 }, + { "ibq_sge1", &cim_ibq_fops, 0400, 4 }, + { "ibq_ncsi", &cim_ibq_fops, 0400, 5 }, + { "obq_ulp0", &cim_obq_fops, 0400, 0 }, + { "obq_ulp1", &cim_obq_fops, 0400, 1 }, + { "obq_ulp2", &cim_obq_fops, 0400, 2 }, + { "obq_ulp3", &cim_obq_fops, 0400, 3 }, + { "obq_sge", &cim_obq_fops, 0400, 4 }, + { "obq_ncsi", &cim_obq_fops, 0400, 5 }, + { "tp_la", &tp_la_fops, 0400, 0 }, + { "ulprx_la", &ulprx_la_fops, 0400, 0 }, + { "sensors", &sensors_debugfs_fops, 0400, 0 }, + { "pm_stats", &pm_stats_debugfs_fops, 0400, 0 }, + { "tx_rate", &tx_rate_debugfs_fops, 0400, 0 }, + { "cctrl", &cctrl_tbl_debugfs_fops, 0400, 0 }, #if IS_ENABLED(CONFIG_IPV6) - { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 }, + { "clip_tbl", &clip_tbl_debugfs_fops, 0400, 0 }, #endif - { "tids", &tid_info_debugfs_fops, S_IRUSR, 0}, - { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 }, - { "meminfo", &meminfo_fops, S_IRUSR, 0 }, - { "crypto", &chcr_stats_debugfs_fops, S_IRUSR, 0 }, + { "tids", &tid_info_debugfs_fops, 0400, 0}, + { "blocked_fl", &blocked_fl_fops, 0600, 0 }, + { "meminfo", &meminfo_fops, 0400, 0 }, + { "crypto", &chcr_stats_debugfs_fops, 0400, 0 }, }; /* Debug FS nodes common to all T5 and later adapters. 
*/ static struct t4_debugfs_entry t5_debugfs_files[] = { - { "obq_sge_rx_q0", &cim_obq_fops, S_IRUSR, 6 }, - { "obq_sge_rx_q1", &cim_obq_fops, S_IRUSR, 7 }, + { "obq_sge_rx_q0", &cim_obq_fops, 0400, 6 }, + { "obq_sge_rx_q1", &cim_obq_fops, 0400, 7 }, }; add_debugfs_files(adap, @@ -3042,13 +3042,19 @@ int t4_setup_debugfs(struct adapter *adap) add_debugfs_mem(adap, "mc", MEM_MC, EXT_MEM_SIZE_G(size)); } + + if (i & HMA_MUX_F) { + size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + add_debugfs_mem(adap, "hma", MEM_HMA, + EXT_MEM1_SIZE_G(size)); + } } - de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, + de = debugfs_create_file_size("flash", 0400, adap->debugfs_root, adap, &flash_debugfs_fops, adap->params.sf_size); - debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR, + debugfs_create_bool("use_backdoor", 0600, adap->debugfs_root, &adap->use_bd); - debugfs_create_bool("trace_rss", S_IWUSR | S_IRUSR, + debugfs_create_bool("trace_rss", 0600, adap->debugfs_root, &adap->trace_rss); return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 7852d98bad75..59d04d73c672 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -597,22 +597,22 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, case FW_PORT_TYPE_KR: SET_LMM(Backplane); - SET_LMM(10000baseKR_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); break; case FW_PORT_TYPE_BP_AP: SET_LMM(Backplane); - SET_LMM(10000baseR_FEC); - SET_LMM(10000baseKR_Full); - SET_LMM(1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); break; case FW_PORT_TYPE_BP4_AP: SET_LMM(Backplane); - SET_LMM(10000baseR_FEC); - SET_LMM(10000baseKR_Full); - SET_LMM(1000baseKX_Full); - SET_LMM(10000baseKX4_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full); break; case FW_PORT_TYPE_FIBER_XFI: @@ -628,7 +628,9 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, case FW_PORT_TYPE_BP40_BA: case FW_PORT_TYPE_QSFP: SET_LMM(FIBRE); - SET_LMM(40000baseSR4_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); + FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full); break; case FW_PORT_TYPE_CR_QSFP: @@ -655,12 +657,14 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, case FW_PORT_TYPE_CR2_QSFP: SET_LMM(FIBRE); - SET_LMM(50000baseSR2_Full); + FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full); break; case FW_PORT_TYPE_KR4_100G: case FW_PORT_TYPE_CR4_QSFP: SET_LMM(FIBRE); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseSR_Full); FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full); FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full); FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 3177b0c9bd2d..db92f1858060 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -1335,12 +1335,6 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, return ret; } - /* Clear out any old resources being used by the filter before - * we start constructing the new filter. 
- */ - if (f->valid) - clear_filter(adapter, f); - if (is_t6(adapter->params.chip) && fs->type && ipv6_addr_type((const struct in6_addr *)fs->val.lip) != IPV6_ADDR_ANY) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 56bc626ef006..24d2865b8806 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -75,6 +75,7 @@ #include "t4fw_api.h" #include "t4fw_version.h" #include "cxgb4_dcb.h" +#include "srq.h" #include "cxgb4_debugfs.h" #include "clip_tbl.h" #include "l2t.h" @@ -210,6 +211,9 @@ static void link_report(struct net_device *dev) case 40000: s = "40Gbps"; break; + case 50000: + s = "50Gbps"; + break; case 100000: s = "100Gbps"; break; @@ -583,6 +587,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, const struct cpl_abort_rpl_rss *p = (void *)rsp; hash_del_filter_rpl(q->adap, p); + } else if (opcode == CPL_SRQ_TABLE_RPL) { + const struct cpl_srq_table_rpl *p = (void *)rsp; + + do_srq_table_rpl(q->adap, p); } else dev_err(q->adap->pdev_dev, "unexpected CPL %#x on FW event queue\n", opcode); @@ -833,8 +841,6 @@ static int setup_fw_sge_queues(struct adapter *adap) err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], adap->msi_idx, NULL, fwevtq_handler, NULL, -1); - if (err) - t4_free_sge_resources(adap); return err; } @@ -1733,10 +1739,11 @@ EXPORT_SYMBOL(cxgb4_sync_txq_pidx); int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) { - struct adapter *adap; - u32 offset, memtype, memaddr; u32 edc0_size, edc1_size, mc0_size, mc1_size, size; u32 edc0_end, edc1_end, mc0_end, mc1_end; + u32 offset, memtype, memaddr; + struct adapter *adap; + u32 hma_size = 0; int ret; adap = netdev2adap(dev); @@ -1756,6 +1763,10 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); mc0_size = EXT_MEM0_SIZE_G(size) << 20; + if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) { + size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + hma_size = EXT_MEM1_SIZE_G(size) << 20; + } edc0_end = edc0_size; edc1_end = edc0_end + edc1_size; mc0_end = edc1_end + mc0_size; @@ -1767,7 +1778,10 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) memtype = MEM_EDC1; memaddr = offset - edc0_end; } else { - if (offset < mc0_end) { + if (hma_size && (offset < (edc1_end + hma_size))) { + memtype = MEM_HMA; + memaddr = offset - edc1_end; + } else if (offset < mc0_end) { memtype = MEM_MC0; memaddr = offset - edc1_end; } else if (is_t5(adap->params.chip)) { @@ -2681,13 +2695,17 @@ static int cxgb4_mgmt_get_vf_config(struct net_device *dev, { struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; + struct vf_info *vfinfo; if (vf >= adap->num_vfs) return -EINVAL; + vfinfo = &adap->vfinfo[vf]; + ivi->vf = vf; - ivi->max_tx_rate = adap->vfinfo[vf].tx_rate; + ivi->max_tx_rate = vfinfo->tx_rate; ivi->min_tx_rate = 0; - ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr); + ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr); + ivi->vlan = vfinfo->vlan; return 0; } @@ -2870,11 +2888,11 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) /* Convert from Mbps to Kbps */ req_rate = rate << 10; - /* Max rate is 10 Gbps */ + /* Max rate is 100 Gbps */ if (req_rate >= SCHED_MAX_RATE_KBPS) { dev_err(adap->pdev_dev, - "Invalid rate %u Mbps, Max rate is %u Gbps\n", - rate, SCHED_MAX_RATE_KBPS); + "Invalid rate %u Mbps, Max rate is %u Mbps\n", + 
rate, SCHED_MAX_RATE_KBPS >> 10); return -ERANGE; } @@ -3244,6 +3262,14 @@ static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = { .get_drvinfo = cxgb4_mgmt_get_drvinfo, }; +static void notify_fatal_err(struct work_struct *work) +{ + struct adapter *adap; + + adap = container_of(work, struct adapter, fatal_err_notify_task); + notify_ulds(adap, CXGB4_STATE_FATAL_ERROR); +} + void t4_fatal_err(struct adapter *adap) { int port; @@ -3268,6 +3294,7 @@ void t4_fatal_err(struct adapter *adap) netif_carrier_off(dev); } dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); + queue_work(adap->workq, &adap->fatal_err_notify_task); } static void setup_memwin(struct adapter *adap) @@ -3298,6 +3325,206 @@ static void setup_memwin_rdma(struct adapter *adap) } } +/* HMA Definitions */ + +/* The maximum number of addresses that can be sent in a single FW cmd */ +#define HMA_MAX_ADDR_IN_CMD 5 + +#define HMA_PAGE_SIZE PAGE_SIZE + +#define HMA_MAX_NO_FW_ADDRESS (16 << 10) /* FW supports 16K addresses */ + +#define HMA_PAGE_ORDER \ + ((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ? \ + ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0) + +/* The minimum and maximum possible HMA sizes that can be specified in the FW + * configuration (in units of MB). + */ +#define HMA_MIN_TOTAL_SIZE 1 +#define HMA_MAX_TOTAL_SIZE \ + (((HMA_PAGE_SIZE << HMA_PAGE_ORDER) * \ + HMA_MAX_NO_FW_ADDRESS) >> 20) + +static void adap_free_hma_mem(struct adapter *adapter) +{ + struct scatterlist *iter; + struct page *page; + int i; + + if (!adapter->hma.sgt) + return; + + if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) { + dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl, + adapter->hma.sgt->nents, DMA_BIDIRECTIONAL); + adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG; + } + + for_each_sg(adapter->hma.sgt->sgl, iter, + adapter->hma.sgt->orig_nents, i) { + page = sg_page(iter); + if (page) + __free_pages(page, HMA_PAGE_ORDER); + } + + kfree(adapter->hma.phy_addr); + sg_free_table(adapter->hma.sgt); + kfree(adapter->hma.sgt); + adapter->hma.sgt = NULL; +} + +static int adap_config_hma(struct adapter *adapter) +{ + struct scatterlist *sgl, *iter; + struct sg_table *sgt; + struct page *newpage; + unsigned int i, j, k; + u32 param, hma_size; + unsigned int ncmds; + size_t page_size; + u32 page_order; + int node, ret; + + /* HMA is supported only for T6+ cards. + * Avoid initializing HMA in kdump kernels. + */ + if (is_kdump_kernel() || + CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6) + return 0; + + /* Get the HMA region size required by fw */ + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE)); + ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, + 1, &param, &hma_size); + /* An error means card has its own memory or HMA is not supported by + * the firmware. Return without any errors.
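+ * A returned size of zero means the same thing, hence the !hma_size check below.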
+ */ + if (ret || !hma_size) + return 0; + + if (hma_size < HMA_MIN_TOTAL_SIZE || + hma_size > HMA_MAX_TOTAL_SIZE) { + dev_err(adapter->pdev_dev, + "HMA size %uMB beyond bounds (%u-%lu)MB\n", + hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE); + return -EINVAL; + } + + page_size = HMA_PAGE_SIZE; + page_order = HMA_PAGE_ORDER; + adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL); + if (unlikely(!adapter->hma.sgt)) { + dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n"); + return -ENOMEM; + } + sgt = adapter->hma.sgt; + /* FW returned value will be in MBs + */ + sgt->orig_nents = (hma_size << 20) / (page_size << page_order); + if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) { + dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n"); + kfree(adapter->hma.sgt); + adapter->hma.sgt = NULL; + return -ENOMEM; + } + + sgl = adapter->hma.sgt->sgl; + node = dev_to_node(adapter->pdev_dev); + for_each_sg(sgl, iter, sgt->orig_nents, i) { + newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL, + page_order); + if (!newpage) { + dev_err(adapter->pdev_dev, + "Not enough memory for HMA page allocation\n"); + ret = -ENOMEM; + goto free_hma; + } + sg_set_page(iter, newpage, page_size << page_order, 0); + } + + sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents, + DMA_BIDIRECTIONAL); + if (!sgt->nents) { + dev_err(adapter->pdev_dev, + "Not enough memory for HMA DMA mapping\n"); + ret = -ENOMEM; + goto free_hma; + } + adapter->hma.flags |= HMA_DMA_MAPPED_FLAG; + + adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t), + GFP_KERNEL); + if (unlikely(!adapter->hma.phy_addr)) { + ret = -ENOMEM; + goto free_hma; + } + + for_each_sg(sgl, iter, sgt->nents, i) + adapter->hma.phy_addr[i] = sg_dma_address(iter); + + ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD); + /* Pass on the addresses to firmware */ + for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) { + struct fw_hma_cmd hma_cmd; + u8 naddr = HMA_MAX_ADDR_IN_CMD; + u8 soc = 0, eoc = 0; + u8 hma_mode = 1; /* Presently we support only Page table mode */ + + soc = (i == 0) ? 1 : 0; + eoc = (i == ncmds - 1) ? 1 : 0; + + /* For last cmd, set naddr corresponding to remaining + * addresses + */ + if (i == ncmds - 1) { + naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD; + naddr = naddr ?
naddr : HMA_MAX_ADDR_IN_CMD; + } + memset(&hma_cmd, 0, sizeof(hma_cmd)); + hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd)); + + hma_cmd.mode_to_pcie_params = + htonl(FW_HMA_CMD_MODE_V(hma_mode) | + FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc)); + + /* HMA cmd size specified in MB's */ + hma_cmd.naddr_size = + htonl(FW_HMA_CMD_SIZE_V(hma_size) | + FW_HMA_CMD_NADDR_V(naddr)); + + /* Total Page size specified in units of 4K */ + hma_cmd.addr_size_pkd = + htonl(FW_HMA_CMD_ADDR_SIZE_V + ((page_size << page_order) >> 12)); + + /* Fill the 5 addresses */ + for (j = 0; j < naddr; j++) { + hma_cmd.phy_address[j] = + cpu_to_be64(adapter->hma.phy_addr[j + k]); + } + ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd, + sizeof(hma_cmd), &hma_cmd); + if (ret) { + dev_err(adapter->pdev_dev, + "HMA FW command failed with err %d\n", ret); + goto free_hma; + } + } + + if (!ret) + dev_info(adapter->pdev_dev, + "Reserved %uMB host memory for HMA\n", hma_size); + return ret; + +free_hma: + adap_free_hma_mem(adapter); + return ret; +} + static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) { u32 v; @@ -3751,6 +3978,12 @@ static int adap_init0_config(struct adapter *adapter, int reset) if (ret < 0) goto bye; + /* We will proceed even if HMA init fails. */ + ret = adap_config_hma(adapter); + if (ret) + dev_err(adapter->pdev_dev, + "HMA configuration failed with error %d\n", ret); + /* * And finally tell the firmware to initialize itself using the * parameters from the Configuration File. @@ -3957,6 +4190,11 @@ static int adap_init0(struct adapter *adap) * effect. Otherwise, it's time to try initializing the adapter. */ if (state == DEV_STATE_INIT) { + ret = adap_config_hma(adap); + if (ret) + dev_err(adap->pdev_dev, + "HMA configuration failed with error %d\n", + ret); dev_info(adap->pdev_dev, "Coming up as %s: "\ "Adapter already initialized\n", adap->flags & MASTER_PF ? 
"MASTER" : "SLAVE"); @@ -4211,7 +4449,8 @@ static int adap_init0(struct adapter *adap) adap->params.ofldq_wr_cred = val[5]; if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) { - if (init_hash_filter(adap) < 0) + ret = init_hash_filter(adap); + if (ret < 0) goto bye; } else { adap->params.offload = 1; @@ -4236,6 +4475,20 @@ static int adap_init0(struct adapter *adap) adap->vres.pbl.start = val[4]; adap->vres.pbl.size = val[5] - val[4] + 1; + params[0] = FW_PARAM_PFVF(SRQ_START); + params[1] = FW_PARAM_PFVF(SRQ_END); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, + params, val); + if (!ret) { + adap->vres.srq.start = val[0]; + adap->vres.srq.size = val[1] - val[0] + 1; + } + if (adap->vres.srq.size) { + adap->srq = t4_init_srq(adap->vres.srq.size); + if (!adap->srq) + dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n"); + } + params[0] = FW_PARAM_PFVF(SQRQ_START); params[1] = FW_PARAM_PFVF(SQRQ_END); params[2] = FW_PARAM_PFVF(CQ_START); @@ -4269,6 +4522,18 @@ static int adap_init0(struct adapter *adap) "max_ordird_qp %d max_ird_adapter %d\n", adap->params.max_ordird_qp, adap->params.max_ird_adapter); + + /* Enable write_with_immediate if FW supports it */ + params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, + val); + adap->params.write_w_imm_support = (ret == 0 && val[0] != 0); + + /* Enable write_cmpl if FW supports it */ + params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, + val); + adap->params.write_cmpl_support = (ret == 0 && val[0] != 0); adap->num_ofld_uld += 2; } if (caps_cmd.iscsicaps) { @@ -4284,18 +4549,32 @@ static int adap_init0(struct adapter *adap) adap->num_ofld_uld += 2; } if (caps_cmd.cryptocaps) { - /* Should query params here...TODO */ - params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE); - ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, - params, val); - if (ret < 0) { - if (ret != -EINVAL) + if (ntohs(caps_cmd.cryptocaps) & + FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) { + params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, + 2, params, val); + if (ret < 0) { + if (ret != -EINVAL) + goto bye; + } else { + adap->vres.ncrypto_fc = val[0]; + } + adap->num_ofld_uld += 1; + } + if (ntohs(caps_cmd.cryptocaps) & + FW_CAPS_CONFIG_TLS_INLINE) { + params[0] = FW_PARAM_PFVF(TLS_START); + params[1] = FW_PARAM_PFVF(TLS_END); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, + 2, params, val); + if (ret < 0) goto bye; - } else { - adap->vres.ncrypto_fc = val[0]; + adap->vres.key.start = val[0]; + adap->vres.key.size = val[1] - val[0] + 1; + adap->num_uld += 1; } adap->params.crypto = ntohs(caps_cmd.cryptocaps); - adap->num_uld += 1; } #undef FW_PARAM_PFVF #undef FW_PARAM_DEV @@ -4346,6 +4625,7 @@ static int adap_init0(struct adapter *adap) * happened to HW/FW, stop issuing commands. */ bye: + adap_free_hma_mem(adap); kfree(adap->sge.egr_map); kfree(adap->sge.ingr_map); kfree(adap->sge.starving_fl); @@ -4903,6 +5183,7 @@ static void free_some_resources(struct adapter *adapter) kvfree(adapter->smt); kvfree(adapter->l2t); + kvfree(adapter->srq); t4_cleanup_sched(adapter); kvfree(adapter->tids.tid_tab); cxgb4_cleanup_tc_flower(adapter); @@ -4970,7 +5251,6 @@ static void cxgb4_mgmt_setup(struct net_device *dev) /* Initialize the device structure. 
*/ dev->netdev_ops = &cxgb4_mgmt_netdev_ops; dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; - dev->needs_free_netdev = true; } static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) @@ -4982,9 +5262,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) pcie_fw = readl(adap->regs + PCIE_FW_A); /* Check if cxgb4 is the MASTER and fw is initialized */ - if (!(pcie_fw & PCIE_FW_INIT_F) || + if (num_vfs && + (!(pcie_fw & PCIE_FW_INIT_F) || !(pcie_fw & PCIE_FW_MASTER_VLD_F) || - PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) { + PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) { dev_warn(&pdev->dev, "cxgb4 driver needs to be MASTER to support SRIOV\n"); return -EOPNOTSUPP; @@ -5180,6 +5461,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->name = pci_name(pdev); adapter->mbox = func; adapter->pf = func; + adapter->params.chip = chip; + adapter->adap_idx = adap_idx; adapter->msg_enable = DFLT_MSG_ENABLE; adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + (sizeof(struct mbox_cmd) * @@ -5255,6 +5538,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->tid_release_task, process_tid_release_list); INIT_WORK(&adapter->db_full_task, process_db_full); INIT_WORK(&adapter->db_drop_task, process_db_drop); + INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err); err = t4_prep_adapter(adapter); if (err) @@ -5472,6 +5756,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_free_dev; + err = setup_fw_sge_queues(adapter); + if (err) { + dev_err(adapter->pdev_dev, + "FW sge queue allocation failed, err %d", err); + goto out_free_dev; + } + /* * The card is now ready to go. If any errors occur during device * registration we do not fail the whole card but rather proceed only @@ -5520,10 +5811,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) cxgb4_ptp_init(adapter); print_adapter_info(adapter); - setup_fw_sge_queues(adapter); return 0; out_free_dev: + t4_free_sge_resources(adapter); free_some_resources(adapter); if (adapter->flags & USING_MSIX) free_msix_info(adapter); @@ -5572,6 +5863,8 @@ static void remove_one(struct pci_dev *pdev) t4_uld_clean_up(adapter); } + adap_free_hma_mem(adapter); + disable_interrupts(adapter); for_each_port(adapter, i) @@ -5599,24 +5892,24 @@ static void remove_one(struct pci_dev *pdev) #if IS_ENABLED(CONFIG_IPV6) t4_cleanup_clip_tbl(adapter); #endif - iounmap(adapter->regs); if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); - pci_disable_pcie_error_reporting(pdev); - if ((adapter->flags & DEV_ENABLED)) { - pci_disable_device(pdev); - adapter->flags &= ~DEV_ENABLED; - } - pci_release_regions(pdev); - kfree(adapter->mbox_log); - synchronize_rcu(); - kfree(adapter); } #ifdef CONFIG_PCI_IOV else { cxgb4_iov_configure(adapter->pdev, 0); } #endif + iounmap(adapter->regs); + pci_disable_pcie_error_reporting(pdev); + if ((adapter->flags & DEV_ENABLED)) { + pci_disable_device(pdev); + adapter->flags &= ~DEV_ENABLED; + } + pci_release_regions(pdev); + kfree(adapter->mbox_log); + synchronize_rcu(); + kfree(adapter); } /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 6b5fea4532f3..a95cde0fadf7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -342,6 +342,7 @@ static void 
free_queues_uld(struct adapter *adap, unsigned int uld_type) { struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + adap->sge.uld_rxq_info[uld_type] = NULL; kfree(rxq_info->rspq_id); kfree(rxq_info->uldrxq); kfree(rxq_info); @@ -665,6 +666,8 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; lld->nodeid = dev_to_node(adap->pdev_dev); lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support; + lld->write_w_imm_support = adap->params.write_w_imm_support; + lld->write_cmpl_support = adap->params.write_cmpl_support; } static void uld_attach(struct adapter *adap, unsigned int uld) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index a14e8db51cdc..de9ad311dacd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -237,6 +237,7 @@ enum cxgb4_uld { CXGB4_ULD_ISCSI, CXGB4_ULD_ISCSIT, CXGB4_ULD_CRYPTO, + CXGB4_ULD_TLS, CXGB4_ULD_MAX }; @@ -257,7 +258,8 @@ enum cxgb4_state { CXGB4_STATE_UP, CXGB4_STATE_START_RECOVERY, CXGB4_STATE_DOWN, - CXGB4_STATE_DETACH + CXGB4_STATE_DETACH, + CXGB4_STATE_FATAL_ERROR }; enum cxgb4_control { @@ -283,10 +285,12 @@ struct cxgb4_virt_res { /* virtualized HW resources */ struct cxgb4_range iscsi; struct cxgb4_range stag; struct cxgb4_range rq; + struct cxgb4_range srq; struct cxgb4_range pbl; struct cxgb4_range qp; struct cxgb4_range cq; struct cxgb4_range ocq; + struct cxgb4_range key; unsigned int ncrypto_fc; }; @@ -298,6 +302,9 @@ struct chcr_stats_debug { atomic_t error; atomic_t fallback; atomic_t ipsec_cnt; + atomic_t tls_pdu_tx; + atomic_t tls_pdu_rx; + atomic_t tls_key; }; #define OCQ_WIN_OFFSET(pdev, vres) \ @@ -352,6 +359,8 @@ struct cxgb4_lld_info { void **iscsi_ppm; /* iscsi page pod manager */ int nodeid; /* device numa node id */ bool fr_nsmr_tpte_wr_support; /* FW supports FR_NSMR_TPTE_WR */ + bool write_w_imm_support; /* FW supports WRITE_WITH_IMMEDIATE */ + bool write_cmpl_support; /* FW supports WRITE_CMPL WR */ }; struct cxgb4_uld_info { @@ -378,6 +387,8 @@ struct cxgb4_uld_info { int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); int cxgb4_unregister_uld(enum cxgb4_uld type); int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); +int cxgb4_immdata_send(struct net_device *dev, unsigned int idx, + const void *src, unsigned int len); int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb); unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo); unsigned int cxgb4_port_chan(const struct net_device *dev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h index 77b2b3fd9021..3a49e00a38a1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.h +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -42,8 +42,8 @@ #define FW_SCHED_CLS_NONE 0xffffffff -/* Max rate that can be set to a scheduling class is 10 Gbps */ -#define SCHED_MAX_RATE_KBPS 10000000U +/* Max rate that can be set to a scheduling class is 100 Gbps */ +#define SCHED_MAX_RATE_KBPS 100000000U enum { SCHED_STATE_ACTIVE, diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 6e310a0da7c9..1a28df137e1f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1019,8 +1019,8 @@ EXPORT_SYMBOL(cxgb4_ring_tx_db); void cxgb4_inline_tx_skb(const struct sk_buff 
*skb, const struct sge_txq *q, void *pos) { - u64 *p; int left = (void *)q->stat - pos; + u64 *p; if (likely(skb->len <= left)) { if (likely(!skb->data_len)) @@ -1735,15 +1735,13 @@ static void txq_stop_maperr(struct sge_uld_txq *q) /** * ofldtxq_stop - stop an offload Tx queue that has become full * @q: the queue to stop - * @skb: the packet causing the queue to become full + * @wr: the Work Request causing the queue to become full * * Stops an offload Tx queue that has become full and modifies the packet * being written to request a wakeup. */ -static void ofldtxq_stop(struct sge_uld_txq *q, struct sk_buff *skb) +static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr) { - struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data; - wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); q->q.stops++; q->full = 1; @@ -1804,7 +1802,7 @@ static void service_ofldq(struct sge_uld_txq *q) credits = txq_avail(&q->q) - ndesc; BUG_ON(credits < 0); if (unlikely(credits < TXQ_STOP_THRES)) - ofldtxq_stop(q, skb); + ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data); pos = (u64 *)&q->q.desc[q->q.pidx]; if (is_ofld_imm(skb)) @@ -2005,6 +2003,103 @@ int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) } EXPORT_SYMBOL(cxgb4_ofld_send); +static void *inline_tx_header(const void *src, + const struct sge_txq *q, + void *pos, int length) +{ + int left = (void *)q->stat - pos; + u64 *p; + + if (likely(length <= left)) { + memcpy(pos, src, length); + pos += length; + } else { + memcpy(pos, src, left); + memcpy(q->desc, src + left, length - left); + pos = (void *)q->desc + (length - left); + } + /* 0-pad to multiple of 16 */ + p = PTR_ALIGN(pos, 8); + if ((uintptr_t)p & 8) { + *p = 0; + return p + 1; + } + return p; +} + +/** + * ofld_xmit_direct - copy a WR into offload queue + * @q: the Tx offload queue + * @src: location of WR + * @len: WR length + * + * Copy an immediate WR into an uncontended SGE offload queue. 
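+ * Returns NET_XMIT_SUCCESS on success, or NET_XMIT_DROP when the queue is contended, stopped, or already has queued skbs; this fast path never falls back to the skb send queue.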
+ */ +static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src, + unsigned int len) +{ + unsigned int ndesc; + int credits; + u64 *pos; + + /* Use the lower limit as the cut-off */ + if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) { + WARN_ON(1); + return NET_XMIT_DROP; + } + + /* Don't return NET_XMIT_CN here as the current + * implementation doesn't queue the request + * using an skb when the following conditions are not met + */ + if (!spin_trylock(&q->sendq.lock)) + return NET_XMIT_DROP; + + if (q->full || !skb_queue_empty(&q->sendq) || + q->service_ofldq_running) { + spin_unlock(&q->sendq.lock); + return NET_XMIT_DROP; + } + ndesc = flits_to_desc(DIV_ROUND_UP(len, 8)); + credits = txq_avail(&q->q) - ndesc; + pos = (u64 *)&q->q.desc[q->q.pidx]; + + /* ofldtxq_stop modifies WR header in-situ */ + inline_tx_header(src, &q->q, pos, len); + if (unlikely(credits < TXQ_STOP_THRES)) + ofldtxq_stop(q, (struct fw_wr_hdr *)pos); + txq_advance(&q->q, ndesc); + cxgb4_ring_tx_db(q->adap, &q->q, ndesc); + + spin_unlock(&q->sendq.lock); + return NET_XMIT_SUCCESS; +} + +int cxgb4_immdata_send(struct net_device *dev, unsigned int idx, + const void *src, unsigned int len) +{ + struct sge_uld_txq_info *txq_info; + struct sge_uld_txq *txq; + struct adapter *adap; + int ret; + + adap = netdev2adap(dev); + + local_bh_disable(); + txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; + if (unlikely(!txq_info)) { + WARN_ON(true); + local_bh_enable(); + return NET_XMIT_DROP; + } + txq = &txq_info->uldtxq[idx]; + + ret = ofld_xmit_direct(txq, src, len); + local_bh_enable(); + return net_xmit_eval(ret); +} +EXPORT_SYMBOL(cxgb4_immdata_send); + /** * t4_crypto_send - send crypto packet * @adap: the adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c new file mode 100644 index 000000000000..6228a5708307 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c @@ -0,0 +1,138 @@ +/* + * This file is part of the Chelsio T6 Ethernet driver for Linux. + * + * Copyright (c) 2017-2018 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + +#include "cxgb4.h" +#include "t4_msg.h" +#include "srq.h" + +struct srq_data *t4_init_srq(int srq_size) +{ + struct srq_data *s; + + s = kvzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return NULL; + + s->srq_size = srq_size; + init_completion(&s->comp); + mutex_init(&s->lock); + + return s; +} + +/* cxgb4_get_srq_entry - read the SRQ table entry + * @dev: pointer to the net_device + * @srq_idx: index of the SRQ entry + * @entryp: pointer to the srq entry + * + * Sends a CPL_SRQ_TABLE_REQ message for the given index. + * Contents will be returned in a CPL_SRQ_TABLE_RPL message. + * + * Returns zero if the read is successful, else an error + * number is returned. The caller should not use the srq + * entry if the return value is non-zero. + */ +int cxgb4_get_srq_entry(struct net_device *dev, + int srq_idx, struct srq_entry *entryp) +{ + struct cpl_srq_table_req *req; + struct adapter *adap; + struct sk_buff *skb; + struct srq_data *s; + int rc = -ENODEV; + + adap = netdev2adap(dev); + s = adap->srq; + + if (!(adap->flags & FULL_INIT_DONE) || !s) + goto out; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + req = (struct cpl_srq_table_req *) + __skb_put(skb, sizeof(*req)); + memset(req, 0, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ, + TID_TID_V(srq_idx) | + TID_QID_V(adap->sge.fw_evtq.abs_id))); + req->idx = srq_idx; + + mutex_lock(&s->lock); + + s->entryp = entryp; + t4_mgmt_tx(adap, skb); + + rc = wait_for_completion_timeout(&s->comp, SRQ_WAIT_TO); + if (rc) + rc = 0; + else /* !rc means we timed out */ + rc = -ETIMEDOUT; + + WARN_ON_ONCE(entryp->idx != srq_idx); + mutex_unlock(&s->lock); +out: + return rc; +} +EXPORT_SYMBOL(cxgb4_get_srq_entry); + +void do_srq_table_rpl(struct adapter *adap, + const struct cpl_srq_table_rpl *rpl) +{ + unsigned int idx = TID_TID_G(GET_TID(rpl)); + struct srq_data *s = adap->srq; + struct srq_entry *e; + + if (unlikely(rpl->status != CPL_CONTAINS_READ_RPL)) { + dev_err(adap->pdev_dev, + "Unexpected SRQ_TABLE_RPL status %u for entry %u\n", + rpl->status, idx); + goto out; + } + + /* Store the read entry */ + e = s->entryp; + e->valid = 1; + e->idx = idx; + e->pdid = SRQT_PDID_G(be64_to_cpu(rpl->rsvd_pdid)); + e->qlen = SRQT_QLEN_G(be32_to_cpu(rpl->qlen_qbase)); + e->qbase = SRQT_QBASE_G(be32_to_cpu(rpl->qlen_qbase)); + e->cur_msn = be16_to_cpu(rpl->cur_msn); + e->max_msn = be16_to_cpu(rpl->max_msn); +out: + complete(&s->comp); +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.h b/drivers/net/ethernet/chelsio/cxgb4/srq.h new file mode 100644 index 000000000000..ec85cf93865a --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/srq.h @@ -0,0 +1,65 @@ +/* + * This file is part of the Chelsio T6 Ethernet driver for Linux. + * + * Copyright (c) 2017-2018 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CXGB4_SRQ_H +#define __CXGB4_SRQ_H + +struct adapter; +struct cpl_srq_table_rpl; + +#define SRQ_WAIT_TO (HZ * 5) + +struct srq_entry { + u8 valid; + u8 idx; + u8 qlen; + u16 pdid; + u16 cur_msn; + u16 max_msn; + u32 qbase; +}; + +struct srq_data { + unsigned int srq_size; + struct srq_entry *entryp; + struct completion comp; + struct mutex lock; /* generic mutex for srq data */ +}; + +struct srq_data *t4_init_srq(int srq_size); +int cxgb4_get_srq_entry(struct net_device *dev, + int srq_idx, struct srq_entry *entryp); +void do_srq_table_rpl(struct adapter *adap, + const struct cpl_srq_table_rpl *rpl); +#endif /* __CXGB4_SRQ_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 047609ef0515..7cb3ef466cc7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -484,6 +484,117 @@ static int t4_edc_err_read(struct adapter *adap, int idx) } /** + * t4_memory_rw_init - Get memory window relative offset, base, and size. + * @adap: the adapter + * @win: PCI-E Memory Window to use + * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC + * @mem_off: memory relative offset with respect to @mtype. + * @mem_base: configured memory base address. + * @mem_aperture: configured memory window aperture. + * + * Get the configured memory window's relative offset, base, and size. + */ +int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off, + u32 *mem_base, u32 *mem_aperture) +{ + u32 edc_size, mc_size, mem_reg; + + /* Offset into the region of memory which is being accessed + * MEM_EDC0 = 0 + * MEM_EDC1 = 1 + * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller + * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5) + * MEM_HMA = 4 + */ + edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A)); + if (mtype == MEM_HMA) { + *mem_off = 2 * (edc_size * 1024 * 1024); + } else if (mtype != MEM_MC1) { + *mem_off = (mtype * (edc_size * 1024 * 1024)); + } else { + mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, + MA_EXT_MEMORY0_BAR_A)); + *mem_off = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; + } + + /* Each PCI-E Memory Window is programmed with a window size -- or + * "aperture" -- which controls the granularity of its mapping onto + * adapter memory. We need to grab that aperture in order to know + * how to use the specified window. The window is also programmed + * with the base address of the Memory Window in BAR0's address + * space. For T4 this is an absolute PCI-E Bus Address. For T5 + * the address is relative to BAR0. 
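+ * (Hence the t4_bar0 adjustment applied below for T4-class chips.)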
+ */ + mem_reg = t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, + win)); + /* a dead adapter will return 0xffffffff for PIO reads */ + if (mem_reg == 0xffffffff) + return -ENXIO; + + *mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X); + *mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X; + if (is_t4(adap->params.chip)) + *mem_base -= adap->t4_bar0; + + return 0; +} + +/** + * t4_memory_update_win - Move memory window to specified address. + * @adap: the adapter + * @win: PCI-E Memory Window to use + * @addr: location to move the window to. + * + * Move memory window to specified address. + */ +void t4_memory_update_win(struct adapter *adap, int win, u32 addr) +{ + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win), + addr); + /* Read it back to ensure that changes propagate before we + * attempt to use the new value. + */ + t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win)); +} + +/** + * t4_memory_rw_residual - Read/Write residual data. + * @adap: the adapter + * @off: relative offset within residual to start read/write. + * @addr: address within indicated memory type. + * @buf: host memory buffer + * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) + * + * Read/Write residual data less than 32 bits. + */ +void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf, + int dir) +{ + union { + u32 word; + char byte[4]; + } last; + unsigned char *bp; + int i; + + if (dir == T4_MEMORY_READ) { + last.word = le32_to_cpu((__force __le32) + t4_read_reg(adap, addr)); + for (bp = (unsigned char *)buf, i = off; i < 4; i++) + bp[i] = last.byte[i]; + } else { + last.word = *(u32 *)buf; + for (i = off; i < 4; i++) + last.byte[i] = 0; + t4_write_reg(adap, addr, + (__force u32)cpu_to_le32(last.word)); + } +} + +/** * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window * @adap: the adapter * @win: PCI-E Memory Window to use @@ -504,8 +615,9 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, void *hbuf, int dir) { u32 pos, offset, resid, memoffset; - u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; + u32 win_pf, mem_aperture, mem_base; u32 *buf; + int ret; /* Argument sanity checks ... */ @@ -521,59 +633,26 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, resid = len & 0x3; len -= resid; - /* Offset into the region of memory which is being accessed - * MEM_EDC0 = 0 - * MEM_EDC1 = 1 - * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller - * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5) - * MEM_HMA = 4 - */ - edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A)); - if (mtype == MEM_HMA) { - memoffset = 2 * (edc_size * 1024 * 1024); - } else if (mtype != MEM_MC1) { - memoffset = (mtype * (edc_size * 1024 * 1024)); - } else { - mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, - MA_EXT_MEMORY0_BAR_A)); - memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; - } + ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base, + &mem_aperture); + if (ret) + return ret; /* Determine the PCIE_MEM_ACCESS_OFFSET */ addr = addr + memoffset; - /* Each PCI-E Memory Window is programmed with a window size -- or - * "aperture" -- which controls the granularity of its mapping onto - * adapter memory. We need to grab that aperture in order to know - * how to use the specified window. The window is also programmed - * with the base address of the Memory Window in BAR0's address - * space.
For T4 this is an absolute PCI-E Bus Address. For T5 - * the address is relative to BAR0. - */ - mem_reg = t4_read_reg(adap, - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, - win)); - mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X); - mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X; - if (is_t4(adap->params.chip)) - mem_base -= adap->t4_bar0; win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf); /* Calculate our initial PCI-E Memory Window Position and Offset into * that Window. */ - pos = addr & ~(mem_aperture-1); + pos = addr & ~(mem_aperture - 1); offset = addr - pos; /* Set up initial PCI-E Memory Window to cover the start of our - * transfer. (Read it back to ensure that changes propagate before we - * attempt to use the new value.) + * transfer. */ - t4_write_reg(adap, - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win), - pos | win_pf); - t4_read_reg(adap, - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win)); + t4_memory_update_win(adap, win, pos | win_pf); /* Transfer data to/from the adapter as long as there's an integral * number of 32-bit transfers to complete. @@ -628,12 +707,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, if (offset == mem_aperture) { pos += mem_aperture; offset = 0; - t4_write_reg(adap, - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, - win), pos | win_pf); - t4_read_reg(adap, - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, - win)); + t4_memory_update_win(adap, win, pos | win_pf); } } @@ -642,28 +716,9 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, * residual amount. The PCI-E Memory Window has already been moved * above (if necessary) to cover this final transfer. */ - if (resid) { - union { - u32 word; - char byte[4]; - } last; - unsigned char *bp; - int i; - - if (dir == T4_MEMORY_READ) { - last.word = le32_to_cpu( - (__force __le32)t4_read_reg(adap, - mem_base + offset)); - for (bp = (unsigned char *)buf, i = resid; i < 4; i++) - bp[i] = last.byte[i]; - } else { - last.word = *buf; - for (i = resid; i < 4; i++) - last.byte[i] = 0; - t4_write_reg(adap, mem_base + offset, - (__force u32)cpu_to_le32(last.word)); - } - } + if (resid) + t4_memory_rw_residual(adap, resid, mem_base + offset, + (u8 *)buf, dir); return 0; } @@ -2637,7 +2692,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) } #define EEPROM_STAT_ADDR 0x7bfc -#define VPD_SIZE 0x800 #define VPD_BASE 0x400 #define VPD_BASE_OLD 0 #define VPD_LEN 1024 @@ -2704,15 +2758,6 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p) if (!vpd) return -ENOMEM; - /* We have two VPD data structures stored in the adapter VPD area. - * By default, Linux calculates the size of the VPD area by traversing - * the first VPD area at offset 0x0, so we need to tell the OS what - * our real VPD size is. - */ - ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE); - if (ret < 0) - goto out; - /* Card information normally starts at VPD_BASE but early cards had * it at 0. */ @@ -4021,8 +4066,6 @@ int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO); fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap; - lc->link_ok = 0; - /* Convert driver coding of Pause Frame Flow Control settings into the * Firmware's API. 
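 * (The converted settings are merged into the fw_port_cap32_t rcap word handed to the firmware.)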
*/ @@ -6046,6 +6089,7 @@ unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx) case CHELSIO_T6: switch (nports) { + case 1: case 2: return 1 << pidx; } break; @@ -8560,6 +8604,25 @@ static int t4_get_flash_params(struct adapter *adap) } break; } + case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */ + /* This Density -> Size decoding table is taken from ISSI + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x16: /* 32 MB */ + size = 1 << 25; + break; + case 0x17: /* 64MB */ + size = 1 << 26; + break; + default: + dev_err(adap->pdev_dev, "ISSI Flash Part has bad size, ID = %#x, Density code = %#x\n", + flashid, density); + return -EINVAL; + } + break; + } case 0xc2: { /* Macronix */ /* This Density -> Size decoding table is taken from Macronix * Data Sheets. diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index d0db4427b77e..fe2029e993a2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -52,6 +52,7 @@ enum { CPL_L2T_WRITE_REQ = 0x12, CPL_SMT_WRITE_REQ = 0x14, CPL_TID_RELEASE = 0x1A, + CPL_SRQ_TABLE_REQ = 0x1C, CPL_TX_DATA_ISO = 0x1F, CPL_CLOSE_LISTSRV_RPL = 0x20, @@ -81,13 +82,15 @@ enum { CPL_RX_ISCSI_CMP = 0x45, CPL_TRACE_PKT_T5 = 0x48, CPL_RX_ISCSI_DDP = 0x49, + CPL_RX_TLS_CMP = 0x4E, CPL_RDMA_READ_REQ = 0x60, CPL_PASS_OPEN_REQ6 = 0x81, CPL_ACT_OPEN_REQ6 = 0x83, - CPL_TX_TLS_PDU = 0x88, + CPL_TX_TLS_PDU = 0x88, + CPL_TX_TLS_SFO = 0x89, CPL_TX_SEC_PDU = 0x8A, CPL_TX_TLS_ACK = 0x8B, @@ -97,11 +100,13 @@ enum { CPL_RX_MPS_PKT = 0xAF, CPL_TRACE_PKT = 0xB0, + CPL_TLS_DATA = 0xB1, CPL_ISCSI_DATA = 0xB2, CPL_FW4_MSG = 0xC0, CPL_FW4_PLD = 0xC1, CPL_FW4_ACK = 0xC3, + CPL_SRQ_TABLE_RPL = 0xCC, CPL_RX_PHYS_DSGL = 0xD0, @@ -136,6 +141,8 @@ enum CPL_error { CPL_ERR_KEEPALV_NEG_ADVICE = 37, CPL_ERR_ABORT_FAILED = 42, CPL_ERR_IWARP_FLM = 50, + CPL_CONTAINS_READ_RPL = 60, + CPL_CONTAINS_WRITE_RPL = 61, }; enum { @@ -151,6 +158,7 @@ enum { ULP_MODE_RDMA = 4, ULP_MODE_TCPDDP = 5, ULP_MODE_FCOE = 6, + ULP_MODE_TLS = 8, }; enum { @@ -198,6 +206,7 @@ union opcode_tid { /* partitioning of TID fields that also carry a queue id */ #define TID_TID_S 0 #define TID_TID_M 0x3fff +#define TID_TID_V(x) ((x) << TID_TID_S) #define TID_TID_G(x) (((x) >> TID_TID_S) & TID_TID_M) #define TID_QID_S 14 @@ -743,6 +752,22 @@ struct cpl_abort_req_rss { u8 status; }; +struct cpl_abort_req_rss6 { + WR_HDR; + union opcode_tid ot; + __u32 srqidx_status; +}; + +#define ABORT_RSS_STATUS_S 0 +#define ABORT_RSS_STATUS_M 0xff +#define ABORT_RSS_STATUS_V(x) ((x) << ABORT_RSS_STATUS_S) +#define ABORT_RSS_STATUS_G(x) (((x) >> ABORT_RSS_STATUS_S) & ABORT_RSS_STATUS_M) + +#define ABORT_RSS_SRQIDX_S 8 +#define ABORT_RSS_SRQIDX_M 0xffffff +#define ABORT_RSS_SRQIDX_V(x) ((x) << ABORT_RSS_SRQIDX_S) +#define ABORT_RSS_SRQIDX_G(x) (((x) >> ABORT_RSS_SRQIDX_S) & ABORT_RSS_SRQIDX_M) + struct cpl_abort_req { WR_HDR; union opcode_tid ot; @@ -758,6 +783,11 @@ struct cpl_abort_rpl_rss { u8 status; }; +struct cpl_abort_rpl_rss6 { + union opcode_tid ot; + __u32 srqidx_status; +}; + struct cpl_abort_rpl { WR_HDR; union opcode_tid ot; @@ -1419,6 +1449,14 @@ struct cpl_tx_data { #define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S) #define T6_TX_FORCE_F T6_TX_FORCE_V(1U) +#define TX_SHOVE_S 14 +#define TX_SHOVE_V(x) ((x) << TX_SHOVE_S) + +#define TX_ULP_MODE_S 10 +#define TX_ULP_MODE_M 0x7 +#define TX_ULP_MODE_V(x) ((x) << TX_ULP_MODE_S) +#define TX_ULP_MODE_G(x) (((x) >> TX_ULP_MODE_S) & 
TX_ULP_MODE_M) + enum { ULP_TX_MEM_READ = 2, ULP_TX_MEM_WRITE = 3, @@ -1429,12 +1467,21 @@ enum { ULP_TX_SC_NOOP = 0x80, ULP_TX_SC_IMM = 0x81, ULP_TX_SC_DSGL = 0x82, - ULP_TX_SC_ISGL = 0x83 + ULP_TX_SC_ISGL = 0x83, + ULP_TX_SC_MEMRD = 0x86 }; #define ULPTX_CMD_S 24 #define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S) +#define ULPTX_LEN16_S 0 +#define ULPTX_LEN16_M 0xFF +#define ULPTX_LEN16_V(x) ((x) << ULPTX_LEN16_S) + +#define ULP_TX_SC_MORE_S 23 +#define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S) +#define ULP_TX_SC_MORE_F ULP_TX_SC_MORE_V(1U) + struct ulptx_sge_pair { __be32 len[2]; __be64 addr[2]; @@ -2112,4 +2159,146 @@ enum { X_CPL_RX_MPS_PKT_TYPE_QFC = 1 << 2, X_CPL_RX_MPS_PKT_TYPE_PTP = 1 << 3 }; + +struct cpl_srq_table_req { + WR_HDR; + union opcode_tid ot; + __u8 status; + __u8 rsvd[2]; + __u8 idx; + __be64 rsvd_pdid; + __be32 qlen_qbase; + __be16 cur_msn; + __be16 max_msn; +}; + +struct cpl_srq_table_rpl { + union opcode_tid ot; + __u8 status; + __u8 rsvd[2]; + __u8 idx; + __be64 rsvd_pdid; + __be32 qlen_qbase; + __be16 cur_msn; + __be16 max_msn; +}; + +/* cpl_srq_table_{req,rpl}.params fields */ +#define SRQT_QLEN_S 28 +#define SRQT_QLEN_M 0xF +#define SRQT_QLEN_V(x) ((x) << SRQT_QLEN_S) +#define SRQT_QLEN_G(x) (((x) >> SRQT_QLEN_S) & SRQT_QLEN_M) + +#define SRQT_QBASE_S 0 +#define SRQT_QBASE_M 0x3FFFFFF +#define SRQT_QBASE_V(x) ((x) << SRQT_QBASE_S) +#define SRQT_QBASE_G(x) (((x) >> SRQT_QBASE_S) & SRQT_QBASE_M) + +#define SRQT_PDID_S 0 +#define SRQT_PDID_M 0xFF +#define SRQT_PDID_V(x) ((x) << SRQT_PDID_S) +#define SRQT_PDID_G(x) (((x) >> SRQT_PDID_S) & SRQT_PDID_M) + +#define SRQT_IDX_S 0 +#define SRQT_IDX_M 0xF +#define SRQT_IDX_V(x) ((x) << SRQT_IDX_S) +#define SRQT_IDX_G(x) (((x) >> SRQT_IDX_S) & SRQT_IDX_M) + +struct cpl_tx_tls_sfo { + __be32 op_to_seg_len; + __be32 pld_len; + __be32 type_protover; + __be32 r1_lo; + __be32 seqno_numivs; + __be32 ivgen_hdrlen; + __be64 scmd1; +}; + +/* cpl_tx_tls_sfo macros */ +#define CPL_TX_TLS_SFO_OPCODE_S 24 +#define CPL_TX_TLS_SFO_OPCODE_V(x) ((x) << CPL_TX_TLS_SFO_OPCODE_S) + +#define CPL_TX_TLS_SFO_DATA_TYPE_S 20 +#define CPL_TX_TLS_SFO_DATA_TYPE_V(x) ((x) << CPL_TX_TLS_SFO_DATA_TYPE_S) + +#define CPL_TX_TLS_SFO_CPL_LEN_S 16 +#define CPL_TX_TLS_SFO_CPL_LEN_V(x) ((x) << CPL_TX_TLS_SFO_CPL_LEN_S) + +#define CPL_TX_TLS_SFO_SEG_LEN_S 0 +#define CPL_TX_TLS_SFO_SEG_LEN_M 0xffff +#define CPL_TX_TLS_SFO_SEG_LEN_V(x) ((x) << CPL_TX_TLS_SFO_SEG_LEN_S) +#define CPL_TX_TLS_SFO_SEG_LEN_G(x) \ + (((x) >> CPL_TX_TLS_SFO_SEG_LEN_S) & CPL_TX_TLS_SFO_SEG_LEN_M) + +#define CPL_TX_TLS_SFO_TYPE_S 24 +#define CPL_TX_TLS_SFO_TYPE_M 0xff +#define CPL_TX_TLS_SFO_TYPE_V(x) ((x) << CPL_TX_TLS_SFO_TYPE_S) +#define CPL_TX_TLS_SFO_TYPE_G(x) \ + (((x) >> CPL_TX_TLS_SFO_TYPE_S) & CPL_TX_TLS_SFO_TYPE_M) + +#define CPL_TX_TLS_SFO_PROTOVER_S 8 +#define CPL_TX_TLS_SFO_PROTOVER_M 0xffff +#define CPL_TX_TLS_SFO_PROTOVER_V(x) ((x) << CPL_TX_TLS_SFO_PROTOVER_S) +#define CPL_TX_TLS_SFO_PROTOVER_G(x) \ + (((x) >> CPL_TX_TLS_SFO_PROTOVER_S) & CPL_TX_TLS_SFO_PROTOVER_M) + +struct cpl_tls_data { + struct rss_header rsshdr; + union opcode_tid ot; + __be32 length_pkd; + __be32 seq; + __be32 r1; +}; + +#define CPL_TLS_DATA_OPCODE_S 24 +#define CPL_TLS_DATA_OPCODE_M 0xff +#define CPL_TLS_DATA_OPCODE_V(x) ((x) << CPL_TLS_DATA_OPCODE_S) +#define CPL_TLS_DATA_OPCODE_G(x) \ + (((x) >> CPL_TLS_DATA_OPCODE_S) & CPL_TLS_DATA_OPCODE_M) + +#define CPL_TLS_DATA_TID_S 0 +#define CPL_TLS_DATA_TID_M 0xffffff +#define CPL_TLS_DATA_TID_V(x) ((x) << CPL_TLS_DATA_TID_S) +#define CPL_TLS_DATA_TID_G(x) \ + 
(((x) >> CPL_TLS_DATA_TID_S) & CPL_TLS_DATA_TID_M) + +#define CPL_TLS_DATA_LENGTH_S 0 +#define CPL_TLS_DATA_LENGTH_M 0xffff +#define CPL_TLS_DATA_LENGTH_V(x) ((x) << CPL_TLS_DATA_LENGTH_S) +#define CPL_TLS_DATA_LENGTH_G(x) \ + (((x) >> CPL_TLS_DATA_LENGTH_S) & CPL_TLS_DATA_LENGTH_M) + +struct cpl_rx_tls_cmp { + struct rss_header rsshdr; + union opcode_tid ot; + __be32 pdulength_length; + __be32 seq; + __be32 ddp_report; + __be32 r; + __be32 ddp_valid; +}; + +#define CPL_RX_TLS_CMP_OPCODE_S 24 +#define CPL_RX_TLS_CMP_OPCODE_M 0xff +#define CPL_RX_TLS_CMP_OPCODE_V(x) ((x) << CPL_RX_TLS_CMP_OPCODE_S) +#define CPL_RX_TLS_CMP_OPCODE_G(x) \ + (((x) >> CPL_RX_TLS_CMP_OPCODE_S) & CPL_RX_TLS_CMP_OPCODE_M) + +#define CPL_RX_TLS_CMP_TID_S 0 +#define CPL_RX_TLS_CMP_TID_M 0xffffff +#define CPL_RX_TLS_CMP_TID_V(x) ((x) << CPL_RX_TLS_CMP_TID_S) +#define CPL_RX_TLS_CMP_TID_G(x) \ + (((x) >> CPL_RX_TLS_CMP_TID_S) & CPL_RX_TLS_CMP_TID_M) + +#define CPL_RX_TLS_CMP_PDULENGTH_S 16 +#define CPL_RX_TLS_CMP_PDULENGTH_M 0xffff +#define CPL_RX_TLS_CMP_PDULENGTH_V(x) ((x) << CPL_RX_TLS_CMP_PDULENGTH_S) +#define CPL_RX_TLS_CMP_PDULENGTH_G(x) \ + (((x) >> CPL_RX_TLS_CMP_PDULENGTH_S) & CPL_RX_TLS_CMP_PDULENGTH_M) + +#define CPL_RX_TLS_CMP_LENGTH_S 0 +#define CPL_RX_TLS_CMP_LENGTH_M 0xffff +#define CPL_RX_TLS_CMP_LENGTH_V(x) ((x) << CPL_RX_TLS_CMP_LENGTH_S) +#define CPL_RX_TLS_CMP_LENGTH_G(x) \ + (((x) >> CPL_RX_TLS_CMP_LENGTH_S) & CPL_RX_TLS_CMP_LENGTH_M) #endif /* __T4_MSG_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index a6df73398d17..276fdf214b75 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -2775,6 +2775,8 @@ #define ULP_RX_LA_RDPTR_A 0x19240 #define ULP_RX_LA_RDDATA_A 0x19244 #define ULP_RX_LA_WRPTR_A 0x19248 +#define ULP_RX_TLS_KEY_LLIMIT_A 0x192ac +#define ULP_RX_TLS_KEY_ULIMIT_A 0x192b0 #define HPZ3_S 24 #define HPZ3_V(x) ((x) << HPZ3_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 0d83b4064a78..e3d4751f21ac 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -101,9 +101,11 @@ enum fw_wr_opcodes { FW_RI_BIND_MW_WR = 0x18, FW_RI_FR_NSMR_WR = 0x19, FW_RI_FR_NSMR_TPTE_WR = 0x20, + FW_RI_RDMA_WRITE_CMPL_WR = 0x21, FW_RI_INV_LSTAG_WR = 0x1a, FW_ISCSI_TX_DATA_WR = 0x45, FW_PTP_TX_PKT_WR = 0x46, + FW_TLSTX_DATA_WR = 0x68, FW_CRYPTO_LOOKASIDE_WR = 0X6d, FW_LASTC2E_WR = 0x70, FW_FILTER2_WR = 0x77 @@ -634,6 +636,30 @@ struct fw_ofld_connection_wr { #define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_F \ FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(1U) +enum fw_flowc_mnem_tcpstate { + FW_FLOWC_MNEM_TCPSTATE_CLOSED = 0, /* illegal */ + FW_FLOWC_MNEM_TCPSTATE_LISTEN = 1, /* illegal */ + FW_FLOWC_MNEM_TCPSTATE_SYNSENT = 2, /* illegal */ + FW_FLOWC_MNEM_TCPSTATE_SYNRECEIVED = 3, /* illegal */ + FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED = 4, /* default */ + FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT = 5, /* got peer close already */ + FW_FLOWC_MNEM_TCPSTATE_FINWAIT1 = 6, /* haven't gotten ACK for FIN and + * will resend FIN - equiv ESTAB + */ + FW_FLOWC_MNEM_TCPSTATE_CLOSING = 7, /* haven't gotten ACK for FIN and + * will resend FIN but have + * received FIN + */ + FW_FLOWC_MNEM_TCPSTATE_LASTACK = 8, /* haven't gotten ACK for FIN and + * will resend FIN but have + * received FIN + */ + FW_FLOWC_MNEM_TCPSTATE_FINWAIT2 = 9, /* sent FIN and got FIN + ACK, + * waiting for FIN + */ + 
FW_FLOWC_MNEM_TCPSTATE_TIMEWAIT = 10, /* not expected */ +}; + enum fw_flowc_mnem { FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ FW_FLOWC_MNEM_CH, @@ -650,6 +676,8 @@ enum fw_flowc_mnem { FW_FLOWC_MNEM_DCBPRIO, FW_FLOWC_MNEM_SND_SCALE, FW_FLOWC_MNEM_RCV_SCALE, + FW_FLOWC_MNEM_ULD_MODE, + FW_FLOWC_MNEM_MAX, }; struct fw_flowc_mnemval { @@ -674,6 +702,14 @@ struct fw_ofld_tx_data_wr { __be32 tunnel_to_proxy; }; +#define FW_OFLD_TX_DATA_WR_ALIGNPLD_S 30 +#define FW_OFLD_TX_DATA_WR_ALIGNPLD_V(x) ((x) << FW_OFLD_TX_DATA_WR_ALIGNPLD_S) +#define FW_OFLD_TX_DATA_WR_ALIGNPLD_F FW_OFLD_TX_DATA_WR_ALIGNPLD_V(1U) + +#define FW_OFLD_TX_DATA_WR_SHOVE_S 29 +#define FW_OFLD_TX_DATA_WR_SHOVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SHOVE_S) +#define FW_OFLD_TX_DATA_WR_SHOVE_F FW_OFLD_TX_DATA_WR_SHOVE_V(1U) + #define FW_OFLD_TX_DATA_WR_TUNNEL_S 19 #define FW_OFLD_TX_DATA_WR_TUNNEL_V(x) ((x) << FW_OFLD_TX_DATA_WR_TUNNEL_S) @@ -690,10 +726,6 @@ struct fw_ofld_tx_data_wr { #define FW_OFLD_TX_DATA_WR_MORE_S 15 #define FW_OFLD_TX_DATA_WR_MORE_V(x) ((x) << FW_OFLD_TX_DATA_WR_MORE_S) -#define FW_OFLD_TX_DATA_WR_SHOVE_S 14 -#define FW_OFLD_TX_DATA_WR_SHOVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SHOVE_S) -#define FW_OFLD_TX_DATA_WR_SHOVE_F FW_OFLD_TX_DATA_WR_SHOVE_V(1U) - #define FW_OFLD_TX_DATA_WR_ULPMODE_S 10 #define FW_OFLD_TX_DATA_WR_ULPMODE_V(x) ((x) << FW_OFLD_TX_DATA_WR_ULPMODE_S) @@ -766,6 +798,7 @@ enum fw_cmd_opcodes { FW_DEVLOG_CMD = 0x25, FW_CLIP_CMD = 0x28, FW_PTP_CMD = 0x3e, + FW_HMA_CMD = 0x3f, FW_LASTC2E_CMD = 0x40, FW_ERROR_CMD = 0x80, FW_DEBUG_CMD = 0x81, @@ -1119,6 +1152,12 @@ enum fw_caps_config_iscsi { FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008, }; +enum fw_caps_config_crypto { + FW_CAPS_CONFIG_CRYPTO_LOOKASIDE = 0x00000001, + FW_CAPS_CONFIG_TLS_INLINE = 0x00000002, + FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004, +}; + enum fw_caps_config_fcoe { FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001, FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, @@ -1132,6 +1171,7 @@ enum fw_memtype_cf { FW_MEMTYPE_CF_FLASH = 0x4, FW_MEMTYPE_CF_INTERNAL = 0x5, FW_MEMTYPE_CF_EXTMEM1 = 0x6, + FW_MEMTYPE_CF_HMA = 0x7, }; struct fw_caps_config_cmd { @@ -1210,6 +1250,9 @@ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C, FW_PARAMS_PARAM_DEV_FILTER2_WR = 0x1D, FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E, + FW_PARAMS_PARAM_DEV_HMA_SIZE = 0x20, + FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21, + FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24, }; /* @@ -1241,6 +1284,8 @@ enum fw_params_param_pfvf { FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16, FW_PARAMS_PARAM_PFVF_CQ_START = 0x17, FW_PARAMS_PARAM_PFVF_CQ_END = 0x18, + FW_PARAMS_PARAM_PFVF_SRQ_START = 0x19, + FW_PARAMS_PARAM_PFVF_SRQ_END = 0x1A, FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20, FW_PARAMS_PARAM_PFVF_VIID = 0x24, FW_PARAMS_PARAM_PFVF_CPMASK = 0x25, @@ -1258,6 +1303,8 @@ enum fw_params_param_pfvf { FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31, FW_PARAMS_PARAM_PFVF_HPFILTER_START = 0x32, FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33, + FW_PARAMS_PARAM_PFVF_TLS_START = 0x34, + FW_PARAMS_PARAM_PFVF_TLS_END = 0x35, FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39, FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A, }; @@ -3435,6 +3482,59 @@ struct fw_debug_cmd { #define FW_DEBUG_CMD_TYPE_G(x) \ (((x) >> FW_DEBUG_CMD_TYPE_S) & FW_DEBUG_CMD_TYPE_M) +struct fw_hma_cmd { + __be32 op_pkd; + __be32 retval_len16; + __be32 mode_to_pcie_params; + __be32 naddr_size; + __be32 addr_size_pkd; + __be32 r6; + __be64 phy_address[5]; +}; + +#define FW_HMA_CMD_MODE_S 31 +#define FW_HMA_CMD_MODE_M 0x1 +#define 
FW_HMA_CMD_MODE_V(x) ((x) << FW_HMA_CMD_MODE_S) +#define FW_HMA_CMD_MODE_G(x) \ + (((x) >> FW_HMA_CMD_MODE_S) & FW_HMA_CMD_MODE_M) +#define FW_HMA_CMD_MODE_F FW_HMA_CMD_MODE_V(1U) + +#define FW_HMA_CMD_SOC_S 30 +#define FW_HMA_CMD_SOC_M 0x1 +#define FW_HMA_CMD_SOC_V(x) ((x) << FW_HMA_CMD_SOC_S) +#define FW_HMA_CMD_SOC_G(x) (((x) >> FW_HMA_CMD_SOC_S) & FW_HMA_CMD_SOC_M) +#define FW_HMA_CMD_SOC_F FW_HMA_CMD_SOC_V(1U) + +#define FW_HMA_CMD_EOC_S 29 +#define FW_HMA_CMD_EOC_M 0x1 +#define FW_HMA_CMD_EOC_V(x) ((x) << FW_HMA_CMD_EOC_S) +#define FW_HMA_CMD_EOC_G(x) (((x) >> FW_HMA_CMD_EOC_S) & FW_HMA_CMD_EOC_M) +#define FW_HMA_CMD_EOC_F FW_HMA_CMD_EOC_V(1U) + +#define FW_HMA_CMD_PCIE_PARAMS_S 0 +#define FW_HMA_CMD_PCIE_PARAMS_M 0x7ffffff +#define FW_HMA_CMD_PCIE_PARAMS_V(x) ((x) << FW_HMA_CMD_PCIE_PARAMS_S) +#define FW_HMA_CMD_PCIE_PARAMS_G(x) \ + (((x) >> FW_HMA_CMD_PCIE_PARAMS_S) & FW_HMA_CMD_PCIE_PARAMS_M) + +#define FW_HMA_CMD_NADDR_S 12 +#define FW_HMA_CMD_NADDR_M 0x3f +#define FW_HMA_CMD_NADDR_V(x) ((x) << FW_HMA_CMD_NADDR_S) +#define FW_HMA_CMD_NADDR_G(x) \ + (((x) >> FW_HMA_CMD_NADDR_S) & FW_HMA_CMD_NADDR_M) + +#define FW_HMA_CMD_SIZE_S 0 +#define FW_HMA_CMD_SIZE_M 0xfff +#define FW_HMA_CMD_SIZE_V(x) ((x) << FW_HMA_CMD_SIZE_S) +#define FW_HMA_CMD_SIZE_G(x) \ + (((x) >> FW_HMA_CMD_SIZE_S) & FW_HMA_CMD_SIZE_M) + +#define FW_HMA_CMD_ADDR_SIZE_S 11 +#define FW_HMA_CMD_ADDR_SIZE_M 0x1fffff +#define FW_HMA_CMD_ADDR_SIZE_V(x) ((x) << FW_HMA_CMD_ADDR_SIZE_S) +#define FW_HMA_CMD_ADDR_SIZE_G(x) \ + (((x) >> FW_HMA_CMD_ADDR_SIZE_S) & FW_HMA_CMD_ADDR_SIZE_M) + enum pcie_fw_eval { PCIE_FW_EVAL_CRASH = 0, }; @@ -3778,4 +3878,122 @@ struct fw_crypto_lookaside_wr { (((x) >> FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S) & \ FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_M) +struct fw_tlstx_data_wr { + __be32 op_to_immdlen; + __be32 flowid_len16; + __be32 plen; + __be32 lsodisable_to_flags; + __be32 r5; + __be32 ctxloc_to_exp; + __be16 mfs; + __be16 adjustedplen_pkd; + __be16 expinplenmax_pkd; + u8 pdusinplenmax_pkd; + u8 r10; +}; + +#define FW_TLSTX_DATA_WR_OPCODE_S 24 +#define FW_TLSTX_DATA_WR_OPCODE_M 0xff +#define FW_TLSTX_DATA_WR_OPCODE_V(x) ((x) << FW_TLSTX_DATA_WR_OPCODE_S) +#define FW_TLSTX_DATA_WR_OPCODE_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_OPCODE_S) & FW_TLSTX_DATA_WR_OPCODE_M) + +#define FW_TLSTX_DATA_WR_COMPL_S 21 +#define FW_TLSTX_DATA_WR_COMPL_M 0x1 +#define FW_TLSTX_DATA_WR_COMPL_V(x) ((x) << FW_TLSTX_DATA_WR_COMPL_S) +#define FW_TLSTX_DATA_WR_COMPL_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_COMPL_S) & FW_TLSTX_DATA_WR_COMPL_M) +#define FW_TLSTX_DATA_WR_COMPL_F FW_TLSTX_DATA_WR_COMPL_V(1U) + +#define FW_TLSTX_DATA_WR_IMMDLEN_S 0 +#define FW_TLSTX_DATA_WR_IMMDLEN_M 0xff +#define FW_TLSTX_DATA_WR_IMMDLEN_V(x) ((x) << FW_TLSTX_DATA_WR_IMMDLEN_S) +#define FW_TLSTX_DATA_WR_IMMDLEN_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_IMMDLEN_S) & FW_TLSTX_DATA_WR_IMMDLEN_M) + +#define FW_TLSTX_DATA_WR_FLOWID_S 8 +#define FW_TLSTX_DATA_WR_FLOWID_M 0xfffff +#define FW_TLSTX_DATA_WR_FLOWID_V(x) ((x) << FW_TLSTX_DATA_WR_FLOWID_S) +#define FW_TLSTX_DATA_WR_FLOWID_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_FLOWID_S) & FW_TLSTX_DATA_WR_FLOWID_M) + +#define FW_TLSTX_DATA_WR_LEN16_S 0 +#define FW_TLSTX_DATA_WR_LEN16_M 0xff +#define FW_TLSTX_DATA_WR_LEN16_V(x) ((x) << FW_TLSTX_DATA_WR_LEN16_S) +#define FW_TLSTX_DATA_WR_LEN16_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_LEN16_S) & FW_TLSTX_DATA_WR_LEN16_M) + +#define FW_TLSTX_DATA_WR_LSODISABLE_S 31 +#define FW_TLSTX_DATA_WR_LSODISABLE_M 0x1 +#define FW_TLSTX_DATA_WR_LSODISABLE_V(x) \ + ((x) << FW_TLSTX_DATA_WR_LSODISABLE_S) 
+#define FW_TLSTX_DATA_WR_LSODISABLE_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_LSODISABLE_S) & FW_TLSTX_DATA_WR_LSODISABLE_M) +#define FW_TLSTX_DATA_WR_LSODISABLE_F FW_TLSTX_DATA_WR_LSODISABLE_V(1U) + +#define FW_TLSTX_DATA_WR_ALIGNPLD_S 30 +#define FW_TLSTX_DATA_WR_ALIGNPLD_M 0x1 +#define FW_TLSTX_DATA_WR_ALIGNPLD_V(x) ((x) << FW_TLSTX_DATA_WR_ALIGNPLD_S) +#define FW_TLSTX_DATA_WR_ALIGNPLD_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_ALIGNPLD_S) & FW_TLSTX_DATA_WR_ALIGNPLD_M) +#define FW_TLSTX_DATA_WR_ALIGNPLD_F FW_TLSTX_DATA_WR_ALIGNPLD_V(1U) + +#define FW_TLSTX_DATA_WR_ALIGNPLDSHOVE_S 29 +#define FW_TLSTX_DATA_WR_ALIGNPLDSHOVE_M 0x1 +#define FW_TLSTX_DATA_WR_ALIGNPLDSHOVE_V(x) \ + ((x) << FW_TLSTX_DATA_WR_ALIGNPLDSHOVE_S) +#define FW_TLSTX_DATA_WR_ALIGNPLDSHOVE_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_ALIGNPLDSHOVE_S) & \ + FW_TLSTX_DATA_WR_ALIGNPLDSHOVE_M) +#define FW_TLSTX_DATA_WR_ALIGNPLDSHOVE_F FW_TLSTX_DATA_WR_ALIGNPLDSHOVE_V(1U) + +#define FW_TLSTX_DATA_WR_FLAGS_S 0 +#define FW_TLSTX_DATA_WR_FLAGS_M 0xfffffff +#define FW_TLSTX_DATA_WR_FLAGS_V(x) ((x) << FW_TLSTX_DATA_WR_FLAGS_S) +#define FW_TLSTX_DATA_WR_FLAGS_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_FLAGS_S) & FW_TLSTX_DATA_WR_FLAGS_M) + +#define FW_TLSTX_DATA_WR_CTXLOC_S 30 +#define FW_TLSTX_DATA_WR_CTXLOC_M 0x3 +#define FW_TLSTX_DATA_WR_CTXLOC_V(x) ((x) << FW_TLSTX_DATA_WR_CTXLOC_S) +#define FW_TLSTX_DATA_WR_CTXLOC_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_CTXLOC_S) & FW_TLSTX_DATA_WR_CTXLOC_M) + +#define FW_TLSTX_DATA_WR_IVDSGL_S 29 +#define FW_TLSTX_DATA_WR_IVDSGL_M 0x1 +#define FW_TLSTX_DATA_WR_IVDSGL_V(x) ((x) << FW_TLSTX_DATA_WR_IVDSGL_S) +#define FW_TLSTX_DATA_WR_IVDSGL_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_IVDSGL_S) & FW_TLSTX_DATA_WR_IVDSGL_M) +#define FW_TLSTX_DATA_WR_IVDSGL_F FW_TLSTX_DATA_WR_IVDSGL_V(1U) + +#define FW_TLSTX_DATA_WR_KEYSIZE_S 24 +#define FW_TLSTX_DATA_WR_KEYSIZE_M 0x1f +#define FW_TLSTX_DATA_WR_KEYSIZE_V(x) ((x) << FW_TLSTX_DATA_WR_KEYSIZE_S) +#define FW_TLSTX_DATA_WR_KEYSIZE_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_KEYSIZE_S) & FW_TLSTX_DATA_WR_KEYSIZE_M) + +#define FW_TLSTX_DATA_WR_NUMIVS_S 14 +#define FW_TLSTX_DATA_WR_NUMIVS_M 0xff +#define FW_TLSTX_DATA_WR_NUMIVS_V(x) ((x) << FW_TLSTX_DATA_WR_NUMIVS_S) +#define FW_TLSTX_DATA_WR_NUMIVS_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_NUMIVS_S) & FW_TLSTX_DATA_WR_NUMIVS_M) + +#define FW_TLSTX_DATA_WR_EXP_S 0 +#define FW_TLSTX_DATA_WR_EXP_M 0x3fff +#define FW_TLSTX_DATA_WR_EXP_V(x) ((x) << FW_TLSTX_DATA_WR_EXP_S) +#define FW_TLSTX_DATA_WR_EXP_G(x) \ + (((x) >> FW_TLSTX_DATA_WR_EXP_S) & FW_TLSTX_DATA_WR_EXP_M) + +#define FW_TLSTX_DATA_WR_ADJUSTEDPLEN_S 1 +#define FW_TLSTX_DATA_WR_ADJUSTEDPLEN_V(x) \ + ((x) << FW_TLSTX_DATA_WR_ADJUSTEDPLEN_S) + +#define FW_TLSTX_DATA_WR_EXPINPLENMAX_S 4 +#define FW_TLSTX_DATA_WR_EXPINPLENMAX_V(x) \ + ((x) << FW_TLSTX_DATA_WR_EXPINPLENMAX_S) + +#define FW_TLSTX_DATA_WR_PDUSINPLENMAX_S 2 +#define FW_TLSTX_DATA_WR_PDUSINPLENMAX_V(x) \ + ((x) << FW_TLSTX_DATA_WR_PDUSINPLENMAX_S) + #endif /* _T4FW_INTERFACE_H_ */
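An editorial aside on the macro convention used throughout these firmware headers: for each field, _S is the bit shift, _M the field mask after shifting, _V(x) inserts a value into the field, and _G(x) extracts it; single-bit fields additionally get an _F flag constant, defined as _V(1U). As a minimal sketch (not part of the patch; the helper names here are invented), composing and decoding the fw_hma_cmd naddr_size word with the FW_HMA_CMD macros defined earlier might look like:

/* Illustrative only: pack/unpack fw_hma_cmd.naddr_size with the
 * _V (insert) and _G (extract) helpers. NADDR sits at bits 17:12
 * (_S = 12, _M = 0x3f), SIZE at bits 11:0 (_S = 0, _M = 0xfff).
 */
static inline __be32 hma_pack_naddr_size(unsigned int naddr, unsigned int size)
{
	return cpu_to_be32(FW_HMA_CMD_NADDR_V(naddr) | FW_HMA_CMD_SIZE_V(size));
}

static inline unsigned int hma_unpack_naddr(__be32 naddr_size)
{
	return FW_HMA_CMD_NADDR_G(be32_to_cpu(naddr_size));
}

The _F constants let callers OR single-bit flags together, as with FW_HMA_CMD_MODE_F above, without spelling out the shift each time.

diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index b7e79e64d2ed..9a81b52307a9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -155,8 +155,6 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) const char *fc; const struct port_info *pi = netdev_priv(dev); - netif_carrier_on(dev); - switch (pi->link_cfg.speed) { case 100: s = "100Mbps"; break; @@ -202,7 +200,6 @@ void t4vf_os_link_changed(struct adapter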
*adapter, int pidx, int link_ok) netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc); } else { - netif_carrier_off(dev); netdev_info(dev, "link down\n"); } } @@ -278,6 +275,17 @@ static int link_start(struct net_device *dev) */ if (ret == 0) ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true); + + /* The Virtual Interfaces are connected to an internal switch on the + * chip which allows VIs attached to the same port to talk to each + * other even when the port link is down. As a result, we generally + * want to always report a VI's link as being "up", provided there are + * no errors in enabling vi. + */ + + if (ret == 0) + netif_carrier_on(dev); + return ret; } @@ -1281,22 +1289,22 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, case FW_PORT_TYPE_KR: SET_LMM(Backplane); - SET_LMM(10000baseKR_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); break; case FW_PORT_TYPE_BP_AP: SET_LMM(Backplane); - SET_LMM(10000baseR_FEC); - SET_LMM(10000baseKR_Full); - SET_LMM(1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); break; case FW_PORT_TYPE_BP4_AP: SET_LMM(Backplane); - SET_LMM(10000baseR_FEC); - SET_LMM(10000baseKR_Full); - SET_LMM(1000baseKX_Full); - SET_LMM(10000baseKX4_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full); break; case FW_PORT_TYPE_FIBER_XFI: @@ -1312,18 +1320,24 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, case FW_PORT_TYPE_BP40_BA: case FW_PORT_TYPE_QSFP: SET_LMM(FIBRE); - SET_LMM(40000baseSR4_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); + FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full); break; case FW_PORT_TYPE_CR_QSFP: case FW_PORT_TYPE_SFP28: SET_LMM(FIBRE); - SET_LMM(25000baseCR_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); + FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full); break; case FW_PORT_TYPE_KR_SFP28: SET_LMM(Backplane); - SET_LMM(25000baseKR_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); + FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full); break; case FW_PORT_TYPE_KR_XLAUI: @@ -1335,13 +1349,18 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, case FW_PORT_TYPE_CR2_QSFP: SET_LMM(FIBRE); - SET_LMM(50000baseSR2_Full); + FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full); break; case FW_PORT_TYPE_KR4_100G: case FW_PORT_TYPE_CR4_QSFP: SET_LMM(FIBRE); - SET_LMM(100000baseCR4_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseSR_Full); + FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full); + FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full); + FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full); + FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full); break; default: @@ -2383,11 +2402,11 @@ struct cxgb4vf_debugfs_entry { }; static struct cxgb4vf_debugfs_entry debugfs_files[] = { - { "mboxlog", S_IRUGO, &mboxlog_fops }, - { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops }, - { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops }, - { "resources", S_IRUGO, &resources_proc_fops }, - { "interfaces", S_IRUGO, &interfaces_proc_fops }, + { "mboxlog", 0444, &mboxlog_fops }, + { "sge_qinfo", 0444, &sge_qinfo_debugfs_fops }, + { "sge_qstats", 0444, &sge_qstats_proc_fops }, + { "resources", 0444, &resources_proc_fops }, + { "interfaces", 0444, 
&interfaces_proc_fops }, }; /* diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index 977d4c2c759d..3f8fe8fd79cc 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c @@ -56,21 +56,11 @@ local_irq_{dis,en}able() */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + static const char version[] = "cs89x0.c:v1.02 11/26/96 Russell Nelson <nelson@crynwr.com>\n"; -/* ======================= configure the driver here ======================= */ - -/* use 0 for production, 1 for verification, >2 for debug */ -#ifndef NET_DEBUG -#define NET_DEBUG 0 -#endif - -/* ======================= end of configuration ======================= */ - - -/* Always include 'config.h' first in case the user wants to turn on - or override something. */ #include <linux/module.h> /* @@ -93,6 +83,7 @@ static const char version[] = #include <linux/errno.h> #include <linux/init.h> #include <linux/netdevice.h> +#include <linux/platform_device.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/delay.h> @@ -105,24 +96,22 @@ static const char version[] = #include "cs89x0.h" -static unsigned int net_debug = NET_DEBUG; +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "debug message level"); /* Information that need to be kept for each board. */ struct net_local { + int msg_enable; int chip_type; /* one of: CS8900, CS8920, CS8920M */ char chip_revision; /* revision letter of the chip ('A'...) */ int send_cmd; /* the propercommand used to send a packet. */ int rx_mode; int curr_rx_cfg; int send_underrun; /* keep track of how many underruns in a row we get */ - struct sk_buff *skb; }; /* Index to functions, as function prototypes. */ - -#if 0 -extern void reset_chip(struct net_device *dev); -#endif static int net_open(struct net_device *dev); static int net_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t net_interrupt(int irq, void *dev_id); @@ -132,10 +121,6 @@ static int net_close(struct net_device *dev); static struct net_device_stats *net_get_stats(struct net_device *dev); static int set_mac_address(struct net_device *dev, void *addr); - -/* Example routines you must write ;->. */ -#define tx_done(dev) 1 - /* For reading/writing registers ISA-style */ static inline int readreg_io(struct net_device *dev, int portno) @@ -176,12 +161,10 @@ static const struct net_device_ops mac89x0_netdev_ops = { /* Probe for the CS8900 card in slot E. We won't bother looking anywhere else until we have a really good reason to do so. */ -struct net_device * __init mac89x0_probe(int unit) +static int mac89x0_device_probe(struct platform_device *pdev) { struct net_device *dev; - static int once_is_enough; struct net_local *lp; - static unsigned version_printed; int i, slot; unsigned rev_type = 0; unsigned long ioaddr; @@ -189,21 +172,9 @@ struct net_device * __init mac89x0_probe(int unit) int err = -ENODEV; struct nubus_rsrc *fres; - if (!MACH_IS_MAC) - return ERR_PTR(-ENODEV); - dev = alloc_etherdev(sizeof(struct net_local)); if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - - if (once_is_enough) - goto out; - once_is_enough = 1; + return -ENOMEM; /* We might have to parameterize this later */ slot = 0xE; @@ -230,9 +201,13 @@ struct net_device * __init mac89x0_probe(int unit) if (sig != swab16(CHIP_EISA_ID_SIG)) goto out; + SET_NETDEV_DEV(dev, &pdev->dev); + /* Initialize the net_device structure. 
*/ lp = netdev_priv(dev); + lp->msg_enable = netif_msg_init(debug, 0); + /* Fill in the 'dev' fields. */ dev->base_addr = ioaddr; dev->mem_start = (unsigned long) @@ -255,19 +230,16 @@ struct net_device * __init mac89x0_probe(int unit) if (lp->chip_type != CS8900 && lp->chip_revision >= 'C') lp->send_cmd = TX_NOW; - if (net_debug && version_printed++ == 0) - printk(version); + netif_dbg(lp, drv, dev, "%s", version); - printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#8lx", - dev->name, - lp->chip_type==CS8900?'0':'2', - lp->chip_type==CS8920M?"M":"", - lp->chip_revision, - dev->base_addr); + pr_info("cs89%c0%s rev %c found at %#8lx\n", + lp->chip_type == CS8900 ? '0' : '2', + lp->chip_type == CS8920M ? "M" : "", + lp->chip_revision, dev->base_addr); /* Try to read the MAC address */ if ((readreg(dev, PP_SelfST) & (EEPROM_PRESENT | EEPROM_OK)) == 0) { - printk("\nmac89x0: No EEPROM, giving up now.\n"); + pr_info("No EEPROM, giving up now.\n"); goto out1; } else { for (i = 0; i < ETH_ALEN; i += 2) { @@ -282,39 +254,23 @@ struct net_device * __init mac89x0_probe(int unit) /* print the IRQ and ethernet address. */ - printk(" IRQ %d ADDR %pM\n", dev->irq, dev->dev_addr); + pr_info("MAC %pM, IRQ %d\n", dev->dev_addr, dev->irq); dev->netdev_ops = &mac89x0_netdev_ops; err = register_netdev(dev); if (err) goto out1; - return NULL; + + platform_set_drvdata(pdev, dev); + return 0; out1: nubus_writew(0, dev->base_addr + ADD_PORT); out: free_netdev(dev); - return ERR_PTR(err); + return err; } -#if 0 -/* This is useful for something, but I don't know what yet. */ -void __init reset_chip(struct net_device *dev) -{ - int reset_start_time; - - writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET); - - /* wait 30 ms */ - msleep_interruptible(30); - - /* Wait until the chip is reset */ - reset_start_time = jiffies; - while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2) - ; -} -#endif - /* Open/initialize the board. This is called (in the current kernel) sometime after booting when the 'ifconfig' program is run. @@ -374,11 +330,9 @@ net_send_packet(struct sk_buff *skb, struct net_device *dev) struct net_local *lp = netdev_priv(dev); unsigned long flags; - if (net_debug > 3) - printk("%s: sent %d byte packet of type %x\n", - dev->name, skb->len, - (skb->data[ETH_ALEN+ETH_ALEN] << 8) - | skb->data[ETH_ALEN+ETH_ALEN+1]); + netif_dbg(lp, tx_queued, dev, "sent %d byte packet of type %x\n", + skb->len, skb->data[ETH_ALEN + ETH_ALEN] << 8 | + skb->data[ETH_ALEN + ETH_ALEN + 1]); /* keep the upload from being interrupted, since we ask the chip to start transmitting before the @@ -416,11 +370,6 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) struct net_local *lp; int ioaddr, status; - if (dev == NULL) { - printk ("net_interrupt(): irq %d for unknown device.\n", irq); - return IRQ_NONE; - } - ioaddr = dev->base_addr; lp = netdev_priv(dev); @@ -432,7 +381,7 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) faster than you can read them off, you're screwed. Hasta la vista, baby! */ while ((status = swab16(nubus_readw(dev->base_addr + ISQ_PORT)))) { - if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status); + netif_dbg(lp, intr, dev, "status=%04x\n", status); switch(status & ISQ_EVENT_MASK) { case ISQ_RECEIVER_EVENT: /* Got a packet(s). 
*/ @@ -462,7 +411,7 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) netif_wake_queue(dev); } if (status & TX_UNDERRUN) { - if (net_debug > 0) printk("%s: transmit underrun\n", dev->name); + netif_dbg(lp, tx_err, dev, "transmit underrun\n"); lp->send_underrun++; if (lp->send_underrun == 3) lp->send_cmd = TX_AFTER_381; else if (lp->send_underrun == 6) lp->send_cmd = TX_AFTER_ALL; @@ -483,6 +432,7 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) static void net_rx(struct net_device *dev) { + struct net_local *lp = netdev_priv(dev); struct sk_buff *skb; int status, length; @@ -506,7 +456,6 @@ net_rx(struct net_device *dev) /* Malloc up new buffer. */ skb = alloc_skb(length, GFP_ATOMIC); if (skb == NULL) { - printk("%s: Memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; return; } @@ -515,10 +464,9 @@ net_rx(struct net_device *dev) skb_copy_to_linear_data(skb, (void *)(dev->mem_start + PP_RxFrame), length); - if (net_debug > 3)printk("%s: received %d byte packet of type %x\n", - dev->name, length, - (skb->data[ETH_ALEN+ETH_ALEN] << 8) - | skb->data[ETH_ALEN+ETH_ALEN+1]); + netif_dbg(lp, rx_status, dev, "received %d byte packet of type %x\n", + length, skb->data[ETH_ALEN + ETH_ALEN] << 8 | + skb->data[ETH_ALEN + ETH_ALEN + 1]); skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); @@ -594,7 +542,7 @@ static int set_mac_address(struct net_device *dev, void *addr) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); - printk("%s: Setting MAC address to %pM\n", dev->name, dev->dev_addr); + netdev_info(dev, "Setting MAC address to %pM\n", dev->dev_addr); /* set the Ethernet address */ for (i=0; i < ETH_ALEN/2; i++) @@ -603,32 +551,24 @@ static int set_mac_address(struct net_device *dev, void *addr) return 0; } -#ifdef MODULE - -static struct net_device *dev_cs89x0; -static int debug; - -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "CS89[02]0 debug level (0-5)"); MODULE_LICENSE("GPL"); -int __init -init_module(void) +static int mac89x0_device_remove(struct platform_device *pdev) { - net_debug = debug; - dev_cs89x0 = mac89x0_probe(-1); - if (IS_ERR(dev_cs89x0)) { - printk(KERN_WARNING "mac89x0.c: No card found\n"); - return PTR_ERR(dev_cs89x0); - } + struct net_device *dev = platform_get_drvdata(pdev); + + unregister_netdev(dev); + nubus_writew(0, dev->base_addr + ADD_PORT); + free_netdev(dev); return 0; } -void -cleanup_module(void) -{ - unregister_netdev(dev_cs89x0); - nubus_writew(0, dev_cs89x0->base_addr + ADD_PORT); - free_netdev(dev_cs89x0); -} -#endif /* MODULE */ +static struct platform_driver mac89x0_platform_driver = { + .probe = mac89x0_device_probe, + .remove = mac89x0_device_remove, + .driver = { + .name = "mac89x0", + }, +}; + +module_platform_driver(mac89x0_platform_driver); diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 9b218f0e5a4c..0dd64acd2a3f 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -33,7 +33,7 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "2.3.0.45" +#define DRV_VERSION "2.3.0.53" #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 @@ -140,6 +140,7 @@ struct enic_rfs_flw_tbl { struct vxlan_offload { u16 vxlan_udp_port_number; u8 patch_level; + u8 flags; }; /* Per-instance private data structure */ diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 
efb9333c7cf8..869006c2002d 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -474,6 +474,39 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd) return 0; } +static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + switch (cmd->flow_type) { + case TCP_V6_FLOW: + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case UDP_V6_FLOW: + case UDP_V4_FLOW: + if (vnic_dev_capable_udp_rss(enic->vdev)) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { @@ -500,6 +533,9 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, ret = enic_grxclsrule(enic, cmd); spin_unlock_bh(&enic->rfs_h.lock); break; + case ETHTOOL_GRXFH: + ret = enic_get_rx_flow_hash(enic, cmd); + break; default: ret = -EOPNOTSUPP; break; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index f202ba72a811..81684acf52af 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -191,8 +191,16 @@ static void enic_udp_tunnel_add(struct net_device *netdev, goto error; } - if (ti->sa_family != AF_INET) { - netdev_info(netdev, "vxlan: only IPv4 offload supported"); + switch (ti->sa_family) { + case AF_INET6: + if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) { + netdev_info(netdev, "vxlan: only IPv4 offload supported"); + goto error; + } + /* Fall through */ + case AF_INET: + break; + default: goto error; } @@ -204,6 +212,11 @@ static void enic_udp_tunnel_add(struct net_device *netdev, goto error; } + if ((vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) != 1) && + !(enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ)) { + netdev_info(netdev, "vxlan: vxlan offload with multi wq not supported on this adapter"); + goto error; + } err = vnic_dev_overlay_offload_cfg(enic->vdev, OVERLAY_CFG_VXLAN_PORT_UPDATE, @@ -238,9 +251,8 @@ static void enic_udp_tunnel_del(struct net_device *netdev, spin_lock_bh(&enic->devcmd_lock); - if ((ti->sa_family != AF_INET) || - ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number)) || - (ti->type != UDP_TUNNEL_TYPE_VXLAN)) { + if ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number) || + ti->type != UDP_TUNNEL_TYPE_VXLAN) { netdev_info(netdev, "udp_tnl: port:%d, sa_family: %d, type: %d not offloaded", ntohs(ti->port), ti->sa_family, ti->type); goto unlock; @@ -271,22 +283,37 @@ static netdev_features_t enic_features_check(struct sk_buff *skb, struct enic *enic = netdev_priv(dev); struct udphdr *udph; u16 port = 0; - u16 proto; + u8 proto; if (!skb->encapsulation) return features; features = vxlan_features_check(skb, features); - /* hardware only supports IPv4 vxlan tunnel */ - if (vlan_get_protocol(skb) != htons(ETH_P_IP)) + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IPV6): + if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) + goto out; + proto = ipv6_hdr(skb)->nexthdr; + break; + case htons(ETH_P_IP): + proto = ip_hdr(skb)->protocol; + break; + default: goto out; + } - /* hardware does not support offload of ipv6 
inner pkt */ - if (eth->h_proto != ntohs(ETH_P_IP)) + switch (eth->h_proto) { + case ntohs(ETH_P_IPV6): + if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6)) + goto out; + /* Fall through */ + case ntohs(ETH_P_IP): + break; + default: goto out; + } - proto = ip_hdr(skb)->protocol; if (proto == IPPROTO_UDP) { udph = udp_hdr(skb); @@ -635,12 +662,25 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, static void enic_preload_tcp_csum_encap(struct sk_buff *skb) { - if (skb->protocol == cpu_to_be16(ETH_P_IP)) { + const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb); + + switch (eth->h_proto) { + case ntohs(ETH_P_IP): inner_ip_hdr(skb)->check = 0; inner_tcp_hdr(skb)->check = ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr, inner_ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + break; + case ntohs(ETH_P_IPV6): + inner_tcp_hdr(skb)->check = + ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr, + &inner_ipv6_hdr(skb)->daddr, 0, + IPPROTO_TCP, 0); + break; + default: + WARN_ONCE(1, "Non ipv4/ipv6 inner pkt for encap offload"); + break; } } @@ -1898,6 +1938,8 @@ static int enic_open(struct net_device *netdev) } for (i = 0; i < enic->rq_count; i++) { + /* enable rq before updating rq desc */ + vnic_rq_enable(&enic->rq[i]); vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); /* Need at least one buffer on ring to get going */ if (vnic_rq_desc_used(&enic->rq[i]) == 0) { @@ -1909,8 +1951,6 @@ static int enic_open(struct net_device *netdev) for (i = 0; i < enic->wq_count; i++) vnic_wq_enable(&enic->wq[i]); - for (i = 0; i < enic->rq_count; i++) - vnic_rq_enable(&enic->rq[i]); if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) enic_dev_add_station_addr(enic); @@ -1936,8 +1976,12 @@ static int enic_open(struct net_device *netdev) return 0; err_out_free_rq: - for (i = 0; i < enic->rq_count; i++) + for (i = 0; i < enic->rq_count; i++) { + err = vnic_rq_disable(&enic->rq[i]); + if (err) + return err; vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + } enic_dev_notify_unset(enic); err_out_free_intr: enic_unset_affinity_hint(enic); @@ -2151,9 +2195,10 @@ static int enic_dev_wait(struct vnic_dev *vdev, static int enic_dev_open(struct enic *enic) { int err; + u32 flags = CMD_OPENF_IG_DESCCACHE; err = enic_dev_wait(enic->vdev, vnic_dev_open, - vnic_dev_open_done, 0); + vnic_dev_open_done, flags); if (err) dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", err); @@ -2275,7 +2320,7 @@ static int enic_set_rss_nic_cfg(struct enic *enic) { struct device *dev = enic_get_dev(enic); const u8 rss_default_cpu = 0; - const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | + u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | NIC_CFG_RSS_HASH_TYPE_IPV6 | NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; @@ -2283,6 +2328,8 @@ static int enic_set_rss_nic_cfg(struct enic *enic) const u8 rss_base_cpu = 0; u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); + if (vnic_dev_capable_udp_rss(enic->vdev)) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP; if (rss_enable) { if (!enic_set_rsskey(enic)) { if (enic_set_rsscpu(enic, rss_hash_bits)) { @@ -2901,9 +2948,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features |= NETIF_F_RXCSUM; if (ENIC_SETTING(enic, VXLAN)) { u64 patch_level; + u64 a1 = 0; netdev->hw_enc_features |= NETIF_F_RXCSUM | NETIF_F_TSO | + NETIF_F_TSO6 | NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_HW_CSUM | @@ -2922,9 +2971,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ err 
= vnic_dev_get_supported_feature_ver(enic->vdev, VIC_FEATURE_VXLAN, - &patch_level); + &patch_level, &a1); if (err) patch_level = 0; + enic->vxlan.flags = (u8)a1; /* mask bits that are supported by driver */ patch_level &= BIT_ULL(0) | BIT_ULL(2); diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 39bad67422dd..76cdd4c9d11f 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -1269,16 +1269,32 @@ int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, } int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature, - u64 *supported_versions) + u64 *supported_versions, u64 *a1) { u64 a0 = feature; int wait = 1000; - u64 a1 = 0; int ret; - ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait); + ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, a1, wait); if (!ret) *supported_versions = a0; return ret; } + +bool vnic_dev_capable_udp_rss(struct vnic_dev *vdev) +{ + u64 a0 = CMD_NIC_CFG, a1 = 0; + u64 rss_hash_type; + int wait = 1000; + int err; + + err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); + if (err || !a0) + return false; + + rss_hash_type = (a1 >> NIC_CFG_RSS_HASH_TYPE_SHIFT) & + NIC_CFG_RSS_HASH_TYPE_MASK_FIELD; + + return (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP); +} diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h index 9d43d6bb9907..59d4cc8fbb85 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.h +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h @@ -183,6 +183,7 @@ int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config); int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, u16 vxlan_udp_port_number); int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature, - u64 *supported_versions); + u64 *supported_versions, u64 *a1); +bool vnic_dev_capable_udp_rss(struct vnic_dev *vdev); #endif /* _VNIC_DEV_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h index d83880b0d468..41de4ba622a1 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h +++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h @@ -439,6 +439,7 @@ enum vnic_devcmd_cmd { /* flags for CMD_OPEN */ #define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ +#define CMD_OPENF_IG_DESCCACHE 0x2 /* Do not flush IG DESC cache */ /* flags for CMD_INIT */ #define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */ @@ -697,6 +698,10 @@ enum overlay_ofld_cmd { #define OVERLAY_CFG_VXLAN_PORT_UPDATE 0 +#define ENIC_VXLAN_INNER_IPV6 BIT(0) +#define ENIC_VXLAN_OUTER_IPV6 BIT(1) +#define ENIC_VXLAN_MULTI_WQ BIT(2) + /* Use this enum to get the supported versions for each of these features * If you need to use the devcmd_get_supported_feature_version(), add * the new feature into this enum and install function handler in devcmd.c diff --git a/drivers/net/ethernet/cisco/enic/vnic_nic.h b/drivers/net/ethernet/cisco/enic/vnic_nic.h index 995a50dd4c99..5a93db0d7afc 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_nic.h +++ b/drivers/net/ethernet/cisco/enic/vnic_nic.h @@ -47,6 +47,7 @@ #define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4) #define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5) #define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6) +#define NIC_CFG_RSS_HASH_TYPE_UDP (1 << 7) static inline void vnic_set_nic_cfg(u32 *nic_cfg, u8 rss_default_cpu, u8 rss_hash_type, diff --git 
a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 5eb999af2c40..bd3f6e4d1341 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -540,6 +540,7 @@ static int gmac_setup_txqs(struct net_device *netdev) if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { dev_warn(geth->dev, "TX queue base it not aligned\n"); + kfree(skb_tab); return -ENOMEM; } diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig index 7ec2d74f94d3..680a6d983f37 100644 --- a/drivers/net/ethernet/davicom/Kconfig +++ b/drivers/net/ethernet/davicom/Kconfig @@ -4,7 +4,7 @@ config DM9000 tristate "DM9000 support" - depends on ARM || BLACKFIN || MIPS || COLDFIRE || NIOS2 + depends on ARM || MIPS || COLDFIRE || NIOS2 select CRC32 select MII ---help--- diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index 1b79a6defd56..d71cba0842c5 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -602,7 +602,7 @@ static struct pci_driver pci_driver = { }; module_pci_driver(pci_driver); -module_param(polling_frequency, long, S_IRUGO); +module_param(polling_frequency, long, 0444); MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 1a49297224ed..ff92ab1daeb8 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -19,7 +19,7 @@ #include "be.h" #include "be_cmds.h" -char *be_misconfig_evt_port_state[] = { +const char * const be_misconfig_evt_port_state[] = { "Physical Link is functional", "Optics faulted/incorrectly installed/not installed - Reseat optics. If issue not resolved, replace.", "Optics of two types installed – Remove one optic or install matching pair of optics.", diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 09da2d82c2f0..e8b43cf44b6f 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -201,7 +201,7 @@ enum { phy_state == BE_PHY_UNQUALIFIED || \ phy_state == BE_PHY_UNCERTIFIED) -extern char *be_misconfig_evt_port_state[]; +extern const char * const be_misconfig_evt_port_state[]; /* async event indicating misconfigured port */ struct be_async_event_misconfig_port { diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 5774fb6f8aa0..c697e79e491e 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -34,11 +34,11 @@ MODULE_LICENSE("GPL"); * Use sysfs method to enable/disable VFs. */ static unsigned int num_vfs; -module_param(num_vfs, uint, S_IRUGO); +module_param(num_vfs, uint, 0444); MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); static ushort rx_frag_size = 2048; -module_param(rx_frag_size, ushort, S_IRUGO); +module_param(rx_frag_size, ushort, 0444); MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); /* Per-module error detection/recovery workq shared across all functions. 
@@ -5788,7 +5788,7 @@ static ssize_t be_hwmon_show_temp(struct device *dev, adapter->hwmon_info.be_on_die_temp * 1000); } -static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, +static SENSOR_DEVICE_ATTR(temp1_input, 0444, be_hwmon_show_temp, NULL, 1); static struct attribute *be_hwmon_attrs[] = { diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig index 040c7f163325..0fb8df656677 100644 --- a/drivers/net/ethernet/faraday/Kconfig +++ b/drivers/net/ethernet/faraday/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_FARADAY bool "Faraday devices" default y - depends on ARM + depends on ARM || NDS32 || COMPILE_TEST ---help--- If you have a network (Ethernet) card belonging to this class, say Y. @@ -18,7 +18,8 @@ if NET_VENDOR_FARADAY config FTMAC100 tristate "Faraday FTMAC100 10/100 Ethernet support" - depends on ARM + depends on ARM || NDS32 || COMPILE_TEST + depends on !64BIT || BROKEN select MII ---help--- This driver supports the FTMAC100 10/100 Ethernet controller @@ -27,7 +28,8 @@ config FTMAC100 config FTGMAC100 tristate "Faraday FTGMAC100 Gigabit Ethernet support" - depends on ARM + depends on ARM || NDS32 || COMPILE_TEST + depends on !64BIT || BROKEN select PHYLIB ---help--- This driver supports the FTGMAC100 Gigabit Ethernet controller diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 7caa8da48421..fd43f98ddbe7 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -454,6 +454,16 @@ static void dpaa_set_rx_mode(struct net_device *net_dev) err); } + if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) { + priv->mac_dev->allmulti = !priv->mac_dev->allmulti; + err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac, + priv->mac_dev->allmulti); + if (err < 0) + netif_err(priv, drv, net_dev, + "mac_dev->set_allmulti() = %d\n", + err); + } + err = priv->mac_dev->set_multi(net_dev, priv->mac_dev); if (err < 0) netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n", @@ -1916,8 +1926,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, goto csum_failed; } + /* SGT[0] is used by the linear part */ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); - qm_sg_entry_set_len(&sgt[0], skb_headlen(skb)); + frag_len = skb_headlen(skb); + qm_sg_entry_set_len(&sgt[0], frag_len); sgt[0].bpid = FSL_DPAA_BPID_INV; sgt[0].offset = 0; addr = dma_map_single(dev, skb->data, @@ -1930,9 +1942,9 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, qm_sg_entry_set64(&sgt[0], addr); /* populate the rest of SGT entries */ - frag = &skb_shinfo(skb)->frags[0]; - frag_len = frag->size; - for (i = 1; i <= nr_frags; i++, frag++) { + for (i = 0; i < nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + frag_len = frag->size; WARN_ON(!skb_frag_page(frag)); addr = skb_frag_dma_map(dev, frag, 0, frag_len, dma_dir); @@ -1942,15 +1954,16 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, goto sg_map_failed; } - qm_sg_entry_set_len(&sgt[i], frag_len); - sgt[i].bpid = FSL_DPAA_BPID_INV; - sgt[i].offset = 0; + qm_sg_entry_set_len(&sgt[i + 1], frag_len); + sgt[i + 1].bpid = FSL_DPAA_BPID_INV; + sgt[i + 1].offset = 0; /* keep the offset in the address */ - qm_sg_entry_set64(&sgt[i], addr); - frag_len = frag->size; + qm_sg_entry_set64(&sgt[i + 1], addr); } - qm_sg_entry_set_f(&sgt[i - 1], frag_len); + + /* Set the final bit in the last used entry of the SGT */ + qm_sg_entry_set_f(&sgt[nr_frags], frag_len); qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
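An editorial note on the skb_to_sg_fd() rework just above: entry 0 of the scatter-gather table now always carries the skb's linear part, fragment i lands in entry i + 1, and the Final bit is set on the last entry actually used, sgt[nr_frags]. The loop also reloads frag and frag_len from the current fragment at the top of every iteration instead of carrying a length assigned in the previous one, which could go stale. A worked layout for an skb with a linear area and two page fragments (illustrative only):

/* SGT layout after the change, for nr_frags == 2:
 *
 *   sgt[0] -> skb->data,                 len = skb_headlen(skb)
 *   sgt[1] -> skb_shinfo(skb)->frags[0], len = frags[0].size
 *   sgt[2] -> skb_shinfo(skb)->frags[1], len = frags[1].size, F (final) bit
 *
 * qm_fd_set_sg() then points the frame descriptor at this table with the
 * total length skb->len.
 */

@@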
-2008,7 +2021,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv, } if (unlikely(err < 0)) { - percpu_stats->tx_errors++; percpu_stats->tx_fifo_errors++; return err; } @@ -2052,19 +2064,23 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) /* MAX_SKB_FRAGS is equal or larger than our dpaa_SGT_MAX_ENTRIES; * make sure we don't feed FMan with more fragments than it supports. */ - if (nonlinear && - likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) { - /* Just create a S/G fd based on the skb */ - err = skb_to_sg_fd(priv, skb, &fd); - percpu_priv->tx_frag_skbuffs++; - } else { + if (unlikely(nonlinear && + (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) { /* If the egress skb contains more fragments than we support * we have no choice but to linearize it ourselves. */ - if (unlikely(nonlinear) && __skb_linearize(skb)) + if (__skb_linearize(skb)) goto enomem; - /* Finally, create a contig FD from this skb */ + nonlinear = skb_is_nonlinear(skb); + } + + if (nonlinear) { + /* Just create a S/G fd based on the skb */ + err = skb_to_sg_fd(priv, skb, &fd); + percpu_priv->tx_frag_skbuffs++; + } else { + /* Create a contig FD from this skb */ err = skb_to_contig_fd(priv, skb, &fd, &offset); } if (unlikely(err < 0)) @@ -2201,14 +2217,8 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal, if (dpaa_eth_napi_schedule(percpu_priv, portal)) return qman_cb_dqrr_stop; - if (dpaa_eth_refill_bpools(priv)) - /* Unable to refill the buffer pool due to insufficient - * system memory. Just release the frame back into the pool, - * otherwise we'll soon end up with an empty buffer pool. - */ - dpaa_fd_release(net_dev, &dq->fd); - else - dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); + dpaa_eth_refill_bpools(priv); + dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); return qman_cb_dqrr_consume; } @@ -2278,7 +2288,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, vaddr = phys_to_virt(addr); prefetch(vaddr + qm_fd_get_offset(fd)); - fd_format = qm_fd_get_format(fd); /* The only FD types that we may receive are contig and S/G */ WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); @@ -2311,8 +2320,10 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, skb_len = skb->len; - if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) + if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) { + percpu_stats->rx_dropped++; return qman_cb_dqrr_consume; + } percpu_stats->rx_packets++; percpu_stats->rx_bytes += skb_len; @@ -2766,7 +2777,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) priv->channel = (u16)channel; - /* Start a thread that will walk the CPUs with affine portals + /* Walk the CPUs with affine portals * and add this pool channel to each's dequeue mask. 
*/ dpaa_eth_add_channel(priv->channel); @@ -2860,7 +2871,7 @@ static int dpaa_remove(struct platform_device *pdev) struct device *dev; int err; - dev = &pdev->dev; + dev = pdev->dev.parent; net_dev = dev_get_drvdata(dev); priv = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index faea674094b9..2f933b6b2f4e 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -211,7 +211,7 @@ static int dpaa_set_pauseparam(struct net_device *net_dev, if (epause->rx_pause) newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; if (epause->tx_pause) - newadv |= ADVERTISED_Asym_Pause; + newadv ^= ADVERTISED_Asym_Pause; oldadv = phydev->advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause); @@ -344,7 +344,7 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev, /* gather congestion related counters */ cg_num = 0; - cg_status = 0; + cg_status = false; cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies); if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) { cg_num = priv->cgr_data.cgr_congested_count; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 7a7f3a42b2aa..d4604bc8eb5b 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3600,6 +3600,8 @@ fec_drv_remove(struct platform_device *pdev) fec_enet_mii_remove(fep); if (fep->reg_phy) regulator_disable(fep->reg_phy); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(fep->phy_node); diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig index 8870a9a798ca..dc0850b3b517 100644 --- a/drivers/net/ethernet/freescale/fman/Kconfig +++ b/drivers/net/ethernet/freescale/fman/Kconfig @@ -2,7 +2,6 @@ config FSL_FMAN tristate "FMan support" depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST select GENERIC_ALLOCATOR - depends on HAS_DMA select PHYLIB default n help diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index ea43b4974149..57b1e2b47c0a 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -1100,7 +1100,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) set_bucket(dtsec->regs, bucket, true); /* Create element to be added to the driver hash table */ - hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); + hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC); if (!hash_entry) return -ENOMEM; hash_entry->addr = addr; @@ -1117,6 +1117,25 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) return 0; } +int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable) +{ + u32 tmp; + struct dtsec_regs __iomem *regs = dtsec->regs; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + tmp = ioread32be(&regs->rctrl); + if (enable) + tmp |= RCTRL_MPROM; + else + tmp &= ~RCTRL_MPROM; + + iowrite32be(tmp, &regs->rctrl); + + return 0; +} + int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) { struct dtsec_regs __iomem *regs = dtsec->regs; diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h index c4467c072058..1a689adf5a22 100644 ---
a/drivers/net/ethernet/freescale/fman/fman_dtsec.h +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h @@ -55,5 +55,6 @@ int dtsec_set_exception(struct fman_mac *dtsec, int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version); +int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable); #endif /* __DTSEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index c0296880feba..446a97b792e3 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -350,6 +350,7 @@ struct fman_mac { struct fman_rev_info fm_rev_info; bool basex_if; struct phy_device *pcsphy; + bool allmulti_enabled; }; static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr, @@ -940,6 +941,29 @@ int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) return 0; } +int memac_set_allmulti(struct fman_mac *memac, bool enable) +{ + u32 entry; + struct memac_regs __iomem *regs = memac->regs; + + if (!is_init_done(memac->memac_drv_param)) + return -EINVAL; + + if (enable) { + for (entry = 0; entry < HASH_TABLE_SIZE; entry++) + iowrite32be(entry | HASH_CTRL_MCAST_EN, + &regs->hashtable_ctrl); + } else { + for (entry = 0; entry < HASH_TABLE_SIZE; entry++) + iowrite32be(entry & ~HASH_CTRL_MCAST_EN, + &regs->hashtable_ctrl); + } + + memac->allmulti_enabled = enable; + + return 0; +} + int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) { struct memac_regs __iomem *regs = memac->regs; @@ -963,8 +987,12 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) break; } } - if (list_empty(&memac->multicast_addr_hash->lsts[hash])) - iowrite32be(hash & ~HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl); + + if (!memac->allmulti_enabled) { + if (list_empty(&memac->multicast_addr_hash->lsts[hash])) + iowrite32be(hash & ~HASH_CTRL_MCAST_EN, + &regs->hashtable_ctrl); + } return 0; } diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h index c4a66469a907..b5a50338ed9a 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.h +++ b/drivers/net/ethernet/freescale/fman/fman_memac.h @@ -57,5 +57,6 @@ int memac_set_exception(struct fman_mac *memac, enum fman_mac_exceptions exception, bool enable); int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); +int memac_set_allmulti(struct fman_mac *memac, bool enable); #endif /* __MEMAC_H */
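Editorial note: dTSEC (above), mEMAC and tGEC (below) all implement the same contract behind the new set_allmulti operation. On enable they accept all multicast traffic (every hash bucket gets HASH_CTRL_MCAST_EN, or RCTRL_MPROM on dTSEC); on disable they undo it; and the sticky allmulti_enabled flag keeps the del-hash paths from clearing a bucket that allmulti mode still needs. A condensed sketch of the caller side, mirroring the dpaa_set_rx_mode() hunk earlier (illustrative only; the function name is invented, and the real code also logs errors):

/* Illustrative only: how generic code drives the per-MAC op that the
 * setup_dtsec()/setup_tgec()/setup_memac() routines in mac.c (further
 * down) install into mac_dev->set_allmulti.
 */
static void example_sync_allmulti(struct net_device *net_dev,
				  struct mac_device *mac_dev)
{
	bool want = !!(net_dev->flags & IFF_ALLMULTI);

	if (want != mac_dev->allmulti) {
		mac_dev->allmulti = want;
		mac_dev->set_allmulti(mac_dev->fman_mac, want);
	}
}

diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index 4b0f3a50b293..284735d4ebe9 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -217,6 +217,7 @@ struct fman_mac { struct tgec_cfg *cfg; void *fm; struct fman_rev_info fm_rev_info; + bool allmulti_enabled; }; static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr) @@ -564,6 +565,29 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) return 0; } +int tgec_set_allmulti(struct fman_mac *tgec, bool enable) +{ + u32 entry; + struct tgec_regs __iomem *regs = tgec->regs; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + if (enable) { + for (entry = 0; entry < TGEC_HASH_TABLE_SIZE;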
entry++) + iowrite32be(entry | TGEC_HASH_MCAST_EN, + &regs->hashtable_ctrl); + } else { + for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++) + iowrite32be(entry & ~TGEC_HASH_MCAST_EN, + &regs->hashtable_ctrl); + } + + tgec->allmulti_enabled = enable; + + return 0; +} + int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) { struct tgec_regs __iomem *regs = tgec->regs; @@ -591,9 +615,12 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) break; } } - if (list_empty(&tgec->multicast_addr_hash->lsts[hash])) - iowrite32be((hash & ~TGEC_HASH_MCAST_EN), - &regs->hashtable_ctrl); + + if (!tgec->allmulti_enabled) { + if (list_empty(&tgec->multicast_addr_hash->lsts[hash])) + iowrite32be((hash & ~TGEC_HASH_MCAST_EN), + &regs->hashtable_ctrl); + } return 0; } diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h index 514bba9f47ce..cbbd3b422a98 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.h +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h @@ -51,5 +51,6 @@ int tgec_set_exception(struct fman_mac *tgec, int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); int tgec_get_version(struct fman_mac *tgec, u32 *mac_version); +int tgec_set_allmulti(struct fman_mac *tgec, bool enable); #endif /* __TGEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 88c0a0636b44..7b5b95f52c09 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -470,6 +470,7 @@ static void setup_dtsec(struct mac_device *mac_dev) mac_dev->set_tx_pause = dtsec_set_tx_pause_frames; mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames; mac_dev->set_exception = dtsec_set_exception; + mac_dev->set_allmulti = dtsec_set_allmulti; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; @@ -488,6 +489,7 @@ static void setup_tgec(struct mac_device *mac_dev) mac_dev->set_tx_pause = tgec_set_tx_pause_frames; mac_dev->set_rx_pause = tgec_accept_rx_pause_frames; mac_dev->set_exception = tgec_set_exception; + mac_dev->set_allmulti = tgec_set_allmulti; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; @@ -506,6 +508,7 @@ static void setup_memac(struct mac_device *mac_dev) mac_dev->set_tx_pause = memac_set_tx_pause_frames; mac_dev->set_rx_pause = memac_accept_rx_pause_frames; mac_dev->set_exception = memac_set_exception; + mac_dev->set_allmulti = memac_set_allmulti; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; @@ -564,7 +567,6 @@ static struct platform_device *dpaa_eth_add_device(int fman_id, } pdev->dev.parent = priv->dev; - set_dma_ops(&pdev->dev, get_dma_ops(priv->dev)); ret = platform_device_add_data(pdev, &data, sizeof(data)); if (ret) diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index eefb3357e304..b520cec120ee 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -59,6 +59,7 @@ struct mac_device { bool rx_pause_active; bool tx_pause_active; bool promisc; + bool allmulti; int (*init)(struct mac_device *mac_dev); int (*start)(struct mac_device *mac_dev); @@ -66,6 +67,7 @@ struct mac_device { void (*adjust_link)(struct mac_device *mac_dev); int (*set_promisc)(struct fman_mac *mac_dev, bool enable); int (*change_addr)(struct fman_mac *mac_dev,
enet_addr_t *enet_addr); + int (*set_allmulti)(struct fman_mac *mac_dev, bool enable); int (*set_multi)(struct net_device *net_dev, struct mac_device *mac_dev); int (*set_rx_pause)(struct fman_mac *mac_dev, bool en); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3bdeb295514b..f27f9bae1a4a 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2934,29 +2934,17 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, { int size = lstatus & BD_LENGTH_MASK; struct page *page = rxb->page; - bool last = !!(lstatus & BD_LFLAG(RXBD_LAST)); - - /* Remove the FCS from the packet length */ - if (last) - size -= ETH_FCS_LEN; if (likely(first)) { skb_put(skb, size); } else { /* the last fragments' length contains the full frame length */ - if (last) + if (lstatus & BD_LFLAG(RXBD_LAST)) size -= skb->len; - /* Add the last fragment if it contains something other than - * the FCS, otherwise drop it and trim off any part of the FCS - * that was already received. - */ - if (size > 0) - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rxb->page_offset + RXBUF_ALIGNMENT, - size, GFAR_RXB_TRUESIZE); - else if (size < 0) - pskb_trim(skb, skb->len + size); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rxb->page_offset + RXBUF_ALIGNMENT, + size, GFAR_RXB_TRUESIZE); } /* try reuse page */ @@ -3069,12 +3057,12 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) if (priv->padding) skb_pull(skb, priv->padding); + /* Trim off the FCS */ + pskb_trim(skb, skb->len - ETH_FCS_LEN); + if (ndev->features & NETIF_F_RXCSUM) gfar_rx_checksum(skb, fcb); - /* Tell the skb what kind of packet this is */ - skb->protocol = eth_type_trans(skb, ndev); - /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. * Even if vlan rx accel is disabled, on some chips * RXFCB_VLN is pseudo randomly set. 
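
[Editorial sketch] The gianfar hunks above stop carving the FCS out of individual buffer descriptors: gfar_add_rx_frag() now queues every fragment at its raw size, gfar_process_frame() trims the 4-byte FCS exactly once per assembled frame, and the hunk below reorders gfar_clean_rx_ring() so the byte counters see the frame before eth_type_trans() strips the Ethernet header. A stand-alone toy model of why the single trim is simpler (plain C, not driver code; the 1536/2 split is a hypothetical frame whose FCS straddles two descriptors, the case the old per-descriptor math had to special-case with pskb_trim()):

	#include <stdio.h>

	#define ETH_FCS_LEN 4

	int main(void)
	{
		int frag[] = { 1536, 2 };	/* hypothetical: the 4-byte FCS straddles both BDs */
		int i, len = 0;

		/* add every fragment at its raw size, as gfar_add_rx_frag() now does */
		for (i = 0; i < 2; i++)
			len += frag[i];

		/* one trim per frame, as gfar_process_frame() now does */
		len -= ETH_FCS_LEN;

		printf("frame length without FCS: %d\n", len);	/* 1534 */
		return 0;
	}
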
@@ -3145,13 +3133,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) continue; } + gfar_process_frame(ndev, skb); + /* Increment the number of packets */ total_pkts++; total_bytes += skb->len; skb_record_rx_queue(skb, rx_queue->qindex); - gfar_process_frame(ndev, skb); + skb->protocol = eth_type_trans(skb, ndev); /* Send the packet up the stack */ napi_gro_receive(&rx_queue->grp->napi_rx, skb); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 86944bc3b273..74bd260ca02a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -666,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data) static int hns_gmac_get_sset_count(int stringset) { - if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) + if (stringset == ETH_SS_STATS) return ARRAY_SIZE(g_gmac_stats_string); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index cac86e9ae0dd..9dcc5765f11f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -369,7 +369,6 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, { int ret; struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; - u8 addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct dsaf_drv_mac_single_dest_entry mac_entry; /* directy return ok in debug network mode */ @@ -377,7 +376,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, return 0; if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { - memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr)); + eth_broadcast_addr(mac_entry.addr); mac_entry.in_vlan_id = vlan_id; mac_entry.in_port_num = mac_cb->mac_id; mac_entry.port_num = port_num; @@ -404,7 +403,6 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) int ret; struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; u8 port_num; - u8 addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct mac_entry_idx *uc_mac_entry; struct dsaf_drv_mac_single_dest_entry mac_entry; @@ -414,7 +412,7 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) uc_mac_entry = &mac_cb->addr_entry_idx[vmid]; if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { - memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr)); + eth_broadcast_addr(mac_entry.addr); mac_entry.in_vlan_id = uc_mac_entry->vlan_id; mac_entry.in_port_num = mac_cb->mac_id; ret = hns_mac_get_inner_port_num(mac_cb, vmid, &port_num); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index b62816c1574e..93e71e27401b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb) int hns_ppe_get_sset_count(int stringset) { - if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) + if (stringset == ETH_SS_STATS) return ETH_PPE_STATIC_NUM; return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 6f3570cfb501..e2e28532e4dc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -876,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data) */ int hns_rcb_get_ring_sset_count(int stringset) { - if (stringset == 
ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) + if (stringset == ETH_SS_STATS) return HNS_RING_STATIC_REG_NUM; return 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 7ea7f8a4aa2a..2e14a3ae1d8b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -993,8 +993,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset) cnt--; return cnt; - } else { + } else if (stringset == ETH_SS_STATS) { return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset)); + } else { + return -EOPNOTSUPP; } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 3e9203ea42a6..519e2bd6aa60 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -11,6 +11,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */ + HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset*/ HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */ HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */ HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */ @@ -57,6 +58,8 @@ enum hclge_mbx_vlan_cfg_subcode { #define HCLGE_MBX_MAX_MSG_SIZE 16 #define HCLGE_MBX_MAX_RESP_DATA_SIZE 8 +#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3 +#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3 struct hclgevf_mbx_resp_status { struct mutex mbx_mutex; /* protects against contending sync cmd resp */ @@ -83,6 +86,21 @@ struct hclge_mbx_pf_to_vf_cmd { u16 msg[8]; }; +/* used by VF to store the received Async responses from PF */ +struct hclgevf_mbx_arq_ring { +#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8 +#define HCLGE_MBX_MAX_ARQ_MSG_NUM 1024 + struct hclgevf_dev *hdev; + u32 head; + u32 tail; + u32 count; + u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; +}; + #define hclge_mbx_ring_ptr_move_crq(crq) \ (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num) +#define hclge_mbx_tail_ptr_move_arq(arq) \ + (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) +#define hclge_mbx_head_ptr_move_arq(arq) \ + (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index fd06bc78c58e..37ec1b3286c6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -118,6 +118,8 @@ enum hnae3_reset_notify_type { }; enum hnae3_reset_type { + HNAE3_VF_RESET, + HNAE3_VF_FULL_RESET, HNAE3_FUNC_RESET, HNAE3_CORE_RESET, HNAE3_GLOBAL_RESET, @@ -265,6 +267,8 @@ struct hnae3_ae_dev { * Get tc size of handle * get_vector() * Get vector number and vector information + * put_vector() + * Put the vector in hdev * map_ring_to_vector() * Map rings to vector * unmap_ring_from_vector() @@ -336,7 +340,8 @@ struct hnae3_ae_ops { u32 *tx_usecs_high, u32 *rx_usecs_high); void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p); - int (*set_mac_addr)(struct hnae3_handle *handle, void *p); + int (*set_mac_addr)(struct hnae3_handle *handle, void *p, + bool is_first); int (*add_uc_addr)(struct hnae3_handle *handle, const unsigned char *addr); int (*rm_uc_addr)(struct hnae3_handle *handle, @@ -375,6 +380,7 @@ struct hnae3_ae_ops { int (*get_vector)(struct hnae3_handle *handle, u16 vector_num, struct hnae3_vector_info *vector_info); + int (*put_vector)(struct hnae3_handle *handle, int vector_num); int (*map_ring_to_vector)(struct hnae3_handle 
*handle, int vector_num, struct hnae3_ring_chain_node *vr_chain); @@ -396,8 +402,7 @@ struct hnae3_ae_ops { int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid, u16 vlan, u8 qos, __be16 proto); int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable); - void (*reset_event)(struct hnae3_handle *handle, - enum hnae3_reset_type reset); + void (*reset_event)(struct hnae3_handle *handle); void (*get_channels)(struct hnae3_handle *handle, struct ethtool_channels *ch); void (*get_tqps_and_rss_info)(struct hnae3_handle *h, @@ -407,6 +412,10 @@ struct hnae3_ae_ops { u32 *flowctrl_adv); int (*set_led_id)(struct hnae3_handle *handle, enum ethtool_phys_id_state status); + void (*get_link_mode)(struct hnae3_handle *handle, + unsigned long *supported, + unsigned long *advertising); + void (*get_port_type)(struct hnae3_handle *handle, u8 *port_type); }; struct hnae3_dcb_ops { @@ -487,6 +496,9 @@ struct hnae3_handle { struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */ u64 flags; /* Indicate the capabilities for this handle*/ + unsigned long last_reset_time; + enum hnae3_reset_type reset_level; + union { struct net_device *netdev; /* first member */ struct hnae3_knic_private_info kinfo; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 601b6295d3f8..8c55965a66ac 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -168,8 +168,8 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing */ - if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable && - !tqp_vector->rx_group.gl_adapt_enable) + if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable && + !tqp_vector->rx_group.coal.gl_adapt_enable) /* According to the hardware, the range of rl_reg is * 0-59 and the unit is 4. 
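
[Editorial sketch] The .coal renames that begin above move int_gl, gl_adapt_enable and flow_level out of struct hns3_enet_ring_group into a dedicated struct hns3_enet_coalesce (see the hns3_enet.h hunk further down), so a vector's coalesce settings can be saved and restored wholesale when the queue count changes. A minimal stand-alone model of that save/restore pattern, assuming the struct layout from the header hunk (u16/u8 widened to plain C types here, demo values arbitrary):

	#include <string.h>

	enum hns3_flow_level_range { HNS3_FLOW_LOW, HNS3_FLOW_MID, HNS3_FLOW_HIGH, HNS3_FLOW_ULTRA };

	struct hns3_enet_coalesce {
		unsigned short int_gl;			/* GL value, exposed as *_coalesce_usecs via ethtool */
		unsigned char gl_adapt_enable;		/* self-adaptive GL on/off */
		enum hns3_flow_level_range flow_level;
	};

	struct hns3_enet_ring_group {
		/* ring pointer and byte/packet counters elided */
		struct hns3_enet_coalesce coal;		/* was three loose fields */
	};

	/* The pattern hns3_set_channels()/hns3_restore_coal() use below:
	 * snapshot vector 0's config, rebuild the vectors, copy it back. */
	static void restore_coal(struct hns3_enet_ring_group *groups, int n,
				 const struct hns3_enet_coalesce *saved)
	{
		int i;

		for (i = 0; i < n; i++)
			memcpy(&groups[i].coal, saved, sizeof(*saved));
	}

	int main(void)
	{
		struct hns3_enet_ring_group groups[4];
		struct hns3_enet_coalesce saved = { 50, 1, HNS3_FLOW_LOW };

		memset(groups, 0, sizeof(groups));
		restore_coal(groups, 4, &saved);
		return groups[3].coal.int_gl == 50 ? 0 : 1;
	}
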
*/ @@ -205,23 +205,30 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, */ /* Default: enable interrupt coalescing self-adaptive and GL */ - tqp_vector->tx_group.gl_adapt_enable = 1; - tqp_vector->rx_group.gl_adapt_enable = 1; + tqp_vector->tx_group.coal.gl_adapt_enable = 1; + tqp_vector->rx_group.coal.gl_adapt_enable = 1; - tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K; - tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K; - - hns3_set_vector_coalesce_tx_gl(tqp_vector, - tqp_vector->tx_group.int_gl); - hns3_set_vector_coalesce_rx_gl(tqp_vector, - tqp_vector->rx_group.int_gl); + tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; + tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; /* Default: disable RL */ h->kinfo.int_rl_setting = 0; - hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); - tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW; - tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW; + tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; + tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; + tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; +} + +static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector, + struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + + hns3_set_vector_coalesce_tx_gl(tqp_vector, + tqp_vector->tx_group.coal.int_gl); + hns3_set_vector_coalesce_rx_gl(tqp_vector, + tqp_vector->rx_group.coal.int_gl); + hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); } static int hns3_nic_set_real_num_queue(struct net_device *netdev) @@ -249,6 +256,16 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) return 0; } +static u16 hns3_get_max_available_channels(struct hnae3_handle *h) +{ + u16 free_tqps, max_rss_size, max_tqps; + + h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); + max_tqps = h->kinfo.num_tc * max_rss_size; + + return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); +} + static int hns3_nic_net_up(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); @@ -303,7 +320,7 @@ static int hns3_nic_net_open(struct net_device *netdev) return ret; } - priv->last_reset_time = jiffies; + priv->ae_handle->last_reset_time = jiffies; return 0; } @@ -747,7 +764,7 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) { /* Config bd buffer end */ hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, - HNS3_TXD_BDTYPE_M, 0); + HNS3_TXD_BDTYPE_S, 0); hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); @@ -1104,7 +1121,7 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) return -EADDRNOTAVAIL; - ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data); + ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); if (ret) { netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); return ret; @@ -1388,11 +1405,15 @@ static int hns3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct hnae3_handle *h = hns3_get_handle(netdev); + struct hns3_nic_priv *priv = netdev_priv(netdev); int ret = -EIO; if (h->ae_algo->ops->set_vlan_filter) ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); + if (!ret) + set_bit(vid, priv->active_vlans); + return ret; } @@ -1400,14 +1421,32 @@ static int hns3_vlan_rx_kill_vid(struct 
net_device *netdev, __be16 proto, u16 vid) { struct hnae3_handle *h = hns3_get_handle(netdev); + struct hns3_nic_priv *priv = netdev_priv(netdev); int ret = -EIO; if (h->ae_algo->ops->set_vlan_filter) ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); + if (!ret) + clear_bit(vid, priv->active_vlans); + return ret; } +static void hns3_restore_vlan(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + u16 vid; + int ret; + + for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { + ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + if (ret) + netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n", + vid, ret); + } +} + static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { @@ -1504,7 +1543,6 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) static void hns3_nic_net_timeout(struct net_device *ndev) { struct hns3_nic_priv *priv = netdev_priv(ndev); - unsigned long last_reset_time = priv->last_reset_time; struct hnae3_handle *h = priv->ae_handle; if (!hns3_get_tx_timeo_queue_info(ndev)) @@ -1512,24 +1550,12 @@ static void hns3_nic_net_timeout(struct net_device *ndev) priv->tx_timeout_count++; - /* This timeout is far away enough from last timeout, - * if timeout again,set the reset type to PF reset - */ - if (time_after(jiffies, (last_reset_time + 20 * HZ))) - priv->reset_level = HNAE3_FUNC_RESET; - - /* Don't do any new action before the next timeout */ - else if (time_before(jiffies, (last_reset_time + ndev->watchdog_timeo))) + if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo))) return; - priv->last_reset_time = jiffies; - + /* request the reset */ if (h->ae_algo->ops->reset_event) - h->ae_algo->ops->reset_event(h, priv->reset_level); - - priv->reset_level++; - if (priv->reset_level > HNAE3_GLOBAL_RESET) - priv->reset_level = HNAE3_GLOBAL_RESET; + h->ae_algo->ops->reset_event(h); } static const struct net_device_ops hns3_nic_netdev_ops = { @@ -1588,10 +1614,6 @@ static void hns3_remove(struct pci_dev *pdev) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); hnae3_unregister_ae_dev(ae_dev); - - devm_kfree(&pdev->dev, ae_dev); - - pci_set_drvdata(pdev, NULL); } static struct pci_driver hns3_driver = { @@ -2064,15 +2086,13 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, desc = &ring->desc[ring->next_to_clean]; size = le16_to_cpu(desc->rx.size); - if (twobufs) { - truesize = hnae_buf_size(ring); - } else { - truesize = ALIGN(size, L1_CACHE_BYTES); + truesize = hnae_buf_size(ring); + + if (!twobufs) last_offset = hnae_page_size(ring) - hnae_buf_size(ring); - } skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, - size - pull_len, truesize - pull_len); + size - pull_len, truesize); /* Avoid re-using remote pages,flag default unreuse */ if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) @@ -2369,20 +2389,20 @@ out: static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) { -#define HNS3_RX_ULTRA_PACKET_RATE 40000 + struct hns3_enet_tqp_vector *tqp_vector = + ring_group->ring->tqp_vector; enum hns3_flow_level_range new_flow_level; - struct hns3_enet_tqp_vector *tqp_vector; - int packets_per_secs; - int bytes_per_usecs; + int packets_per_msecs; + int bytes_per_msecs; + u32 time_passed_ms; u16 new_int_gl; - int usecs; - if (!ring_group->int_gl) + if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies) return false; if (ring_group->total_packets == 0) { - ring_group->int_gl = HNS3_INT_GL_50K; - 
ring_group->flow_level = HNS3_FLOW_LOW; + ring_group->coal.int_gl = HNS3_INT_GL_50K; + ring_group->coal.flow_level = HNS3_FLOW_LOW; return true; } @@ -2392,35 +2412,46 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) * 20-1249MB/s high (18000 ints/s) * > 40000pps ultra (8000 ints/s) */ - new_flow_level = ring_group->flow_level; - new_int_gl = ring_group->int_gl; - tqp_vector = ring_group->ring->tqp_vector; - usecs = (ring_group->int_gl << 1); - bytes_per_usecs = ring_group->total_bytes / usecs; - /* 1000000 microseconds */ - packets_per_secs = ring_group->total_packets * 1000000 / usecs; + new_flow_level = ring_group->coal.flow_level; + new_int_gl = ring_group->coal.int_gl; + time_passed_ms = + jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); + + if (!time_passed_ms) + return false; + + do_div(ring_group->total_packets, time_passed_ms); + packets_per_msecs = ring_group->total_packets; + + do_div(ring_group->total_bytes, time_passed_ms); + bytes_per_msecs = ring_group->total_bytes; + +#define HNS3_RX_LOW_BYTE_RATE 10000 +#define HNS3_RX_MID_BYTE_RATE 20000 switch (new_flow_level) { case HNS3_FLOW_LOW: - if (bytes_per_usecs > 10) + if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) new_flow_level = HNS3_FLOW_MID; break; case HNS3_FLOW_MID: - if (bytes_per_usecs > 20) + if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) new_flow_level = HNS3_FLOW_HIGH; - else if (bytes_per_usecs <= 10) + else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) new_flow_level = HNS3_FLOW_LOW; break; case HNS3_FLOW_HIGH: case HNS3_FLOW_ULTRA: default: - if (bytes_per_usecs <= 20) + if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) new_flow_level = HNS3_FLOW_MID; break; } - if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) && - (&tqp_vector->rx_group == ring_group)) +#define HNS3_RX_ULTRA_PACKET_RATE 40 + + if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && + &tqp_vector->rx_group == ring_group) new_flow_level = HNS3_FLOW_ULTRA; switch (new_flow_level) { @@ -2442,9 +2473,9 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) ring_group->total_bytes = 0; ring_group->total_packets = 0; - ring_group->flow_level = new_flow_level; - if (new_int_gl != ring_group->int_gl) { - ring_group->int_gl = new_int_gl; + ring_group->coal.flow_level = new_flow_level; + if (new_int_gl != ring_group->coal.int_gl) { + ring_group->coal.int_gl = new_int_gl; return true; } return false; @@ -2456,19 +2487,27 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; bool rx_update, tx_update; - if (rx_group->gl_adapt_enable) { + if (tqp_vector->int_adapt_down > 0) { + tqp_vector->int_adapt_down--; + return; + } + + if (rx_group->coal.gl_adapt_enable) { rx_update = hns3_get_new_int_gl(rx_group); if (rx_update) hns3_set_vector_coalesce_rx_gl(tqp_vector, - rx_group->int_gl); + rx_group->coal.int_gl); } - if (tx_group->gl_adapt_enable) { + if (tx_group->coal.gl_adapt_enable) { tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group); if (tx_update) hns3_set_vector_coalesce_tx_gl(tqp_vector, - tx_group->int_gl); + tx_group->coal.int_gl); } + + tqp_vector->last_jiffies = jiffies; + tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; } static int hns3_nic_common_poll(struct napi_struct *napi, int budget) @@ -2615,32 +2654,18 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) struct hnae3_ring_chain_node vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; - 
struct hnae3_vector_info *vector; - struct pci_dev *pdev = h->pdev; - u16 tqp_num = h->kinfo.num_tqps; - u16 vector_num; int ret = 0; u16 i; - /* RSS size, cpu online and vector_num should be the same */ - /* Should consider 2p/4p later */ - vector_num = min_t(u16, num_online_cpus(), tqp_num); - vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), - GFP_KERNEL); - if (!vector) - return -ENOMEM; - - vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); - - priv->vector_num = vector_num; - priv->tqp_vector = (struct hns3_enet_tqp_vector *) - devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), - GFP_KERNEL); - if (!priv->tqp_vector) - return -ENOMEM; + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + hns3_vector_gl_rl_init_hw(tqp_vector, priv); + tqp_vector->num_tqps = 0; + } - for (i = 0; i < tqp_num; i++) { - u16 vector_i = i % vector_num; + for (i = 0; i < h->kinfo.num_tqps; i++) { + u16 vector_i = i % priv->vector_num; + u16 tqp_num = h->kinfo.num_tqps; tqp_vector = &priv->tqp_vector[vector_i]; @@ -2650,52 +2675,94 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) hns3_add_ring_to_group(&tqp_vector->rx_group, priv->ring_data[i + tqp_num].ring); - tqp_vector->idx = vector_i; - tqp_vector->mask_addr = vector[vector_i].io_addr; - tqp_vector->vector_irq = vector[vector_i].vector; - tqp_vector->num_tqps++; - priv->ring_data[i].ring->tqp_vector = tqp_vector; priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; + tqp_vector->num_tqps++; } - for (i = 0; i < vector_num; i++) { + for (i = 0; i < priv->vector_num; i++) { tqp_vector = &priv->tqp_vector[i]; tqp_vector->rx_group.total_bytes = 0; tqp_vector->rx_group.total_packets = 0; tqp_vector->tx_group.total_bytes = 0; tqp_vector->tx_group.total_packets = 0; - hns3_vector_gl_rl_init(tqp_vector, priv); tqp_vector->handle = h; ret = hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain); if (ret) - goto out; + return ret; ret = h->ae_algo->ops->map_ring_to_vector(h, tqp_vector->vector_irq, &vector_ring_chain); - if (ret) - goto out; hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + if (ret) + return ret; + netif_napi_add(priv->netdev, &tqp_vector->napi, hns3_nic_common_poll, NAPI_POLL_WEIGHT); } + return 0; +} + +static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + struct hnae3_vector_info *vector; + struct pci_dev *pdev = h->pdev; + u16 tqp_num = h->kinfo.num_tqps; + u16 vector_num; + int ret = 0; + u16 i; + + /* RSS size, cpu online and vector_num should be the same */ + /* Should consider 2p/4p later */ + vector_num = min_t(u16, num_online_cpus(), tqp_num); + vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), + GFP_KERNEL); + if (!vector) + return -ENOMEM; + + vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); + + priv->vector_num = vector_num; + priv->tqp_vector = (struct hns3_enet_tqp_vector *) + devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), + GFP_KERNEL); + if (!priv->tqp_vector) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + tqp_vector->idx = i; + tqp_vector->mask_addr = vector[i].io_addr; + tqp_vector->vector_irq = vector[i].vector; + hns3_vector_gl_rl_init(tqp_vector, priv); + } + out: devm_kfree(&pdev->dev, vector); return ret; } +static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) +{ + group->ring = 
NULL; + group->count = 0; +} + static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) { struct hnae3_ring_chain_node vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; - struct pci_dev *pdev = h->pdev; int i, ret; for (i = 0; i < priv->vector_num; i++) { @@ -2711,6 +2778,10 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) if (ret) return ret; + ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); + if (ret) + return ret; + hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { @@ -2722,12 +2793,30 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) } priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; - + hns3_clear_ring_group(&tqp_vector->rx_group); + hns3_clear_ring_group(&tqp_vector->tx_group); netif_napi_del(&priv->tqp_vector[i].napi); } - devm_kfree(&pdev->dev, priv->tqp_vector); + return 0; +} + +static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + struct pci_dev *pdev = h->pdev; + int i, ret; + + for (i = 0; i < priv->vector_num; i++) { + struct hns3_enet_tqp_vector *tqp_vector; + + tqp_vector = &priv->tqp_vector[i]; + ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); + if (ret) + return ret; + } + devm_kfree(&pdev->dev, priv->tqp_vector); return 0; } @@ -2957,13 +3046,8 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv) h->ae_algo->ops->reset_queue(h, i); hns3_fini_ring(priv->ring_data[i].ring); - devm_kfree(priv->dev, priv->ring_data[i].ring); hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); - devm_kfree(priv->dev, - priv->ring_data[i + h->kinfo.num_tqps].ring); } - devm_kfree(priv->dev, priv->ring_data); - return 0; } @@ -2987,7 +3071,7 @@ static void hns3_init_mac_addr(struct net_device *netdev) } if (h->ae_algo->ops->set_mac_addr) - h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); + h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); } @@ -3013,7 +3097,7 @@ static int hns3_client_init(struct hnae3_handle *handle) int ret; netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), - handle->kinfo.num_tqps); + hns3_get_max_available_channels(handle)); if (!netdev) return -ENOMEM; @@ -3021,8 +3105,8 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->dev = &pdev->dev; priv->netdev = netdev; priv->ae_handle = handle; - priv->last_reset_time = jiffies; - priv->reset_level = HNAE3_FUNC_RESET; + priv->ae_handle->reset_level = HNAE3_NONE_RESET; + priv->ae_handle->last_reset_time = jiffies; priv->tx_timeout_count = 0; handle->kinfo.netdev = netdev; @@ -3048,6 +3132,12 @@ static int hns3_client_init(struct hnae3_handle *handle) goto out_get_ring_cfg; } + ret = hns3_nic_alloc_vector_data(priv); + if (ret) { + ret = -ENOMEM; + goto out_alloc_vector_data; + } + ret = hns3_nic_init_vector_data(priv); if (ret) { ret = -ENOMEM; @@ -3076,8 +3166,10 @@ static int hns3_client_init(struct hnae3_handle *handle) out_reg_netdev_fail: out_init_ring_data: (void)hns3_nic_uninit_vector_data(priv); - priv->ring_data = NULL; out_init_vector_data: + hns3_nic_dealloc_vector_data(priv); +out_alloc_vector_data: + priv->ring_data = NULL; out_get_ring_cfg: priv->ae_handle = NULL; free_netdev(netdev); @@ -3097,10 +3189,16 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) if (ret) netdev_err(netdev, "uninit vector error\n"); + ret = hns3_nic_dealloc_vector_data(priv); + if (ret) + 
netdev_err(netdev, "dealloc vector error\n"); + ret = hns3_uninit_all_ring(priv); if (ret) netdev_err(netdev, "uninit ring error\n"); + hns3_put_ring_config(priv); + priv->ring_data = NULL; free_netdev(netdev); @@ -3240,7 +3338,6 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); int ret = 0; if (netif_running(kinfo->netdev)) { @@ -3250,8 +3347,7 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) "hns net up fail, ret=%d!\n", ret); return ret; } - - priv->last_reset_time = jiffies; + handle->last_reset_time = jiffies; } return ret; @@ -3263,11 +3359,14 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - priv->reset_level = 1; hns3_init_mac_addr(netdev); hns3_nic_set_rx_mode(netdev); hns3_recover_hw_addr(netdev); + /* Hardware table is only clear when pf resets */ + if (!(handle->flags & HNAE3_SUPPORT_VF)) + hns3_restore_vlan(netdev); + /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); @@ -3306,6 +3405,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) if (ret) netdev_err(netdev, "uninit ring error\n"); + hns3_put_ring_config(priv); + priv->ring_data = NULL; return ret; @@ -3336,18 +3437,24 @@ static int hns3_reset_notify(struct hnae3_handle *handle, return ret; } -static u16 hns3_get_max_available_channels(struct net_device *netdev) +static void hns3_restore_coal(struct hns3_nic_priv *priv, + struct hns3_enet_coalesce *tx, + struct hns3_enet_coalesce *rx) { - struct hnae3_handle *h = hns3_get_handle(netdev); - u16 free_tqps, max_rss_size, max_tqps; - - h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); - max_tqps = h->kinfo.num_tc * max_rss_size; + u16 vector_num = priv->vector_num; + int i; - return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); + for (i = 0; i < vector_num; i++) { + memcpy(&priv->tqp_vector[i].tx_group.coal, tx, + sizeof(struct hns3_enet_coalesce)); + memcpy(&priv->tqp_vector[i].rx_group.coal, rx, + sizeof(struct hns3_enet_coalesce)); + } } -static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) +static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num, + struct hns3_enet_coalesce *tx, + struct hns3_enet_coalesce *rx) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); @@ -3361,6 +3468,12 @@ static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) if (ret) return ret; + ret = hns3_nic_alloc_vector_data(priv); + if (ret) + goto err_alloc_vector; + + hns3_restore_coal(priv, tx, rx); + ret = hns3_nic_init_vector_data(priv); if (ret) goto err_uninit_vector; @@ -3375,6 +3488,8 @@ err_put_ring: hns3_put_ring_config(priv); err_uninit_vector: hns3_nic_uninit_vector_data(priv); +err_alloc_vector: + hns3_nic_dealloc_vector_data(priv); return ret; } @@ -3389,6 +3504,7 @@ int hns3_set_channels(struct net_device *netdev, struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; + struct hns3_enet_coalesce tx_coal, rx_coal; bool if_running = netif_running(netdev); u32 new_tqp_num = ch->combined_count; u16 org_tqp_num; @@ -3397,12 +3513,12 @@ int hns3_set_channels(struct net_device *netdev, if 
(ch->rx_count || ch->tx_count) return -EINVAL; - if (new_tqp_num > hns3_get_max_available_channels(netdev) || + if (new_tqp_num > hns3_get_max_available_channels(h) || new_tqp_num < kinfo->num_tc) { dev_err(&netdev->dev, "Change tqps fail, the tqp range is from %d to %d", kinfo->num_tc, - hns3_get_max_available_channels(netdev)); + hns3_get_max_available_channels(h)); return -EINVAL; } @@ -3411,7 +3527,7 @@ int hns3_set_channels(struct net_device *netdev, return 0; if (if_running) - dev_close(netdev); + hns3_nic_net_stop(netdev); hns3_clear_all_ring(h); @@ -3422,12 +3538,26 @@ int hns3_set_channels(struct net_device *netdev, goto open_netdev; } + /* Changing the tqp num may also change the vector num, + * ethtool only support setting and querying one coal + * configuation for now, so save the vector 0' coal + * configuation here in order to restore it. + */ + memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal, + sizeof(struct hns3_enet_coalesce)); + memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal, + sizeof(struct hns3_enet_coalesce)); + + hns3_nic_dealloc_vector_data(priv); + hns3_uninit_all_ring(priv); + hns3_put_ring_config(priv); org_tqp_num = h->kinfo.num_tqps; - ret = hns3_modify_tqp_num(netdev, new_tqp_num); + ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal); if (ret) { - ret = hns3_modify_tqp_num(netdev, org_tqp_num); + ret = hns3_modify_tqp_num(netdev, org_tqp_num, + &tx_coal, &rx_coal); if (ret) { /* If revert to old tqp failed, fatal error occurred */ dev_err(&netdev->dev, @@ -3440,7 +3570,7 @@ int hns3_set_channels(struct net_device *netdev, open_netdev: if (if_running) - dev_open(netdev); + hns3_nic_net_open(netdev); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 213f501b30bb..98cdbd3a1163 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -10,6 +10,8 @@ #ifndef __HNS3_ENET_H #define __HNS3_ENET_H +#include <linux/if_vlan.h> + #include "hnae3.h" extern const char hns3_driver_version[]; @@ -286,7 +288,7 @@ struct hns3_desc_cb { u16 page_offset; u16 reuse_flag; - u16 length; /* length of the buffer */ + u32 length; /* length of the buffer */ /* desc type, used by the ring user to mark the type of the priv data */ u16 type; @@ -460,15 +462,21 @@ enum hns3_link_mode_bits { #define HNS3_INT_RL_MAX 0x00EC #define HNS3_INT_RL_ENABLE_MASK 0x40 +#define HNS3_INT_ADAPT_DOWN_START 100 + +struct hns3_enet_coalesce { + u16 int_gl; + u8 gl_adapt_enable; + enum hns3_flow_level_range flow_level; +}; + struct hns3_enet_ring_group { /* array of pointers to rings */ struct hns3_enet_ring *ring; u64 total_bytes; /* total bytes processed this group */ u64 total_packets; /* total packets processed this group */ u16 count; - enum hns3_flow_level_range flow_level; - u16 int_gl; - u8 gl_adapt_enable; + struct hns3_enet_coalesce coal; }; struct hns3_enet_tqp_vector { @@ -491,6 +499,7 @@ struct hns3_enet_tqp_vector { /* when 0 should adjust interrupt coalesce parameter */ u8 int_adapt_down; + unsigned long last_jiffies; } ____cacheline_internodealigned_in_smp; enum hns3_udp_tnl_type { @@ -523,8 +532,6 @@ struct hns3_nic_priv { /* The most recently read link state */ int link; u64 tx_timeout_count; - enum hnae3_reset_type reset_level; - unsigned long last_reset_time; unsigned long state; @@ -535,6 +542,7 @@ struct hns3_nic_priv { struct notifier_block notifier_block; /* Vxlan/Geneve information */ struct hns3_udp_tunnel 
udp_tnl[HNS3_UDP_TNL_MAX]; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; }; union l3_hdr_info { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index b034c7f24eda..eb3c34f3cf87 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -74,19 +74,6 @@ struct hns3_link_mode_mapping { u32 ethtool_link_mode; }; -static const struct hns3_link_mode_mapping hns3_lm_map[] = { - {HNS3_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, - {HNS3_LM_AUTONEG_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, - {HNS3_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT}, - {HNS3_LM_PAUSE_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, - {HNS3_LM_BACKPLANE_BIT, ETHTOOL_LINK_MODE_Backplane_BIT}, - {HNS3_LM_10BASET_HALF_BIT, ETHTOOL_LINK_MODE_10baseT_Half_BIT}, - {HNS3_LM_10BASET_FULL_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT}, - {HNS3_LM_100BASET_HALF_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT}, - {HNS3_LM_100BASET_FULL_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT}, - {HNS3_LM_1000BASET_FULL_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, -}; - static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop) { struct hnae3_handle *h = hns3_get_handle(ndev); @@ -309,6 +296,9 @@ static void hns3_self_test(struct net_device *ndev, struct hnae3_handle *h = priv->ae_handle; int st_param[HNS3_SELF_TEST_TPYE_NUM][2]; bool if_running = netif_running(ndev); +#if IS_ENABLED(CONFIG_VLAN_8021Q) + bool dis_vlan_filter; +#endif int test_index = 0; u32 i; @@ -323,6 +313,14 @@ static void hns3_self_test(struct net_device *ndev, if (if_running) dev_close(ndev); +#if IS_ENABLED(CONFIG_VLAN_8021Q) + /* Disable the vlan filter for selftest does not support it */ + dis_vlan_filter = (ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) && + h->ae_algo->ops->enable_vlan_filter; + if (dis_vlan_filter) + h->ae_algo->ops->enable_vlan_filter(h, false); +#endif + set_bit(HNS3_NIC_STATE_TESTING, &priv->state); for (i = 0; i < HNS3_SELF_TEST_TPYE_NUM; i++) { @@ -345,28 +343,15 @@ static void hns3_self_test(struct net_device *ndev, clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); +#if IS_ENABLED(CONFIG_VLAN_8021Q) + if (dis_vlan_filter) + h->ae_algo->ops->enable_vlan_filter(h, true); +#endif + if (if_running) dev_open(ndev); } -static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd, - bool is_advertised) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(hns3_lm_map); i++) { - if (!(caps & hns3_lm_map[i].hns3_link_mode)) - continue; - - if (is_advertised) - __set_bit(hns3_lm_map[i].ethtool_link_mode, - cmd->link_modes.advertising); - else - __set_bit(hns3_lm_map[i].ethtool_link_mode, - cmd->link_modes.supported); - } -} - static int hns3_get_sset_count(struct net_device *netdev, int stringset) { struct hnae3_handle *h = hns3_get_handle(netdev); @@ -578,18 +563,19 @@ static int hns3_get_link_ksettings(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); u32 flowctrl_adv = 0; - u32 supported_caps; - u32 advertised_caps; - u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; u8 link_stat; if (!h->ae_algo || !h->ae_algo->ops) return -EOPNOTSUPP; /* 1.auto_neg & speed & duplex from cmd */ - if (netdev->phydev) + if (netdev->phydev) { phy_ethtool_ksettings_get(netdev->phydev, cmd); - else if (h->ae_algo->ops->get_ksettings_an_result) + + return 0; + } + + if (h->ae_algo->ops->get_ksettings_an_result) h->ae_algo->ops->get_ksettings_an_result(h, &cmd->base.autoneg, &cmd->base.speed, @@ -603,62 +589,16 @@ static int 
hns3_get_link_ksettings(struct net_device *netdev, cmd->base.duplex = DUPLEX_UNKNOWN; } - /* 2.media_type get from bios parameter block */ - if (h->ae_algo->ops->get_media_type) { - h->ae_algo->ops->get_media_type(h, &media_type); - - switch (media_type) { - case HNAE3_MEDIA_TYPE_FIBER: - cmd->base.port = PORT_FIBRE; - supported_caps = HNS3_LM_FIBRE_BIT | - HNS3_LM_AUTONEG_BIT | - HNS3_LM_PAUSE_BIT | - HNS3_LM_1000BASET_FULL_BIT; - - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_COPPER: - cmd->base.port = PORT_TP; - supported_caps = HNS3_LM_TP_BIT | - HNS3_LM_AUTONEG_BIT | - HNS3_LM_PAUSE_BIT | - HNS3_LM_1000BASET_FULL_BIT | - HNS3_LM_100BASET_FULL_BIT | - HNS3_LM_100BASET_HALF_BIT | - HNS3_LM_10BASET_FULL_BIT | - HNS3_LM_10BASET_HALF_BIT; - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_BACKPLANE: - cmd->base.port = PORT_NONE; - supported_caps = HNS3_LM_BACKPLANE_BIT | - HNS3_LM_PAUSE_BIT | - HNS3_LM_AUTONEG_BIT | - HNS3_LM_1000BASET_FULL_BIT | - HNS3_LM_100BASET_FULL_BIT | - HNS3_LM_100BASET_HALF_BIT | - HNS3_LM_10BASET_FULL_BIT | - HNS3_LM_10BASET_HALF_BIT; - - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_UNKNOWN: - default: - cmd->base.port = PORT_OTHER; - supported_caps = 0; - advertised_caps = 0; - break; - } - - if (!cmd->base.autoneg) - advertised_caps &= ~HNS3_LM_AUTONEG_BIT; - - advertised_caps &= ~HNS3_LM_PAUSE_BIT; + /* 2.get link mode and port type*/ + if (h->ae_algo->ops->get_link_mode) + h->ae_algo->ops->get_link_mode(h, + cmd->link_modes.supported, + cmd->link_modes.advertising); - /* now, map driver link modes to ethtool link modes */ - hns3_driv_to_eth_caps(supported_caps, cmd, false); - hns3_driv_to_eth_caps(advertised_caps, cmd, true); - } + cmd->base.port = PORT_NONE; + if (h->ae_algo->ops->get_port_type) + h->ae_algo->ops->get_port_type(h, + &cmd->base.port); /* 3.mdix_ctrl&mdix get from phy reg */ if (h->ae_algo->ops->get_mdix_mode) @@ -698,7 +638,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev) if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_key_size) - return -EOPNOTSUPP; + return 0; return h->ae_algo->ops->get_rss_key_size(h); } @@ -709,7 +649,7 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev) if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_indir_size) - return -EOPNOTSUPP; + return 0; return h->ae_algo->ops->get_rss_indir_size(h); } @@ -905,11 +845,13 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue, tx_vector = priv->ring_data[queue].ring->tqp_vector; rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; - cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.gl_adapt_enable; - cmd->use_adaptive_rx_coalesce = rx_vector->rx_group.gl_adapt_enable; + cmd->use_adaptive_tx_coalesce = + tx_vector->tx_group.coal.gl_adapt_enable; + cmd->use_adaptive_rx_coalesce = + rx_vector->rx_group.coal.gl_adapt_enable; - cmd->tx_coalesce_usecs = tx_vector->tx_group.int_gl; - cmd->rx_coalesce_usecs = rx_vector->rx_group.int_gl; + cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl; + cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl; cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting; cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting; @@ -1029,14 +971,18 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev, tx_vector = priv->ring_data[queue].ring->tqp_vector; rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; - tx_vector->tx_group.gl_adapt_enable = 
cmd->use_adaptive_tx_coalesce; - rx_vector->rx_group.gl_adapt_enable = cmd->use_adaptive_rx_coalesce; + tx_vector->tx_group.coal.gl_adapt_enable = + cmd->use_adaptive_tx_coalesce; + rx_vector->rx_group.coal.gl_adapt_enable = + cmd->use_adaptive_rx_coalesce; - tx_vector->tx_group.int_gl = cmd->tx_coalesce_usecs; - rx_vector->rx_group.int_gl = cmd->rx_coalesce_usecs; + tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs; + rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs; - hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.int_gl); - hns3_set_vector_coalesce_rx_gl(rx_vector, rx_vector->rx_group.int_gl); + hns3_set_vector_coalesce_tx_gl(tx_vector, + tx_vector->tx_group.coal.int_gl); + hns3_set_vector_coalesce_rx_gl(rx_vector, + rx_vector->rx_group.coal.int_gl); hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting); hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting); @@ -1111,6 +1057,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_channels = hns3_get_channels, .get_coalesce = hns3_get_coalesce, .set_coalesce = hns3_set_coalesce, + .get_link = hns3_get_link, }; static const struct ethtool_ops hns3_ethtool_ops = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 3fd10a6bec53..ee3cbac6dfaa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -12,7 +12,7 @@ #include <linux/types.h> #include <linux/io.h> -#define HCLGE_CMDQ_TX_TIMEOUT 1000 +#define HCLGE_CMDQ_TX_TIMEOUT 30000 struct hclge_dev; struct hclge_desc { @@ -414,6 +414,8 @@ struct hclge_pf_res_cmd { #define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16) #define HCLGE_CFG_RSS_SIZE_S 24 #define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24) +#define HCLGE_CFG_SPEED_ABILITY_S 0 +#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0) struct hclge_cfg_param_cmd { __le32 offset; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 5018d6633133..955f0e3d5c95 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -144,6 +144,8 @@ static int hclge_map_update(struct hnae3_handle *h) if (ret) return ret; + hclge_rss_indir_init_cfg(hdev); + return hclge_rss_init_hw(hdev); } @@ -203,9 +205,11 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) { + u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC]; struct hclge_vport *vport = hclge_get_vport(h); struct hclge_dev *hdev = vport->back; u8 i, j, pfc_map, *prio_tc; + int ret; memset(pfc, 0, sizeof(*pfc)); pfc->pfc_cap = hdev->pfc_max; @@ -220,6 +224,18 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) } } + ret = hclge_pfc_tx_stats_get(hdev, requests); + if (ret) + return ret; + + ret = hclge_pfc_rx_stats_get(hdev, indications); + if (ret) + return ret; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + pfc->requests[i] = requests[i]; + pfc->indications[i] = indications[i]; + } return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 32bc6f68e297..2066dd734444 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -55,6 +55,8 @@ static const struct pci_device_id 
ae_algo_pci_tbl[] = { {0, } }; +MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); + static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { "Mac Loopback test", "Serdes Loopback test", @@ -1024,6 +1026,45 @@ static int hclge_parse_speed(int speed_cmd, int *speed) return 0; } +static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, + u8 speed_ability) +{ + unsigned long *supported = hdev->hw.mac.supported; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + supported); + + set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); + set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); +} + +static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) +{ + u8 media_type = hdev->hw.mac.media_type; + + if (media_type != HNAE3_MEDIA_TYPE_FIBER) + return; + + hclge_parse_fiber_link_mode(hdev, speed_ability); +} + static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) { struct hclge_cfg_param_cmd *req; @@ -1072,6 +1113,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) req = (struct hclge_cfg_param_cmd *)desc[1].data; cfg->numa_node_map = __le32_to_cpu(req->param[0]); + + cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_SPEED_ABILITY_M, + HCLGE_CFG_SPEED_ABILITY_S); } /* hclge_get_cfg: query the static parameter from flash @@ -1160,6 +1205,8 @@ static int hclge_configure(struct hclge_dev *hdev) return ret; } + hclge_parse_link_mode(hdev, cfg.speed_ability); + if ((hdev->tc_max > HNAE3_MAX_TC) || (hdev->tc_max < 1)) { dev_warn(&hdev->pdev->dev, "TC num = %d.\n", @@ -2702,7 +2749,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev) return 0; } -static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) +int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) { struct hclge_desc desc; struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; @@ -2798,27 +2845,31 @@ static void hclge_reset(struct hclge_dev *hdev) hclge_notify_client(hdev, HNAE3_UP_CLIENT); } -static void hclge_reset_event(struct hnae3_handle *handle, - enum hnae3_reset_type reset) +static void hclge_reset_event(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - dev_info(&hdev->pdev->dev, - "Receive reset event , reset_type is %d", reset); + /* check if this is a new reset request and we are not here just because + * last reset attempt did not succeed and watchdog hit us again. We will + * know this if last reset request did not occur very recently (watchdog + * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) + * In case of new request we reset the "reset level" to PF reset. 
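
[Editorial sketch] That heuristic pairs with an escalation ladder: each back-to-back request is scheduled at the current level and then bumps it one step, capped at global reset, while a request arriving long after the last one starts over at function (PF) reset. A stand-alone model of the policy in the hunk (jiffies and time_after() mocked; the enum values are illustrative, only their order matters):

	#include <stdio.h>

	#define HZ 100
	#define time_after(a, b) ((long)((b) - (a)) < 0)

	enum reset_level { FUNC_RESET = 2, CORE_RESET, GLOBAL_RESET };	/* order mirrors hnae3_reset_type */

	static unsigned long jiffies;		/* mocked tick counter */
	static unsigned long last_reset_time;
	static enum reset_level level = FUNC_RESET;

	static enum reset_level reset_event(void)
	{
		enum reset_level req;

		/* stale request, so not a retry of a failed reset: start
		 * over at the mildest level (the hunk's 4 * 5 * HZ check) */
		if (time_after(jiffies, last_reset_time + 4 * 5 * HZ))
			level = FUNC_RESET;

		req = level;			/* this level gets scheduled */
		if (level < GLOBAL_RESET)
			level++;		/* escalate the next attempt */
		last_reset_time = jiffies;

		return req;
	}

	int main(void)
	{
		jiffies = 10 * HZ;
		printf("%d\n", reset_event());	/* 2: FUNC_RESET */
		jiffies += HZ;
		printf("%d\n", reset_event());	/* 3: CORE_RESET, quick retry escalated */
		jiffies += 100 * HZ;
		printf("%d\n", reset_event());	/* 2: FUNC_RESET again, request was stale */
		return 0;
	}
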
+ */ + if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) + handle->reset_level = HNAE3_FUNC_RESET; - switch (reset) { - case HNAE3_FUNC_RESET: - case HNAE3_CORE_RESET: - case HNAE3_GLOBAL_RESET: - /* request reset & schedule reset task */ - set_bit(reset, &hdev->reset_request); - hclge_reset_task_schedule(hdev); - break; - default: - dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset); - break; - } + dev_info(&hdev->pdev->dev, "received reset event , reset type is %d", + handle->reset_level); + + /* request reset & schedule reset task */ + set_bit(handle->reset_level, &hdev->reset_request); + hclge_reset_task_schedule(hdev); + + if (handle->reset_level < HNAE3_GLOBAL_RESET) + handle->reset_level++; + + handle->last_reset_time = jiffies; } static void hclge_reset_subtask(struct hclge_dev *hdev) @@ -2969,6 +3020,24 @@ static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) return -EINVAL; } +static int hclge_put_vector(struct hnae3_handle *handle, int vector) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int vector_id; + + vector_id = hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&hdev->pdev->dev, + "Get vector index fail. vector_id =%d\n", vector_id); + return vector_id; + } + + hclge_free_vector(hdev, vector_id); + + return 0; +} + static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) { return HCLGE_RSS_KEY_SIZE; @@ -2979,31 +3048,6 @@ static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) return HCLGE_RSS_IND_TBL_SIZE; } -static int hclge_get_rss_algo(struct hclge_dev *hdev) -{ - struct hclge_rss_config_cmd *req; - struct hclge_desc desc; - int rss_hash_algo; - int ret; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true); - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get link status error, status =%d\n", ret); - return ret; - } - - req = (struct hclge_rss_config_cmd *)desc.data; - rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK); - - if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ) - return ETH_RSS_HASH_TOP; - - return -EINVAL; -} - static int hclge_set_rss_algo_key(struct hclge_dev *hdev, const u8 hfunc, const u8 *key) { @@ -3042,7 +3086,7 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev, return 0; } -static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir) +static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) { struct hclge_rss_indirection_table_cmd *req; struct hclge_desc desc; @@ -3116,14 +3160,16 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); req = (struct hclge_rss_input_tuple_cmd *)desc.data; - req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; - req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; - req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + + /* Get the tuple cfg from pf */ + req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = 
hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -3138,12 +3184,11 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, u8 *hfunc) { struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; int i; /* Get hash algorithm */ if (hfunc) - *hfunc = hclge_get_rss_algo(hdev); + *hfunc = vport->rss_algo; /* Get the RSS Key required by the user */ if (key) @@ -3167,8 +3212,6 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, /* Set the RSS Hash Key if specififed by the user */ if (key) { - /* Update the shadow RSS key with user specified qids */ - memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); if (hfunc == ETH_RSS_HASH_TOP || hfunc == ETH_RSS_HASH_NO_CHANGE) @@ -3178,6 +3221,10 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, ret = hclge_set_rss_algo_key(hdev, hash_algo, key); if (ret) return ret; + + /* Update the shadow RSS key with user specified qids */ + memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); + vport->rss_algo = hash_algo; } /* Update the shadow RSS table with user specified qids */ @@ -3185,8 +3232,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, vport->rss_indirection_tbl[i] = indir[i]; /* Update the hardware */ - ret = hclge_set_rss_indir_table(hdev, indir); - return ret; + return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); } static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) @@ -3229,15 +3275,16 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle, return -EINVAL; req = (struct hclge_rss_input_tuple_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Read rss tuple fail, status = %d\n", ret); - return ret; - } + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); - hclge_cmd_reuse_desc(&desc, false); + req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; tuple_sets = hclge_get_rss_hash_bits(nfc); switch (nfc->flow_type) { @@ -3274,52 +3321,49 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle, } ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) + if (ret) { dev_err(&hdev->pdev->dev, "Set rss tuple fail, status = %d\n", ret); + return ret; + } - return ret; + vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; + vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; + vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; + vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; + vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; + vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; + vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; + vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; + return 0; } static int 
hclge_get_rss_tuple(struct hnae3_handle *handle, struct ethtool_rxnfc *nfc) { struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - struct hclge_rss_input_tuple_cmd *req; - struct hclge_desc desc; u8 tuple_sets; - int ret; nfc->data = 0; - req = (struct hclge_rss_input_tuple_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Read rss tuple fail, status = %d\n", ret); - return ret; - } - switch (nfc->flow_type) { case TCP_V4_FLOW: - tuple_sets = req->ipv4_tcp_en; + tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; break; case UDP_V4_FLOW: - tuple_sets = req->ipv4_udp_en; + tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; break; case TCP_V6_FLOW: - tuple_sets = req->ipv6_tcp_en; + tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; break; case UDP_V6_FLOW: - tuple_sets = req->ipv6_udp_en; + tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; break; case SCTP_V4_FLOW: - tuple_sets = req->ipv4_sctp_en; + tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; break; case SCTP_V6_FLOW: - tuple_sets = req->ipv6_sctp_en; + tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; break; case IPV4_FLOW: case IPV6_FLOW: @@ -3354,50 +3398,28 @@ static int hclge_get_tc_size(struct hnae3_handle *handle) int hclge_rss_init_hw(struct hclge_dev *hdev) { - const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ; struct hclge_vport *vport = hdev->vport; + u8 *rss_indir = vport[0].rss_indirection_tbl; + u16 rss_size = vport[0].alloc_rss_size; + u8 *key = vport[0].rss_hash_key; + u8 hfunc = vport[0].rss_algo; u16 tc_offset[HCLGE_MAX_TC_NUM]; - u8 rss_key[HCLGE_RSS_KEY_SIZE]; u16 tc_valid[HCLGE_MAX_TC_NUM]; u16 tc_size[HCLGE_MAX_TC_NUM]; - u32 *rss_indir = NULL; - u16 rss_size = 0, roundup_size; - const u8 *key; - int i, ret, j; - - rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); - if (!rss_indir) - return -ENOMEM; - - /* Get default RSS key */ - netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE); - - /* Initialize RSS indirect table for each vport */ - for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { - for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) { - vport[j].rss_indirection_tbl[i] = - i % vport[j].alloc_rss_size; - - /* vport 0 is for PF */ - if (j != 0) - continue; + u16 roundup_size; + int i, ret; - rss_size = vport[j].alloc_rss_size; - rss_indir[i] = vport[j].rss_indirection_tbl[i]; - } - } ret = hclge_set_rss_indir_table(hdev, rss_indir); if (ret) - goto err; + return ret; - key = rss_key; ret = hclge_set_rss_algo_key(hdev, hfunc, key); if (ret) - goto err; + return ret; ret = hclge_set_rss_input_tuple(hdev); if (ret) - goto err; + return ret; /* Each TC have the same queue size, and tc_size set to hardware is * the log2 of roundup power of two of rss_size, the acutal queue @@ -3407,8 +3429,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev) dev_err(&hdev->pdev->dev, "Configure rss tc size failed, invalid TC_SIZE = %d\n", rss_size); - ret = -EINVAL; - goto err; + return -EINVAL; } roundup_size = roundup_pow_of_two(rss_size); @@ -3425,12 +3446,50 @@ int hclge_rss_init_hw(struct hclge_dev *hdev) tc_offset[i] = rss_size * i; } - ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); + return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); +} -err: - kfree(rss_indir); +void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int i, j; - return ret; + for (j = 0; j < hdev->num_vmdq_vport + 1; j++) 
{ + for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) + vport[j].rss_indirection_tbl[i] = + i % vport[j].alloc_rss_size; + } +} + +static void hclge_rss_init_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int i; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + vport[i].rss_tuple_sets.ipv4_tcp_en = + HCLGE_RSS_INPUT_TUPLE_OTHER; + vport[i].rss_tuple_sets.ipv4_udp_en = + HCLGE_RSS_INPUT_TUPLE_OTHER; + vport[i].rss_tuple_sets.ipv4_sctp_en = + HCLGE_RSS_INPUT_TUPLE_SCTP; + vport[i].rss_tuple_sets.ipv4_fragment_en = + HCLGE_RSS_INPUT_TUPLE_OTHER; + vport[i].rss_tuple_sets.ipv6_tcp_en = + HCLGE_RSS_INPUT_TUPLE_OTHER; + vport[i].rss_tuple_sets.ipv6_udp_en = + HCLGE_RSS_INPUT_TUPLE_OTHER; + vport[i].rss_tuple_sets.ipv6_sctp_en = + HCLGE_RSS_INPUT_TUPLE_SCTP; + vport[i].rss_tuple_sets.ipv6_fragment_en = + HCLGE_RSS_INPUT_TUPLE_OTHER; + + vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; + + netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); + } + + hclge_rss_indir_init_cfg(hdev); } int hclge_bind_ring_with_vector(struct hclge_vport *vport, @@ -3525,6 +3584,9 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, struct hclge_dev *hdev = vport->back; int vector_id, ret; + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + return 0; + vector_id = hclge_get_vector_index(hdev, vector); if (vector_id < 0) { dev_err(&handle->pdev->dev, @@ -3533,18 +3595,13 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, } ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); - if (ret) { + if (ret) dev_err(&handle->pdev->dev, "Unmap ring from vector fail. vectorid=%d, ret =%d\n", vector_id, ret); - return ret; - } - /* Free this MSIX or MSI vector */ - hclge_free_vector(hdev, vector_id); - - return 0; + return ret; } int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, @@ -3717,32 +3774,26 @@ static int hclge_ae_start(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int i, queue_id, ret; + int i, ret; - for (i = 0; i < vport->alloc_tqps; i++) { - /* todo clear interrupt */ - /* ring enable */ - queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); - if (queue_id < 0) { - dev_warn(&hdev->pdev->dev, - "Get invalid queue id, ignore it\n"); - continue; - } + for (i = 0; i < vport->alloc_tqps; i++) + hclge_tqp_enable(hdev, i, 0, true); - hclge_tqp_enable(hdev, queue_id, 0, true); - } /* mac enable */ hclge_cfg_mac_mode(hdev, true); clear_bit(HCLGE_STATE_DOWN, &hdev->state); mod_timer(&hdev->service_timer, jiffies + HZ); + /* reset tqp stats */ + hclge_reset_tqp_stats(handle); + + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + return 0; + ret = hclge_mac_start_phy(hdev); if (ret) return ret; - /* reset tqp stats */ - hclge_reset_tqp_stats(handle); - return 0; } @@ -3750,19 +3801,17 @@ static void hclge_ae_stop(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int i, queue_id; + int i; - for (i = 0; i < vport->alloc_tqps; i++) { - /* Ring disable */ - queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); - if (queue_id < 0) { - dev_warn(&hdev->pdev->dev, - "Get invalid queue id, ignore it\n"); - continue; - } + del_timer_sync(&hdev->service_timer); + cancel_work_sync(&hdev->service_task); + + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + return; + + for (i = 0; i < vport->alloc_tqps; i++) + hclge_tqp_enable(hdev, i, 0, false); - hclge_tqp_enable(hdev, 
queue_id, 0, false); - } /* Mac disable */ hclge_cfg_mac_mode(hdev, false); @@ -3770,6 +3819,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle) /* reset tqp stats */ hclge_reset_tqp_stats(handle); + hclge_update_link_status(hdev); } static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, @@ -3790,11 +3840,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, if ((!resp_code) || (resp_code == 1)) { return_status = 0; } else if (resp_code == 2) { - return_status = -EIO; + return_status = -ENOSPC; dev_err(&hdev->pdev->dev, "add mac addr failed for uc_overflow.\n"); } else if (resp_code == 3) { - return_status = -EIO; + return_status = -ENOSPC; dev_err(&hdev->pdev->dev, "add mac addr failed for mc_overflow.\n"); } else { @@ -3806,7 +3856,7 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, if (!resp_code) { return_status = 0; } else if (resp_code == 1) { - return_status = -EIO; + return_status = -ENOENT; dev_dbg(&hdev->pdev->dev, "remove mac addr failed for miss.\n"); } else { @@ -3818,7 +3868,7 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, if (!resp_code) { return_status = 0; } else if (resp_code == 1) { - return_status = -EIO; + return_status = -ENOENT; dev_dbg(&hdev->pdev->dev, "lookup mac addr failed for miss.\n"); } else { @@ -3827,7 +3877,7 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, resp_code); } } else { - return_status = -EIO; + return_status = -EINVAL; dev_err(&hdev->pdev->dev, "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", op); @@ -4118,8 +4168,9 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, { struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; - enum hclge_cmd_status status; + struct hclge_desc desc; u16 egress_port = 0; + int ret; /* mac addr check */ if (is_zero_ether_addr(addr) || @@ -4151,9 +4202,23 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, hclge_prepare_mac_addr(&req, addr); - status = hclge_add_mac_vlan_tbl(vport, &req, NULL); + /* Lookup the mac address in the mac_vlan table, and add + * it if the entry is nonexistent. Duplicate unicast entries + * are not allowed in the mac vlan table.
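The rework above makes hclge_add_uc_addr_common() probe the MAC-VLAN table before inserting, so a lookup hit is reported as a duplicate instead of being silently re-added. A minimal userspace sketch of that lookup-then-add pattern follows; the array-backed table and every name in it are invented for illustration, not taken from the driver.

#include <errno.h>
#include <string.h>

#define TBL_SIZE 8

static unsigned char tbl[TBL_SIZE][6];	/* toy MAC table */
static int tbl_used[TBL_SIZE];

static int mac_tbl_lookup(const unsigned char *mac)
{
	for (int i = 0; i < TBL_SIZE; i++)
		if (tbl_used[i] && !memcmp(tbl[i], mac, 6))
			return 0;
	return -ENOENT;
}

static int mac_tbl_add(const unsigned char *mac)
{
	int ret = mac_tbl_lookup(mac);

	if (ret == -ENOENT) {			/* miss: safe to insert */
		for (int i = 0; i < TBL_SIZE; i++) {
			if (!tbl_used[i]) {
				memcpy(tbl[i], mac, 6);
				tbl_used[i] = 1;
				return 0;
			}
		}
		return -ENOSPC;			/* overflow, cf. resp_code 2/3 */
	}
	return ret ? ret : -EINVAL;		/* hit: reject the duplicate */
}

This mirrors the error mapping introduced earlier in the hunk: -ENOSPC for a table overflow, -ENOENT for a miss, and -EINVAL when an add request finds the entry already present.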
+ */ + ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); + if (ret == -ENOENT) + return hclge_add_mac_vlan_tbl(vport, &req, NULL); + + /* check if we just hit the duplicate */ + if (!ret) + ret = -EINVAL; - return status; + dev_err(&hdev->pdev->dev, + "PF failed to add unicast entry(%pM) in the MAC table\n", + addr); + + return ret; } static int hclge_rm_uc_addr(struct hnae3_handle *handle, @@ -4169,7 +4234,7 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, { struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; - enum hclge_cmd_status status; + int ret; /* mac addr check */ if (is_zero_ether_addr(addr) || @@ -4185,9 +4250,9 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); - status = hclge_remove_mac_vlan_tbl(vport, &req); + ret = hclge_remove_mac_vlan_tbl(vport, &req); - return status; + return ret; } static int hclge_add_mc_addr(struct hnae3_handle *handle, @@ -4392,7 +4457,8 @@ static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) ether_addr_copy(p, hdev->hw.mac.mac_addr); } -static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) +static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, + bool is_first) { const unsigned char *new_addr = (const unsigned char *)p; struct hclge_vport *vport = hclge_get_vport(handle); @@ -4409,11 +4475,9 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) return -EINVAL; } - ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr); - if (ret) + if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) dev_warn(&hdev->pdev->dev, - "remove old uc mac address fail, ret =%d.\n", - ret); + "remove old uc mac address fail.\n"); ret = hclge_add_uc_addr(handle, new_addr); if (ret) { @@ -4421,17 +4485,15 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) "add uc mac address fail, ret =%d.\n", ret); - ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr); - if (ret) { + if (!is_first && + hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) dev_err(&hdev->pdev->dev, - "restore uc mac address fail, ret =%d.\n", - ret); - } + "restore uc mac address fail.\n"); return -EIO; } - ret = hclge_mac_pause_addr_cfg(hdev, new_addr); + ret = hclge_pause_addr_cfg(hdev, new_addr); if (ret) { dev_err(&hdev->pdev->dev, "configure mac pause address fail, ret =%d.\n", @@ -4771,11 +4833,9 @@ static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) return hclge_set_vlan_rx_offload_cfg(vport); } -static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) +static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) { - struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_config_max_frm_size_cmd *req; - struct hclge_dev *hdev = vport->back; struct hclge_desc desc; int max_frm_size; int ret; @@ -4804,6 +4864,27 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) return 0; } +static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + ret = hclge_set_mac_mtu(hdev, new_mtu); + if (ret) { + dev_err(&hdev->pdev->dev, + "Change mtu fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_buffer_alloc(hdev); + if (ret) + dev_err(&hdev->pdev->dev, + "Allocate buffer fail, ret =%d\n", ret); + + return ret; +} + static int 
hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, bool enable) { @@ -4848,21 +4929,39 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); } +static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, + u16 queue_id) +{ + struct hnae3_queue *queue; + struct hclge_tqp *tqp; + + queue = handle->kinfo.tqp[queue_id]; + tqp = container_of(queue, struct hclge_tqp, q); + + return tqp->index; +} + void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; int reset_try_times = 0; int reset_status; + u16 queue_gid; int ret; + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + return; + + queue_gid = hclge_covert_handle_qid_global(handle, queue_id); + ret = hclge_tqp_enable(hdev, queue_id, 0, false); if (ret) { dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); return; } - ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true); + ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); if (ret) { dev_warn(&hdev->pdev->dev, "Send reset tqp cmd fail, ret = %d\n", ret); @@ -4873,7 +4972,7 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { /* Wait for tqp hw reset */ msleep(20); - reset_status = hclge_get_reset_status(hdev, queue_id); + reset_status = hclge_get_reset_status(hdev, queue_gid); if (reset_status) break; } @@ -4883,7 +4982,7 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) return; } - ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false); + ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); if (ret) { dev_warn(&hdev->pdev->dev, "Deassert the soft reset fail, ret = %d\n", ret); @@ -4891,6 +4990,43 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) } } +void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) +{ + struct hclge_dev *hdev = vport->back; + int reset_try_times = 0; + int reset_status; + u16 queue_gid; + int ret; + + queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); + + ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); + if (ret) { + dev_warn(&hdev->pdev->dev, + "Send reset tqp cmd fail, ret = %d\n", ret); + return; + } + + reset_try_times = 0; + while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { + /* Wait for tqp hw reset */ + msleep(20); + reset_status = hclge_get_reset_status(hdev, queue_gid); + if (reset_status) + break; + } + + if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { + dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); + return; + } + + ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); + if (ret) + dev_warn(&hdev->pdev->dev, + "Deassert the soft reset fail, ret = %d\n", ret); +} + static u32 hclge_get_fw_version(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -5376,11 +5512,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); return ret; } - ret = hclge_buffer_alloc(hdev); - if (ret) { - dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret); - return ret; - } ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); if (ret) { @@ -5400,6 +5531,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + hclge_rss_init_cfg(hdev); ret = hclge_rss_init_hw(hdev); if (ret) { dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); @@ -5486,12 +5618,6 @@ static int 
hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - ret = hclge_buffer_alloc(hdev); - if (ret) { - dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret); - return ret; - } - ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); if (ret) { dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); @@ -5504,9 +5630,9 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - ret = hclge_tm_schd_init(hdev); + ret = hclge_tm_init_hw(hdev); if (ret) { - dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); + dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); return ret; } @@ -5997,6 +6123,42 @@ static int hclge_update_led_status(struct hclge_dev *hdev) HCLGE_LED_NO_CHANGE); } +static void hclge_get_link_mode(struct hnae3_handle *handle, + unsigned long *supported, + unsigned long *advertising) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + unsigned int idx = 0; + + for (; idx < size; idx++) { + supported[idx] = hdev->hw.mac.supported[idx]; + advertising[idx] = hdev->hw.mac.advertising[idx]; + } +} + +static void hclge_get_port_type(struct hnae3_handle *handle, + u8 *port_type) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u8 media_type = hdev->hw.mac.media_type; + + switch (media_type) { + case HNAE3_MEDIA_TYPE_FIBER: + *port_type = PORT_FIBRE; + break; + case HNAE3_MEDIA_TYPE_COPPER: + *port_type = PORT_TP; + break; + case HNAE3_MEDIA_TYPE_UNKNOWN: + default: + *port_type = PORT_OTHER; + break; + } +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@ -6005,6 +6167,7 @@ static const struct hnae3_ae_ops hclge_ops = { .map_ring_to_vector = hclge_map_ring_to_vector, .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, .get_vector = hclge_get_vector, + .put_vector = hclge_put_vector, .set_promisc_mode = hclge_set_promisc_mode, .set_loopback = hclge_set_loopback, .start = hclge_ae_start, @@ -6051,6 +6214,8 @@ static const struct hnae3_ae_ops hclge_ops = { .get_regs_len = hclge_get_regs_len, .get_regs = hclge_get_regs, .set_led_id = hclge_set_led_id, + .get_link_mode = hclge_get_link_mode, + .get_port_type = hclge_get_port_type, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index d99a76a9557c..0f4157e71282 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -106,6 +106,12 @@ #define HCLGE_MAC_MIN_FRAME 64 #define HCLGE_MAC_MAX_FRAME 9728 +#define HCLGE_SUPPORT_1G_BIT BIT(0) +#define HCLGE_SUPPORT_10G_BIT BIT(1) +#define HCLGE_SUPPORT_25G_BIT BIT(2) +#define HCLGE_SUPPORT_50G_BIT BIT(3) +#define HCLGE_SUPPORT_100G_BIT BIT(4) + enum HCLGE_DEV_STATE { HCLGE_STATE_REINITING, HCLGE_STATE_DOWN, @@ -170,6 +176,8 @@ struct hclge_mac { struct phy_device *phydev; struct mii_bus *mdio_bus; phy_interface_t phy_if; + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); }; struct hclge_hw { @@ -236,6 +244,7 @@ struct hclge_cfg { u8 mac_addr[ETH_ALEN]; u8 default_speed; u32 numa_node_map; + u8 speed_ability; }; struct hclge_tm_info { @@ -573,12 +582,27 @@ struct hclge_rx_vtag_cfg { bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */ }; +struct 
hclge_rss_tuple_cfg { + u8 ipv4_tcp_en; + u8 ipv4_udp_en; + u8 ipv4_sctp_en; + u8 ipv4_fragment_en; + u8 ipv6_tcp_en; + u8 ipv6_udp_en; + u8 ipv6_sctp_en; + u8 ipv6_fragment_en; +}; + struct hclge_vport { u16 alloc_tqps; /* Allocated Tx/Rx queues */ u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */ /* User configured lookup table entries */ u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; + int rss_algo; /* User configured hash algorithm */ + /* User configured rss tuple sets */ + struct hclge_rss_tuple_cfg rss_tuple_sets; + u16 alloc_rss_size; u16 qs_offset; @@ -627,8 +651,11 @@ int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid, int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev); +void hclge_rss_indir_init_cfg(struct hclge_dev *hdev); void hclge_mbx_handler(struct hclge_dev *hdev); void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); +void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id); int hclge_cfg_flowctrl(struct hclge_dev *hdev); +int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index f38fc5ce9f51..a6f7ffa9c259 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -79,6 +79,18 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, return status; } +static int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) +{ + u8 msg_data[2]; + u8 dest_vfid; + + dest_vfid = (u8)vport->vport_id; + + /* send this requested info to VF */ + return hclge_send_mbx_msg(vport, msg_data, sizeof(u8), + HCLGE_MBX_ASSERTING_RESET, dest_vfid); +} + static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head) { struct hnae3_ring_chain_node *chain_tmp, *chain; @@ -105,14 +117,17 @@ static int hclge_get_ring_chain_from_mbx( struct hnae3_ring_chain_node *ring_chain, struct hclge_vport *vport) { -#define HCLGE_RING_NODE_VARIABLE_NUM 3 -#define HCLGE_RING_MAP_MBX_BASIC_MSG_NUM 3 struct hnae3_ring_chain_node *cur_chain, *new_chain; int ring_num; int i; ring_num = req->msg[2]; + if (ring_num > ((HCLGE_MBX_VF_MSG_DATA_NUM - + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / + HCLGE_MBX_RING_NODE_VARIABLE_NUM)) + return -ENOMEM; + hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); ring_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]); @@ -128,18 +143,18 @@ static int hclge_get_ring_chain_from_mbx( goto err; hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, - req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i + - HCLGE_RING_MAP_MBX_BASIC_MSG_NUM]); + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]); new_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp - [req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i + - HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 1]]); + [req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]); hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S, - req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i + - HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 2]); + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]); cur_chain->next = new_chain; cur_chain = new_chain; @@ -196,6 +211,8 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, hclge_rm_uc_addr_common(vport, old_addr); status = 
hclge_add_uc_addr_common(vport, mac_addr); + if (status) + hclge_add_uc_addr_common(vport, old_addr); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) { status = hclge_add_uc_addr_common(vport, mac_addr); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { @@ -291,7 +308,7 @@ static int hclge_get_vf_queue_info(struct hclge_vport *vport, /* get the queue related info */ memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16)); - memcpy(&resp_data[2], &hdev->rss_size_max, sizeof(u16)); + memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16)); memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16)); memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16)); @@ -304,27 +321,61 @@ static int hclge_get_link_info(struct hclge_vport *vport, { struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[2]; + u8 msg_data[8]; u8 dest_vfid; + u16 duplex; /* mac.link can only be 0 or 1 */ link_status = (u16)hdev->hw.mac.link; + duplex = hdev->hw.mac.duplex; memcpy(&msg_data[0], &link_status, sizeof(u16)); + memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); + memcpy(&msg_data[6], &duplex, sizeof(u16)); dest_vfid = mbx_req->mbx_src_vfid; /* send this requested info to VF */ - return hclge_send_mbx_msg(vport, msg_data, sizeof(u8), + return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid); } -static void hclge_reset_vf_queue(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req) +static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) { u16 queue_id; memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id)); - hclge_reset_tqp(&vport->nic, queue_id); + hclge_reset_vf_queue(vport, queue_id); + + /* send response msg to VF after queue reset completes */ + hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0); +} + +static void hclge_reset_vf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!", + mbx_req->mbx_src_vfid); + + /* Acknowledge VF that PF is now about to assert the reset for the VF. + * On receiving this message, the VF will get into pending state and + * will start polling for the hardware reset completion status.
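hclge_get_link_info() above widens the mailbox payload from 2 to 8 bytes so the PF can push speed and duplex along with the link state: status in bytes 0-1, speed in bytes 2-5, duplex in bytes 6-7. A self-contained sketch of that layout; the struct and helper names are hypothetical, only the offsets come from the hunk, and endianness handling is elided.

#include <stdint.h>
#include <string.h>

struct link_info {
	uint16_t status;	/* 0 or 1, as mac.link above */
	uint32_t speed;
	uint16_t duplex;
};

/* PF side: flatten the fields into the 8-byte mailbox payload */
static void pack_link_msg(uint8_t msg[8], const struct link_info *li)
{
	memcpy(&msg[0], &li->status, sizeof(li->status));
	memcpy(&msg[2], &li->speed, sizeof(li->speed));
	memcpy(&msg[6], &li->duplex, sizeof(li->duplex));
}

/* VF side: recover the fields from the same offsets */
static void unpack_link_msg(const uint8_t msg[8], struct link_info *li)
{
	memcpy(&li->status, &msg[0], sizeof(li->status));
	memcpy(&li->speed, &msg[2], sizeof(li->speed));
	memcpy(&li->duplex, &msg[6], sizeof(li->duplex));
}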
+ */ + ret = hclge_inform_reset_assert_to_vf(vport); + if (ret) { + dev_err(&hdev->pdev->dev, + "PF fail(%d) to inform VF(%d) of reset, reset failed!\n", + ret, vport->vport_id); + return; + } + + dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n", + mbx_req->mbx_src_vfid); + /* reset this virtual function */ + hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid); } void hclge_mbx_handler(struct hclge_dev *hdev) @@ -333,11 +384,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev) struct hclge_mbx_vf_to_pf_cmd *req; struct hclge_vport *vport; struct hclge_desc *desc; - int ret; + int ret, flag; + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); /* handle all the mailbox requests in the queue */ - while (hnae_get_bit(crq->desc[crq->next_to_use].flag, - HCLGE_CMDQ_RX_OUTVLD_B)) { + while (hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B)) { desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; @@ -360,7 +411,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ret); break; case HCLGE_MBX_SET_UNICAST: - ret = hclge_set_vf_uc_mac_addr(vport, req, false); + ret = hclge_set_vf_uc_mac_addr(vport, req, true); if (ret) dev_err(&hdev->pdev->dev, "PF fail(%d) to set VF UC MAC Addr\n", @@ -402,7 +453,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ret); break; case HCLGE_MBX_QUEUE_RESET: - hclge_reset_vf_queue(vport, req); + hclge_mbx_reset_vf_queue(vport, req); + break; + case HCLGE_MBX_RESET: + hclge_reset_vf(vport, req); break; default: dev_err(&hdev->pdev->dev, @@ -410,7 +464,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev) req->msg[0]); break; } + crq->desc[crq->next_to_use].flag = 0; hclge_mbx_ring_ptr_move_crq(crq); + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); } /* Write back CMDQ_RQ header pointer, M7 needs this pointer */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index c1dea3a47bdd..682c2d6618e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -60,6 +60,9 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, struct hclge_desc desc; int ret; + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + return 0; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false); mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; @@ -95,6 +98,9 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) struct hclge_desc desc; int ret; + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + return 0; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true); mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 36bd79a77940..885f25cd7be4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -23,6 +23,9 @@ enum hclge_shaper_level { HCLGE_SHAPER_LVL_PF = 1, }; +#define HCLGE_TM_PFC_PKT_GET_CMD_NUM 3 +#define HCLGE_TM_PFC_NUM_GET_PER_CMD 3 + #define HCLGE_SHAPER_BS_U_DEF 5 #define HCLGE_SHAPER_BS_S_DEF 20 @@ -112,6 +115,56 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, return 0; } +static int hclge_pfc_stats_get(struct hclge_dev *hdev, + enum hclge_opcode_type opcode, u64 *stats) +{ + struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM]; + int ret, i, j; + + if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT || + opcode ==
HCLGE_OPC_QUERY_PFC_TX_PKT_CNT)) + return -EINVAL; + + for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { + hclge_cmd_setup_basic_desc(&desc[i], opcode, true); + if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1)) + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + } + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get pfc pause stats fail, ret = %d.\n", ret); + return ret; + } + + for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { + struct hclge_pfc_stats_cmd *pfc_stats = + (struct hclge_pfc_stats_cmd *)desc[i].data; + + for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) { + u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j; + + if (index < HCLGE_MAX_TC_NUM) + stats[index] = + le64_to_cpu(pfc_stats->pkt_num[j]); + } + } + return 0; +} + +int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats) +{ + return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats); +} + +int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats) +{ + return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats); +} + int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) { struct hclge_desc desc; @@ -138,8 +191,8 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, - u8 pause_trans_gap, u16 pause_trans_time) +static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, + u8 pause_trans_gap, u16 pause_trans_time) { struct hclge_cfg_pause_param_cmd *pause_param; struct hclge_desc desc; @@ -155,7 +208,7 @@ static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, return hclge_cmd_send(&hdev->hw, &desc, 1); } -int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) +int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) { struct hclge_cfg_pause_param_cmd *pause_param; struct hclge_desc desc; @@ -174,7 +227,7 @@ int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) trans_gap = pause_param->pause_trans_gap; trans_time = le16_to_cpu(pause_param->pause_trans_time); - return hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap, + return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time); } @@ -1096,11 +1149,11 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) return hclge_tm_schd_mode_hw(hdev); } -static int hclge_mac_pause_param_setup_hw(struct hclge_dev *hdev) +static int hclge_pause_param_setup_hw(struct hclge_dev *hdev) { struct hclge_mac *mac = &hdev->hw.mac; - return hclge_mac_pause_param_cfg(hdev, mac->mac_addr, + return hclge_pause_param_cfg(hdev, mac->mac_addr, HCLGE_DEFAULT_PAUSE_TRANS_GAP, HCLGE_DEFAULT_PAUSE_TRANS_TIME); } @@ -1151,13 +1204,12 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) int ret; u8 i; - if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) { - ret = hclge_mac_pause_setup_hw(hdev); - if (ret) - return ret; + ret = hclge_pause_param_setup_hw(hdev); + if (ret) + return ret; - return hclge_mac_pause_param_setup_hw(hdev); - } + if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) + return hclge_mac_pause_setup_hw(hdev); /* Only DCB-supported dev supports qset back pressure and pfc cmd */ if (!hnae3_dev_dcb_supported(hdev)) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 5401e7559437..2dbe177581e9 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -109,6 +109,10 @@ struct hclge_cfg_pause_param_cmd { __le16 pause_trans_time; }; +struct hclge_pfc_stats_cmd { + __le64 pkt_num[3]; +}; + struct hclge_port_shapping_cmd { __le32 port_shapping_para; }; @@ -129,5 +133,7 @@ int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); int hclge_tm_map_cfg(struct hclge_dev *hdev); int hclge_tm_init_hw(struct hclge_dev *hdev); int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); -int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); +int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); +int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); +int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 85985e731311..1bbfe131b596 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -315,6 +315,12 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) goto err_csq; } + /* initialize the pointers of async rx queue of mailbox */ + hdev->arq.hdev = hdev; + hdev->arq.head = 0; + hdev->arq.tail = 0; + hdev->arq.count = 0; + /* get firmware version */ ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 2caca9317f8c..621c6cbacf76 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -7,7 +7,7 @@ #include <linux/types.h> #include "hnae3.h" -#define HCLGEVF_CMDQ_TX_TIMEOUT 200 +#define HCLGEVF_CMDQ_TX_TIMEOUT 30000 #define HCLGEVF_CMDQ_RX_INVLD_B 0 #define HCLGEVF_CMDQ_RX_OUTVLD_B 1 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 0d89965f7928..2b8426412cc9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2,6 +2,7 @@ // Copyright (c) 2016-2017 Hisilicon Limited. 
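The PFC statistics path added above (hclge_pfc_stats_get() plus struct hclge_pfc_stats_cmd) collects up to HCLGE_MAX_TC_NUM per-TC counters from three chained descriptors carrying three counters each. Note the flattening index in the hunk multiplies by HCLGE_TM_PFC_PKT_GET_CMD_NUM, which only lands on the intended slot because both constants happen to be 3; the per-command count is what the arithmetic conceptually wants. A standalone sketch of that gather, with illustrative names:

#include <stdint.h>

#define CMD_NUM 3	/* chained descriptors per query */
#define PER_CMD 3	/* counters carried by one descriptor */
#define MAX_TC  8	/* only the first MAX_TC slots are meaningful */

static void gather_pfc_stats(const uint64_t desc[CMD_NUM][PER_CMD],
			     uint64_t stats[MAX_TC])
{
	for (int i = 0; i < CMD_NUM; i++) {
		for (int j = 0; j < PER_CMD; j++) {
			unsigned int index = i * PER_CMD + j;

			/* 3 x 3 = 9 slots delivered; drop the spare one */
			if (index < MAX_TC)
				stats[index] = desc[i][j];
		}
	}
}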
#include <linux/etherdevice.h> +#include <net/rtnetlink.h> #include "hclgevf_cmd.h" #include "hclgevf_main.h" #include "hclge_mbx.h" @@ -9,6 +10,8 @@ #define HCLGEVF_NAME "hclgevf" +static int hclgevf_init_hdev(struct hclgevf_dev *hdev); +static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev); static struct hnae3_ae_algo ae_algovf; static const struct pci_device_id ae_algovf_pci_tbl[] = { @@ -18,6 +21,8 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = { {0, } }; +MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); + static inline struct hclgevf_dev *hclgevf_ae_get_hdev( struct hnae3_handle *handle) { @@ -206,6 +211,12 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) struct hclgevf_tqp *tqp; int i; + /* if this is an ongoing reset then we need to re-allocate the TQPs + * since we cannot assume we would get the same number of TQPs back from PF + */ + if (hclgevf_dev_ongoing_reset(hdev)) + devm_kfree(&hdev->pdev->dev, hdev->htqp); + hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, sizeof(struct hclgevf_tqp), GFP_KERNEL); if (!hdev->htqp) @@ -249,6 +260,12 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) new_tqps = kinfo->rss_size * kinfo->num_tc; kinfo->num_tqps = min(new_tqps, hdev->num_tqps); + /* if this is an ongoing reset then we need to re-allocate the hnae queues + * as well since the number of TQPs from PF might have changed. + */ + if (hclgevf_dev_ongoing_reset(hdev)) + devm_kfree(&hdev->pdev->dev, kinfo->tqp); + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, sizeof(struct hnae3_queue *), GFP_KERNEL); if (!kinfo->tqp) @@ -533,13 +550,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, int vector, struct hnae3_ring_chain_node *ring_chain) { -#define HCLGEVF_RING_NODE_VARIABLE_NUM 3 -#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM 3 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hnae3_ring_chain_node *node; struct hclge_mbx_vf_to_pf_cmd *req; struct hclgevf_desc desc; - int i, vector_id; + int i = 0, vector_id; int status; u8 type; @@ -551,28 +566,33 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, return vector_id; } - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); - type = en ? - HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR; - req->msg[0] = type; - req->msg[1] = vector_id; /* vector_id should be id in VF */ - - i = 0; for (node = ring_chain; node; node = node->next) { - i++; - /* msg[2] is cause num */ - req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] = + int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + + HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; + + if (i == 0) { + hclgevf_cmd_setup_basic_desc(&desc, + HCLGEVF_OPC_MBX_VF_TO_PF, + false); + type = en ?
+ HCLGE_MBX_MAP_RING_TO_VECTOR : + HCLGE_MBX_UNMAP_RING_TO_VECTOR; + req->msg[0] = type; + req->msg[1] = vector_id; + } + + req->msg[idx_offset] = hnae_get_bit(node->flag, HNAE3_RING_TYPE_B); - req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] = - node->tqp_index; - req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 2] = - hnae_get_field(node->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S); - - if (i == (HCLGE_MBX_VF_MSG_DATA_NUM - - HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) / - HCLGEVF_RING_NODE_VARIABLE_NUM) { + req->msg[idx_offset + 1] = node->tqp_index; + req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S); + + i++; + if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / + HCLGE_MBX_RING_NODE_VARIABLE_NUM) || + !node->next) { req->msg[2] = i; status = hclgevf_cmd_send(&hdev->hw, &desc, 1); @@ -591,17 +611,6 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, } } - if (i > 0) { - req->msg[2] = i; - - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) { - dev_err(&hdev->pdev->dev, - "Map TQP fail, status is %d.\n", status); - return status; - } - } - return 0; } @@ -627,13 +636,18 @@ static int hclgevf_unmap_ring_from_vector( } ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain); - if (ret) { + if (ret) dev_err(&handle->pdev->dev, "Unmap ring from vector fail. vector=%d, ret =%d\n", vector_id, ret); - return ret; - } + + return ret; +} + +static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); hclgevf_free_vector(hdev, vector); @@ -729,21 +743,25 @@ static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) ether_addr_copy(p, hdev->hw.mac.mac_addr); } -static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p) +static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, + bool is_first) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; u8 *new_mac_addr = (u8 *)p; u8 msg_data[ETH_ALEN * 2]; + u16 subcode; int status; ether_addr_copy(msg_data, new_mac_addr); ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); + subcode = is_first ? 
HCLGE_MBX_MAC_VLAN_UC_ADD : + HCLGE_MBX_MAC_VLAN_UC_MODIFY; + status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, - HCLGE_MBX_MAC_VLAN_UC_MODIFY, - msg_data, ETH_ALEN * 2, - false, NULL, 0); + subcode, msg_data, ETH_ALEN * 2, + true, NULL, 0); if (!status) ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); @@ -816,11 +834,149 @@ static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); u8 msg_data[2]; + int ret; memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); - hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false, - NULL, 0); + /* disable vf queue before sending queue reset msg to PF */ + ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); + if (ret) + return; + + hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, + 2, true, NULL, 0); +} + +static int hclgevf_notify_client(struct hclgevf_dev *hdev, + enum hnae3_reset_notify_type type) +{ + struct hnae3_client *client = hdev->nic_client; + struct hnae3_handle *handle = &hdev->nic; + + if (!client->ops->reset_notify) + return -EOPNOTSUPP; + + return client->ops->reset_notify(handle, type); +} + +static int hclgevf_reset_wait(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_RESET_WAIT_MS 500 +#define HCLGEVF_RESET_WAIT_CNT 20 + u32 val, cnt = 0; + + /* wait to check the hardware reset completion status */ + val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); + while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) && + (cnt < HCLGEVF_RESET_WAIT_CNT)) { + msleep(HCLGEVF_RESET_WAIT_MS); + val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); + cnt++; + } + + /* hardware completion status should be available by this time */ + if (cnt >= HCLGEVF_RESET_WAIT_CNT) { + dev_warn(&hdev->pdev->dev, + "couldn't get reset done status from h/w, timeout!\n"); + return -EBUSY; + } + + /* we will wait a bit more to let the stack reset complete. This + * might happen in case reset assertion was made by PF. Yes, this also + * means we might end up waiting a bit more even for VF reset.
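hclgevf_reset_wait() above is a classic bounded poll: re-read a "function reset in progress" bit at a fixed interval and give up with -EBUSY once the budget (20 x 500 ms) is spent. A compilable userspace sketch of the same loop; read_rst_bit() and sleep_ms() are invented stand-ins for hclgevf_read_dev() and msleep().

#include <errno.h>
#include <stdbool.h>

#define WAIT_MS  500
#define WAIT_CNT 20			/* 20 * 500 ms = 10 s budget */

static unsigned int fake_busy_polls = 3;	/* stub hardware: busy for 3 reads */

static bool read_rst_bit(void)
{
	return fake_busy_polls ? fake_busy_polls-- : 0;
}

static void sleep_ms(unsigned int ms)
{
	(void)ms;			/* a real caller would msleep() here */
}

static int reset_wait(void)
{
	unsigned int cnt = 0;

	while (read_rst_bit() && cnt < WAIT_CNT) {
		sleep_ms(WAIT_MS);
		cnt++;
	}

	if (cnt >= WAIT_CNT)
		return -EBUSY;		/* hardware never reported completion */

	return 0;
}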
+ */ + msleep(5000); + + return 0; +} + +static int hclgevf_reset_stack(struct hclgevf_dev *hdev) +{ + int ret; + + /* uninitialize the nic client */ + hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + + /* re-initialize the hclge device */ + ret = hclgevf_init_hdev(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "hclge device re-init failed, VF is disabled!\n"); + return ret; + } + + /* bring up the nic client again */ + hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); + + return 0; +} + +static int hclgevf_reset(struct hclgevf_dev *hdev) +{ + int ret; + + rtnl_lock(); + + /* bring down the nic to stop any ongoing TX/RX */ + hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + + /* check if VF could successfully fetch the hardware reset completion + * status from the hardware + */ + ret = hclgevf_reset_wait(hdev); + if (ret) { + /* can't do much in this situation, will disable VF */ + dev_err(&hdev->pdev->dev, + "VF failed(=%d) to fetch H/W reset completion status\n", + ret); + + dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n"); + hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + + rtnl_unlock(); + return ret; + } + + /* now, re-initialize the nic client and ae device */ + ret = hclgevf_reset_stack(hdev); + if (ret) + dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); + + /* bring up the nic to enable TX/RX again */ + hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); + + rtnl_unlock(); + + return ret; +} + +static int hclgevf_do_reset(struct hclgevf_dev *hdev) +{ + int status; + u8 respmsg; + + status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, + 0, false, &respmsg, sizeof(u8)); + if (status) + dev_err(&hdev->pdev->dev, + "VF reset request to PF failed(=%d)\n", status); + + return status; +} + +static void hclgevf_reset_event(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); + + handle->reset_level = HNAE3_VF_RESET; + + /* reset of this VF requested */ + set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); + hclgevf_reset_task_schedule(hdev); + + handle->last_reset_time = jiffies; } static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) @@ -845,10 +1001,22 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) hdev->num_msi_used += 1; } -static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) +void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) +{ + if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && + !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { + set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); + schedule_work(&hdev->rst_service_task); + } +} + +void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) { - if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) + if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && + !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) { + set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); schedule_work(&hdev->mbx_service_task); + } } static void hclgevf_task_schedule(struct hclgevf_dev *hdev) @@ -858,6 +1026,16 @@ static void hclgevf_task_schedule(struct hclgevf_dev *hdev) schedule_work(&hdev->service_task); } +static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev) +{ + /* if we have any pending mailbox event then schedule the mbx task */ + if (hdev->mbx_event_pending) + hclgevf_mbx_task_schedule(hdev); + + if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) + hclgevf_reset_task_schedule(hdev); +} +
static void hclgevf_service_timer(struct timer_list *t) { struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); @@ -867,6 +1045,75 @@ static void hclgevf_service_timer(struct timer_list *t) hclgevf_task_schedule(hdev); } +static void hclgevf_reset_service_task(struct work_struct *work) +{ + struct hclgevf_dev *hdev = + container_of(work, struct hclgevf_dev, rst_service_task); + int ret; + + if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + return; + + clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); + + if (test_and_clear_bit(HCLGEVF_RESET_PENDING, + &hdev->reset_state)) { + /* PF has intimated that it is about to reset the hardware. + * We now have to poll & check if hardware has actually completed + * the reset sequence. On hardware reset completion, VF needs to + * reset the client and ae device. + */ + hdev->reset_attempts = 0; + + ret = hclgevf_reset(hdev); + if (ret) + dev_err(&hdev->pdev->dev, "VF stack reset failed.\n"); + } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, + &hdev->reset_state)) { + /* we could be here when either of the below happens: + * 1. reset was initiated due to a watchdog timeout caused by + * a. IMP was earlier reset and our TX got choked down, + * which resulted in the watchdog reacting and inducing VF + * reset. This also means our cmdq would be unreliable. + * b. a problem in TX due to some other lower layer (e.g. the + * link layer not functioning properly etc.) + * 2. VF reset might have been initiated due to some config + * change. + * + * NOTE: There's no clear way to detect the above cases other + * than to react to the response of PF for this reset request. + * PF will ack the 1b and 2 cases but we will not get any + * intimation about 1a from PF as cmdq would be in an unreliable + * state i.e. mailbox communication between PF and VF would be + * broken. + */ + + /* if we are never getting into pending state it means either: + * 1. PF is not receiving our request which could be due to IMP + * reset + * 2. PF is screwed + * We cannot do much for 2. but to check for 1. we can try + * resetting our PCIe + stack and see if it alleviates the problem.
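The requested-reset branch above encodes an escalation policy: retry the mailbox-based VF reset a few times and, past a small threshold, give up on the mailbox and stage a full stack + PCIe reset by re-marking the reset as pending. A compact sketch of that policy; the types and names here are illustrative, not the driver's.

#include <stdbool.h>

enum reset_level { VF_RESET, VF_FULL_RESET };

struct vf_ctx {
	unsigned int reset_attempts;
	enum reset_level level;
	bool reset_pending;
};

/* stub: a real implementation would send a reset request to the PF */
static bool request_reset_via_mailbox(struct vf_ctx *vf)
{
	(void)vf;
	return false;	/* pretend the PF is unreachable */
}

static void handle_requested_reset(struct vf_ctx *vf)
{
	if (vf->reset_attempts > 3) {
		/* escalate: prepare a full reset and defer it to the
		 * next run of the reset task
		 */
		vf->level = VF_FULL_RESET;
		vf->reset_pending = true;
	} else {
		vf->reset_attempts++;
		/* a send failure is only logged; the service task
		 * will schedule another attempt later
		 */
		request_reset_via_mailbox(vf);
	}
}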
+ */ + if (hdev->reset_attempts > 3) { + /* prepare for full reset of stack + pcie interface */ + hdev->nic.reset_level = HNAE3_VF_FULL_RESET; + + /* "defer" schedule the reset task again */ + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + } else { + hdev->reset_attempts++; + + /* request PF for resetting this VF via mailbox */ + ret = hclgevf_do_reset(hdev); + if (ret) + dev_warn(&hdev->pdev->dev, + "VF rst fail, stack will call\n"); + } + } + + clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); +} + static void hclgevf_mailbox_service_task(struct work_struct *work) { struct hclgevf_dev *hdev; @@ -878,7 +1125,7 @@ static void hclgevf_mailbox_service_task(struct work_struct *work) clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); - hclgevf_mbx_handler(hdev); + hclgevf_mbx_async_handler(hdev); clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); } @@ -894,6 +1141,8 @@ static void hclgevf_service_task(struct work_struct *work) */ hclgevf_request_link_info(hdev); + hclgevf_deferred_task_schedule(hdev); + clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); } @@ -936,8 +1185,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) if (!hclgevf_check_event_cause(hdev, &clearval)) goto skip_sched; - /* schedule the VF mailbox service task, if not already scheduled */ - hclgevf_mbx_task_schedule(hdev); + hclgevf_mbx_handler(hdev); hclgevf_clear_event_cause(hdev, clearval); @@ -959,6 +1207,22 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) return hclgevf_get_tc_info(hdev); } +static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) +{ + struct pci_dev *pdev = ae_dev->pdev; + struct hclgevf_dev *hdev = ae_dev->priv; + + hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return -ENOMEM; + + hdev->pdev = pdev; + hdev->ae_dev = ae_dev; + ae_dev->priv = hdev; + + return 0; +} + static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) { struct hnae3_handle *roce = &hdev->roce; @@ -1057,10 +1321,17 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) /* reset tqp stats */ hclgevf_reset_tqp_stats(handle); + del_timer_sync(&hdev->service_timer); + cancel_work_sync(&hdev->service_task); + hclgevf_update_link_status(hdev, 0); } static void hclgevf_state_init(struct hclgevf_dev *hdev) { + /* if this is an ongoing reset then skip this initialization */ + if (hclgevf_dev_ongoing_reset(hdev)) + return; + /* setup tasks for the MBX */ INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); @@ -1072,6 +1343,8 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev) INIT_WORK(&hdev->service_task, hclgevf_service_task); clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); + INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); + mutex_init(&hdev->mbx_resp.mbx_mutex); /* bring the device down */ @@ -1088,6 +1361,8 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev) cancel_work_sync(&hdev->service_task); if (hdev->mbx_service_task.func) cancel_work_sync(&hdev->mbx_service_task); + if (hdev->rst_service_task.func) + cancel_work_sync(&hdev->rst_service_task); mutex_destroy(&hdev->mbx_resp.mbx_mutex); } @@ -1098,6 +1373,10 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) int vectors; int i; + /* if this is an ongoing reset then skip this initialization */ + if (hclgevf_dev_ongoing_reset(hdev)) + return 0; + hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM; vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, @@ -1148,6 +1427,10 @@ static int
hclgevf_misc_irq_init(struct hclgevf_dev *hdev) { int ret = 0; + /* if this is an ongoing reset then skip this initialization */ + if (hclgevf_dev_ongoing_reset(hdev)) + return 0; + hclgevf_get_misc_vector(hdev); ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, @@ -1258,6 +1541,14 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) struct hclgevf_hw *hw; int ret; + /* check if we need to skip initialization of pci. This will happen if + * the device is undergoing VF reset. Otherwise, we would need to + * re-initialize the pci interface again i.e. when the device is not + * going through *any* reset or is actually undergoing a full reset. + */ + if (hclgevf_dev_ongoing_reset(hdev)) + return 0; + ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "failed to enable PCI device\n"); @@ -1309,19 +1600,16 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) pci_set_drvdata(pdev, NULL); } -static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) +static int hclgevf_init_hdev(struct hclgevf_dev *hdev) { - struct pci_dev *pdev = ae_dev->pdev; - struct hclgevf_dev *hdev; + struct pci_dev *pdev = hdev->pdev; int ret; - hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); - if (!hdev) - return -ENOMEM; - - hdev->pdev = pdev; - hdev->ae_dev = ae_dev; - ae_dev->priv = hdev; + /* check if the device is undergoing a full reset (i.e. pcie as well) */ + if (hclgevf_dev_ongoing_full_reset(hdev)) { + dev_warn(&pdev->dev, "device is going full reset\n"); + hclgevf_uninit_hdev(hdev); + } ret = hclgevf_pci_init(hdev); if (ret) { @@ -1406,15 +1694,38 @@ err_irq_init: return ret; } -static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) +static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) { - struct hclgevf_dev *hdev = ae_dev->priv; - hclgevf_cmd_uninit(hdev); hclgevf_misc_irq_uninit(hdev); hclgevf_state_uninit(hdev); hclgevf_uninit_msi(hdev); hclgevf_pci_uninit(hdev); +} + +static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct pci_dev *pdev = ae_dev->pdev; + int ret; + + ret = hclgevf_alloc_hdev(ae_dev); + if (ret) { + dev_err(&pdev->dev, "hclge device allocation failed\n"); + return ret; + } + + ret = hclgevf_init_hdev(ae_dev->priv); + if (ret) + dev_err(&pdev->dev, "hclge device initialization failed\n"); + + return ret; +} + +static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + hclgevf_uninit_hdev(hdev); ae_dev->priv = NULL; } @@ -1447,6 +1758,43 @@ static void hclgevf_get_channels(struct hnae3_handle *handle, ch->combined_count = hdev->num_tqps; } +static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, + u16 *free_tqps, u16 *max_rss_size) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + *free_tqps = 0; + *max_rss_size = hdev->rss_size_max; +} + +static int hclgevf_get_status(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hdev->hw.mac.link; +} + +static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, + u8 *auto_neg, u32 *speed, + u8 *duplex) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + if (speed) + *speed = hdev->hw.mac.speed; + if (duplex) + *duplex = hdev->hw.mac.duplex; + if (auto_neg) + *auto_neg = AUTONEG_DISABLE; +} + +void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, + u8 duplex) +{ + hdev->hw.mac.speed = speed; + hdev->hw.mac.duplex = duplex; +} + static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev, @@ -1457,6 +1805,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .map_ring_to_vector = hclgevf_map_ring_to_vector, .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, .get_vector = hclgevf_get_vector, + .put_vector = hclgevf_put_vector, .reset_queue = hclgevf_reset_tqp, .set_promisc_mode = hclgevf_set_promisc_mode, .get_mac_addr = hclgevf_get_mac_addr, @@ -1476,7 +1825,11 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_tc_size = hclgevf_get_tc_size, .get_fw_version = hclgevf_get_fw_version, .set_vlan_filter = hclgevf_set_vlan_filter, + .reset_event = hclgevf_reset_event, .get_channels = hclgevf_get_channels, + .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, + .get_status = hclgevf_get_status, + .get_ksettings_an_result = hclgevf_get_ksettings_an_result, }; static struct hnae3_ae_algo ae_algovf = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index a63bee4a3674..a477a7c36bbd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -34,6 +34,9 @@ #define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1 #define HCLGEVF_TQP_RESET_TRY_TIMES 10 +/* Reset related Registers */ +#define HCLGEVF_FUN_RST_ING 0x20C00 +#define HCLGEVF_FUN_RST_ING_B 0 #define HCLGEVF_RSS_IND_TBL_SIZE 512 #define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff @@ -52,6 +55,8 @@ enum hclgevf_states { HCLGEVF_STATE_DISABLED, /* task states */ HCLGEVF_STATE_SERVICE_SCHED, + HCLGEVF_STATE_RST_SERVICE_SCHED, + HCLGEVF_STATE_RST_HANDLING, HCLGEVF_STATE_MBX_SERVICE_SCHED, HCLGEVF_STATE_MBX_HANDLING, }; @@ -61,6 +66,8 @@ enum hclgevf_states { struct hclgevf_mac { u8 mac_addr[ETH_ALEN]; int link; + u8 duplex; + u32 speed; }; struct hclgevf_hw { @@ -120,6 +127,11 @@ struct hclgevf_dev { struct hclgevf_rss_cfg rss_cfg; unsigned long state; +#define HCLGEVF_RESET_REQUESTED 0 +#define HCLGEVF_RESET_PENDING 1 + unsigned long reset_state; /* requested, pending */ + u32 reset_attempts; + u32 fw_version; u16 num_tqps; /* num task queue pairs of this PF */ @@ -140,10 +152,13 @@ struct hclgevf_dev { int *vector_irq; bool accept_mta_mc; /* whether to accept mta filter multicast */ + bool mbx_event_pending; struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ + struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ struct timer_list service_timer; struct work_struct service_task; + struct work_struct rst_service_task; struct work_struct mbx_service_task; struct hclgevf_tqp *htqp; @@ -156,9 +171,29 @@ struct hclgevf_dev { u32 flag; }; +static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev) +{ + return (hdev && + (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) && + (hdev->nic.reset_level == HNAE3_VF_RESET)); +} + +static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev) +{ + return (hdev && + (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) && + (hdev->nic.reset_level == HNAE3_VF_FULL_RESET)); +} + int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, const u8 *msg_data, u8 msg_len, bool need_resp, u8 *resp_data, u16 resp_len); void hclgevf_mbx_handler(struct hclgevf_dev *hdev); +void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev); + void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state); +void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, + u8 duplex); +void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev); +void 
hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index e39cad285fa9..a28618428338 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -54,6 +54,10 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, mbx_resp = &hdev->mbx_resp; r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16); r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff); + + if (mbx_resp->resp_status) + return mbx_resp->resp_status; + if (resp_data) memcpy(resp_data, &mbx_resp->additional_info[0], resp_len); @@ -128,7 +132,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) struct hclge_mbx_pf_to_vf_cmd *req; struct hclgevf_cmq_ring *crq; struct hclgevf_desc *desc; - u16 link_status, flag; + u16 *msg_q; + u16 flag; u8 *temp; int i; @@ -140,6 +145,12 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; + /* synchronous messages are time critical and need preferential + * treatment. Therefore, we need to acknowledge all the sync + * responses as quickly as possible so that waiting tasks do not + * time out and simultaneously queue the async messages for later + * processing in context of the mailbox task i.e. the slow path. + */ switch (req->msg[0]) { case HCLGE_MBX_PF_VF_RESP: if (resp->received_resp) @@ -159,10 +170,31 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) } break; case HCLGE_MBX_LINK_STAT_CHANGE: - link_status = le16_to_cpu(req->msg[1]); + case HCLGE_MBX_ASSERTING_RESET: + /* set this mbx event as pending. This is required as we + * might lose an interrupt event when the mbx task is busy + * handling. This shall be cleared when the mbx task just + * enters handling state.
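The two new arms above feed an async receive queue (ARQ): the interrupt path acknowledges sync responses immediately and only enqueues slow-path events, dropping them if the ring is full. A userspace sketch of such a ring with head/tail/count bookkeeping; the sizes and names are illustrative stand-ins for the HCLGE_MBX_* constants, and locking between the producer (IRQ) and consumer (task) contexts is deliberately elided, as it is in the hunk.

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define ARQ_MSG_NUM  8	/* ring depth */
#define ARQ_MSG_SIZE 8	/* u16 words per message */

struct arq {
	uint16_t msg_q[ARQ_MSG_NUM][ARQ_MSG_SIZE];
	unsigned int head;	/* consumer index */
	unsigned int tail;	/* producer index */
	unsigned int count;
};

/* producer (interrupt context): enqueue or report overflow */
static int arq_push(struct arq *q, const uint16_t *msg)
{
	if (q->count >= ARQ_MSG_NUM)
		return -ENOSPC;	/* full: caller logs and drops the message */

	memcpy(q->msg_q[q->tail], msg, sizeof(q->msg_q[0]));
	q->tail = (q->tail + 1) % ARQ_MSG_NUM;
	q->count++;
	return 0;
}

/* consumer (task context): dequeue the oldest message */
static int arq_pop(struct arq *q, uint16_t *msg)
{
	if (!q->count)
		return -ENOENT;	/* empty */

	memcpy(msg, q->msg_q[q->head], sizeof(q->msg_q[0]));
	q->head = (q->head + 1) % ARQ_MSG_NUM;
	q->count--;
	return 0;
}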
+ */ + hdev->mbx_event_pending = true; - /* update upper layer with new link link status */ - hclgevf_update_link_status(hdev, link_status); + /* we will drop the async msg if we find the ARQ full + * and continue with the next message + */ + if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) { + dev_warn(&hdev->pdev->dev, + "Async Q full, dropping msg(%d)\n", + req->msg[1]); + break; + } + + /* tail the async message in arq */ + msg_q = hdev->arq.msg_q[hdev->arq.tail]; + memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE); + hclge_mbx_tail_ptr_move_arq(hdev->arq); + hdev->arq.count++; + + hclgevf_mbx_task_schedule(hdev); break; default: @@ -171,6 +203,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) req->msg[0]); break; } + crq->desc[crq->next_to_use].flag = 0; hclge_mbx_ring_ptr_move_crq(crq); flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); } @@ -179,3 +212,57 @@ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG, crq->next_to_use); } + +void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) +{ + u16 link_status; + u16 *msg_q; + u8 duplex; + u32 speed; + u32 tail; + + /* we can safely clear it now as we are at the start of the async message + * processing + */ + hdev->mbx_event_pending = false; + + tail = hdev->arq.tail; + + /* process all the async queue messages */ + while (tail != hdev->arq.head) { + msg_q = hdev->arq.msg_q[hdev->arq.head]; + + switch (msg_q[0]) { + case HCLGE_MBX_LINK_STAT_CHANGE: + link_status = le16_to_cpu(msg_q[1]); + memcpy(&speed, &msg_q[2], sizeof(speed)); + duplex = (u8)le16_to_cpu(msg_q[4]); + + /* update upper layer with the new link status */ + hclgevf_update_link_status(hdev, link_status); + hclgevf_update_speed_duplex(hdev, speed, duplex); + + break; + case HCLGE_MBX_ASSERTING_RESET: + /* PF has asserted reset, hence the VF should go into the pending + * state and poll for the hardware reset status till it + * has been completely reset. After this, the stack should + * eventually be re-initialized.
+ */ + hdev->nic.reset_level = HNAE3_VF_RESET; + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + hclgevf_reset_task_schedule(hdev); + + break; + default: + dev_err(&hdev->pdev->dev, + "fetched unsupported(%d) message from arq\n", + msg_q[0]); + break; + } + + hclge_mbx_head_ptr_move_arq(hdev->arq); + hdev->arq.count--; + msg_q = hdev->arq.msg_q[hdev->arq.head]; + } +} diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 4878b7169e0f..ba580bfae512 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -2903,8 +2903,7 @@ static ssize_t ehea_show_port_id(struct device *dev, return sprintf(buf, "%d", port->logical_port_id); } -static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id, - NULL); +static DEVICE_ATTR(log_port_id, 0444, ehea_show_port_id, NULL); static void logical_port_release(struct device *dev) { @@ -3235,8 +3234,8 @@ static ssize_t ehea_remove_port(struct device *dev, return (ssize_t) count; } -static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port); -static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port); +static DEVICE_ATTR(probe_port, 0200, NULL, ehea_probe_port); +static DEVICE_ATTR(remove_port, 0200, NULL, ehea_remove_port); static int ehea_create_device_sysfs(struct platform_device *dev) { diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index f210398200ec..c1b51edaaf62 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -82,7 +82,7 @@ module_param(rx_flush, uint, 0644); MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use"); static bool old_large_send __read_mostly; -module_param(old_large_send, bool, S_IRUGO); +module_param(old_large_send, bool, 0444); MODULE_PARM_DESC(old_large_send, "Use old large send method on firmware that supports the new method"); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 27447260215d..b492af6affc3 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -90,7 +90,7 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION); static int ibmvnic_version = IBMVNIC_INITIAL_VERSION; static int ibmvnic_remove(struct vio_dev *); -static void release_sub_crqs(struct ibmvnic_adapter *); +static void release_sub_crqs(struct ibmvnic_adapter *, bool); static int ibmvnic_reset_crq(struct ibmvnic_adapter *); static int ibmvnic_send_crq_init(struct ibmvnic_adapter *); static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *); @@ -111,7 +111,7 @@ static int ibmvnic_poll(struct napi_struct *napi, int data); static void send_map_query(struct ibmvnic_adapter *adapter); static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); static void send_request_unmap(struct ibmvnic_adapter *, u8); -static void send_login(struct ibmvnic_adapter *adapter); +static int send_login(struct ibmvnic_adapter *adapter); static void send_cap_queries(struct ibmvnic_adapter *adapter); static int init_sub_crqs(struct ibmvnic_adapter *); static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); @@ -361,14 +361,14 @@ static void release_stats_buffers(struct ibmvnic_adapter *adapter) static int init_stats_buffers(struct ibmvnic_adapter *adapter) { adapter->tx_stats_buffers = - kcalloc(adapter->req_tx_queues, + kcalloc(IBMVNIC_MAX_QUEUES, sizeof(struct ibmvnic_tx_queue_stats), GFP_KERNEL); if (!adapter->tx_stats_buffers) return -ENOMEM; adapter->rx_stats_buffers = - 
kcalloc(adapter->req_rx_queues, + kcalloc(IBMVNIC_MAX_QUEUES, sizeof(struct ibmvnic_rx_queue_stats), GFP_KERNEL); if (!adapter->rx_stats_buffers) @@ -509,7 +509,7 @@ static int init_rx_pools(struct net_device *netdev) return -1; } - adapter->num_active_rx_pools = 0; + adapter->num_active_rx_pools = rxadd_subcrqs; for (i = 0; i < rxadd_subcrqs; i++) { rx_pool = &adapter->rx_pool[i]; @@ -554,41 +554,44 @@ static int init_rx_pools(struct net_device *netdev) rx_pool->next_free = 0; } - adapter->num_active_rx_pools = rxadd_subcrqs; + return 0; +} + +static int reset_one_tx_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_tx_pool *tx_pool) +{ + int rc, i; + + rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff); + if (rc) + return rc; + + memset(tx_pool->tx_buff, 0, + tx_pool->num_buffers * + sizeof(struct ibmvnic_tx_buff)); + + for (i = 0; i < tx_pool->num_buffers; i++) + tx_pool->free_map[i] = i; + + tx_pool->consumer_index = 0; + tx_pool->producer_index = 0; return 0; } static int reset_tx_pools(struct ibmvnic_adapter *adapter) { - struct ibmvnic_tx_pool *tx_pool; int tx_scrqs; - int i, j, rc; + int i, rc; tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); for (i = 0; i < tx_scrqs; i++) { - netdev_dbg(adapter->netdev, "Re-setting tx_pool[%d]\n", i); - - tx_pool = &adapter->tx_pool[i]; - - rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff); + rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]); if (rc) return rc; - - rc = reset_long_term_buff(adapter, &tx_pool->tso_ltb); + rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]); if (rc) return rc; - - memset(tx_pool->tx_buff, 0, - adapter->req_tx_entries_per_subcrq * - sizeof(struct ibmvnic_tx_buff)); - - for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++) - tx_pool->free_map[j] = j; - - tx_pool->consumer_index = 0; - tx_pool->producer_index = 0; - tx_pool->tso_index = 0; } return 0; @@ -605,35 +608,70 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter) adapter->vpd = NULL; } +static void release_one_tx_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_tx_pool *tx_pool) +{ + kfree(tx_pool->tx_buff); + kfree(tx_pool->free_map); + free_long_term_buff(adapter, &tx_pool->long_term_buff); +} + static void release_tx_pools(struct ibmvnic_adapter *adapter) { - struct ibmvnic_tx_pool *tx_pool; int i; if (!adapter->tx_pool) return; for (i = 0; i < adapter->num_active_tx_pools; i++) { - netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i); - tx_pool = &adapter->tx_pool[i]; - kfree(tx_pool->tx_buff); - free_long_term_buff(adapter, &tx_pool->long_term_buff); - free_long_term_buff(adapter, &tx_pool->tso_ltb); - kfree(tx_pool->free_map); + release_one_tx_pool(adapter, &adapter->tx_pool[i]); + release_one_tx_pool(adapter, &adapter->tso_pool[i]); } kfree(adapter->tx_pool); adapter->tx_pool = NULL; + kfree(adapter->tso_pool); + adapter->tso_pool = NULL; adapter->num_active_tx_pools = 0; } +static int init_one_tx_pool(struct net_device *netdev, + struct ibmvnic_tx_pool *tx_pool, + int num_entries, int buf_size) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int i; + + tx_pool->tx_buff = kcalloc(num_entries, + sizeof(struct ibmvnic_tx_buff), + GFP_KERNEL); + if (!tx_pool->tx_buff) + return -1; + + if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, + num_entries * buf_size)) + return -1; + + tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL); + if (!tx_pool->free_map) + return -1; + + for (i = 0; i < num_entries; i++) + tx_pool->free_map[i] = i; + + 
tx_pool->consumer_index = 0; + tx_pool->producer_index = 0; + tx_pool->num_buffers = num_entries; + tx_pool->buf_size = buf_size; + + return 0; +} + static int init_tx_pools(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - struct device *dev = &adapter->vdev->dev; - struct ibmvnic_tx_pool *tx_pool; int tx_subcrqs; - int i, j; + int i, rc; tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); adapter->tx_pool = kcalloc(tx_subcrqs, @@ -641,57 +679,31 @@ static int init_tx_pools(struct net_device *netdev) if (!adapter->tx_pool) return -1; - adapter->num_active_tx_pools = 0; - - for (i = 0; i < tx_subcrqs; i++) { - tx_pool = &adapter->tx_pool[i]; - - netdev_dbg(adapter->netdev, - "Initializing tx_pool[%d], %lld buffs\n", - i, adapter->req_tx_entries_per_subcrq); - - tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq, - sizeof(struct ibmvnic_tx_buff), - GFP_KERNEL); - if (!tx_pool->tx_buff) { - dev_err(dev, "tx pool buffer allocation failed\n"); - release_tx_pools(adapter); - return -1; - } + adapter->tso_pool = kcalloc(tx_subcrqs, + sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); + if (!adapter->tso_pool) + return -1; - if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, - adapter->req_tx_entries_per_subcrq * - adapter->req_mtu)) { - release_tx_pools(adapter); - return -1; - } + adapter->num_active_tx_pools = tx_subcrqs; - /* alloc TSO ltb */ - if (alloc_long_term_buff(adapter, &tx_pool->tso_ltb, - IBMVNIC_TSO_BUFS * - IBMVNIC_TSO_BUF_SZ)) { + for (i = 0; i < tx_subcrqs; i++) { + rc = init_one_tx_pool(netdev, &adapter->tx_pool[i], + adapter->req_tx_entries_per_subcrq, + adapter->req_mtu + VLAN_HLEN); + if (rc) { release_tx_pools(adapter); - return -1; + return rc; } - tx_pool->tso_index = 0; - - tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq, - sizeof(int), GFP_KERNEL); - if (!tx_pool->free_map) { + init_one_tx_pool(netdev, &adapter->tso_pool[i], + IBMVNIC_TSO_BUFS, + IBMVNIC_TSO_BUF_SZ); + if (rc) { release_tx_pools(adapter); - return -1; + return rc; } - - for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++) - tx_pool->free_map[j] = j; - - tx_pool->consumer_index = 0; - tx_pool->producer_index = 0; } - adapter->num_active_tx_pools = tx_subcrqs; - return 0; } @@ -740,6 +752,45 @@ static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter) adapter->napi_enabled = false; } +static int init_napi(struct ibmvnic_adapter *adapter) +{ + int i; + + adapter->napi = kcalloc(adapter->req_rx_queues, + sizeof(struct napi_struct), GFP_KERNEL); + if (!adapter->napi) + return -ENOMEM; + + for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i); + netif_napi_add(adapter->netdev, &adapter->napi[i], + ibmvnic_poll, NAPI_POLL_WEIGHT); + } + + adapter->num_active_rx_napi = adapter->req_rx_queues; + return 0; +} + +static void release_napi(struct ibmvnic_adapter *adapter) +{ + int i; + + if (!adapter->napi) + return; + + for (i = 0; i < adapter->num_active_rx_napi; i++) { + if (&adapter->napi[i]) { + netdev_dbg(adapter->netdev, + "Releasing napi[%d]\n", i); + netif_napi_del(&adapter->napi[i]); + } + } + + kfree(adapter->napi); + adapter->napi = NULL; + adapter->num_active_rx_napi = 0; +} + static int ibmvnic_login(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); @@ -750,7 +801,7 @@ static int ibmvnic_login(struct net_device *netdev) do { if (adapter->renegotiate) { adapter->renegotiate = false; - release_sub_crqs(adapter); + 
release_sub_crqs(adapter, 1); reinit_completion(&adapter->init_done); send_cap_queries(adapter); @@ -774,8 +825,11 @@ static int ibmvnic_login(struct net_device *netdev) } reinit_completion(&adapter->init_done); - send_login(adapter); - if (!wait_for_completion_timeout(&adapter->init_done, + rc = send_login(adapter); + if (rc) { + dev_err(dev, "Unable to attempt device login\n"); + return rc; + } else if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { dev_err(dev, "Login timeout\n"); return -1; @@ -791,28 +845,28 @@ static int ibmvnic_login(struct net_device *netdev) return 0; } -static void release_resources(struct ibmvnic_adapter *adapter) +static void release_login_buffer(struct ibmvnic_adapter *adapter) { - int i; + kfree(adapter->login_buf); + adapter->login_buf = NULL; +} + +static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) +{ + kfree(adapter->login_rsp_buf); + adapter->login_rsp_buf = NULL; +} +static void release_resources(struct ibmvnic_adapter *adapter) +{ release_vpd_data(adapter); release_tx_pools(adapter); release_rx_pools(adapter); - release_stats_token(adapter); - release_stats_buffers(adapter); release_error_buffers(adapter); - - if (adapter->napi) { - for (i = 0; i < adapter->req_rx_queues; i++) { - if (&adapter->napi[i]) { - netdev_dbg(adapter->netdev, - "Releasing napi[%d]\n", i); - netif_napi_del(&adapter->napi[i]); - } - } - } + release_napi(adapter); + release_login_rsp_buffer(adapter); } static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) @@ -931,20 +985,12 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) static int init_resources(struct ibmvnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; - int i, rc; + int rc; rc = set_real_num_queues(netdev); if (rc) return rc; - rc = init_stats_buffers(adapter); - if (rc) - return rc; - - rc = init_stats_token(adapter); - if (rc) - return rc; - adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL); if (!adapter->vpd) return -ENOMEM; @@ -957,16 +1003,10 @@ static int init_resources(struct ibmvnic_adapter *adapter) } adapter->map_id = 1; - adapter->napi = kcalloc(adapter->req_rx_queues, - sizeof(struct napi_struct), GFP_KERNEL); - if (!adapter->napi) - return -ENOMEM; - for (i = 0; i < adapter->req_rx_queues; i++) { - netdev_dbg(netdev, "Adding napi[%d]\n", i); - netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll, - NAPI_POLL_WEIGHT); - } + rc = init_napi(adapter); + if (rc) + return rc; send_map_query(adapter); @@ -1057,87 +1097,130 @@ static int ibmvnic_open(struct net_device *netdev) return rc; } -static void clean_tx_pools(struct ibmvnic_adapter *adapter) +static void clean_rx_pools(struct ibmvnic_adapter *adapter) { - struct ibmvnic_tx_pool *tx_pool; - u64 tx_entries; - int tx_scrqs; + struct ibmvnic_rx_pool *rx_pool; + struct ibmvnic_rx_buff *rx_buff; + u64 rx_entries; + int rx_scrqs; int i, j; - if (!adapter->tx_pool) + if (!adapter->rx_pool) return; - tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); - tx_entries = adapter->req_tx_entries_per_subcrq; + rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + rx_entries = adapter->req_rx_add_entries_per_subcrq; - /* Free any remaining skbs in the tx buffer pools */ - for (i = 0; i < tx_scrqs; i++) { - tx_pool = &adapter->tx_pool[i]; - if (!tx_pool) + /* Free any remaining skbs in the rx buffer pools */ + for (i = 0; i < rx_scrqs; i++) { + rx_pool = &adapter->rx_pool[i]; + if (!rx_pool || !rx_pool->rx_buff) continue; - netdev_dbg(adapter->netdev, 
"Cleaning tx_pool[%d]\n", i); - for (j = 0; j < tx_entries; j++) { - if (tx_pool->tx_buff[j].skb) { - dev_kfree_skb_any(tx_pool->tx_buff[j].skb); - tx_pool->tx_buff[j].skb = NULL; + netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); + for (j = 0; j < rx_entries; j++) { + rx_buff = &rx_pool->rx_buff[j]; + if (rx_buff && rx_buff->skb) { + dev_kfree_skb_any(rx_buff->skb); + rx_buff->skb = NULL; } } } } -static int __ibmvnic_close(struct net_device *netdev) +static void clean_one_tx_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_tx_pool *tx_pool) { - struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int rc = 0; + struct ibmvnic_tx_buff *tx_buff; + u64 tx_entries; int i; - adapter->state = VNIC_CLOSING; + if (!tx_pool || !tx_pool->tx_buff) + return; - /* ensure that transmissions are stopped if called by do_reset */ - if (adapter->resetting) - netif_tx_disable(netdev); - else - netif_tx_stop_all_queues(netdev); + tx_entries = tx_pool->num_buffers; - ibmvnic_napi_disable(adapter); + for (i = 0; i < tx_entries; i++) { + tx_buff = &tx_pool->tx_buff[i]; + if (tx_buff && tx_buff->skb) { + dev_kfree_skb_any(tx_buff->skb); + tx_buff->skb = NULL; + } + } +} + +static void clean_tx_pools(struct ibmvnic_adapter *adapter) +{ + int tx_scrqs; + int i; + + if (!adapter->tx_pool || !adapter->tso_pool) + return; + + tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + + /* Free any remaining skbs in the tx buffer pools */ + for (i = 0; i < tx_scrqs; i++) { + netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); + clean_one_tx_pool(adapter, &adapter->tx_pool[i]); + clean_one_tx_pool(adapter, &adapter->tso_pool[i]); + } +} + +static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; if (adapter->tx_scrq) { for (i = 0; i < adapter->req_tx_queues; i++) if (adapter->tx_scrq[i]->irq) { - netdev_dbg(adapter->netdev, + netdev_dbg(netdev, "Disabling tx_scrq[%d] irq\n", i); disable_irq(adapter->tx_scrq[i]->irq); } } - rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); - if (rc) - return rc; - if (adapter->rx_scrq) { for (i = 0; i < adapter->req_rx_queues; i++) { - int retries = 10; - - while (pending_scrq(adapter, adapter->rx_scrq[i])) { - retries--; - mdelay(100); - - if (retries == 0) - break; - } - if (adapter->rx_scrq[i]->irq) { - netdev_dbg(adapter->netdev, + netdev_dbg(netdev, "Disabling rx_scrq[%d] irq\n", i); disable_irq(adapter->rx_scrq[i]->irq); } } } +} + +static void ibmvnic_cleanup(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + /* ensure that transmissions are stopped if called by do_reset */ + if (adapter->resetting) + netif_tx_disable(netdev); + else + netif_tx_stop_all_queues(netdev); + ibmvnic_napi_disable(adapter); + ibmvnic_disable_irqs(adapter); + + clean_rx_pools(adapter); clean_tx_pools(adapter); +} + +static int __ibmvnic_close(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int rc = 0; + + adapter->state = VNIC_CLOSING; + rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); + if (rc) + return rc; + ibmvnic_cleanup(netdev); adapter->state = VNIC_CLOSED; - return rc; + return 0; } static int ibmvnic_close(struct net_device *netdev) @@ -1169,7 +1252,10 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, int len = 0; u8 *hdr; - hdr_len[0] = sizeof(struct ethhdr); + if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb)) + hdr_len[0] = sizeof(struct vlan_ethhdr); + else + hdr_len[0] = 
sizeof(struct ethhdr); if (skb->protocol == htons(ETH_P_IP)) { hdr_len[1] = ip_hdr(skb)->ihl * 4; @@ -1285,6 +1371,21 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff, txbuff->indir_arr + 1); } +static int ibmvnic_xmit_workarounds(struct sk_buff *skb, + struct net_device *netdev) +{ + /* For some backing devices, mishandling of small packets + * can result in a loss of connection or TX stall. Device + * architects recommend that no packet should be smaller + * than the minimum MTU value provided to the driver, so + * pad any packets to that length + */ + if (skb->len < netdev->min_mtu) + return skb_put_padto(skb, netdev->min_mtu); + + return 0; +} + static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); @@ -1322,7 +1423,17 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) goto out; } - tx_pool = &adapter->tx_pool[queue_num]; + if (ibmvnic_xmit_workarounds(skb, netdev)) { + tx_dropped++; + tx_send_failed++; + ret = NETDEV_TX_OK; + goto out; + } + if (skb_is_gso(skb)) + tx_pool = &adapter->tso_pool[queue_num]; + else + tx_pool = &adapter->tx_pool[queue_num]; + tx_scrq = adapter->tx_scrq[queue_num]; txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + @@ -1330,21 +1441,21 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) index = tx_pool->free_map[tx_pool->consumer_index]; - if (skb_is_gso(skb)) { - offset = tx_pool->tso_index * IBMVNIC_TSO_BUF_SZ; - dst = tx_pool->tso_ltb.buff + offset; - memset(dst, 0, IBMVNIC_TSO_BUF_SZ); - data_dma_addr = tx_pool->tso_ltb.addr + offset; - tx_pool->tso_index++; - if (tx_pool->tso_index == IBMVNIC_TSO_BUFS) - tx_pool->tso_index = 0; - } else { - offset = index * adapter->req_mtu; - dst = tx_pool->long_term_buff.buff + offset; - memset(dst, 0, adapter->req_mtu); - data_dma_addr = tx_pool->long_term_buff.addr + offset; + if (index == IBMVNIC_INVALID_MAP) { + dev_kfree_skb_any(skb); + tx_send_failed++; + tx_dropped++; + ret = NETDEV_TX_OK; + goto out; } + tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; + + offset = index * tx_pool->buf_size; + dst = tx_pool->long_term_buff.buff + offset; + memset(dst, 0, tx_pool->buf_size); + data_dma_addr = tx_pool->long_term_buff.addr + offset; + if (skb_shinfo(skb)->nr_frags) { int cur, i; @@ -1366,8 +1477,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) } tx_pool->consumer_index = - (tx_pool->consumer_index + 1) % - adapter->req_tx_entries_per_subcrq; + (tx_pool->consumer_index + 1) % tx_pool->num_buffers; tx_buff = &tx_pool->tx_buff[index]; tx_buff->skb = skb; @@ -1383,11 +1493,13 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_crq.v1.n_crq_elem = 1; tx_crq.v1.n_sge = 1; tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; - tx_crq.v1.correlator = cpu_to_be32(index); + if (skb_is_gso(skb)) - tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->tso_ltb.map_id); + tx_crq.v1.correlator = + cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK); else - tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); + tx_crq.v1.correlator = cpu_to_be32(index); + tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); tx_crq.v1.sge_len = cpu_to_be32(skb->len); tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); @@ -1422,6 +1534,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) if ((*hdrs >> 7) & 1) { build_hdr_descs_arr(tx_buff, &num_entries, *hdrs); 
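
The ibmvnic_xmit_workarounds() hunk above pads runt frames up to the device's advertised minimum MTU before they reach the hardware; the real code does this with skb_put_padto(). A minimal userspace sketch of the same step, assuming an illustrative 64-byte floor in place of netdev->min_mtu and a plain buffer in place of an skb:

#include <stdio.h>
#include <string.h>

#define MIN_MTU 64  /* illustrative floor; the driver uses netdev->min_mtu */

/* Zero-pad a frame shorter than the minimum MTU, failing if the buffer
 * has no room for the padding. */
static int pad_to_min(unsigned char *buf, size_t *len, size_t cap)
{
    if (*len >= MIN_MTU)
        return 0;
    if (cap < MIN_MTU)
        return -1;
    memset(buf + *len, 0, MIN_MTU - *len);
    *len = MIN_MTU;
    return 0;
}

int main(void)
{
    unsigned char frame[128] = { 0xde, 0xad };
    size_t len = 2;

    if (pad_to_min(frame, &len, sizeof(frame)) == 0)
        printf("frame padded to %zu bytes\n", len);
    return 0;
}
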
tx_crq.v1.n_crq_elem = num_entries; + tx_buff->num_entries = num_entries; tx_buff->indir_arr[0] = tx_crq; tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr, sizeof(tx_buff->indir_arr), @@ -1434,24 +1547,18 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_map_failed++; tx_dropped++; ret = NETDEV_TX_OK; - goto out; + goto tx_err_out; } lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], (u64)tx_buff->indir_dma, (u64)num_entries); } else { + tx_buff->num_entries = num_entries; lpar_rc = send_subcrq(adapter, handle_array[queue_num], &tx_crq); } if (lpar_rc != H_SUCCESS) { dev_err(dev, "tx failed with code %ld\n", lpar_rc); - - if (tx_pool->consumer_index == 0) - tx_pool->consumer_index = - adapter->req_tx_entries_per_subcrq - 1; - else - tx_pool->consumer_index--; - dev_kfree_skb_any(skb); tx_buff->skb = NULL; @@ -1467,12 +1574,12 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_send_failed++; tx_dropped++; ret = NETDEV_TX_OK; - goto out; + goto tx_err_out; } - if (atomic_inc_return(&tx_scrq->used) + if (atomic_add_return(num_entries, &tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) { - netdev_info(netdev, "Stopping queue %d\n", queue_num); + netdev_dbg(netdev, "Stopping queue %d\n", queue_num); netif_stop_subqueue(netdev, queue_num); } @@ -1480,7 +1587,16 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_bytes += skb->len; txq->trans_start = jiffies; ret = NETDEV_TX_OK; + goto out; +tx_err_out: + /* roll back consumer index and map array*/ + if (tx_pool->consumer_index == 0) + tx_pool->consumer_index = + tx_pool->num_buffers - 1; + else + tx_pool->consumer_index--; + tx_pool->free_map[tx_pool->consumer_index] = index; out: netdev->stats.tx_dropped += tx_dropped; netdev->stats.tx_bytes += tx_bytes; @@ -1599,16 +1715,19 @@ static int do_reset(struct ibmvnic_adapter *adapter, rc = ibmvnic_reenable_crq_queue(adapter); if (rc) return 0; + ibmvnic_cleanup(netdev); + } else if (rwi->reset_reason == VNIC_RESET_FAILOVER) { + ibmvnic_cleanup(netdev); + } else { + rc = __ibmvnic_close(netdev); + if (rc) + return rc; } - rc = __ibmvnic_close(netdev); - if (rc) - return rc; - if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || adapter->wait_for_reset) { release_resources(adapter); - release_sub_crqs(adapter); + release_sub_crqs(adapter, 1); release_crq_queue(adapter); } @@ -1646,6 +1765,9 @@ static int do_reset(struct ibmvnic_adapter *adapter, release_tx_pools(adapter); init_rx_pools(netdev); init_tx_pools(netdev); + + release_napi(adapter); + init_napi(adapter); } else { rc = reset_tx_pools(adapter); if (rc) @@ -1654,12 +1776,15 @@ static int do_reset(struct ibmvnic_adapter *adapter, rc = reset_rx_pools(adapter); if (rc) return rc; - - if (reset_state == VNIC_CLOSED) - return 0; } } + ibmvnic_disable_irqs(adapter); + adapter->state = VNIC_CLOSED; + + if (reset_state == VNIC_CLOSED) + return 0; + rc = __ibmvnic_open(netdev); if (rc) { if (list_empty(&adapter->rwi_list)) @@ -1670,8 +1795,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, return 0; } - netif_carrier_on(netdev); - /* kick napi */ for (i = 0; i < adapter->req_rx_queues; i++) napi_schedule(&adapter->napi[i]); @@ -1679,6 +1802,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, if (adapter->reset_reason != VNIC_RESET_FAILOVER) netdev_notify_peers(netdev); + netif_carrier_on(netdev); + return 0; } @@ -1853,6 +1978,12 @@ restart_poll: be16_to_cpu(next->rx_comp.rc)); /* free the entry */ next->rx_comp.first = 0; + 
dev_kfree_skb_any(rx_buff->skb); + remove_buff_from_pool(adapter, rx_buff); + continue; + } else if (!rx_buff->skb) { + /* free the entry */ + next->rx_comp.first = 0; remove_buff_from_pool(adapter, rx_buff); continue; } @@ -1960,6 +2091,23 @@ static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) return wait_for_reset(adapter); } +static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + /* Some backing hardware adapters can not + * handle packets with a MSS less than 224 + * or with only one segment. + */ + if (skb_is_gso(skb)) { + if (skb_shinfo(skb)->gso_size < 224 || + skb_shinfo(skb)->gso_segs == 1) + features &= ~NETIF_F_GSO_MASK; + } + + return features; +} + static const struct net_device_ops ibmvnic_netdev_ops = { .ndo_open = ibmvnic_open, .ndo_stop = ibmvnic_close, @@ -1972,6 +2120,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = { .ndo_poll_controller = ibmvnic_netpoll_controller, #endif .ndo_change_mtu = ibmvnic_change_mtu, + .ndo_features_check = ibmvnic_features_check, }; /* ethtool functions */ @@ -2244,24 +2393,27 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) } static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, - struct ibmvnic_sub_crq_queue *scrq) + struct ibmvnic_sub_crq_queue *scrq, + bool do_h_free) { struct device *dev = &adapter->vdev->dev; long rc; netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); - /* Close the sub-crqs */ - do { - rc = plpar_hcall_norets(H_FREE_SUB_CRQ, - adapter->vdev->unit_address, - scrq->crq_num); - } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + if (do_h_free) { + /* Close the sub-crqs */ + do { + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, + adapter->vdev->unit_address, + scrq->crq_num); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); - if (rc) { - netdev_err(adapter->netdev, - "Failed to release sub-CRQ %16lx, rc = %ld\n", - scrq->crq_num, rc); + if (rc) { + netdev_err(adapter->netdev, + "Failed to release sub-CRQ %16lx, rc = %ld\n", + scrq->crq_num, rc); + } } dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, @@ -2329,12 +2481,12 @@ zero_page_failed: return NULL; } -static void release_sub_crqs(struct ibmvnic_adapter *adapter) +static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) { int i; if (adapter->tx_scrq) { - for (i = 0; i < adapter->req_tx_queues; i++) { + for (i = 0; i < adapter->num_active_tx_scrqs; i++) { if (!adapter->tx_scrq[i]) continue; @@ -2347,15 +2499,17 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) adapter->tx_scrq[i]->irq = 0; } - release_sub_crq_queue(adapter, adapter->tx_scrq[i]); + release_sub_crq_queue(adapter, adapter->tx_scrq[i], + do_h_free); } kfree(adapter->tx_scrq); adapter->tx_scrq = NULL; + adapter->num_active_tx_scrqs = 0; } if (adapter->rx_scrq) { - for (i = 0; i < adapter->req_rx_queues; i++) { + for (i = 0; i < adapter->num_active_rx_scrqs; i++) { if (!adapter->rx_scrq[i]) continue; @@ -2368,11 +2522,13 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) adapter->rx_scrq[i]->irq = 0; } - release_sub_crq_queue(adapter, adapter->rx_scrq[i]); + release_sub_crq_queue(adapter, adapter->rx_scrq[i], + do_h_free); } kfree(adapter->rx_scrq); adapter->rx_scrq = NULL; + adapter->num_active_rx_scrqs = 0; } } @@ -2413,6 +2569,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, struct ibmvnic_sub_crq_queue *scrq) { struct device *dev = &adapter->vdev->dev; + struct ibmvnic_tx_pool *tx_pool; struct 
ibmvnic_tx_buff *txbuff; union sub_crq *next; int index; @@ -2422,6 +2579,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, restart_loop: while (pending_scrq(adapter, scrq)) { unsigned int pool = scrq->pool_index; + int num_entries = 0; next = ibmvnic_next_scrq(adapter, scrq); for (i = 0; i < next->tx_comp.num_comps; i++) { @@ -2431,7 +2589,14 @@ restart_loop: continue; } index = be32_to_cpu(next->tx_comp.correlators[i]); - txbuff = &adapter->tx_pool[pool].tx_buff[index]; + if (index & IBMVNIC_TSO_POOL_MASK) { + tx_pool = &adapter->tso_pool[pool]; + index &= ~IBMVNIC_TSO_POOL_MASK; + } else { + tx_pool = &adapter->tx_pool[pool]; + } + + txbuff = &tx_pool->tx_buff[index]; for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) { if (!txbuff->data_dma[j]) @@ -2452,22 +2617,23 @@ restart_loop: txbuff->skb = NULL; } - adapter->tx_pool[pool].free_map[adapter->tx_pool[pool]. - producer_index] = index; - adapter->tx_pool[pool].producer_index = - (adapter->tx_pool[pool].producer_index + 1) % - adapter->req_tx_entries_per_subcrq; + num_entries += txbuff->num_entries; + + tx_pool->free_map[tx_pool->producer_index] = index; + tx_pool->producer_index = + (tx_pool->producer_index + 1) % + tx_pool->num_buffers; } /* remove tx_comp scrq*/ next->tx_comp.first = 0; - if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <= + if (atomic_sub_return(num_entries, &scrq->used) <= (adapter->req_tx_entries_per_subcrq / 2) && __netif_subqueue_stopped(adapter->netdev, scrq->pool_index)) { netif_wake_subqueue(adapter->netdev, scrq->pool_index); - netdev_info(adapter->netdev, "Started queue %d\n", - scrq->pool_index); + netdev_dbg(adapter->netdev, "Started queue %d\n", + scrq->pool_index); } } @@ -2539,7 +2705,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) dev_err(dev, "Couldn't register tx irq 0x%x. 
rc=%d\n", scrq->irq, rc); irq_dispose_mapping(scrq->irq); - goto req_rx_irq_failed; + goto req_tx_irq_failed; } } @@ -2575,7 +2741,7 @@ req_tx_irq_failed: free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); irq_dispose_mapping(adapter->rx_scrq[j]->irq); } - release_sub_crqs(adapter); + release_sub_crqs(adapter, 1); return rc; } @@ -2637,6 +2803,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter) for (i = 0; i < adapter->req_tx_queues; i++) { adapter->tx_scrq[i] = allqueues[i]; adapter->tx_scrq[i]->pool_index = i; + adapter->num_active_tx_scrqs++; } adapter->rx_scrq = kcalloc(adapter->req_rx_queues, @@ -2647,6 +2814,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter) for (i = 0; i < adapter->req_rx_queues; i++) { adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; adapter->rx_scrq[i]->scrq_num = i; + adapter->num_active_rx_scrqs++; } kfree(allqueues); @@ -2657,7 +2825,7 @@ rx_failed: adapter->tx_scrq = NULL; tx_failed: for (i = 0; i < registered_queues; i++) - release_sub_crq_queue(adapter, allqueues[i]); + release_sub_crq_queue(adapter, allqueues[i], 1); kfree(allqueues); return -1; } @@ -2997,7 +3165,7 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter, strncpy(&vlcd->name, adapter->netdev->name, len); } -static void send_login(struct ibmvnic_adapter *adapter) +static int send_login(struct ibmvnic_adapter *adapter) { struct ibmvnic_login_rsp_buffer *login_rsp_buffer; struct ibmvnic_login_buffer *login_buffer; @@ -3013,6 +3181,13 @@ static void send_login(struct ibmvnic_adapter *adapter) struct vnic_login_client_data *vlcd; int i; + if (!adapter->tx_scrq || !adapter->rx_scrq) { + netdev_err(adapter->netdev, + "RX or TX queues are not allocated, device login failed\n"); + return -1; + } + + release_login_rsp_buffer(adapter); client_data_len = vnic_client_data_len(adapter); buffer_size = @@ -3109,7 +3284,7 @@ static void send_login(struct ibmvnic_adapter *adapter) crq.login.len = cpu_to_be32(buffer_size); ibmvnic_send_crq(adapter, &crq); - return; + return 0; buf_rsp_map_failed: kfree(login_rsp_buffer); @@ -3118,7 +3293,7 @@ buf_rsp_alloc_failed: buf_map_failed: kfree(login_buffer); buf_alloc_failed: - return; + return -1; } static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, @@ -3738,6 +3913,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, ibmvnic_remove(adapter->vdev); return -EIO; } + release_login_buffer(adapter); complete(&adapter->init_done); return 0; @@ -4282,6 +4458,7 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; unsigned long timeout = msecs_to_jiffies(30000); + u64 old_num_rx_queues, old_num_tx_queues; int rc; if (adapter->resetting && !adapter->wait_for_reset) { @@ -4299,6 +4476,9 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) adapter->from_passive_init = false; + old_num_rx_queues = adapter->req_rx_queues; + old_num_tx_queues = adapter->req_tx_queues; + init_completion(&adapter->init_done); adapter->init_done_rc = 0; ibmvnic_send_crq_init(adapter); @@ -4318,10 +4498,18 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) return -1; } - if (adapter->resetting && !adapter->wait_for_reset) - rc = reset_sub_crq_queues(adapter); - else + if (adapter->resetting && !adapter->wait_for_reset) { + if (adapter->req_rx_queues != old_num_rx_queues || + adapter->req_tx_queues != old_num_tx_queues) { + release_sub_crqs(adapter, 0); + rc = init_sub_crqs(adapter); + } else { + rc = reset_sub_crq_queues(adapter); + } + } 
else { rc = init_sub_crqs(adapter); + } + if (rc) { dev_err(dev, "Initialization of sub crqs failed\n"); release_crq_queue(adapter); @@ -4334,6 +4522,14 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) release_crq_queue(adapter); } + rc = init_stats_buffers(adapter); + if (rc) + return rc; + + rc = init_stats_token(adapter); + if (rc) + return rc; + return rc; } @@ -4421,7 +4617,7 @@ ibmvnic_register_fail: device_remove_file(&dev->dev, &dev_attr_failover); ibmvnic_init_fail: - release_sub_crqs(adapter); + release_sub_crqs(adapter, 1); release_crq_queue(adapter); free_netdev(netdev); @@ -4438,9 +4634,12 @@ static int ibmvnic_remove(struct vio_dev *dev) mutex_lock(&adapter->reset_lock); release_resources(adapter); - release_sub_crqs(adapter); + release_sub_crqs(adapter, 1); release_crq_queue(adapter); + release_stats_token(adapter); + release_stats_buffers(adapter); + adapter->state = VNIC_REMOVED; mutex_unlock(&adapter->reset_lock); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index fe21a6e2ddae..89efe700eafe 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -43,6 +43,7 @@ #define IBMVNIC_TSO_BUF_SZ 65536 #define IBMVNIC_TSO_BUFS 64 +#define IBMVNIC_TSO_POOL_MASK 0x80000000 #define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE) #define IBMVNIC_BUFFER_HLEN 500 @@ -909,6 +910,7 @@ struct ibmvnic_tx_buff { union sub_crq indir_arr[6]; u8 hdr_data[140]; dma_addr_t indir_dma; + int num_entries; }; struct ibmvnic_tx_pool { @@ -916,11 +918,9 @@ struct ibmvnic_tx_pool { int *free_map; int consumer_index; int producer_index; - wait_queue_head_t ibmvnic_tx_comp_q; - struct task_struct *work_thread; struct ibmvnic_long_term_buff long_term_buff; - struct ibmvnic_long_term_buff tso_ltb; - int tso_index; + int num_buffers; + int buf_size; }; struct ibmvnic_rx_buff { @@ -1043,6 +1043,7 @@ struct ibmvnic_adapter { u64 promisc; struct ibmvnic_tx_pool *tx_pool; + struct ibmvnic_tx_pool *tso_pool; struct completion init_done; int init_done_rc; @@ -1091,8 +1092,11 @@ struct ibmvnic_adapter { u64 opt_rxba_entries_per_subcrq; __be64 tx_rx_desc_req; u8 map_id; - u64 num_active_rx_pools; - u64 num_active_tx_pools; + u32 num_active_rx_scrqs; + u32 num_active_rx_pools; + u32 num_active_rx_napi; + u32 num_active_tx_scrqs; + u32 num_active_tx_pools; struct tasklet_struct tasklet; enum vnic_state state; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 1feb54b6d92e..14d287bed33c 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -251,6 +251,20 @@ config I40EVF will be called i40evf. MSI-X interrupt support is required for this driver to work correctly. +config ICE + tristate "Intel(R) Ethernet Connection E800 Series Support" + default n + depends on PCI_MSI + ---help--- + This driver supports Intel(R) Ethernet Connection E800 Series of + devices. For more information on how to identify your adapter, go + to the Adapter & Driver ID Guide that can be located at: + + <http://support.intel.com> + + To compile this driver as a module, choose M here. The module + will be called ice. 
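
The new IBMVNIC_TSO_POOL_MASK define above is how tx completions find their pool: the xmit path sets bit 31 of the completion correlator for buffers drawn from the TSO pool, and ibmvnic_complete_tx() masks it back off to recover the free-map index. A small self-contained sketch of that encoding (names are illustrative, not the driver's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TSO_POOL_MASK 0x80000000u  /* high bit marks the TSO pool */

/* Encode a free-map index plus the pool it came from into one 32-bit
 * correlator, as the xmit path does before posting the descriptor. */
static uint32_t encode_correlator(uint32_t index, int is_tso)
{
    return is_tso ? (index | TSO_POOL_MASK) : index;
}

/* Decode on completion: the mask selects the pool, the remaining bits
 * are the free-map index. */
static void decode_correlator(uint32_t corr, uint32_t *index, int *is_tso)
{
    *is_tso = !!(corr & TSO_POOL_MASK);
    *index = corr & ~TSO_POOL_MASK;
}

int main(void)
{
    uint32_t index;
    int is_tso;

    decode_correlator(encode_correlator(42, 1), &index, &is_tso);
    assert(index == 42 && is_tso);
    printf("index %u came from the %s pool\n", index, is_tso ? "tso" : "tx");
    return 0;
}
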
+ config FM10K tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support" default n diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile index 90af7757a885..807a4f8c7e4e 100644 --- a/drivers/net/ethernet/intel/Makefile +++ b/drivers/net/ethernet/intel/Makefile @@ -14,3 +14,4 @@ obj-$(CONFIG_I40E) += i40e/ obj-$(CONFIG_IXGB) += ixgb/ obj-$(CONFIG_I40EVF) += i40evf/ obj-$(CONFIG_FM10K) += fm10k/ +obj-$(CONFIG_ICE) += ice/ diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 29486478836e..41ad56edfb96 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* Intel PRO/100 Linux driver diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile index 4a6ab1522451..c7caadd3c8af 100644 --- a/drivers/net/ethernet/intel/e1000/Makefile +++ b/drivers/net/ethernet/intel/e1000/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 ################################################################################ # # Intel PRO/1000 Linux driver diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 8fd2458060a0..3a0feea2df54 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel PRO/1000 Linux driver diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index dc71e87c3260..3e80ca170dd7 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2006 Intel Corporation. 
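
From here on, most of the Intel hunks are the treewide SPDX conversion: each file gains a license identifier as its very first line, in whatever comment syntax its file type allows. The three forms visible in these hunks:

// SPDX-License-Identifier: GPL-2.0        (C sources)

/* SPDX-License-Identifier: GPL-2.0 */     (headers)

# SPDX-License-Identifier: GPL-2.0         (Makefiles)

Headers keep the block-comment form since they can be pulled into contexts where // comments are not accepted.
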
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 3bac9df1c099..6e7e923d57bf 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * Intel PRO/1000 Linux driver diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h index 5cf7268cc4e1..f09c569ec19b 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.h +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel PRO/1000 Linux driver diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 3dd4aeb2706d..d5eb19b86a0a 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* Intel PRO/1000 Linux driver diff --git a/drivers/net/ethernet/intel/e1000/e1000_osdep.h b/drivers/net/ethernet/intel/e1000/e1000_osdep.h index 33e7c45a4fe4..ae0559b8b011 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_osdep.h +++ b/drivers/net/ethernet/intel/e1000/e1000_osdep.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel PRO/1000 Linux driver diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c index c9cde352b1c8..345f23927bcc 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_param.c +++ b/drivers/net/ethernet/intel/e1000/e1000_param.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* Intel PRO/1000 Linux driver diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index cd391376036c..953e99df420c 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h index a2162e11673e..ee6d1256fda4 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 6b03c8553e59..924f2c8dfa6c 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. 
* diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h index abc6a9abff98..9a24c645f726 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.h +++ b/drivers/net/ethernet/intel/e1000e/82571.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile index 106de493373c..24e391a4ac68 100644 --- a/drivers/net/ethernet/intel/e1000e/Makefile +++ b/drivers/net/ethernet/intel/e1000e/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 ################################################################################ # # Intel PRO/1000 Linux driver diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index afb7ebe20b24..22883015a695 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * @@ -400,6 +401,10 @@ #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ #define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */ #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO Access Complete */ +#define E1000_ICR_SRPD 0x00010000 /* Small Receive Packet Detected */ +#define E1000_ICR_ACK 0x00020000 /* Receive ACK Frame Detected */ +#define E1000_ICR_MNG 0x00040000 /* Manageability Event Detected */ #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ /* If this bit asserted, the driver should claim the interrupt */ #define E1000_ICR_INT_ASSERTED 0x80000000 @@ -407,7 +412,7 @@ #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ #define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ #define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ -#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ +#define E1000_ICR_OTHER 0x01000000 /* Other Interrupt */ /* PBA ECC Register */ #define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ @@ -431,12 +436,27 @@ E1000_IMS_RXSEQ | \ E1000_IMS_LSC) +/* These are all of the events related to the OTHER interrupt. + */ +#define IMS_OTHER_MASK ( \ + E1000_IMS_LSC | \ + E1000_IMS_RXO | \ + E1000_IMS_MDAC | \ + E1000_IMS_SRPD | \ + E1000_IMS_ACK | \ + E1000_IMS_MNG) + /* Interrupt Mask Set */ #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ #define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* Receiver Overrun */ #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO Access Complete */ +#define E1000_IMS_SRPD E1000_ICR_SRPD /* Small Receive Packet */ +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive ACK Frame Detected */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability Event */ #define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ #define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 2311b31bdcac..da88555ba1fd 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 003cbd605799..64dc0c11147f 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index d803b1a12349..21802396bed6 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 31277d3bb7dc..1551d6ce5341 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * @@ -1367,9 +1368,6 @@ out: * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. - * - * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link - * up). **/ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) { @@ -1385,7 +1383,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 1; + return 0; + mac->get_link_status = false; /* First we want to see if the MII Status Register reports * link. 
If so, then we want to get the current speed/duplex @@ -1393,12 +1392,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) */ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) - return ret_val; + goto out; if (hw->mac.type == e1000_pchlan) { ret_val = e1000_k1_gig_workaround_hv(hw, link); if (ret_val) - return ret_val; + goto out; } /* When connected at 10Mbps half-duplex, some parts are excessively @@ -1431,7 +1430,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) ret_val = hw->phy.ops.acquire(hw); if (ret_val) - return ret_val; + goto out; if (hw->mac.type == e1000_pch2lan) emi_addr = I82579_RX_CONFIG; @@ -1453,7 +1452,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) hw->phy.ops.release(hw); if (ret_val) - return ret_val; + goto out; if (hw->mac.type >= e1000_pch_spt) { u16 data; @@ -1462,14 +1461,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (speed == SPEED_1000) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) - return ret_val; + goto out; ret_val = e1e_rphy_locked(hw, PHY_REG(776, 20), &data); if (ret_val) { hw->phy.ops.release(hw); - return ret_val; + goto out; } ptr_gap = (data & (0x3FF << 2)) >> 2; @@ -1483,18 +1482,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) } hw->phy.ops.release(hw); if (ret_val) - return ret_val; + goto out; } else { ret_val = hw->phy.ops.acquire(hw); if (ret_val) - return ret_val; + goto out; ret_val = e1e_wphy_locked(hw, PHY_REG(776, 20), 0xC023); hw->phy.ops.release(hw); if (ret_val) - return ret_val; + goto out; } } @@ -1521,7 +1520,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) - return ret_val; + goto out; } if (hw->mac.type >= e1000_pch_lpt) { /* Set platform power management values for @@ -1529,7 +1528,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) */ ret_val = e1000_platform_pm_pch_lpt(hw, link); if (ret_val) - return ret_val; + goto out; } /* Clear link partner's EEE ability */ @@ -1552,9 +1551,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) } if (!link) - return 0; /* No link detected */ - - mac->get_link_status = false; + goto out; switch (hw->mac.type) { case e1000_pch2lan: @@ -1616,12 +1613,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * different link partner. */ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) { + if (ret_val) e_dbg("Error configuring flow control\n"); - return ret_val; - } - return 1; + return ret_val; + +out: + mac->get_link_status = true; + return ret_val; } static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 00a36df02a3f..3c4f82c21084 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index f457c5703d0c..b293464a9f27 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. 
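
The ich8lan.c rework above (and the matching mac.c hunk just below) inverts the old contract: instead of returning 1 for link-up, the function clears mac->get_link_status up front and funnels every failure and the no-link case through a single out label that re-arms the flag, so the next watchdog pass retries the check. A compact model of the pattern, with a stubbed accessor standing in for the PHY calls:

#include <stdbool.h>
#include <stdio.h>

static bool get_link_status = true;  /* "need to re-check link" flag */

static int phy_has_link(bool *link) { *link = true; return 0; } /* stub */

/* Clear the flag optimistically, then restore it on any path that did
 * not confirm link, so a failed check is simply retried later. Stubs
 * are illustrative, not e1000e internals. */
static int check_for_copper_link(void)
{
    bool link;
    int ret;

    if (!get_link_status)
        return 0;            /* nothing pending */
    get_link_status = false; /* assume the check will succeed */

    ret = phy_has_link(&link);
    if (ret || !link)
        goto out;

    /* ... speed/duplex and flow-control setup would go here ... */
    return ret;

out:
    get_link_status = true;  /* re-arm: retry on the next pass */
    return ret;
}

int main(void)
{
    printf("check rc=%d, get_link_status=%d\n",
           check_for_copper_link(), get_link_status);
    return 0;
}
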
* @@ -410,9 +411,6 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. - * - * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link - * up). **/ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) { @@ -426,20 +424,16 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 1; + return 0; + mac->get_link_status = false; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. */ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); - if (ret_val) - return ret_val; - - if (!link) - return 0; /* No link detected */ - - mac->get_link_status = false; + if (ret_val || !link) + goto out; /* Check if there was DownShift, must be checked * immediately after link-up @@ -464,12 +458,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * different link partner. */ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) { + if (ret_val) e_dbg("Error configuring flow control\n"); - return ret_val; - } - return 1; + return ret_val; + +out: + mac->get_link_status = true; + return ret_val; } /** diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h index 8284618af9ff..cb0abf6c76a5 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.h +++ b/drivers/net/ethernet/intel/e1000e/mac.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c index cc9b3befc2bc..e027660aeb92 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.c +++ b/drivers/net/ethernet/intel/e1000e/manage.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h index 0b9ea5952b07..3268f2e58593 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.h +++ b/drivers/net/ethernet/intel/e1000e/manage.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 1298b69f990b..ec4a9759a6f2 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. 
* @@ -1914,30 +1915,20 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 icr; - bool enable = true; - - icr = er32(ICR); - if (icr & E1000_ICR_RXO) { - ew32(ICR, E1000_ICR_RXO); - enable = false; - /* napi poll will re-enable Other, make sure it runs */ - if (napi_schedule_prep(&adapter->napi)) { - adapter->total_rx_bytes = 0; - adapter->total_rx_packets = 0; - __napi_schedule(&adapter->napi); - } - } + u32 icr = er32(ICR); + + if (icr & adapter->eiac_mask) + ew32(ICS, (icr & adapter->eiac_mask)); + if (icr & E1000_ICR_LSC) { - ew32(ICR, E1000_ICR_LSC); hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - if (enable && !test_bit(__E1000_DOWN, &adapter->state)) - ew32(IMS, E1000_IMS_OTHER); + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK); return IRQ_HANDLED; } @@ -2040,7 +2031,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) hw->hw_addr + E1000_EITR_82574(vector)); else writel(1, hw->hw_addr + E1000_EITR_82574(vector)); - adapter->eiac_mask |= E1000_IMS_OTHER; /* Cause Tx interrupts on every write back */ ivar |= BIT(31); @@ -2265,7 +2255,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) if (adapter->msix_entries) { ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); - ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC); + ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | + IMS_OTHER_MASK); } else if (hw->mac.type >= e1000_pch_lpt) { ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); } else { @@ -2333,8 +2324,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, { struct pci_dev *pdev = adapter->pdev; - ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, - GFP_KERNEL); + ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; @@ -2707,8 +2698,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight) napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->state)) { if (adapter->msix_entries) - ew32(IMS, adapter->rx_ring->ims_val | - E1000_IMS_OTHER); + ew32(IMS, adapter->rx_ring->ims_val); else e1000_irq_enable(adapter); } @@ -5101,7 +5091,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) case e1000_media_type_copper: if (hw->mac.get_link_status) { ret_val = hw->mac.ops.check_for_link(hw); - link_active = ret_val > 0; + link_active = !hw->mac.get_link_status; } else { link_active = true; } diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index 2efd80dfd88e..68949bb41b7b 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h index 5d46967e0d1f..8e082028be7d 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.h +++ b/drivers/net/ethernet/intel/e1000e/nvm.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. 
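
With check_for_copper_link() no longer returning 1 for link-up, the e1000e_has_link() hunk above derives link state purely from the flag: link is up exactly when the check ran and did not re-arm mac->get_link_status. A stub model of that derivation (the stub always "finds" link; the real code reads the PHY):

#include <stdbool.h>
#include <stdio.h>

static bool get_link_status = true;

static int check_for_link(void)
{
    get_link_status = false; /* stub: pretend the PHY reported link */
    return 0;
}

/* Mirrors the hunk's link_active = !hw->mac.get_link_status. */
static bool has_link(void)
{
    if (get_link_status)
        check_for_link();
    return !get_link_status;
}

int main(void)
{
    printf("link %s\n", has_link() ? "up" : "down");
    return 0;
}
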
* diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index 47da51864543..2def33eba9e6 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 86ff0969efb6..b8226ed0e338 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h index 3027f63ee793..d4180b5e9196 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.h +++ b/drivers/net/ethernet/intel/e1000e/phy.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index b366885487a8..f941e5085f44 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 0cb4d365e5ad..16afc3c2a986 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel PRO/1000 Linux driver * Copyright(c) 1999 - 2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile index cac645329cea..93277cb99cb7 100644 --- a/drivers/net/ethernet/intel/fm10k/Makefile +++ b/drivers/net/ethernet/intel/fm10k/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 ################################################################################ # # Intel(R) Ethernet Switch Host Interface Driver diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 46973fb234c5..a9cdf763c59d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2017 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index 736a9f087bc9..e303d88720ef 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -1,5 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. + * Copyright(c) 2013 - 2018 Intel Corporation. 
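The long run of one-line header hunks in this region adds SPDX license identifiers. The varying comment styles are deliberate and can be read straight out of the hunks: the tag goes on the first line of the file, in the comment form native to that file type:

// SPDX-License-Identifier: GPL-2.0        C source (.c)
/* SPDX-License-Identifier: GPL-2.0 */     C header (.h)
# SPDX-License-Identifier: GPL-2.0         Makefile and scripts
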
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -262,6 +263,7 @@ s32 fm10k_stop_hw_generic(struct fm10k_hw *hw) * fm10k_read_hw_stats_32b - Reads value of 32-bit registers * @hw: pointer to the hardware structure * @addr: address of register containing a 32-bit value + * @stat: pointer to structure holding hw stat information * * Function reads the content of the register and returns the delta * between the base and the current value. @@ -281,6 +283,7 @@ u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr, * fm10k_read_hw_stats_48b - Reads value of 48-bit registers * @hw: pointer to the hardware structure * @addr: address of register containing the lower 32-bit value + * @stat: pointer to structure holding hw stat information * * Function reads the content of 2 registers, combined to represent a 48-bit * statistical value. Extra processing is required to handle overflowing. @@ -461,7 +464,6 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, /** * fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues - * @hw: pointer to the hardware structure * @q: pointer to the ring of hardware statistics queue * @idx: index pointing to the start of the ring iteration * @count: number of queues to iterate over diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h index d51f9c7a47ff..2bdb24d2ca9d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index db4bd8bf9722..c4f733452ef2 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 14df09e2d964..43e8d839831f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index c7234f35f8ff..28b6b4e56487 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2017 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 760cfa52d02c..30395f5e5e87 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2017 Intel Corporation. 
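The fm10k_common.c hunks are kernel-doc repairs rather than code changes: the undocumented @stat parameter gains a line in both stats readers, and a stale @hw line is dropped from fm10k_unbind_hw_stats_q(), which takes no hw argument. scripts/kernel-doc warns whenever the @-lines and the real parameter list disagree, so the conforming shape (taken from the first hunk) documents every actual parameter, in order:

/**
 * fm10k_read_hw_stats_32b - Reads value of 32-bit registers
 * @hw: pointer to the hardware structure
 * @addr: address of register containing a 32-bit value
 * @stat: pointer to structure holding hw stat information
 *
 * Function reads the content of the register and returns the delta
 * between the base and the current value.
 **/
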
* diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 8e12aae065d8..df8607097e4a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2017 Intel Corporation. * @@ -28,13 +29,13 @@ #include "fm10k.h" -#define DRV_VERSION "0.22.1-k" +#define DRV_VERSION "0.23.4-k" #define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; static const char fm10k_driver_string[] = DRV_SUMMARY; static const char fm10k_copyright[] = - "Copyright(c) 2013 - 2017 Intel Corporation."; + "Copyright(c) 2013 - 2018 Intel Corporation."; MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 244d3ad58ca7..c01bf30a0c9e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2017 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h index 35c1dbad1330..007e1dfa9b7a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index a38ae5c54da3..45793491d4ba 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1,5 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. + * Copyright(c) 2013 - 2018 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -486,7 +487,7 @@ static void fm10k_insert_tunnel_port(struct list_head *ports, /** * fm10k_udp_tunnel_add - * @netdev: network interface device structure + * @dev: network interface device structure * @ti: Tunnel endpoint information * * This function is called when a new UDP tunnel port has been added. @@ -518,8 +519,8 @@ static void fm10k_udp_tunnel_add(struct net_device *dev, /** * fm10k_udp_tunnel_del - * @netdev: network interface device structure - * @ti: Tunnel endpoint information + * @dev: network interface device structure + * @ti: Tunnel end point information * * This function is called when a new UDP tunnel port is deleted. The freed * port will be removed from the list, then we reprogram the offloaded port @@ -803,7 +804,7 @@ int fm10k_queue_vlan_request(struct fm10k_intfc *interface, * @glort: the target glort for this update * @addr: the address to update * @vid: the vid to update - * @sync: whether to add or remove + * @set: whether to add or remove * * This function queues up a MAC request for sending to the switch manager. 
* A separate thread monitors the queue and sends updates to the switch diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index aa05fb534942..c4a2b688b38b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1,5 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. + * Copyright(c) 2013 - 2018 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -29,7 +30,7 @@ static const struct fm10k_info *fm10k_info_tbl[] = { [fm10k_device_vf] = &fm10k_vf_info, }; -/** +/* * fm10k_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last @@ -211,7 +212,7 @@ static void fm10k_start_service_event(struct fm10k_intfc *interface) /** * fm10k_service_timer - Timer Call-back - * @data: pointer to interface cast into an unsigned long + * @t: pointer to timer data **/ static void fm10k_service_timer(struct timer_list *t) { @@ -649,7 +650,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface) /** * fm10k_watchdog_flush_tx - flush queues on host not ready - * @interface - pointer to the device interface structure + * @interface: pointer to the device interface structure **/ static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface) { @@ -679,7 +680,7 @@ static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface) /** * fm10k_watchdog_subtask - check and bring link up - * @interface - pointer to the device interface structure + * @interface: pointer to the device interface structure **/ static void fm10k_watchdog_subtask(struct fm10k_intfc *interface) { @@ -703,7 +704,7 @@ static void fm10k_watchdog_subtask(struct fm10k_intfc *interface) /** * fm10k_check_hang_subtask - check for hung queues and dropped interrupts - * @interface - pointer to the device interface structure + * @interface: pointer to the device interface structure * * This function serves two purposes. First it strobes the interrupt lines * in order to make certain interrupts are occurring. Secondly it sets the @@ -1995,6 +1996,7 @@ skip_tx_dma_drain: /** * fm10k_sw_init - Initialize general software structures * @interface: host interface private structure to initialize + * @ent: PCI device ID entry * * fm10k_sw_init initializes the interface private data structure. * Fields are initialized based on PCI device information and diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index d6406fc31ffb..7ba54c534f8c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,5 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. + * Copyright(c) 2013 - 2018 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -1180,7 +1181,7 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, /** * fm10k_iov_select_vid - Select correct default VLAN ID - * @hw: Pointer to hardware structure + * @vf_info: pointer to VF information structure * @vid: VLAN ID to correct * * Will report an error if the VLAN ID is out of range. 
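fm10k_service_timer() now receives a struct timer_list * courtesy of the tree-wide timer conversion, and the kernel-doc @t line above is the matching cleanup. Under this API the callback recovers its containing object with from_timer() rather than a cast of an unsigned long cookie. A condensed sketch, assuming the timer field is named service_timer and reusing the driver's existing service-task helper (not the full driver body):

static void fm10k_service_timer(struct timer_list *t)
{
        /* map the timer pointer back to the owning interface */
        struct fm10k_intfc *interface = from_timer(interface, t,
                                                   service_timer);

        /* re-arm, then kick the deferred service task */
        mod_timer(&interface->service_timer, (2 * HZ) + jiffies);
        fm10k_service_event_schedule(interface);
}

/* paired one-time setup, typically at probe: */
timer_setup(&interface->service_timer, fm10k_service_timer, 0);
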
For VID = 0, it will diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index e04d41f1a532..ae81f9a16602 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2017 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index f8e87bf086b9..725ecb7abccd 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -1,5 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. + * Copyright(c) 2013 - 2018 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -120,6 +121,7 @@ static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string) * @msg: Pointer to message block * @attr_id: Attribute ID * @mac_addr: MAC address to be stored + * @vlan: VLAN to be stored * * This function will reorder a MAC address to be CPU endian and store it * in the attribute buffer. It will return success if provided with a @@ -155,8 +157,8 @@ s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id, /** * fm10k_tlv_attr_get_mac_vlan - Get MAC/VLAN stored in attribute * @attr: Pointer to attribute - * @attr_id: Attribute ID * @mac_addr: location of buffer to store MAC address + * @vlan: location of buffer to store VLAN * * This function pulls the MAC address back out of the attribute and will * place it in the array pointed by by mac_addr. It will return success @@ -549,7 +551,7 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, * @hw: Pointer to hardware structure * @msg: Pointer to message * @mbx: Pointer to mailbox information structure - * @func: Function array containing list of message handling functions + * @data: Pointer to message handler data structure * * This function should be the first function called upon receiving a * message. The handler will identify the message type and call the correct diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h index a1f1027fe184..5d2ee759507e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 6bb16c13d9d6..dd23af11e2c1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 337ba65a9411..f06913630b39 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. 
* diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h index 2662f33c0c71..66a66b73a2f1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2016 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 3da482c3d68d..75437768a07c 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 ################################################################################ # # Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 46e9f4e0a02c..a44139c1de80 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver @@ -158,9 +159,17 @@ enum i40e_state_t { __I40E_BAD_EEPROM, __I40E_DOWN_REQUESTED, __I40E_FD_FLUSH_REQUESTED, + __I40E_FD_ATR_AUTO_DISABLED, + __I40E_FD_SB_AUTO_DISABLED, __I40E_RESET_FAILED, __I40E_PORT_SUSPENDED, __I40E_VF_DISABLE, + __I40E_MACVLAN_SYNC_PENDING, + __I40E_UDP_FILTER_SYNC_PENDING, + __I40E_TEMP_LINK_POLLING, + __I40E_CLIENT_SERVICE_REQUESTED, + __I40E_CLIENT_L2_CHANGE, + __I40E_CLIENT_RESET, /* This must be last as it determines the size of the BITMAP */ __I40E_STATE_SIZE__, }; @@ -507,41 +516,34 @@ struct i40e_pf { #define I40E_HW_STOP_FW_LLDP BIT(16) #define I40E_HW_PORT_ID_VALID BIT(17) #define I40E_HW_RESTART_AUTONEG BIT(18) - - u64 flags; -#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(0) -#define I40E_FLAG_MSI_ENABLED BIT_ULL(1) -#define I40E_FLAG_MSIX_ENABLED BIT_ULL(2) -#define I40E_FLAG_RSS_ENABLED BIT_ULL(3) -#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(4) -#define I40E_FLAG_FILTER_SYNC BIT_ULL(5) -#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(6) -#define I40E_FLAG_DCB_CAPABLE BIT_ULL(7) -#define I40E_FLAG_DCB_ENABLED BIT_ULL(8) -#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(9) -#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(10) -#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(11) -#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(12) -#define I40E_FLAG_MFP_ENABLED BIT_ULL(13) -#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(14) -#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(15) -#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(16) -#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(17) -#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(18) -#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(19) -#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(20) -#define I40E_FLAG_LEGACY_RX BIT_ULL(21) -#define I40E_FLAG_PTP BIT_ULL(22) -#define I40E_FLAG_IWARP_ENABLED BIT_ULL(23) -#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(24) -#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(25) -#define I40E_FLAG_CLIENT_RESET BIT_ULL(26) -#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT_ULL(27) -#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT_ULL(28) -#define I40E_FLAG_TC_MQPRIO BIT_ULL(29) -#define I40E_FLAG_FD_SB_INACTIVE BIT_ULL(30) -#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT_ULL(31) -#define I40E_FLAG_DISABLE_FW_LLDP BIT_ULL(32) +#define I40E_HW_STOPPABLE_FW_LLDP BIT(19) + + u32 flags; +#define I40E_FLAG_RX_CSUM_ENABLED BIT(0) +#define 
I40E_FLAG_MSI_ENABLED BIT(1) +#define I40E_FLAG_MSIX_ENABLED BIT(2) +#define I40E_FLAG_RSS_ENABLED BIT(3) +#define I40E_FLAG_VMDQ_ENABLED BIT(4) +#define I40E_FLAG_SRIOV_ENABLED BIT(5) +#define I40E_FLAG_DCB_CAPABLE BIT(6) +#define I40E_FLAG_DCB_ENABLED BIT(7) +#define I40E_FLAG_FD_SB_ENABLED BIT(8) +#define I40E_FLAG_FD_ATR_ENABLED BIT(9) +#define I40E_FLAG_MFP_ENABLED BIT(10) +#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(11) +#define I40E_FLAG_VEB_MODE_ENABLED BIT(12) +#define I40E_FLAG_VEB_STATS_ENABLED BIT(13) +#define I40E_FLAG_LINK_POLLING_ENABLED BIT(14) +#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(15) +#define I40E_FLAG_LEGACY_RX BIT(16) +#define I40E_FLAG_PTP BIT(17) +#define I40E_FLAG_IWARP_ENABLED BIT(18) +#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(19) +#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(20) +#define I40E_FLAG_TC_MQPRIO BIT(21) +#define I40E_FLAG_FD_SB_INACTIVE BIT(22) +#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23) +#define I40E_FLAG_DISABLE_FW_LLDP BIT(24) struct i40e_client_instance *cinst; bool stat_offsets_loaded; @@ -824,6 +826,7 @@ struct i40e_q_vector { struct i40e_ring_container rx; struct i40e_ring_container tx; + u8 itr_countdown; /* when 0 should adjust adaptive ITR */ u8 num_ringpairs; /* total number of ring pairs in vector */ cpumask_t affinity_mask; @@ -832,8 +835,6 @@ struct i40e_q_vector { struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; bool arm_wb_state; -#define ITR_COUNTDOWN_START 100 - u8 itr_countdown; /* when 0 should adjust ITR */ } ____cacheline_internodealigned_in_smp; /* lan device */ @@ -1041,6 +1042,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi); void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset); void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs); void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id); +void i40e_client_update_msix_info(struct i40e_pf *pf); int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id); /** * i40e_irq_dynamic_enable - Enable default interrupt generation settings @@ -1109,4 +1111,10 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); +int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, + struct i40e_cloud_filter *filter, + bool add); +int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, + struct i40e_cloud_filter *filter, + bool add); #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index e78971605e0b..843fc7781ef8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index 2349fbe04bd2..0a8749ee9fd3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h 
b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index a852775d3059..0244923edeb8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver @@ -1914,6 +1915,43 @@ enum i40e_aq_phy_type { I40E_PHY_TYPE_DEFAULT = 0xFF, }; +#define I40E_PHY_TYPES_BITMASK (BIT_ULL(I40E_PHY_TYPE_SGMII) | \ + BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) | \ + BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) | \ + BIT_ULL(I40E_PHY_TYPE_XAUI) | \ + BIT_ULL(I40E_PHY_TYPE_XFI) | \ + BIT_ULL(I40E_PHY_TYPE_SFI) | \ + BIT_ULL(I40E_PHY_TYPE_XLAUI) | \ + BIT_ULL(I40E_PHY_TYPE_XLPPI) | \ + BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) | \ + BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) | \ + BIT_ULL(I40E_PHY_TYPE_UNRECOGNIZED) | \ + BIT_ULL(I40E_PHY_TYPE_UNSUPPORTED) | \ + BIT_ULL(I40E_PHY_TYPE_100BASE_TX) | \ + BIT_ULL(I40E_PHY_TYPE_1000BASE_T) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_T) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) | \ + BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) | \ + BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) | \ + BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) | \ + BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) | \ + BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) | \ + BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) | \ + BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) | \ + BIT_ULL(I40E_PHY_TYPE_25GBASE_KR) | \ + BIT_ULL(I40E_PHY_TYPE_25GBASE_CR) | \ + BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \ + BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \ + BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \ + BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC)) + #define I40E_LINK_SPEED_100MB_SHIFT 0x1 #define I40E_LINK_SPEED_1000MB_SHIFT 0x2 #define I40E_LINK_SPEED_10GB_SHIFT 0x3 diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h index 926811ad44ac..abed0c52e782 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 0de9610c1d8d..d8ce4999864f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver @@ -287,6 +288,17 @@ out: return capable; } +void i40e_client_update_msix_info(struct i40e_pf *pf) +{ + struct i40e_client_instance *cdev = pf->cinst; + + if (!cdev || !cdev->client) + return; + + cdev->lan_info.msix_count = pf->num_iwarp_msix; + cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector]; +} + /** * i40e_client_add_instance - add a client instance struct to the instance list * @pf: pointer to the board struct @@ -328,9 +340,6 @@ static void i40e_client_add_instance(struct i40e_pf *pf) return; } - cdev->lan_info.msix_count = pf->num_iwarp_msix; - 
cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector]; - mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list, struct netdev_hw_addr, list); if (mac) @@ -340,6 +349,8 @@ static void i40e_client_add_instance(struct i40e_pf *pf) cdev->client = registered_client; pf->cinst = cdev; + + i40e_client_update_msix_info(pf); } /** @@ -365,9 +376,8 @@ void i40e_client_subtask(struct i40e_pf *pf) struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; int ret = 0; - if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED)) + if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state)) return; - pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED; cdev = pf->cinst; /* If we're down or resetting, just bail */ @@ -448,7 +458,7 @@ int i40e_lan_add_device(struct i40e_pf *pf) * added, we can schedule a subtask to go initiate the clients if * they can be launched at probe time. */ - pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; + set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); i40e_service_event_schedule(pf); out: @@ -543,7 +553,7 @@ static void i40e_client_prepare(struct i40e_client *client) pf = ldev->pf; i40e_client_add_instance(pf); /* Start the client subtask */ - pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; + set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); i40e_service_event_schedule(pf); } mutex_unlock(&i40e_device_mutex); diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h index ba55c889e4c5..9d464d40bc17 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.h +++ b/drivers/net/ethernet/intel/i40e/i40e_client.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index ef5a868aae46..c0a3dae8a2db 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver @@ -1208,6 +1209,29 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) return media; } +/** + * i40e_poll_globr - Poll for Global Reset completion + * @hw: pointer to the hardware structure + * @retry_limit: how many times to retry before failure + **/ +static i40e_status i40e_poll_globr(struct i40e_hw *hw, + u32 retry_limit) +{ + u32 cnt, reg = 0; + + for (cnt = 0; cnt < retry_limit; cnt++) { + reg = rd32(hw, I40E_GLGEN_RSTAT); + if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) + return 0; + msleep(100); + } + + hw_dbg(hw, "Global reset failed.\n"); + hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg); + + return I40E_ERR_RESET_FAILED; +} + #define I40E_PF_RESET_WAIT_COUNT_A0 200 #define I40E_PF_RESET_WAIT_COUNT 200 /** @@ -1284,14 +1308,14 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) break; reg2 = rd32(hw, I40E_GLGEN_RSTAT); - if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { - hw_dbg(hw, "Core reset upcoming. 
Skipping PF reset request.\n"); - hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg2); - return I40E_ERR_NOT_READY; - } + if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) + break; usleep_range(1000, 2000); } - if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { + if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { + if (i40e_poll_globr(hw, grst_del)) + return I40E_ERR_RESET_FAILED; + } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { hw_dbg(hw, "PF reset polling failed to complete.\n"); return I40E_ERR_RESET_FAILED; } @@ -2415,6 +2439,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, * i40e_aq_set_switch_config * @hw: pointer to the hardware structure * @flags: bit flag values to set + * @mode: cloud filter mode * @valid_flags: which bit flags to set * @mode: cloud filter mode * @cmd_details: pointer to command details structure or NULL @@ -3200,9 +3225,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, u32 valid_functions, num_functions; u32 number, logical_id, phys_id; struct i40e_hw_capabilities *p; + u16 id, ocp_cfg_word0; + i40e_status status; u8 major_rev; u32 i = 0; - u16 id; cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; @@ -3389,6 +3415,26 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, hw->num_ports++; } + /* OCP cards case: if a mezz is removed the Ethernet port is at + * disabled state in PRTGEN_CNF register. Additional NVM read is + * needed in order to check if we are dealing with OCP card. + * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting + * physical ports results in wrong partition id calculation and thus + * not supporting WoL. + */ + if (hw->mac.type == I40E_MAC_X722) { + if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { + status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, + 2 * I40E_SR_OCP_CFG_WORD0, + sizeof(ocp_cfg_word0), + &ocp_cfg_word0, true, NULL); + if (!status && + (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) + hw->num_ports = 4; + i40e_release_nvm(hw); + } + } + valid_functions = p->valid_functions; num_functions = 0; while (valid_functions) { @@ -5531,7 +5577,7 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, * function. * **/ -i40e_status +enum i40e_status_code i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count) @@ -5625,7 +5671,7 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, * function. 
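The i40e changes running through this region shrink pf->flags from u64 to u32 by evicting every bit that gets flipped at runtime (FD ATR/SB auto-disable, UDP filter sync, client service requests, client reset, temporary link polling) into the pf->state bitmap, where the atomic bitops apply. That is also what later lets the ethtool path drop its cmpxchg64() loop: the surviving flags change only under RTNL, while asynchronous events use state bits. The producer/consumer pairing, condensed from the client hunks above:

        /* producer: request service atomically, no read-modify-write
         * on pf->flags
         */
        set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
        i40e_service_event_schedule(pf);

        /* consumer (i40e_client_subtask): claim the request exactly once */
        if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state))
                return;
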
* **/ -i40e_status +enum i40e_status_code i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count) diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 55079fe3ed63..9fec728dc4b9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 92d01042c1f6..4f806386cb22 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 886e667f2f1c..502818e3da78 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 4c3b4243cf65..d494dcaf18d0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver @@ -155,8 +156,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n", (unsigned long int)nd->vlan_features); } - dev_info(&pf->pdev->dev, - " vlgrp: & = %p\n", vsi->active_vlans); + dev_info(&pf->pdev->dev, " active_vlans is %s\n", + vsi->active_vlans ? "<valid>" : "<null>"); dev_info(&pf->pdev->dev, " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags); @@ -270,14 +271,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) continue; dev_info(&pf->pdev->dev, - " rx_rings[%i]: desc = %p\n", - i, rx_ring->desc); - dev_info(&pf->pdev->dev, - " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n", - i, rx_ring->dev, - rx_ring->netdev, - rx_ring->rx_bi); - dev_info(&pf->pdev->dev, " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n", i, *rx_ring->state, rx_ring->queue_index, @@ -307,17 +300,12 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) rx_ring->rx_stats.realloc_count, rx_ring->rx_stats.page_reuse_count); dev_info(&pf->pdev->dev, - " rx_rings[%i]: size = %i, dma = 0x%08lx\n", - i, rx_ring->size, - (unsigned long int)rx_ring->dma); - dev_info(&pf->pdev->dev, - " rx_rings[%i]: vsi = %p, q_vector = %p\n", - i, rx_ring->vsi, - rx_ring->q_vector); + " rx_rings[%i]: size = %i\n", + i, rx_ring->size); dev_info(&pf->pdev->dev, - " rx_rings[%i]: rx_itr_setting = %d (%s)\n", - i, rx_ring->rx_itr_setting, - ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? 
"dynamic" : "fixed"); + " rx_rings[%i]: itr_setting = %d (%s)\n", + i, rx_ring->itr_setting, + ITR_IS_DYNAMIC(rx_ring->itr_setting) ? "dynamic" : "fixed"); } for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]); @@ -326,14 +314,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) continue; dev_info(&pf->pdev->dev, - " tx_rings[%i]: desc = %p\n", - i, tx_ring->desc); - dev_info(&pf->pdev->dev, - " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n", - i, tx_ring->dev, - tx_ring->netdev, - tx_ring->tx_bi); - dev_info(&pf->pdev->dev, " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n", i, *tx_ring->state, tx_ring->queue_index, @@ -355,20 +335,15 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) tx_ring->tx_stats.tx_busy, tx_ring->tx_stats.tx_done_old); dev_info(&pf->pdev->dev, - " tx_rings[%i]: size = %i, dma = 0x%08lx\n", - i, tx_ring->size, - (unsigned long int)tx_ring->dma); - dev_info(&pf->pdev->dev, - " tx_rings[%i]: vsi = %p, q_vector = %p\n", - i, tx_ring->vsi, - tx_ring->q_vector); + " tx_rings[%i]: size = %i\n", + i, tx_ring->size); dev_info(&pf->pdev->dev, " tx_rings[%i]: DCB tc = %d\n", i, tx_ring->dcb_tc); dev_info(&pf->pdev->dev, - " tx_rings[%i]: tx_itr_setting = %d (%s)\n", - i, tx_ring->tx_itr_setting, - ITR_IS_DYNAMIC(tx_ring->tx_itr_setting) ? "dynamic" : "fixed"); + " tx_rings[%i]: itr_setting = %d (%s)\n", + i, tx_ring->itr_setting, + ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed"); } rcu_read_unlock(); dev_info(&pf->pdev->dev, @@ -466,8 +441,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) vsi->info.resp_reserved[6], vsi->info.resp_reserved[7], vsi->info.resp_reserved[8], vsi->info.resp_reserved[9], vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]); - if (vsi->back) - dev_info(&pf->pdev->dev, " PF = %p\n", vsi->back); dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx); dev_info(&pf->pdev->dev, " tc_config: numtc = %d, enabled_tc = 0x%x\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index 8e46098bad57..ad6a66ccb576 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c index 76ed56641864..df3e60470f8b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h index 0b5911652084..be8341763475 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.h +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 2f5bee713fef..b974482ff630 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ 
b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver @@ -230,6 +231,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0), + I40E_PRIV_FLAG("link-down-on-close", + I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED, 0), I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), I40E_PRIV_FLAG("disable-source-pruning", I40E_FLAG_SOURCE_PRUNING_DISABLED, 0), @@ -857,7 +860,9 @@ static int i40e_set_link_ksettings(struct net_device *netdev, if (hw->device_id == I40E_DEV_ID_KX_B || hw->device_id == I40E_DEV_ID_KX_C || hw->device_id == I40E_DEV_ID_20G_KR2 || - hw->device_id == I40E_DEV_ID_20G_KR2_A) { + hw->device_id == I40E_DEV_ID_20G_KR2_A || + hw->device_id == I40E_DEV_ID_25G_B || + hw->device_id == I40E_DEV_ID_KX_X722) { netdev_info(netdev, "Changing settings is not supported on backplane.\n"); return -EOPNOTSUPP; } @@ -868,23 +873,21 @@ static int i40e_set_link_ksettings(struct net_device *netdev, /* save autoneg out of ksettings */ autoneg = copy_ks.base.autoneg; - memset(&safe_ks, 0, sizeof(safe_ks)); + /* get our own copy of the bits to check against */ + memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); + safe_ks.base.cmd = copy_ks.base.cmd; + safe_ks.base.link_mode_masks_nwords = + copy_ks.base.link_mode_masks_nwords; + i40e_get_link_ksettings(netdev, &safe_ks); + /* Get link modes supported by hardware and check against modes * requested by the user. Return an error if unsupported mode was set. 
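In the i40e_set_link_ksettings() hunk that continues just below, the check now runs against the modes the device actually reports through i40e_get_link_ksettings() rather than the raw PHY-type translation, and the safe_ks copy is fetched before it is used instead of after. The comparison itself is bitmap_subset(), true only when every set bit of its first argument is also set in its second; condensed from the code that follows:

        /* reject any advertised mode the device does not support */
        if (!bitmap_subset(copy_ks.link_modes.advertising,
                           safe_ks.link_modes.supported,
                           __ETHTOOL_LINK_MODE_MASK_NBITS))
                return -EINVAL;
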
*/ - i40e_phy_type_to_ethtool(pf, &safe_ks); if (!bitmap_subset(copy_ks.link_modes.advertising, safe_ks.link_modes.supported, __ETHTOOL_LINK_MODE_MASK_NBITS)) return -EINVAL; - /* get our own copy of the bits to check against */ - memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); - safe_ks.base.cmd = copy_ks.base.cmd; - safe_ks.base.link_mode_masks_nwords = - copy_ks.base.link_mode_masks_nwords; - i40e_get_link_ksettings(netdev, &safe_ks); - /* set autoneg back to what it currently is */ copy_ks.base.autoneg = safe_ks.base.autoneg; @@ -2244,14 +2247,14 @@ static int __i40e_get_coalesce(struct net_device *netdev, rx_ring = vsi->rx_rings[queue]; tx_ring = vsi->tx_rings[queue]; - if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting)) + if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) ec->use_adaptive_rx_coalesce = 1; - if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting)) + if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) ec->use_adaptive_tx_coalesce = 1; - ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC; - ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC; + ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC; + ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC; /* we use the _usecs_high to store/set the interrupt rate limit * that the hardware supports, that almost but not quite @@ -2311,34 +2314,35 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi, struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_q_vector *q_vector; - u16 vector, intrl; + u16 intrl; intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit); - rx_ring->rx_itr_setting = ec->rx_coalesce_usecs; - tx_ring->tx_itr_setting = ec->tx_coalesce_usecs; + rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); + tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); if (ec->use_adaptive_rx_coalesce) - rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC; + rx_ring->itr_setting |= I40E_ITR_DYNAMIC; else - rx_ring->rx_itr_setting &= ~I40E_ITR_DYNAMIC; + rx_ring->itr_setting &= ~I40E_ITR_DYNAMIC; if (ec->use_adaptive_tx_coalesce) - tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC; + tx_ring->itr_setting |= I40E_ITR_DYNAMIC; else - tx_ring->tx_itr_setting &= ~I40E_ITR_DYNAMIC; + tx_ring->itr_setting &= ~I40E_ITR_DYNAMIC; q_vector = rx_ring->q_vector; - q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting); - vector = vsi->base_vector + q_vector->v_idx; - wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr); + q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); q_vector = tx_ring->q_vector; - q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting); - vector = vsi->base_vector + q_vector->v_idx; - wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); + q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); - wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl); + /* The interrupt handler itself will take care of programming + * the Tx and Rx ITR values based on the values we have entered + * into the q_vector, no need to write the values now. 
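Per the new comment just above, ethtool no longer writes ITR registers directly: __i40e_set_coalesce()/i40e_set_itr_per_queue() park the requested interval in q_vector->rx.target_itr and tx.target_itr, and the interrupt path programs the hardware the next time it runs. A schematic of the reconcile step this design implies, assuming (as in the companion ITR rework) a current_itr shadow of the last value written; register encoding is elided, so treat this as a sketch rather than driver code:

        /* hot path: touch hardware only when the target moved, so the
         * ethtool path never races the IRQ handler on the ITR register
         */
        if (q_vector->rx.target_itr != q_vector->rx.current_itr) {
                /* write target_itr, encoded for I40E_PFINT_ITRN, here */
                q_vector->rx.current_itr = q_vector->rx.target_itr;
        }
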
+ */ + + wr32(hw, I40E_PFINT_RATEN(q_vector->reg_idx), intrl); i40e_flush(hw); } @@ -2364,11 +2368,11 @@ static int __i40e_set_coalesce(struct net_device *netdev, vsi->work_limit = ec->tx_max_coalesced_frames_irq; if (queue < 0) { - cur_rx_itr = vsi->rx_rings[0]->rx_itr_setting; - cur_tx_itr = vsi->tx_rings[0]->tx_itr_setting; + cur_rx_itr = vsi->rx_rings[0]->itr_setting; + cur_tx_itr = vsi->tx_rings[0]->itr_setting; } else if (queue < vsi->num_queue_pairs) { - cur_rx_itr = vsi->rx_rings[queue]->rx_itr_setting; - cur_tx_itr = vsi->tx_rings[queue]->tx_itr_setting; + cur_rx_itr = vsi->rx_rings[queue]->itr_setting; + cur_tx_itr = vsi->tx_rings[queue]->itr_setting; } else { netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", vsi->num_queue_pairs - 1); @@ -2396,7 +2400,7 @@ static int __i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } - if (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)) { + if (ec->rx_coalesce_usecs > I40E_MAX_ITR) { netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); return -EINVAL; } @@ -2407,16 +2411,16 @@ static int __i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } - if (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)) { + if (ec->tx_coalesce_usecs > I40E_MAX_ITR) { netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); return -EINVAL; } if (ec->use_adaptive_rx_coalesce && !cur_rx_itr) - ec->rx_coalesce_usecs = I40E_MIN_ITR << 1; + ec->rx_coalesce_usecs = I40E_MIN_ITR; if (ec->use_adaptive_tx_coalesce && !cur_tx_itr) - ec->tx_coalesce_usecs = I40E_MIN_ITR << 1; + ec->tx_coalesce_usecs = I40E_MIN_ITR; intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high); vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg); @@ -3947,7 +3951,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return -EOPNOTSUPP; - if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) + if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) return -ENOSPC; if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || @@ -4406,6 +4410,8 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) } flags_complete: + changed_flags = orig_flags ^ new_flags; + /* Before we finalize any flag changes, we need to perform some * checks to ensure that the changes are supported and safe. */ @@ -4415,38 +4421,27 @@ flags_complete: !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)) return -EOPNOTSUPP; - /* Disable FW LLDP not supported if NPAR active or if FW - * API version < 1.7 + /* If the driver detected FW LLDP was disabled on init, this flag could + * be set, however we do not support _changing_ the flag if NPAR is + * enabled or FW API version < 1.7. There are situations where older + * FW versions/NPAR enabled PFs could disable LLDP, however we _must_ + * not allow the user to enable/disable LLDP with this flag on + * unsupported FW versions. */ - if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) { - if (pf->hw.func_caps.npar_enable) { - dev_warn(&pf->pdev->dev, - "Unable to stop FW LLDP if NPAR active\n"); - return -EOPNOTSUPP; - } - - if (pf->hw.aq.api_maj_ver < 1 || - (pf->hw.aq.api_maj_ver == 1 && - pf->hw.aq.api_min_ver < 7)) { + if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) { + if (!(pf->hw_features & I40E_HW_STOPPABLE_FW_LLDP)) { dev_warn(&pf->pdev->dev, - "FW ver does not support stopping FW LLDP\n"); + "Device does not support changing FW LLDP\n"); return -EOPNOTSUPP; } } - /* Compare and exchange the new flags into place. 
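Hoisting changed_flags = orig_flags ^ new_flags to the top of flags_complete lets every later check distinguish a bit being toggled in this call from a bit that merely happens to be set; that is how the FW-LLDP test above became both simpler and more permissive for firmware that disabled LLDP on its own. The idiom in isolation (SOME_FLAG and the handlers are placeholders, not driver names):

        u32 changed_flags = orig_flags ^ new_flags;

        if (changed_flags & SOME_FLAG) {        /* user is flipping it now */
                if (new_flags & SOME_FLAG)      /* ...into the ON state */
                        handle_enable();        /* placeholder */
                else
                        handle_disable();       /* placeholder */
        }

The removed cmpxchg64() loop is safe to drop for the reason the new comment gives: ethtool operations run under RTNL, so a plain pf->flags = new_flags store cannot race another writer.
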
If we failed, that - * is if cmpxchg returns anything but the old value, this means that - * something else has modified the flags variable since we copied it - * originally. We'll just punt with an error and log something in the - * message buffer. + /* Now that we've checked to ensure that the new flags are valid, load + * them into place. Since we only modify flags either (a) during + * initialization or (b) while holding the RTNL lock, we don't need + * anything fancy here. */ - if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) { - dev_warn(&pf->pdev->dev, - "Unable to update pf->flags as it was modified by another thread...\n"); - return -EAGAIN; - } - - changed_flags = orig_flags ^ new_flags; + pf->flags = new_flags; /* Process any additional changes needed as a result of flag changes. * The changed_flags value reflects the list of bits that were @@ -4456,7 +4451,7 @@ flags_complete: /* Flush current ATR settings if ATR was disabled */ if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) && !(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) { - pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; + set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); } @@ -4479,6 +4474,12 @@ flags_complete: } } + if ((changed_flags & pf->flags & + I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) && + (pf->flags & I40E_FLAG_MFP_ENABLED)) + dev_warn(&pf->pdev->dev, + "Turning on link-down-on-close flag may affect other partitions\n"); + if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) { if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) { struct i40e_dcbx_config *dcbcfg; diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c deleted file mode 100644 index 2d1253c5b7a1..000000000000 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ /dev/null @@ -1,1571 +0,0 @@ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ - -#include <linux/if_ether.h> -#include <scsi/scsi_cmnd.h> -#include <scsi/scsi_device.h> -#include <scsi/fc/fc_fs.h> -#include <scsi/fc/fc_fip.h> -#include <scsi/fc/fc_fcoe.h> -#include <scsi/libfc.h> -#include <scsi/libfcoe.h> -#include <uapi/linux/dcbnl.h> - -#include "i40e.h" -#include "i40e_fcoe.h" - -/** - * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF - * @sof: the FCoE start of frame delimiter - **/ -static inline bool i40e_fcoe_sof_is_class2(u8 sof) -{ - return (sof == FC_SOF_I2) || (sof == FC_SOF_N2); -} - -/** - * i40e_fcoe_sof_is_class3 - returns true if this is a FC Class 3 SOF - * @sof: the FCoE start of frame delimiter - **/ -static inline bool i40e_fcoe_sof_is_class3(u8 sof) -{ - return (sof == FC_SOF_I3) || (sof == FC_SOF_N3); -} - -/** - * i40e_fcoe_sof_is_supported - returns true if the FC SOF is supported by HW - * @sof: the input SOF value from the frame - **/ -static inline bool i40e_fcoe_sof_is_supported(u8 sof) -{ - return i40e_fcoe_sof_is_class2(sof) || - i40e_fcoe_sof_is_class3(sof); -} - -/** - * i40e_fcoe_fc_sof - pull the SOF from FCoE header in the frame - * @skb: the frame whose EOF is to be pulled from - **/ -static inline int i40e_fcoe_fc_sof(struct sk_buff *skb, u8 *sof) -{ - *sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; - - if (!i40e_fcoe_sof_is_supported(*sof)) - return -EINVAL; - return 0; -} - -/** - * i40e_fcoe_eof_is_supported - returns true if the EOF is supported by HW - * @eof: the input EOF value from the frame - **/ -static inline bool i40e_fcoe_eof_is_supported(u8 eof) -{ - return (eof == FC_EOF_N) || (eof == FC_EOF_T) || - (eof == FC_EOF_NI) || (eof == FC_EOF_A); -} - -/** - * i40e_fcoe_fc_eof - pull EOF from FCoE trailer in the frame - * @skb: the frame whose EOF is to be pulled from - **/ -static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof) -{ - /* the first byte of the last dword is EOF */ - skb_copy_bits(skb, skb->len - 4, eof, 1); - - if (!i40e_fcoe_eof_is_supported(*eof)) - return -EINVAL; - return 0; -} - -/** - * i40e_fcoe_ctxt_eof - convert input FC EOF for descriptor programming - * @eof: the input eof value from the frame - * - * The FC EOF is converted to the value understood by HW for descriptor - * programming. Never call this w/o calling i40e_fcoe_eof_is_supported() - * first and that already checks for all supported valid eof values. - **/ -static inline u32 i40e_fcoe_ctxt_eof(u8 eof) -{ - switch (eof) { - case FC_EOF_N: - return I40E_TX_DESC_CMD_L4T_EOFT_EOF_N; - case FC_EOF_T: - return I40E_TX_DESC_CMD_L4T_EOFT_EOF_T; - case FC_EOF_NI: - return I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI; - case FC_EOF_A: - return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A; - default: - /* Supported valid eof shall be already checked by - * calling i40e_fcoe_eof_is_supported() first, - * therefore this default case shall never hit. 
- */ - WARN_ON(1); - return -EINVAL; - } -} - -/** - * i40e_fcoe_xid_is_valid - returns true if the exchange id is valid - * @xid: the exchange id - **/ -static inline bool i40e_fcoe_xid_is_valid(u16 xid) -{ - return (xid != FC_XID_UNKNOWN) && (xid < I40E_FCOE_DDP_MAX); -} - -/** - * i40e_fcoe_ddp_unmap - unmap the mapped sglist associated - * @pf: pointer to PF - * @ddp: sw DDP context - * - * Unmap the scatter-gather list associated with the given SW DDP context - * - * Returns: data length already ddp-ed in bytes - * - **/ -static inline void i40e_fcoe_ddp_unmap(struct i40e_pf *pf, - struct i40e_fcoe_ddp *ddp) -{ - if (test_and_set_bit(__I40E_FCOE_DDP_UNMAPPED, &ddp->flags)) - return; - - if (ddp->sgl) { - dma_unmap_sg(&pf->pdev->dev, ddp->sgl, ddp->sgc, - DMA_FROM_DEVICE); - ddp->sgl = NULL; - ddp->sgc = 0; - } - - if (ddp->pool) { - dma_pool_free(ddp->pool, ddp->udl, ddp->udp); - ddp->pool = NULL; - } -} - -/** - * i40e_fcoe_ddp_clear - clear the given SW DDP context - * @ddp - SW DDP context - **/ -static inline void i40e_fcoe_ddp_clear(struct i40e_fcoe_ddp *ddp) -{ - memset(ddp, 0, sizeof(struct i40e_fcoe_ddp)); - ddp->xid = FC_XID_UNKNOWN; - ddp->flags = __I40E_FCOE_DDP_NONE; -} - -/** - * i40e_fcoe_progid_is_fcoe - check if the prog_id is for FCoE - * @id: the prog id for the programming status Rx descriptor write-back - **/ -static inline bool i40e_fcoe_progid_is_fcoe(u8 id) -{ - return (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) || - (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS); -} - -/** - * i40e_fcoe_fc_get_xid - get xid from the frame header - * @fh: the fc frame header - * - * In case the incoming frame's exchange is originated from - * the initiator, then received frame's exchange id is ANDed - * with fc_cpu_mask bits to get the same cpu on which exchange - * was originated, otherwise just use the current cpu. - * - * Returns ox_id if exchange originator, rx_id if responder - **/ -static inline u16 i40e_fcoe_fc_get_xid(struct fc_frame_header *fh) -{ - u32 f_ctl = ntoh24(fh->fh_f_ctl); - - return (f_ctl & FC_FC_EX_CTX) ? - be16_to_cpu(fh->fh_ox_id) : - be16_to_cpu(fh->fh_rx_id); -} - -/** - * i40e_fcoe_fc_frame_header - get fc frame header from skb - * @skb: packet - * - * This checks if there is a VLAN header and returns the data - * pointer to the start of the fc_frame_header. - * - * Returns pointer to the fc_frame_header - **/ -static inline struct fc_frame_header *i40e_fcoe_fc_frame_header( - struct sk_buff *skb) -{ - void *fh = skb->data + sizeof(struct fcoe_hdr); - - if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) - fh += sizeof(struct vlan_hdr); - - return (struct fc_frame_header *)fh; -} - -/** - * i40e_fcoe_ddp_put - release the DDP context for a given exchange id - * @netdev: the corresponding net_device - * @xid: the exchange id that corresponding DDP context will be released - * - * This is the implementation of net_device_ops.ndo_fcoe_ddp_done - * and it is expected to be called by ULD, i.e., FCP layer of libfc - * to release the corresponding ddp context when the I/O is done. 
- * - * Returns : data length already ddp-ed in bytes - **/ -static int i40e_fcoe_ddp_put(struct net_device *netdev, u16 xid) -{ - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_pf *pf = np->vsi->back; - struct i40e_fcoe *fcoe = &pf->fcoe; - int len = 0; - struct i40e_fcoe_ddp *ddp = &fcoe->ddp[xid]; - - if (!fcoe || !ddp) - goto out; - - if (test_bit(__I40E_FCOE_DDP_DONE, &ddp->flags)) - len = ddp->len; - i40e_fcoe_ddp_unmap(pf, ddp); -out: - return len; -} - -/** - * i40e_fcoe_sw_init - sets up the HW for FCoE - * @pf: pointer to PF - **/ -void i40e_init_pf_fcoe(struct i40e_pf *pf) -{ - struct i40e_hw *hw = &pf->hw; - u32 val; - - pf->flags &= ~I40E_FLAG_FCOE_ENABLED; - pf->num_fcoe_qps = 0; - pf->fcoe_hmc_cntx_num = 0; - pf->fcoe_hmc_filt_num = 0; - - if (!pf->hw.func_caps.fcoe) { - dev_dbg(&pf->pdev->dev, "FCoE capability is disabled\n"); - return; - } - - if (!pf->hw.func_caps.dcb) { - dev_warn(&pf->pdev->dev, - "Hardware is not DCB capable not enabling FCoE.\n"); - return; - } - - /* enable FCoE hash filter */ - val = i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)); - val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32); - val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32); - val &= I40E_PFQF_HENA_PTYPE_ENA_MASK; - i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), val); - - /* enable flag */ - pf->flags |= I40E_FLAG_FCOE_ENABLED; - pf->num_fcoe_qps = I40E_DEFAULT_FCOE; - - /* Reserve 4K DDP contexts and 20K filter size for FCoE */ - pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) * - I40E_DMA_CNTX_BASE_SIZE; - pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num + - BIT(I40E_HASH_FILTER_SIZE_16K) * - I40E_HASH_FILTER_BASE_SIZE; - - /* FCoE object: max 16K filter buckets and 4K DMA contexts */ - pf->filter_settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_16K; - pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K; - - /* Setup max frame with FCoE_MTU plus L2 overheads */ - val = i40e_read_rx_ctl(hw, I40E_GLFCOE_RCTL); - val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK; - val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) - << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT); - i40e_write_rx_ctl(hw, I40E_GLFCOE_RCTL, val); - - dev_info(&pf->pdev->dev, "FCoE is supported.\n"); -} - -/** - * i40e_get_fcoe_tc_map - Return TC map for FCoE APP - * @pf: pointer to PF - * - **/ -u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) -{ - struct i40e_dcb_app_priority_table app; - struct i40e_hw *hw = &pf->hw; - u8 enabled_tc = 0; - u8 tc, i; - /* Get the FCoE APP TLV */ - struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; - - for (i = 0; i < dcbcfg->numapps; i++) { - app = dcbcfg->app[i]; - if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && - app.protocolid == ETH_P_FCOE) { - tc = dcbcfg->etscfg.prioritytable[app.priority]; - enabled_tc |= BIT(tc); - break; - } - } - - /* TC0 if there is no TC defined for FCoE APP TLV */ - enabled_tc = enabled_tc ? 
enabled_tc : 0x1; - - return enabled_tc; -} - -/** - * i40e_fcoe_vsi_init - prepares the VSI context for creating a FCoE VSI - * @vsi: pointer to the associated VSI struct - * @ctxt: pointer to the associated VSI context to be passed to HW - * - * Returns 0 on success or < 0 on error - **/ -int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) -{ - struct i40e_aqc_vsi_properties_data *info = &ctxt->info; - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u8 enabled_tc = 0; - - if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) { - dev_err(&pf->pdev->dev, - "FCoE is not enabled for this device\n"); - return -EPERM; - } - - /* initialize the hardware for FCoE */ - ctxt->pf_num = hw->pf_id; - ctxt->vf_num = 0; - ctxt->uplink_seid = vsi->uplink_seid; - ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; - ctxt->flags = I40E_AQ_VSI_TYPE_PF; - - /* FCoE VSI would need the following sections */ - info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); - - /* FCoE VSI does not need these sections */ - info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID | - I40E_AQ_VSI_PROP_VLAN_VALID | - I40E_AQ_VSI_PROP_CAS_PV_VALID | - I40E_AQ_VSI_PROP_INGRESS_UP_VALID | - I40E_AQ_VSI_PROP_EGRESS_UP_VALID)); - - if (i40e_is_vsi_uplink_mode_veb(vsi)) { - info->valid_sections |= - cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - info->switch_id = - cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); - } - enabled_tc = i40e_get_fcoe_tc_map(pf); - i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true); - - /* set up queue option section: only enable FCoE */ - info->queueing_opt_flags = I40E_AQ_VSI_QUE_OPT_FCOE_ENA; - - return 0; -} - -/** - * i40e_fcoe_enable - this is the implementation of ndo_fcoe_enable, - * indicating the upper FCoE protocol stack is ready to use FCoE - * offload features. - * - * @netdev: pointer to the netdev that FCoE is created on - * - * Returns 0 on success - * - * in RTNL - * - **/ -int i40e_fcoe_enable(struct net_device *netdev) -{ - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - struct i40e_fcoe *fcoe = &pf->fcoe; - - if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) { - netdev_err(netdev, "HW does not support FCoE.\n"); - return -ENODEV; - } - - if (vsi->type != I40E_VSI_FCOE) { - netdev_err(netdev, "interface does not support FCoE.\n"); - return -EBUSY; - } - - atomic_inc(&fcoe->refcnt); - - return 0; -} - -/** - * i40e_fcoe_disable- disables FCoE for upper FCoE protocol stack. 
- * @dev: pointer to the netdev that FCoE is created on - * - * Returns 0 on success - * - **/ -int i40e_fcoe_disable(struct net_device *netdev) -{ - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - struct i40e_fcoe *fcoe = &pf->fcoe; - - if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) { - netdev_err(netdev, "device does not support FCoE\n"); - return -ENODEV; - } - if (vsi->type != I40E_VSI_FCOE) - return -EBUSY; - - if (!atomic_dec_and_test(&fcoe->refcnt)) - return -EINVAL; - - netdev_info(netdev, "FCoE disabled\n"); - - return 0; -} - -/** - * i40e_fcoe_dma_pool_free - free the per cpu pool for FCoE DDP - * @fcoe: the FCoE sw object - * @dev: the device that the pool is associated with - * @cpu: the cpu for this pool - * - **/ -static void i40e_fcoe_dma_pool_free(struct i40e_fcoe *fcoe, - struct device *dev, - unsigned int cpu) -{ - struct i40e_fcoe_ddp_pool *ddp_pool; - - ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); - if (!ddp_pool->pool) { - dev_warn(dev, "DDP pool already freed for cpu %d\n", cpu); - return; - } - dma_pool_destroy(ddp_pool->pool); - ddp_pool->pool = NULL; -} - -/** - * i40e_fcoe_dma_pool_create - per cpu pool for FCoE DDP - * @fcoe: the FCoE sw object - * @dev: the device that the pool is associated with - * @cpu: the cpu for this pool - * - * Returns 0 on successful or non zero on failure - * - **/ -static int i40e_fcoe_dma_pool_create(struct i40e_fcoe *fcoe, - struct device *dev, - unsigned int cpu) -{ - struct i40e_fcoe_ddp_pool *ddp_pool; - struct dma_pool *pool; - char pool_name[32]; - - ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); - if (ddp_pool && ddp_pool->pool) { - dev_warn(dev, "DDP pool already allocated for cpu %d\n", cpu); - return 0; - } - snprintf(pool_name, sizeof(pool_name), "i40e_fcoe_ddp_%d", cpu); - pool = dma_pool_create(pool_name, dev, I40E_FCOE_DDP_PTR_MAX, - I40E_FCOE_DDP_PTR_ALIGN, PAGE_SIZE); - if (!pool) { - dev_err(dev, "dma_pool_create %s failed\n", pool_name); - return -ENOMEM; - } - ddp_pool->pool = pool; - return 0; -} - -/** - * i40e_fcoe_free_ddp_resources - release FCoE DDP resources - * @vsi: the vsi FCoE is associated with - * - **/ -void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi) -{ - struct i40e_pf *pf = vsi->back; - struct i40e_fcoe *fcoe = &pf->fcoe; - int cpu, i; - - /* do nothing if not FCoE VSI */ - if (vsi->type != I40E_VSI_FCOE) - return; - - /* do nothing if no DDP pools were allocated */ - if (!fcoe->ddp_pool) - return; - - for (i = 0; i < I40E_FCOE_DDP_MAX; i++) - i40e_fcoe_ddp_put(vsi->netdev, i); - - for_each_possible_cpu(cpu) - i40e_fcoe_dma_pool_free(fcoe, &pf->pdev->dev, cpu); - - free_percpu(fcoe->ddp_pool); - fcoe->ddp_pool = NULL; - - netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources released\n", - vsi->id, vsi->seid); -} - -/** - * i40e_fcoe_setup_ddp_resources - allocate per cpu DDP resources - * @vsi: the VSI FCoE is associated with - * - * Returns 0 on successful or non zero on failure - * - **/ -int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi) -{ - struct i40e_pf *pf = vsi->back; - struct device *dev = &pf->pdev->dev; - struct i40e_fcoe *fcoe = &pf->fcoe; - unsigned int cpu; - int i; - - if (vsi->type != I40E_VSI_FCOE) - return -ENODEV; - - /* do nothing if no DDP pools were allocated */ - if (fcoe->ddp_pool) - return -EEXIST; - - /* allocate per CPU memory to track DDP pools */ - fcoe->ddp_pool = alloc_percpu(struct i40e_fcoe_ddp_pool); - if (!fcoe->ddp_pool) { - dev_err(&pf->pdev->dev, "failed to allocate percpu 
DDP\n"); - return -ENOMEM; - } - - /* allocate pci pool for each cpu */ - for_each_possible_cpu(cpu) { - if (!i40e_fcoe_dma_pool_create(fcoe, dev, cpu)) - continue; - - dev_err(dev, "failed to alloc DDP pool on cpu:%d\n", cpu); - i40e_fcoe_free_ddp_resources(vsi); - return -ENOMEM; - } - - /* initialize the sw context */ - for (i = 0; i < I40E_FCOE_DDP_MAX; i++) - i40e_fcoe_ddp_clear(&fcoe->ddp[i]); - - netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources allocated\n", - vsi->id, vsi->seid); - - return 0; -} - -/** - * i40e_fcoe_handle_status - check the Programming Status for FCoE - * @rx_ring: the Rx ring for this descriptor - * @rx_desc: the Rx descriptor for Programming Status, not a packet descriptor. - * - * Check if this is the Rx Programming Status descriptor write-back for FCoE. - * This is used to verify if the context/filter programming or invalidation - * requested by SW to the HW is successful or not and take actions accordingly. - **/ -void i40e_fcoe_handle_status(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, u8 prog_id) -{ - struct i40e_pf *pf = rx_ring->vsi->back; - struct i40e_fcoe *fcoe = &pf->fcoe; - struct i40e_fcoe_ddp *ddp; - u32 error; - u16 xid; - u64 qw; - - /* we only care for FCoE here */ - if (!i40e_fcoe_progid_is_fcoe(prog_id)) - return; - - xid = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param) & - (I40E_FCOE_DDP_MAX - 1); - - if (!i40e_fcoe_xid_is_valid(xid)) - return; - - ddp = &fcoe->ddp[xid]; - WARN_ON(xid != ddp->xid); - - qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> - I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; - - /* DDP context programming status: failure or success */ - if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) { - if (I40E_RX_PROG_FCOE_ERROR_TBL_FULL(error)) { - dev_err(&pf->pdev->dev, "xid %x ddp->xid %x TABLE FULL\n", - xid, ddp->xid); - ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT; - } - if (I40E_RX_PROG_FCOE_ERROR_CONFLICT(error)) { - dev_err(&pf->pdev->dev, "xid %x ddp->xid %x CONFLICT\n", - xid, ddp->xid); - ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT; - } - } - - /* DDP context invalidation status: failure or success */ - if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS) { - if (I40E_RX_PROG_FCOE_ERROR_INVLFAIL(error)) { - dev_err(&pf->pdev->dev, "xid %x ddp->xid %x INVALIDATION FAILURE\n", - xid, ddp->xid); - ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT; - } - /* clear the flag so we can retry invalidation */ - clear_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags); - } - - /* unmap DMA */ - i40e_fcoe_ddp_unmap(pf, ddp); - i40e_fcoe_ddp_clear(ddp); -} - -/** - * i40e_fcoe_handle_offload - check ddp status and mark it done - * @adapter: i40e adapter - * @rx_desc: advanced rx descriptor - * @skb: the skb holding the received data - * - * This checks ddp status. - * - * Returns : < 0 indicates an error or not a FCOE ddp, 0 indicates - * not passing the skb to ULD, > 0 indicates is the length of data - * being ddped. 
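The xid recovered from the programming-status descriptor above depends on I40E_FCOE_DDP_MAX (2048) being a power of two, which makes the AND a cheap modulo. A sketch of that arithmetic; the compile-time guard is added here for illustration and is not in the driver:

	BUILD_BUG_ON_NOT_POWER_OF_2(I40E_FCOE_DDP_MAX);

	/* e.g. fcoe_param = 0x1807 -> xid = 0x1807 & 0x7ff = 0x007 */
	xid = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param) &
	      (I40E_FCOE_DDP_MAX - 1);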
- * - **/ -int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct i40e_pf *pf = rx_ring->vsi->back; - struct i40e_fcoe *fcoe = &pf->fcoe; - struct fc_frame_header *fh = NULL; - struct i40e_fcoe_ddp *ddp = NULL; - u32 status, fltstat; - u32 error, fcerr; - int rc = -EINVAL; - u16 ptype; - u16 xid; - u64 qw; - - /* check this rxd is for programming status */ - qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - /* packet descriptor, check packet type */ - ptype = (qw & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; - if (!i40e_rx_is_fcoe(ptype)) - goto out_no_ddp; - - error = (qw & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; - fcerr = (error >> I40E_RX_DESC_ERROR_L3L4E_SHIFT) & - I40E_RX_DESC_FCOE_ERROR_MASK; - - /* check stateless offload error */ - if (unlikely(fcerr == I40E_RX_DESC_ERROR_L3L4E_PROT)) { - dev_err(&pf->pdev->dev, "Protocol Error\n"); - skb->ip_summed = CHECKSUM_NONE; - } else { - skb->ip_summed = CHECKSUM_UNNECESSARY; - } - - /* check hw status on ddp */ - status = (qw & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; - fltstat = (status >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) & - I40E_RX_DESC_FLTSTAT_FCMASK; - - /* now we are ready to check DDP */ - fh = i40e_fcoe_fc_frame_header(skb); - xid = i40e_fcoe_fc_get_xid(fh); - if (!i40e_fcoe_xid_is_valid(xid)) - goto out_no_ddp; - - /* non DDP normal receive, return to the protocol stack */ - if (fltstat == I40E_RX_DESC_FLTSTAT_NOMTCH) - goto out_no_ddp; - - /* do we have a sw ddp context setup ? */ - ddp = &fcoe->ddp[xid]; - if (!ddp->sgl) - goto out_no_ddp; - - /* fetch xid from hw rxd wb, which should match up the sw ctxt */ - xid = le16_to_cpu(rx_desc->wb.qword0.lo_dword.mirr_fcoe.fcoe_ctx_id); - if (ddp->xid != xid) { - dev_err(&pf->pdev->dev, "xid 0x%x does not match ctx_xid 0x%x\n", - ddp->xid, xid); - goto out_put_ddp; - } - - /* the same exchange has already errored out */ - if (ddp->fcerr) { - dev_err(&pf->pdev->dev, "xid 0x%x fcerr 0x%x reported fcer 0x%x\n", - xid, ddp->fcerr, fcerr); - goto out_put_ddp; - } - - /* fcoe param is valid by now with correct DDPed length */ - ddp->len = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param); - ddp->fcerr = fcerr; - /* header posting only, useful only for target mode and debugging */ - if (fltstat == I40E_RX_DESC_FLTSTAT_DDP) { - /* For target mode, we get header of the last packet but it - * does not have the FCoE trailer field, i.e., CRC and EOF - * Ordered Set since they are offloaded by the HW, so fill - * it up correspondingly to allow the packet to pass through - * to the upper protocol stack. 
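The 2-bit FLTSTAT field checked above encodes four receive outcomes (values from i40e_fcoe.h). One plausible full decode as a sketch; the code here only branches explicitly on the no-match and header-post cases:

	switch (fltstat) {
	case I40E_RX_DESC_FLTSTAT_NOMTCH:	/* 0x0: no DDP context match */
	case I40E_RX_DESC_FLTSTAT_NODDP:	/* 0x1: no DDP due to error  */
		goto out_no_ddp;		/* normal receive path       */
	case I40E_RX_DESC_FLTSTAT_DDP:		/* 0x2: payload DDPed        */
	case I40E_RX_DESC_FLTSTAT_FCPRSP:	/* 0x3: FCP_RSP received     */
		break;				/* finish DDP bookkeeping    */
	}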
- */ - u32 f_ctl = ntoh24(fh->fh_f_ctl); - - if ((f_ctl & FC_FC_END_SEQ) && - (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA)) { - struct fcoe_crc_eof *crc = NULL; - - crc = skb_put(skb, sizeof(*crc)); - crc->fcoe_eof = FC_EOF_T; - } else { - /* otherwise, drop the header only frame */ - rc = 0; - goto out_no_ddp; - } - } - -out_put_ddp: - /* either we got RSP or we have an error, unmap DMA in both cases */ - i40e_fcoe_ddp_unmap(pf, ddp); - if (ddp->len && !ddp->fcerr) { - int pkts; - - rc = ddp->len; - i40e_fcoe_ddp_clear(ddp); - ddp->len = rc; - pkts = DIV_ROUND_UP(rc, 2048); - rx_ring->stats.bytes += rc; - rx_ring->stats.packets += pkts; - rx_ring->q_vector->rx.total_bytes += rc; - rx_ring->q_vector->rx.total_packets += pkts; - set_bit(__I40E_FCOE_DDP_DONE, &ddp->flags); - } - -out_no_ddp: - return rc; -} - -/** - * i40e_fcoe_ddp_setup - called to set up ddp context - * @netdev: the corresponding net_device - * @xid: the exchange id requesting ddp - * @sgl: the scatter-gather list for this request - * @sgc: the number of scatter-gather items - * @target_mode: indicates this is a DDP request for target - * - * Returns : 1 for success and 0 for no DDP on this I/O - **/ -static int i40e_fcoe_ddp_setup(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc, - int target_mode) -{ - static const unsigned int bufflen = I40E_FCOE_DDP_BUF_MIN; - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_fcoe_ddp_pool *ddp_pool; - struct i40e_pf *pf = np->vsi->back; - struct i40e_fcoe *fcoe = &pf->fcoe; - unsigned int i, j, dmacount; - struct i40e_fcoe_ddp *ddp; - unsigned int firstoff = 0; - unsigned int thisoff = 0; - unsigned int thislen = 0; - struct scatterlist *sg; - dma_addr_t addr = 0; - unsigned int len; - - if (xid >= I40E_FCOE_DDP_MAX) { - dev_warn(&pf->pdev->dev, "xid=0x%x out-of-range\n", xid); - return 0; - } - - /* no DDP if we are already down or resetting */ - if (test_bit(__I40E_DOWN, &pf->state) || - test_bit(__I40E_NEEDS_RESTART, &pf->state)) { - dev_info(&pf->pdev->dev, "xid=0x%x device in reset/down\n", - xid); - return 0; - } - - ddp = &fcoe->ddp[xid]; - if (ddp->sgl) { - dev_info(&pf->pdev->dev, "xid 0x%x w/ non-null sgl=%p nents=%d\n", - xid, ddp->sgl, ddp->sgc); - return 0; - } - i40e_fcoe_ddp_clear(ddp); - - if (!fcoe->ddp_pool) { - dev_info(&pf->pdev->dev, "No DDP pool, xid 0x%x\n", xid); - return 0; - } - - ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); - if (!ddp_pool->pool) { - dev_info(&pf->pdev->dev, "No percpu ddp pool, xid 0x%x\n", xid); - goto out_noddp; - } - - /* setup dma from scsi command sgl */ - dmacount = dma_map_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); - if (dmacount == 0) { - dev_info(&pf->pdev->dev, "dma_map_sg for sgl %p, sgc %d failed\n", - sgl, sgc); - goto out_noddp_unmap; - } - - /* alloc the udl from our ddp pool */ - ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); - if (!ddp->udl) { - dev_info(&pf->pdev->dev, - "Failed allocated ddp context, xid 0x%x\n", xid); - goto out_noddp_unmap; - } - - j = 0; - ddp->len = 0; - for_each_sg(sgl, sg, dmacount, i) { - addr = sg_dma_address(sg); - len = sg_dma_len(sg); - ddp->len += len; - while (len) { - /* max number of buffers allowed in one DDP context */ - if (j >= I40E_FCOE_DDP_BUFFCNT_MAX) { - dev_info(&pf->pdev->dev, - "xid=%x:%d,%d,%d:addr=%llx not enough descriptors\n", - xid, i, j, dmacount, (u64)addr); - goto out_noddp_free; - } - - /* get the offset of length of current buffer */ - thisoff = addr & ((dma_addr_t)bufflen - 1); - thislen = 
min_t(unsigned int, (bufflen - thisoff), len); - /* all but the 1st buffer (j == 0) - * must be aligned on bufflen - */ - if ((j != 0) && (thisoff)) - goto out_noddp_free; - - /* all but the last buffer - * ((i == (dmacount - 1)) && (thislen == len)) - * must end at bufflen - */ - if (((i != (dmacount - 1)) || (thislen != len)) && - ((thislen + thisoff) != bufflen)) - goto out_noddp_free; - - ddp->udl[j] = (u64)(addr - thisoff); - /* only the first buffer may have none-zero offset */ - if (j == 0) - firstoff = thisoff; - len -= thislen; - addr += thislen; - j++; - } - } - /* only the last buffer may have non-full bufflen */ - ddp->lastsize = thisoff + thislen; - ddp->firstoff = firstoff; - ddp->list_len = j; - ddp->pool = ddp_pool->pool; - ddp->sgl = sgl; - ddp->sgc = sgc; - ddp->xid = xid; - if (target_mode) - set_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags); - set_bit(__I40E_FCOE_DDP_INITALIZED, &ddp->flags); - - put_cpu(); - return 1; /* Success */ - -out_noddp_free: - dma_pool_free(ddp->pool, ddp->udl, ddp->udp); - i40e_fcoe_ddp_clear(ddp); - -out_noddp_unmap: - dma_unmap_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); -out_noddp: - put_cpu(); - return 0; -} - -/** - * i40e_fcoe_ddp_get - called to set up ddp context in initiator mode - * @netdev: the corresponding net_device - * @xid: the exchange id requesting ddp - * @sgl: the scatter-gather list for this request - * @sgc: the number of scatter-gather items - * - * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup - * and is expected to be called from ULD, e.g., FCP layer of libfc - * to set up ddp for the corresponding xid of the given sglist for - * the corresponding I/O. - * - * Returns : 1 for success and 0 for no ddp - **/ -static int i40e_fcoe_ddp_get(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc) -{ - return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); -} - -/** - * i40e_fcoe_ddp_target - called to set up ddp context in target mode - * @netdev: the corresponding net_device - * @xid: the exchange id requesting ddp - * @sgl: the scatter-gather list for this request - * @sgc: the number of scatter-gather items - * - * This is the implementation of net_device_ops.ndo_fcoe_ddp_target - * and is expected to be called from ULD, e.g., FCP layer of libfc - * to set up ddp for the corresponding xid of the given sglist for - * the corresponding I/O. The DDP in target mode is a write I/O request - * from the initiator. - * - * Returns : 1 for success and 0 for no ddp - **/ -static int i40e_fcoe_ddp_target(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc) -{ - return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); -} - -/** - * i40e_fcoe_program_ddp - programs the HW DDP related descriptors - * @tx_ring: transmit ring for this packet - * @skb: the packet to be sent out - * @sof: the SOF to indicate class of service - * - * Determine if it is READ/WRITE command, and finds out if there is - * a matching SW DDP context for this command. DDP is applicable - * only in case of READ if initiator or WRITE in case of - * responder (via checking XFER_RDY). 
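A worked example of the chunking rules enforced above, assuming bufflen = I40E_FCOE_DDP_BUF_MIN = 4096: only the first chunk may start unaligned, and only the last may end short of a full buffer.

	thisoff = addr & ((dma_addr_t)4096 - 1);
	thislen = min_t(unsigned int, 4096 - thisoff, len);

	/* addr = 0x10200, len = 0x2000 walks as:
	 *   chunk 0: thisoff = 0x200, thislen = 0x0e00  (unaligned start ok)
	 *   chunk 1: thisoff = 0x000, thislen = 0x1000
	 *   chunk 2: thisoff = 0x000, thislen = 0x0200  (short tail ok)
	 */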
- * - * Note: caller checks sof and ddp sw context - * - * Returns : none - * - **/ -static void i40e_fcoe_program_ddp(struct i40e_ring *tx_ring, - struct sk_buff *skb, - struct i40e_fcoe_ddp *ddp, u8 sof) -{ - struct i40e_fcoe_filter_context_desc *filter_desc = NULL; - struct i40e_fcoe_queue_context_desc *queue_desc = NULL; - struct i40e_fcoe_ddp_context_desc *ddp_desc = NULL; - struct i40e_pf *pf = tx_ring->vsi->back; - u16 i = tx_ring->next_to_use; - struct fc_frame_header *fh; - u64 flags_rsvd_lanq = 0; - bool target_mode; - - /* check if abort is still pending */ - if (test_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags)) { - dev_warn(&pf->pdev->dev, - "DDP abort is still pending xid:%hx and ddp->flags:%lx:\n", - ddp->xid, ddp->flags); - return; - } - - /* set the flag to indicate this is programmed */ - if (test_and_set_bit(__I40E_FCOE_DDP_PROGRAMMED, &ddp->flags)) { - dev_warn(&pf->pdev->dev, - "DDP is already programmed for xid:%hx and ddp->flags:%lx:\n", - ddp->xid, ddp->flags); - return; - } - - /* Prepare the DDP context descriptor */ - ddp_desc = I40E_DDP_CONTEXT_DESC(tx_ring, i); - i++; - if (i == tx_ring->count) - i = 0; - - ddp_desc->type_cmd_foff_lsize = - cpu_to_le64(I40E_TX_DESC_DTYPE_DDP_CTX | - ((u64)I40E_FCOE_DDP_CTX_DESC_BSIZE_4K << - I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT) | - ((u64)ddp->firstoff << - I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT) | - ((u64)ddp->lastsize << - I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)); - ddp_desc->rsvd = cpu_to_le64(0); - - /* target mode needs last packet in the sequence */ - target_mode = test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags); - if (target_mode) - ddp_desc->type_cmd_foff_lsize |= - cpu_to_le64(I40E_FCOE_DDP_CTX_DESC_LASTSEQH); - - /* Prepare queue_context descriptor */ - queue_desc = I40E_QUEUE_CONTEXT_DESC(tx_ring, i++); - if (i == tx_ring->count) - i = 0; - queue_desc->dmaindx_fbase = cpu_to_le64(ddp->xid | ((u64)ddp->udp)); - queue_desc->flen_tph = cpu_to_le64(ddp->list_len | - ((u64)(I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC | - I40E_FCOE_QUEUE_CTX_DESC_TPHDATA) << - I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT)); - - /* Prepare filter_context_desc */ - filter_desc = I40E_FILTER_CONTEXT_DESC(tx_ring, i); - i++; - if (i == tx_ring->count) - i = 0; - - fh = (struct fc_frame_header *)skb_transport_header(skb); - filter_desc->param = cpu_to_le32(ntohl(fh->fh_parm_offset)); - filter_desc->seqn = cpu_to_le16(ntohs(fh->fh_seq_cnt)); - filter_desc->rsvd_dmaindx = cpu_to_le16(ddp->xid << - I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT); - - flags_rsvd_lanq = I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP; - flags_rsvd_lanq |= (u64)(target_mode ? - I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP : - I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT); - - flags_rsvd_lanq |= (u64)((sof == FC_SOF_I2 || sof == FC_SOF_N2) ? - I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 : - I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3); - - flags_rsvd_lanq |= ((u64)skb->queue_mapping << - I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT); - filter_desc->flags_rsvd_lanq = cpu_to_le64(flags_rsvd_lanq); - - /* By this time, all offload related descriptors has been programmed */ - tx_ring->next_to_use = i; -} - -/** - * i40e_fcoe_invalidate_ddp - invalidates DDP in case of abort - * @tx_ring: transmit ring for this packet - * @skb: the packet associated w/ this DDP invalidation, i.e., ABTS - * @ddp: the SW DDP context for this DDP - * - * Programs the Tx context descriptor to do DDP invalidation. 
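All three context descriptors above (DDP, queue, filter) advance the ring with the same increment-and-wrap step. A hypothetical helper capturing the pattern, shown only to make the idiom explicit:

	static inline u16 i40e_fcoe_next_desc(const struct i40e_ring *ring,
					      u16 i)
	{
		/* advance one slot, wrapping at the end of the ring */
		return (i + 1 == ring->count) ? 0 : i + 1;
	}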
- **/ -static void i40e_fcoe_invalidate_ddp(struct i40e_ring *tx_ring, - struct sk_buff *skb, - struct i40e_fcoe_ddp *ddp) -{ - struct i40e_tx_context_desc *context_desc; - int i; - - if (test_and_set_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags)) - return; - - i = tx_ring->next_to_use; - context_desc = I40E_TX_CTXTDESC(tx_ring, i); - i++; - if (i == tx_ring->count) - i = 0; - - context_desc->tunneling_params = cpu_to_le32(0); - context_desc->l2tag2 = cpu_to_le16(0); - context_desc->rsvd = cpu_to_le16(0); - context_desc->type_cmd_tso_mss = cpu_to_le64( - I40E_TX_DESC_DTYPE_FCOE_CTX | - (I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL << - I40E_TXD_CTX_QW1_CMD_SHIFT) | - (I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND << - I40E_TXD_CTX_QW1_CMD_SHIFT)); - tx_ring->next_to_use = i; -} - -/** - * i40e_fcoe_handle_ddp - check we should setup or invalidate DDP - * @tx_ring: transmit ring for this packet - * @skb: the packet to be sent out - * @sof: the SOF to indicate class of service - * - * Determine if it is ABTS/READ/XFER_RDY, and finds out if there is - * a matching SW DDP context for this command. DDP is applicable - * only in case of READ if initiator or WRITE in case of - * responder (via checking XFER_RDY). In case this is an ABTS, send - * just invalidate the context. - **/ -static void i40e_fcoe_handle_ddp(struct i40e_ring *tx_ring, - struct sk_buff *skb, u8 sof) -{ - struct i40e_pf *pf = tx_ring->vsi->back; - struct i40e_fcoe *fcoe = &pf->fcoe; - struct fc_frame_header *fh; - struct i40e_fcoe_ddp *ddp; - u32 f_ctl; - u8 r_ctl; - u16 xid; - - fh = (struct fc_frame_header *)skb_transport_header(skb); - f_ctl = ntoh24(fh->fh_f_ctl); - r_ctl = fh->fh_r_ctl; - ddp = NULL; - - if ((r_ctl == FC_RCTL_DD_DATA_DESC) && (f_ctl & FC_FC_EX_CTX)) { - /* exchange responder? if so, XFER_RDY for write */ - xid = ntohs(fh->fh_rx_id); - if (i40e_fcoe_xid_is_valid(xid)) { - ddp = &fcoe->ddp[xid]; - if ((ddp->xid == xid) && - (test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags))) - i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof); - } - } else if (r_ctl == FC_RCTL_DD_UNSOL_CMD) { - /* exchange originator, check READ cmd */ - xid = ntohs(fh->fh_ox_id); - if (i40e_fcoe_xid_is_valid(xid)) { - ddp = &fcoe->ddp[xid]; - if ((ddp->xid == xid) && - (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags))) - i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof); - } - } else if (r_ctl == FC_RCTL_BA_ABTS) { - /* exchange originator, check ABTS */ - xid = ntohs(fh->fh_ox_id); - if (i40e_fcoe_xid_is_valid(xid)) { - ddp = &fcoe->ddp[xid]; - if ((ddp->xid == xid) && - (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags))) - i40e_fcoe_invalidate_ddp(tx_ring, skb, ddp); - } - } -} - -/** - * i40e_fcoe_tso - set up FCoE TSO - * @tx_ring: ring to send buffer on - * @skb: send buffer - * @tx_flags: collected send information - * @hdr_len: the tso header length - * @sof: the SOF to indicate class of service - * - * Note must already have sof checked to be either class 2 or class 3 before - * calling this function. - * - * Returns 1 to indicate sequence segmentation offload is properly setup - * or returns 0 to indicate no tso is needed, otherwise returns error - * code to drop the frame. 
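The tri-state return value is consumed by i40e_fcoe_xmit_frame() further down in this file; a condensed view of that contract:

	fso = i40e_fcoe_tso(tx_ring, skb, tx_flags, &hdr_len, sof);
	if (fso < 0)				/* bad gso type: drop      */
		goto out_drop;
	else if (fso)				/* sequence offload set up */
		tx_flags |= I40E_TX_FLAGS_FSO;
	else					/* no TSO: consider DDP    */
		i40e_fcoe_handle_ddp(tx_ring, skb, sof);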
- **/ -static int i40e_fcoe_tso(struct i40e_ring *tx_ring, - struct sk_buff *skb, - u32 tx_flags, u8 *hdr_len, u8 sof) -{ - struct i40e_tx_context_desc *context_desc; - u32 cd_type, cd_cmd, cd_tso_len, cd_mss; - struct fc_frame_header *fh; - u64 cd_type_cmd_tso_mss; - - /* must match gso type as FCoE */ - if (!skb_is_gso(skb)) - return 0; - - /* is it the expected gso type for FCoE ?*/ - if (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) { - netdev_err(skb->dev, - "wrong gso type %d:expecting SKB_GSO_FCOE\n", - skb_shinfo(skb)->gso_type); - return -EINVAL; - } - - /* header and trailer are inserted by hw */ - *hdr_len = skb_transport_offset(skb) + sizeof(struct fc_frame_header) + - sizeof(struct fcoe_crc_eof); - - /* check sof to decide a class 2 or 3 TSO */ - if (likely(i40e_fcoe_sof_is_class3(sof))) - cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3; - else - cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2; - - /* param field valid? */ - fh = (struct fc_frame_header *)skb_transport_header(skb); - if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) - cd_cmd |= I40E_FCOE_TX_CTX_DESC_RELOFF; - - /* fill the field values */ - cd_type = I40E_TX_DESC_DTYPE_FCOE_CTX; - cd_tso_len = skb->len - *hdr_len; - cd_mss = skb_shinfo(skb)->gso_size; - cd_type_cmd_tso_mss = - ((u64)cd_type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) | - ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | - ((u64)cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | - ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); - - /* grab the next descriptor */ - context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use); - tx_ring->next_to_use++; - if (tx_ring->next_to_use == tx_ring->count) - tx_ring->next_to_use = 0; - - context_desc->tunneling_params = 0; - context_desc->l2tag2 = cpu_to_le16((tx_flags & I40E_TX_FLAGS_VLAN_MASK) - >> I40E_TX_FLAGS_VLAN_SHIFT); - context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); - - return 1; -} - -/** - * i40e_fcoe_tx_map - build the tx descriptor - * @tx_ring: ring to send buffer on - * @skb: send buffer - * @first: first buffer info buffer to use - * @tx_flags: collected send information - * @hdr_len: ptr to the size of the packet header - * @eof: the frame eof value - * - * Note, for FCoE, sof and eof are already checked - **/ -static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring, - struct sk_buff *skb, - struct i40e_tx_buffer *first, - u32 tx_flags, u8 hdr_len, u8 eof) -{ - u32 td_offset = 0; - u32 td_cmd = 0; - u32 maclen; - - /* insert CRC */ - td_cmd = I40E_TX_DESC_CMD_ICRC; - - /* setup MACLEN */ - maclen = skb_network_offset(skb); - if (tx_flags & I40E_TX_FLAGS_SW_VLAN) - maclen += sizeof(struct vlan_hdr); - - if (skb->protocol == htons(ETH_P_FCOE)) { - /* for FCoE, maclen should exclude ether type */ - maclen -= 2; - /* setup type as FCoE and EOF insertion */ - td_cmd |= (I40E_TX_DESC_CMD_FCOET | i40e_fcoe_ctxt_eof(eof)); - /* setup FCoELEN and FCLEN */ - td_offset |= ((((sizeof(struct fcoe_hdr) + 2) >> 2) << - I40E_TX_DESC_LENGTH_IPLEN_SHIFT) | - ((sizeof(struct fc_frame_header) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)); - /* trim to exclude trailer */ - pskb_trim(skb, skb->len - sizeof(struct fcoe_crc_eof)); - } - - /* MACLEN is ether header length in words not bytes */ - td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; - - i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset); -} - -/** - * i40e_fcoe_set_skb_header - adjust skb header point for FIP/FCoE/FC - * @skb: the skb to be adjusted - * - * Returns true if this skb is a FCoE/FIP or VLAN carried FCoE/FIP 
and then - * adjusts the skb header pointers correspondingly. Otherwise, returns false. - **/ -static inline int i40e_fcoe_set_skb_header(struct sk_buff *skb) -{ - __be16 protocol = skb->protocol; - - skb_reset_mac_header(skb); - skb->mac_len = sizeof(struct ethhdr); - if (protocol == htons(ETH_P_8021Q)) { - struct vlan_ethhdr *veth = (struct vlan_ethhdr *)eth_hdr(skb); - - protocol = veth->h_vlan_encapsulated_proto; - skb->mac_len += sizeof(struct vlan_hdr); - } - - /* FCoE or FIP only */ - if ((protocol != htons(ETH_P_FIP)) && - (protocol != htons(ETH_P_FCOE))) - return -EINVAL; - - /* set header to L2 of FCoE/FIP */ - skb_set_network_header(skb, skb->mac_len); - if (protocol == htons(ETH_P_FIP)) - return 0; - - /* set header to L3 of FC */ - skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); - return 0; -} - -/** - * i40e_fcoe_xmit_frame - transmit buffer - * @skb: send buffer - * @netdev: the fcoe netdev - * - * Returns 0 if sent, else an error code - **/ -static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) -{ - struct i40e_netdev_priv *np = netdev_priv(skb->dev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; - struct i40e_tx_buffer *first; - u32 tx_flags = 0; - int fso, count; - u8 hdr_len = 0; - u8 sof = 0; - u8 eof = 0; - - if (i40e_fcoe_set_skb_header(skb)) - goto out_drop; - - count = i40e_xmit_descriptor_count(skb); - if (i40e_chk_linearize(skb, count)) { - if (__skb_linearize(skb)) - goto out_drop; - count = i40e_txd_use_count(skb->len); - tx_ring->tx_stats.tx_linearize++; - } - - /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, - * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, - * + 4 desc gap to avoid the cache line where head is, - * + 1 desc for context descriptor, - * otherwise try next time - */ - if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { - tx_ring->tx_stats.tx_busy++; - return NETDEV_TX_BUSY; - } - - /* prepare the xmit flags */ - if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) - goto out_drop; - - /* record the location of the first descriptor for this packet */ - first = &tx_ring->tx_bi[tx_ring->next_to_use]; - - /* FIP is a regular L2 traffic w/o offload */ - if (skb->protocol == htons(ETH_P_FIP)) - goto out_send; - - /* check sof and eof, only supports FC Class 2 or 3 */ - if (i40e_fcoe_fc_sof(skb, &sof) || i40e_fcoe_fc_eof(skb, &eof)) { - netdev_err(netdev, "SOF/EOF error:%02x - %02x\n", sof, eof); - goto out_drop; - } - - /* always do FCCRC for FCoE */ - tx_flags |= I40E_TX_FLAGS_FCCRC; - - /* check we should do sequence offload */ - fso = i40e_fcoe_tso(tx_ring, skb, tx_flags, &hdr_len, sof); - if (fso < 0) - goto out_drop; - else if (fso) - tx_flags |= I40E_TX_FLAGS_FSO; - else - i40e_fcoe_handle_ddp(tx_ring, skb, sof); - -out_send: - /* send out the packet */ - i40e_fcoe_tx_map(tx_ring, skb, first, tx_flags, hdr_len, eof); - - i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); - return NETDEV_TX_OK; - -out_drop: - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; -} - -/** - * i40e_fcoe_change_mtu - NDO callback to change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns error as operation not permitted - * - **/ -static int i40e_fcoe_change_mtu(struct net_device *netdev, int new_mtu) -{ - netdev_warn(netdev, "MTU change is not supported on FCoE interfaces\n"); - return -EPERM; -} - -/** - * i40e_fcoe_set_features - set the netdev feature flags 
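A worked instance of the descriptor budget used in the transmit path above (numbers illustrative): a linear head plus three page fragments needs count = 4 data descriptors, and the stop check reserves 4 more as a gap around the cache line holding the head pointer, plus 1 for the context descriptor:

	count = i40e_xmit_descriptor_count(skb);	  /* 4 here        */
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { /* need 9 slots  */
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}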
- * @netdev: ptr to the netdev being adjusted - * @features: the feature set that the stack is suggesting - * - **/ -static int i40e_fcoe_set_features(struct net_device *netdev, - netdev_features_t features) -{ - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - - if (features & NETIF_F_HW_VLAN_CTAG_RX) - i40e_vlan_stripping_enable(vsi); - else - i40e_vlan_stripping_disable(vsi); - - return 0; -} - -static const struct net_device_ops i40e_fcoe_netdev_ops = { - .ndo_open = i40e_open, - .ndo_stop = i40e_close, - .ndo_get_stats64 = i40e_get_netdev_stats_struct, - .ndo_set_rx_mode = i40e_set_rx_mode, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = i40e_set_mac, - .ndo_change_mtu = i40e_fcoe_change_mtu, - .ndo_do_ioctl = i40e_ioctl, - .ndo_tx_timeout = i40e_tx_timeout, - .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, - .ndo_setup_tc = __i40e_setup_tc, - -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = i40e_netpoll, -#endif - .ndo_start_xmit = i40e_fcoe_xmit_frame, - .ndo_fcoe_enable = i40e_fcoe_enable, - .ndo_fcoe_disable = i40e_fcoe_disable, - .ndo_fcoe_ddp_setup = i40e_fcoe_ddp_get, - .ndo_fcoe_ddp_done = i40e_fcoe_ddp_put, - .ndo_fcoe_ddp_target = i40e_fcoe_ddp_target, - .ndo_set_features = i40e_fcoe_set_features, -}; - -/* fcoe network device type */ -static struct device_type fcoe_netdev_type = { - .name = "fcoe", -}; - -/** - * i40e_fcoe_config_netdev - prepares the VSI context for creating a FCoE VSI - * @vsi: pointer to the associated VSI struct - * @ctxt: pointer to the associated VSI context to be passed to HW - * - * Returns 0 on success or < 0 on error - **/ -void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) -{ - struct i40e_hw *hw = &vsi->back->hw; - struct i40e_pf *pf = vsi->back; - - if (vsi->type != I40E_VSI_FCOE) - return; - - netdev->features = (NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER); - - netdev->vlan_features = netdev->features; - netdev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER); - netdev->fcoe_ddp_xid = I40E_FCOE_DDP_MAX - 1; - netdev->features |= NETIF_F_ALL_FCOE; - netdev->vlan_features |= NETIF_F_ALL_FCOE; - netdev->hw_features |= netdev->features; - netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->priv_flags |= IFF_SUPP_NOFCS; - - strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1); - netdev->mtu = FCOE_MTU; - SET_NETDEV_DEV(netdev, &pf->pdev->dev); - SET_NETDEV_DEVTYPE(netdev, &fcoe_netdev_type); - /* set different dev_port value 1 for FCoE netdev than the default - * zero dev_port value for PF netdev, this helps biosdevname user - * tool to differentiate them correctly while both attached to the - * same PCI function. 
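The net effect of the feature assignments above, written out flat (an equivalent end state, assuming no other feature bits were set on entry): vlan_features is copied, immediately stripped of all three CTAG bits, and both masks then gain the FCoE set.

	netdev->features      = NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX |
				NETIF_F_HW_VLAN_CTAG_FILTER |
				NETIF_F_ALL_FCOE;
	netdev->vlan_features = NETIF_F_ALL_FCOE;
	netdev->hw_features  |= netdev->features;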
- */ - netdev->dev_port = 1; - spin_lock_bh(&vsi->mac_filter_hash_lock); - i40e_add_filter(vsi, hw->mac.san_addr, 0); - i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0); - i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0); - i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0); - spin_unlock_bh(&vsi->mac_filter_hash_lock); - - /* use san mac */ - ether_addr_copy(netdev->dev_addr, hw->mac.san_addr); - ether_addr_copy(netdev->perm_addr, hw->mac.san_addr); - /* fcoe netdev ops */ - netdev->netdev_ops = &i40e_fcoe_netdev_ops; -} - -/** - * i40e_fcoe_vsi_setup - allocate and set up FCoE VSI - * @pf: the PF that VSI is associated with - * - **/ -void i40e_fcoe_vsi_setup(struct i40e_pf *pf) -{ - struct i40e_vsi *vsi; - u16 seid; - int i; - - if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) - return; - - for (i = 0; i < pf->num_alloc_vsi; i++) { - vsi = pf->vsi[i]; - if (vsi && vsi->type == I40E_VSI_FCOE) { - dev_warn(&pf->pdev->dev, - "FCoE VSI already created\n"); - return; - } - } - - seid = pf->vsi[pf->lan_vsi]->seid; - vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0); - if (vsi) { - dev_dbg(&pf->pdev->dev, - "Successfully created FCoE VSI seid %d id %d uplink_seid %d PF seid %d\n", - vsi->seid, vsi->id, vsi->uplink_seid, seid); - } else { - dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n"); - } -} diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h deleted file mode 100644 index a93174ddeaba..000000000000 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h +++ /dev/null @@ -1,127 +0,0 @@ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ - -#ifndef _I40E_FCOE_H_ -#define _I40E_FCOE_H_ - -/* FCoE HW context helper macros */ -#define I40E_DDP_CONTEXT_DESC(R, i) \ - (&(((struct i40e_fcoe_ddp_context_desc *)((R)->desc))[i])) - -#define I40E_QUEUE_CONTEXT_DESC(R, i) \ - (&(((struct i40e_fcoe_queue_context_desc *)((R)->desc))[i])) - -#define I40E_FILTER_CONTEXT_DESC(R, i) \ - (&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i])) - -/* receive queue descriptor filter status for FCoE */ -#define I40E_RX_DESC_FLTSTAT_FCMASK 0x3 -#define I40E_RX_DESC_FLTSTAT_NOMTCH 0x0 /* no ddp context match */ -#define I40E_RX_DESC_FLTSTAT_NODDP 0x1 /* no ddp due to error */ -#define I40E_RX_DESC_FLTSTAT_DDP 0x2 /* DDPed payload, post header */ -#define I40E_RX_DESC_FLTSTAT_FCPRSP 0x3 /* FCP_RSP */ - -/* receive queue descriptor error codes for FCoE */ -#define I40E_RX_DESC_FCOE_ERROR_MASK \ - (I40E_RX_DESC_ERROR_L3L4E_PROT | \ - I40E_RX_DESC_ERROR_L3L4E_FC | \ - I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR | \ - I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN) - -/* receive queue descriptor programming error */ -#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL(e) \ - (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) & 0x1) - -#define I40E_RX_PROG_FCOE_ERROR_CONFLICT(e) \ - (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1) - -#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \ - BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) -#define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \ - BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) - -#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \ - I40E_RX_PROG_FCOE_ERROR_CONFLICT(e) -#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT \ - I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT - -/* FCoE DDP related definitions */ -#define I40E_FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ -#define I40E_FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ -#define I40E_FCOE_DDP_BUFFCNT_MAX 512 /* 9 bits bufcnt */ -#define I40E_FCOE_DDP_PTR_ALIGN 16 -#define I40E_FCOE_DDP_PTR_MAX (I40E_FCOE_DDP_BUFFCNT_MAX * sizeof(dma_addr_t)) -#define I40E_FCOE_DDP_BUF_MIN 4096 -#define I40E_FCOE_DDP_MAX 2048 -#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8 - -/* supported netdev features for FCoE */ -#define I40E_FCOE_NETIF_FEATURES (NETIF_F_ALL_FCOE | \ - NETIF_F_HW_VLAN_CTAG_TX | \ - NETIF_F_HW_VLAN_CTAG_RX | \ - NETIF_F_HW_VLAN_CTAG_FILTER) - -/* DDP context flags */ -enum i40e_fcoe_ddp_flags { - __I40E_FCOE_DDP_NONE = 1, - __I40E_FCOE_DDP_TARGET, - __I40E_FCOE_DDP_INITALIZED, - __I40E_FCOE_DDP_PROGRAMMED, - __I40E_FCOE_DDP_DONE, - __I40E_FCOE_DDP_ABORTED, - __I40E_FCOE_DDP_UNMAPPED, -}; - -/* DDP SW context struct */ -struct i40e_fcoe_ddp { - int len; - u16 xid; - u16 firstoff; - u16 lastsize; - u16 list_len; - u8 fcerr; - u8 prerr; - unsigned long flags; - unsigned int sgc; - struct scatterlist *sgl; - dma_addr_t udp; - u64 *udl; - struct dma_pool *pool; - -}; - -struct i40e_fcoe_ddp_pool { - struct dma_pool *pool; -}; - -struct i40e_fcoe { - unsigned long mode; - atomic_t refcnt; - struct i40e_fcoe_ddp_pool __percpu *ddp_pool; - struct i40e_fcoe_ddp ddp[I40E_FCOE_DDP_MAX]; -}; - -#endif /* _I40E_FCOE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index a7c7b1d9b7c8..6d4b590f851b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 
/******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index d90669211392..7b5fd33d70ae 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index daa9204426d4..cd40dc487b38 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h index e74128db5be5..79e1396735d9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e31adbc75f9c..16229998fb1e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver @@ -69,12 +70,6 @@ static int i40e_reset(struct i40e_pf *pf); static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired); static void i40e_fdir_sb_setup(struct i40e_pf *pf); static int i40e_veb_get_bw_info(struct i40e_veb *veb); -static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, - struct i40e_cloud_filter *filter, - bool add); -static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, - struct i40e_cloud_filter *filter, - bool add); static int i40e_get_capabilities(struct i40e_pf *pf, enum i40e_admin_queue_opc list_type); @@ -215,8 +210,8 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { dev_info(&pf->pdev->dev, - "param err: pile=%p needed=%d id=0x%04x\n", - pile, needed, id); + "param err: pile=%s needed=%d id=0x%04x\n", + pile ? 
"<valid>" : "<null>", needed, id); return -EINVAL; } @@ -1088,13 +1083,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) &osd->rx_lpi_count, &nsd->rx_lpi_count); if (pf->flags & I40E_FLAG_FD_SB_ENABLED && - !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) + !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) nsd->fd_sb_status = true; else nsd->fd_sb_status = false; if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && - !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) + !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) nsd->fd_atr_status = true; else nsd->fd_atr_status = false; @@ -1380,21 +1375,14 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, ether_addr_copy(f->macaddr, macaddr); f->vlan = vlan; - /* If we're in overflow promisc mode, set the state directly - * to failed, so we don't bother to try sending the filter - * to the hardware. - */ - if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state)) - f->state = I40E_FILTER_FAILED; - else - f->state = I40E_FILTER_NEW; + f->state = I40E_FILTER_NEW; INIT_HLIST_NODE(&f->hlist); key = i40e_addr_to_hkey(macaddr); hash_add(vsi->mac_filter_hash, &f->hlist, key); vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); } /* If we're asked to add a filter that has been marked for removal, it @@ -1444,7 +1432,7 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) } vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->state); } /** @@ -1967,7 +1955,7 @@ static void i40e_set_rx_mode(struct net_device *netdev) /* check for other flag changes */ if (vsi->current_netdev_flags != vsi->netdev->flags) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); } } @@ -2116,17 +2104,16 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name, * @list: the list of filters to send to firmware * @add_head: Position in the add hlist * @num_add: the number of filters to add - * @promisc_change: set to true on exit if promiscuous mode was forced on * * Send a request to firmware via AdminQ to add a chunk of filters. Will set - * promisc_changed to true if the firmware has run out of space for more - * filters. + * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of + * space for more filters. 
*/ static void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, struct i40e_aqc_add_macvlan_element_data *list, struct i40e_new_mac_filter *add_head, - int num_add, bool *promisc_changed) + int num_add) { struct i40e_hw *hw = &vsi->back->hw; int aq_err, fcnt; @@ -2136,7 +2123,6 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, fcnt = i40e_update_filter_state(num_add, list, add_head); if (fcnt != num_add) { - *promisc_changed = true; set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); dev_warn(&vsi->back->pdev->dev, "Error %s adding RX filters on %s, promiscuous mode forced on\n", @@ -2177,11 +2163,13 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, NULL); } - if (aq_ret) + if (aq_ret) { + set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); dev_warn(&vsi->back->pdev->dev, - "Error %s setting broadcast promiscuous mode on %s\n", + "Error %s, forcing overflow promiscuous on %s\n", i40e_aq_str(hw, hw->aq.asq_last_status), vsi_name); + } return aq_ret; } @@ -2267,9 +2255,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) struct i40e_mac_filter *f; struct i40e_new_mac_filter *new, *add_head = NULL; struct i40e_hw *hw = &vsi->back->hw; + bool old_overflow, new_overflow; unsigned int failed_filters = 0; unsigned int vlan_filters = 0; - bool promisc_changed = false; char vsi_name[16] = "PF"; int filter_list_len = 0; i40e_status aq_ret = 0; @@ -2291,6 +2279,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) usleep_range(1000, 2000); pf = vsi->back; + old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); + if (vsi->netdev) { changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; vsi->current_netdev_flags = vsi->netdev->flags; @@ -2423,12 +2413,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) num_add = 0; hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { - if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, - vsi->state)) { - new->state = I40E_FILTER_FAILED; - continue; - } - /* handle broadcast filters by updating the broadcast * promiscuous flag instead of adding a MAC filter. */ @@ -2464,15 +2448,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* flush a full buffer */ if (num_add == filter_list_len) { i40e_aqc_add_filters(vsi, vsi_name, add_list, - add_head, num_add, - &promisc_changed); + add_head, num_add); memset(add_list, 0, list_size); num_add = 0; } } if (num_add) { i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head, - num_add, &promisc_changed); + num_add); } /* Now move all of the filters from the temp add list back to * the VSI's list. @@ -2501,24 +2484,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } spin_unlock_bh(&vsi->mac_filter_hash_lock); - /* If promiscuous mode has changed, we need to calculate a new - * threshold for when we are safe to exit - */ - if (promisc_changed) - vsi->promisc_threshold = (vsi->active_filters * 3) / 4; - /* Check if we are able to exit overflow promiscuous mode. We can * safely exit if we didn't just enter, we no longer have any failed * filters, and we have reduced filters below the threshold value. 
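Worked numbers for this hysteresis (illustrative): entering overflow with active_filters = 200 sets the exit threshold to 200 * 3 / 4 = 150, so the check below leaves overflow promiscuous only after the failed filters are gone and the active count has dropped under 150:

	if (!old_overflow && new_overflow)		/* entering */
		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;

	if (old_overflow && !failed_filters &&		/* leaving  */
	    vsi->active_filters < vsi->promisc_threshold)
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);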
*/ - if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) && - !promisc_changed && !failed_filters && - (vsi->active_filters < vsi->promisc_threshold)) { + if (old_overflow && !failed_filters && + vsi->active_filters < vsi->promisc_threshold) { dev_info(&pf->pdev->dev, "filter logjam cleared on %s, leaving overflow promiscuous mode\n", vsi_name); clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); - promisc_changed = true; vsi->promisc_threshold = 0; } @@ -2528,6 +2503,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) goto out; } + new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); + + /* If we are entering overflow promiscuous, we need to calculate a new + * threshold for when we are safe to exit + */ + if (!old_overflow && new_overflow) + vsi->promisc_threshold = (vsi->active_filters * 3) / 4; + /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { bool cur_multipromisc; @@ -2548,12 +2531,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } } - if ((changed_flags & IFF_PROMISC) || promisc_changed) { + if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) { bool cur_promisc; cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || - test_bit(__I40E_VSI_OVERFLOW_PROMISC, - vsi->state)); + new_overflow); aq_ret = i40e_set_promiscuous(pf, cur_promisc); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, @@ -2595,9 +2577,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) { int v; - if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) + if (!pf) + return; + if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) return; - pf->flags &= ~I40E_FLAG_FILTER_SYNC; for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && @@ -2606,7 +2589,8 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) if (ret) { /* come back and try again later */ - pf->flags |= I40E_FLAG_FILTER_SYNC; + set_bit(__I40E_MACVLAN_SYNC_PENDING, + pf->state); break; } } @@ -2650,8 +2634,8 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu = new_mtu; if (netif_running(netdev)) i40e_vsi_reinit_locked(vsi); - pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED | - I40E_FLAG_CLIENT_L2_CHANGE); + set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); + set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); return 0; } @@ -2738,22 +2722,6 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) } /** - * i40e_vlan_rx_register - Setup or shutdown vlan offload - * @netdev: network interface to be adjusted - * @features: netdev features to test if VLAN offload is enabled or not - **/ -static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) -{ - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - - if (features & NETIF_F_HW_VLAN_CTAG_RX) - i40e_vlan_stripping_enable(vsi); - else - i40e_vlan_stripping_disable(vsi); -} - -/** * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address * @vsi: the vsi being configured * @vid: vlan id to be added (0 = untagged only , -1 = any) @@ -2928,7 +2896,10 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi) if (!vsi->netdev) return; - i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); + if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + i40e_vlan_stripping_enable(vsi); + else + i40e_vlan_stripping_disable(vsi); for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), @@ -3449,15 +3420,20 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) for (i = 0; i 
< vsi->num_q_vectors; i++, vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[i]; - q_vector->itr_countdown = ITR_COUNTDOWN_START; - q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting); - q_vector->rx.latency_range = I40E_LOW_LATENCY; + q_vector->rx.next_update = jiffies + 1; + q_vector->rx.target_itr = + ITR_TO_REG(vsi->rx_rings[i]->itr_setting); wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), - q_vector->rx.itr); - q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting); - q_vector->tx.latency_range = I40E_LOW_LATENCY; + q_vector->rx.target_itr); + q_vector->rx.current_itr = q_vector->rx.target_itr; + + q_vector->tx.next_update = jiffies + 1; + q_vector->tx.target_itr = + ITR_TO_REG(vsi->tx_rings[i]->itr_setting); wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), - q_vector->tx.itr); + q_vector->tx.target_itr); + q_vector->tx.current_itr = q_vector->tx.target_itr; + wr32(hw, I40E_PFINT_RATEN(vector - 1), i40e_intrl_usec_to_reg(vsi->int_rate_limit)); @@ -3558,13 +3534,14 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) u32 val; /* set the ITR configuration */ - q_vector->itr_countdown = ITR_COUNTDOWN_START; - q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting); - q_vector->rx.latency_range = I40E_LOW_LATENCY; - wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); - q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting); - q_vector->tx.latency_range = I40E_LOW_LATENCY; - wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); + q_vector->rx.next_update = jiffies + 1; + q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting); + wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr); + q_vector->rx.current_itr = q_vector->rx.target_itr; + q_vector->tx.next_update = jiffies + 1; + q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting); + wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr); + q_vector->tx.current_itr = q_vector->tx.target_itr; i40e_enable_misc_int_causes(pf); @@ -4745,9 +4722,9 @@ static void i40e_vsi_close(struct i40e_vsi *vsi) i40e_vsi_free_tx_resources(vsi); i40e_vsi_free_rx_resources(vsi); vsi->current_netdev_flags = 0; - pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; + set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) - pf->flags |= I40E_FLAG_CLIENT_RESET; + set_bit(__I40E_CLIENT_RESET, pf->state); } /** @@ -5375,7 +5352,7 @@ out: * @vsi: VSI to be configured * **/ -int i40e_get_link_speed(struct i40e_vsi *vsi) +static int i40e_get_link_speed(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; @@ -6518,7 +6495,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi) /* On the next run of the service_task, notify any clients of the new * opened netdev */ - pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; + set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); i40e_service_event_schedule(pf); return 0; @@ -6560,6 +6537,75 @@ int i40e_up(struct i40e_vsi *vsi) } /** + * i40e_force_link_state - Force the link status + * @pf: board private structure + * @is_up: whether the link state should be forced up or down + **/ +static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_aq_set_phy_config config = {0}; + struct i40e_hw *hw = &pf->hw; + i40e_status err; + u64 mask; + + /* Get the current phy config */ + err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, + NULL); + if (err) { + 
dev_err(&pf->pdev->dev, + "failed to get phy cap., ret = %s last_status = %s\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return err; + } + + /* If link needs to go up, but was not forced to go down, + * no need for a flap + */ + if (is_up && abilities.phy_type != 0) + return I40E_SUCCESS; + + /* To force link we need to set bits for all supported PHY types, + * but there are now more than 32, so we need to split the bitmap + * across two fields. + */ + mask = I40E_PHY_TYPES_BITMASK; + config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0; + config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0; + /* Copy the old settings, except of phy_type */ + config.abilities = abilities.abilities; + config.link_speed = abilities.link_speed; + config.eee_capability = abilities.eee_capability; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + err = i40e_aq_set_phy_config(hw, &config, NULL); + + if (err) { + dev_err(&pf->pdev->dev, + "set phy config ret = %s last_status = %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + return err; + } + + /* Update the link info */ + err = i40e_update_link_info(hw); + if (err) { + /* Wait a little bit (on 40G cards it sometimes takes a really + * long time for link to come back from the atomic reset) + * and try once more + */ + msleep(1000); + i40e_update_link_info(hw); + } + + i40e_aq_set_link_restart_an(hw, true, NULL); + + return I40E_SUCCESS; +} + +/** * i40e_down - Shutdown the connection processing * @vsi: the VSI being stopped **/ @@ -6576,6 +6622,9 @@ void i40e_down(struct i40e_vsi *vsi) } i40e_vsi_disable_irq(vsi); i40e_vsi_stop_rings(vsi); + if (vsi->type == I40E_VSI_MAIN && + vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) + i40e_force_link_state(vsi->back, false); i40e_napi_disable_all(vsi); for (i = 0; i < vsi->num_queue_pairs; i++) { @@ -6848,8 +6897,8 @@ i40e_set_cld_element(struct i40e_cloud_filter *filter, * Add or delete a cloud filter for a specific flow spec. * Returns 0 if the filter were successfully added. **/ -static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, - struct i40e_cloud_filter *filter, bool add) +int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, + struct i40e_cloud_filter *filter, bool add) { struct i40e_aqc_cloud_filters_element_data cld_filter; struct i40e_pf *pf = vsi->back; @@ -6915,9 +6964,9 @@ static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, * Add or delete a cloud filter for a specific flow spec using big buffer. * Returns 0 if the filter were successfully added. 
**/ -static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, - struct i40e_cloud_filter *filter, - bool add) +int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, + struct i40e_cloud_filter *filter, + bool add) { struct i40e_aqc_cloud_filters_element_bb cld_filter; struct i40e_pf *pf = vsi->back; @@ -7537,6 +7586,9 @@ int i40e_open(struct net_device *netdev) netif_carrier_off(netdev); + if (i40e_force_link_state(pf, true)) + return -EAGAIN; + err = i40e_vsi_open(vsi); if (err) return err; @@ -7985,8 +8037,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, i40e_service_event_schedule(pf); } else { i40e_pf_unquiesce_all_vsi(pf); - pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED | - I40E_FLAG_CLIENT_L2_CHANGE); + set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); + set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); } exit: @@ -8087,6 +8139,85 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf) } /** + * i40e_reenable_fdir_sb - Restore FDir SB capability + * @pf: board private structure + **/ +static void i40e_reenable_fdir_sb(struct i40e_pf *pf) +{ + if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) + if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && + (I40E_DEBUG_FD & pf->hw.debug_mask)) + dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); +} + +/** + * i40e_reenable_fdir_atr - Restore FDir ATR capability + * @pf: board private structure + **/ +static void i40e_reenable_fdir_atr(struct i40e_pf *pf) +{ + if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) { + /* ATR uses the same filtering logic as SB rules. It only + * functions properly if the input set mask is at the default + * settings. It is safe to restore the default input set + * because there are no active TCPv4 filter rules. 
+ */ + i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, + I40E_L3_SRC_MASK | I40E_L3_DST_MASK | + I40E_L4_SRC_MASK | I40E_L4_DST_MASK); + + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (I40E_DEBUG_FD & pf->hw.debug_mask)) + dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n"); + } +} + +/** + * i40e_delete_invalid_filter - Delete an invalid FDIR filter + * @pf: board private structure + * @filter: FDir filter to remove + */ +static void i40e_delete_invalid_filter(struct i40e_pf *pf, + struct i40e_fdir_filter *filter) +{ + /* Update counters */ + pf->fdir_pf_active_filters--; + pf->fd_inv = 0; + + switch (filter->flow_type) { + case TCP_V4_FLOW: + pf->fd_tcp4_filter_cnt--; + break; + case UDP_V4_FLOW: + pf->fd_udp4_filter_cnt--; + break; + case SCTP_V4_FLOW: + pf->fd_sctp4_filter_cnt--; + break; + case IP_USER_FLOW: + switch (filter->ip4_proto) { + case IPPROTO_TCP: + pf->fd_tcp4_filter_cnt--; + break; + case IPPROTO_UDP: + pf->fd_udp4_filter_cnt--; + break; + case IPPROTO_SCTP: + pf->fd_sctp4_filter_cnt--; + break; + case IPPROTO_IP: + pf->fd_ip4_filter_cnt--; + break; + } + break; + } + + /* Remove the filter from the list and free memory */ + hlist_del(&filter->fdir_node); + kfree(filter); +} + +/** * i40e_fdir_check_and_reenable - Function to reenable FD ATR or SB if disabled * @pf: board private structure **/ @@ -8104,40 +8235,23 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) fcnt_avail = pf->fdir_pf_filter_count; if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || (pf->fd_add_err == 0) || - (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) { - if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) { - pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED; - if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && - (I40E_DEBUG_FD & pf->hw.debug_mask)) - dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); - } - } + (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) + i40e_reenable_fdir_sb(pf); /* We should wait for even more space before re-enabling ATR. * Additionally, we cannot enable ATR as long as we still have TCP SB * rules active.
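A rough standalone sketch (illustration only, not part of the patch) of the two re-enable conditions, with HEAD_ROOM and HEAD_ROOM_ATR as invented stand-ins for I40E_FDIR_BUFFER_HEAD_ROOM and I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR:

#include <stdbool.h>

#define HEAD_ROOM	32			/* illustrative value only */
#define HEAD_ROOM_ATR	(HEAD_ROOM * 4)		/* ATR waits for more free space */

static bool can_reenable_sb(unsigned int fcnt_prog, unsigned int fcnt_avail)
{
	return fcnt_prog < fcnt_avail - HEAD_ROOM;
}

static bool can_reenable_atr(unsigned int fcnt_prog, unsigned int fcnt_avail,
			     unsigned int tcp4_sb_rules)
{
	/* ATR additionally requires that no TCPv4 sideband rules exist */
	return fcnt_prog < fcnt_avail - HEAD_ROOM_ATR && tcp4_sb_rules == 0;
}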
*/ if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) && - (pf->fd_tcp4_filter_cnt == 0)) { - if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) { - pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED; - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && - (I40E_DEBUG_FD & pf->hw.debug_mask)) - dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n"); - } - } + (pf->fd_tcp4_filter_cnt == 0)) + i40e_reenable_fdir_atr(pf); /* if hw had a problem adding a filter, delete it */ if (pf->fd_inv > 0) { hlist_for_each_entry_safe(filter, node, - &pf->fdir_filter_list, fdir_node) { - if (filter->fd_id == pf->fd_inv) { - hlist_del(&filter->fdir_node); - kfree(filter); - pf->fdir_pf_active_filters--; - pf->fd_inv = 0; - } - } + &pf->fdir_filter_list, fdir_node) + if (filter->fd_id == pf->fd_inv) + i40e_delete_invalid_filter(pf, filter); } } @@ -8174,7 +8288,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) } pf->fd_flush_timestamp = jiffies; - pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; + set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); /* flush all filters */ wr32(&pf->hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); @@ -8194,7 +8308,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) /* replay sideband filters */ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); if (!disable_atr && !pf->fd_tcp4_filter_cnt) - pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED; + clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); @@ -8318,13 +8432,12 @@ static void i40e_link_event(struct i40e_pf *pf) /* On success, disable temp link polling */ if (status == I40E_SUCCESS) { - if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING) - pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING; + clear_bit(__I40E_TEMP_LINK_POLLING, pf->state); } else { /* Enable link polling temporarily until i40e_get_link_status * returns I40E_SUCCESS */ - pf->flags |= I40E_FLAG_TEMP_LINK_POLLING; + set_bit(__I40E_TEMP_LINK_POLLING, pf->state); dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", status); return; @@ -8376,7 +8489,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) pf->service_timer_previous = jiffies; if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || - (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)) + test_bit(__I40E_TEMP_LINK_POLLING, pf->state)) i40e_link_event(pf); /* Update the stats for active netdevs so the network stack @@ -9215,6 +9328,17 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } i40e_get_oem_version(&pf->hw); + if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && + ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) || + hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) { + /* The following delay is necessary for 4.33 firmware and older + * to recover after EMP reset. 200 ms should suffice but we + * put here 300 ms to be sure that FW is ready to operate + * after reset. 
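The 300 ms wait is applied only behind a firmware-version gate; a minimal sketch of that predicate (helper name invented for illustration):

#include <stdbool.h>

static bool needs_emp_reset_delay(unsigned int fw_maj_ver, unsigned int fw_min_ver)
{
	/* true for firmware 4.33 and older */
	return fw_maj_ver < 4 || (fw_maj_ver == 4 && fw_min_ver <= 33);
}

The full condition in the hunk also requires an XL710 MAC and that an EMP reset interrupt was actually received.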
+ */ + mdelay(300); + } + /* re-verify the eeprom if we just had an EMP reset */ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) i40e_verify_eeprom(pf); @@ -9593,7 +9717,7 @@ static void i40e_sync_udp_filters(struct i40e_pf *pf) pf->pending_udp_bitmap |= BIT_ULL(i); } - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); } /** @@ -9607,11 +9731,9 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) u16 port; int i; - if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC)) + if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state)) return; - pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC; - for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { if (pf->pending_udp_bitmap & BIT_ULL(i)) { pf->pending_udp_bitmap &= ~BIT_ULL(i); @@ -9663,17 +9785,15 @@ static void i40e_service_task(struct work_struct *work) i40e_vc_process_vflr_event(pf); i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); - if (pf->flags & I40E_FLAG_CLIENT_RESET) { + if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { /* Client subtask will reopen next time through. */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true); - pf->flags &= ~I40E_FLAG_CLIENT_RESET; } else { i40e_client_subtask(pf); - if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) { + if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE, + pf->state)) i40e_notify_client_of_l2_param_changes( pf->vsi[pf->lan_vsi]); - pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE; - } } i40e_sync_filters_subtask(pf); i40e_sync_udp_filters_subtask(pf); @@ -9937,18 +10057,17 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) mutex_lock(&pf->switch_mutex); if (!pf->vsi[vsi->idx]) { - dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n", - vsi->idx, vsi->idx, vsi, vsi->type); + dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n", + vsi->idx, vsi->idx, vsi->type); goto unlock_vsi; } if (pf->vsi[vsi->idx] != vsi) { dev_err(&pf->pdev->dev, - "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n", + "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n", pf->vsi[vsi->idx]->idx, - pf->vsi[vsi->idx], pf->vsi[vsi->idx]->type, - vsi->idx, vsi, vsi->type); + vsi->idx, vsi->type); goto unlock_vsi; } @@ -10018,7 +10137,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->dcb_tc = 0; if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; - ring->tx_itr_setting = pf->tx_itr_default; + ring->itr_setting = pf->tx_itr_default; vsi->tx_rings[i] = ring++; if (!i40e_enabled_xdp_vsi(vsi)) @@ -10036,7 +10155,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; set_ring_xdp(ring); - ring->tx_itr_setting = pf->tx_itr_default; + ring->itr_setting = pf->tx_itr_default; vsi->xdp_rings[i] = ring++; setup_rx: @@ -10049,7 +10168,7 @@ setup_rx: ring->count = vsi->num_desc; ring->size = 0; ring->dcb_tc = 0; - ring->rx_itr_setting = pf->rx_itr_default; + ring->itr_setting = pf->rx_itr_default; vsi->rx_rings[i] = ring; } @@ -10328,9 +10447,6 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu) netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll, NAPI_POLL_WEIGHT); - q_vector->rx.latency_range = I40E_LOW_LATENCY; - q_vector->tx.latency_range = I40E_LOW_LATENCY; - /* tie q_vector and vsi together */ vsi->q_vectors[v_idx] = q_vector; @@ -10473,6 +10589,9 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) 
if (err) goto err_unwind; + if (pf->flags & I40E_FLAG_IWARP_ENABLED) + i40e_client_update_msix_info(pf); + return 0; err_unwind: @@ -11089,6 +11208,16 @@ static int i40e_sw_init(struct i40e_pf *pf) /* IWARP needs one extra vector for CQP just like MISC.*/ pf->num_iwarp_msix = (int)num_online_cpus() + 1; } + /* Stopping the FW LLDP engine is only supported on the + * XL710 with a FW ver >= 1.7. Also, stopping FW LLDP + * engine is not supported if NPAR is functioning on this + * part + */ + if (pf->hw.mac.type == I40E_MAC_XL710 && + !pf->hw.func_caps.npar_enable && + (pf->hw.aq.api_maj_ver > 1 || + (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6))) + pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP; #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { @@ -11156,20 +11285,18 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) need_reset = true; i40e_fdir_filter_exit(pf); } - pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED | - I40E_FLAG_FD_SB_AUTO_DISABLED); + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state); pf->flags |= I40E_FLAG_FD_SB_INACTIVE; /* reset fd counters */ pf->fd_add_err = 0; pf->fd_atr_cnt = 0; /* if ATR was auto disabled it can be re-enabled. */ - if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) { - pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED; + if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); - } } return need_reset; } @@ -11302,7 +11429,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, /* New port: add it and mark its index in the bitmap */ pf->udp_ports[next_idx].port = port; pf->pending_udp_bitmap |= BIT_ULL(next_idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); } /** @@ -11343,7 +11470,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev, */ pf->udp_ports[idx].port = 0; pf->pending_udp_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); return; not_found: @@ -11688,6 +11815,8 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_bridge_getlink = i40e_ndo_bridge_getlink, .ndo_bridge_setlink = i40e_ndo_bridge_setlink, .ndo_bpf = i40e_xdp, + .ndo_xdp_xmit = i40e_xdp_xmit, + .ndo_xdp_flush = i40e_xdp_flush, }; /** @@ -12105,7 +12234,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) if (f_count) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - pf->flags |= I40E_FLAG_FILTER_SYNC; + set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); } /* Update VSI BW information */ @@ -14213,10 +14342,21 @@ static int __maybe_unused i40e_suspend(struct device *dev) del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); + /* Client close must be called explicitly here because the timer + * has been stopped. + */ + i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); + if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); - i40e_prep_for_reset(pf, false); + /* Since we're going to destroy queues during the + * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this + * whole section + */ + rtnl_lock(); + + i40e_prep_for_reset(pf, true); wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? 
I40E_PFPM_WUFC_MAG_MASK : 0)); @@ -14228,6 +14368,8 @@ static int __maybe_unused i40e_suspend(struct device *dev) */ i40e_clear_interrupt_scheme(pf); + rtnl_unlock(); + return 0; } @@ -14245,6 +14387,11 @@ static int __maybe_unused i40e_resume(struct device *dev) if (!test_bit(__I40E_SUSPENDED, pf->state)) return 0; + /* We need to hold the RTNL lock prior to restoring interrupt schemes, + * since we're going to be restoring queues + */ + rtnl_lock(); + /* We cleared the interrupt scheme when we suspended, so we need to * restore it now to resume device functionality. */ @@ -14255,7 +14402,9 @@ static int __maybe_unused i40e_resume(struct device *dev) } clear_bit(__I40E_DOWN, pf->state); - i40e_reset_and_rebuild(pf, false, false); + i40e_reset_and_rebuild(pf, false, true); + + rtnl_unlock(); /* Clear suspended state last after everything is recovered */ clear_bit(__I40E_SUSPENDED, pf->state); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 76a5cb04e4fe..ba9687c03795 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index 80e66da6b145..9c3c3b0d3ac4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 83798b7841b9..2ec24188d6e2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver @@ -287,7 +288,7 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); -i40e_status +enum i40e_status_code i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count); @@ -299,7 +300,7 @@ enum i40e_status_code i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi, struct i40e_aqc_cloud_filters_element_data *filters, u8 filter_count); -i40e_status +enum i40e_status_code i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 97381238eb7c..5b47dd1f75a5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index c234758dad15..b3e206e49cc2 100644 --- 
a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h index afb72e711d43..10c86f63dc52 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_status.h +++ b/drivers/net/ethernet/intel/i40e/i40e_status.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h index d3e55f54a05e..410ba13bcf21 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_trace.h +++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index e554aa6cf070..f174c72480ab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver @@ -335,7 +336,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); - pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; + set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); } else { pf->fd_tcp4_filter_cnt--; } @@ -593,8 +594,14 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) && - pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) { - pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; + test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) { + /* These set_bit() calls aren't atomic with the + * test_bit() here, but worse case we potentially + * disable ATR and queue a flush right after SB + * support is re-enabled. 
That shouldn't cause an + * issue in practice + */ + set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); } @@ -607,11 +614,10 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, */ if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && - !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) { - pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED; + !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED, + pf->state)) if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); - } } } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { if (I40E_DEBUG_FD & pf->hw.debug_mask) @@ -708,16 +714,22 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) /** * i40e_get_tx_pending - how many tx descriptors not processed * @tx_ring: the ring of descriptors + * @in_sw: use SW variables * * Since there is no access to the ring head register * in XL710, we need to use our local copies **/ -u32 i40e_get_tx_pending(struct i40e_ring *ring) +u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw) { u32 head, tail; - head = i40e_get_head(ring); - tail = readl(ring->tail); + if (!in_sw) { + head = i40e_get_head(ring); + tail = readl(ring->tail); + } else { + head = ring->next_to_clean; + tail = ring->next_to_use; + } if (head != tail) return (head < tail) ? @@ -774,7 +786,7 @@ void i40e_detect_recover_hung(struct i40e_vsi *vsi) */ smp_rmb(); tx_ring->tx_stats.prev_pkt_ctr = - i40e_get_tx_pending(tx_ring) ? packets : -1; + i40e_get_tx_pending(tx_ring, true) ? packets : -1; } } } @@ -898,7 +910,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, * them to be written back in case we stay in NAPI. * In this mode on X722 we do not enable Interrupt. */ - unsigned int j = i40e_get_tx_pending(tx_ring); + unsigned int j = i40e_get_tx_pending(tx_ring, false); if (budget && ((j / WB_STRIDE) == 0) && (j > 0) && @@ -995,99 +1007,241 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) } } +static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector, + struct i40e_ring_container *rc) +{ + return &q_vector->rx == rc; +} + +static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) +{ + unsigned int divisor; + + switch (q_vector->vsi->back->hw.phy.link_info.link_speed) { + case I40E_LINK_SPEED_40GB: + divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024; + break; + case I40E_LINK_SPEED_25GB: + case I40E_LINK_SPEED_20GB: + divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512; + break; + default: + case I40E_LINK_SPEED_10GB: + divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256; + break; + case I40E_LINK_SPEED_1GB: + case I40E_LINK_SPEED_100MB: + divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32; + break; + } + + return divisor; +} + /** - * i40e_set_new_dynamic_itr - Find new ITR level + * i40e_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information * @rc: structure containing ring performance data * - * Returns true if ITR changed, false if not - * - * Stores a new ITR value based on packets and byte counts during - * the last interrupt. The advantage of per interrupt computation - * is faster updates and more accurate ITR for the current traffic - * pattern. 
Constants in this function were computed based on - * theoretical maximum wire speed and thresholds were set based on - * testing data as well as attempting to minimize response time + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time * while increasing bulk throughput. **/ -static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +static void i40e_update_itr(struct i40e_q_vector *q_vector, + struct i40e_ring_container *rc) { - enum i40e_latency_range new_latency_range = rc->latency_range; - u32 new_itr = rc->itr; - int bytes_per_usec; - unsigned int usecs, estimated_usecs; + unsigned int avg_wire_size, packets, bytes, itr; + unsigned long next_update = jiffies; - if (rc->total_packets == 0 || !rc->itr) - return false; + /* If we don't have any rings just leave ourselves set for maximum + * possible latency so we take ourselves out of the equation. + */ + if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting)) + return; - usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; - bytes_per_usec = rc->total_bytes / usecs; + /* For Rx we want to push the delay up and default to low latency. + * for Tx we want to pull the delay down and default to high latency. + */ + itr = i40e_container_is_rx(q_vector, rc) ? + I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY : + I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY; + + /* If we didn't update within up to 1 - 2 jiffies we can assume + * that either packets are coming in so slow there hasn't been + * any work, or that there is so much work that NAPI is dealing + * with interrupt moderation and we don't need to do anything. + */ + if (time_after(next_update, rc->next_update)) + goto clear_counts; + + /* If itr_countdown is set it means we programmed an ITR within + * the last 4 interrupt cycles. This has a side effect of us + * potentially firing an early interrupt. In order to work around + * this we need to throw out any data received for a few + * interrupts following the update. + */ + if (q_vector->itr_countdown) { + itr = rc->target_itr; + goto clear_counts; + } - /* The calculations in this algorithm depend on interrupts actually - * firing at the ITR rate. This may not happen if the packet rate is - * really low, or if we've been napi polling. Check to make sure - * that's not the case before we continue. + packets = rc->total_packets; + bytes = rc->total_bytes; + + if (i40e_container_is_rx(q_vector, rc)) { + /* If Rx there are 1 to 4 packets and bytes are less than + * 9000 assume insufficient data to use bulk rate limiting + * approach unless Tx is already in bulk rate limiting. We + * are likely latency driven. + */ + if (packets && packets < 4 && bytes < 9000 && + (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) { + itr = I40E_ITR_ADAPTIVE_LATENCY; + goto adjust_by_size; + } + } else if (packets < 4) { + /* If we have Tx and Rx ITR maxed and Tx ITR is running in + * bulk mode and we are receiving 4 or fewer packets just + * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so + * that the Rx can relax. 
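The branches here and just below classify each interrupt by packet count; a rough standalone summary of the thresholds (4, 56, 112, 256), with the helper name invented for illustration:

static const char *itr_bucket(unsigned int packets)
{
	if (packets < 4)
		return "too few packets to measure; special-case latency handling";
	if (packets < 56)
		return "delay only slightly too low; add a small fixed increment";
	if (packets <= 112)
		return "goldilocks zone; keep the current ITR";
	if (packets <= 256)
		return "slight overrun; halve the delay";
	return "bulk traffic; recompute the delay from average frame size";
}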
+ */ + if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS && + (q_vector->rx.target_itr & I40E_ITR_MASK) == + I40E_ITR_ADAPTIVE_MAX_USECS) + goto clear_counts; + } else if (packets > 32) { + /* If we have processed over 32 packets in a single interrupt + * for Tx assume we need to switch over to "bulk" mode. + */ + rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY; + } + + /* We have no packets to actually measure against. This means + * either one of the other queues on this vector is active or + * we are a Tx queue doing TSO with too high of an interrupt rate. + * + * Between 4 and 56 we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. */ - estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update); - if (estimated_usecs > usecs) { - new_latency_range = I40E_LOW_LATENCY; - goto reset_latency; + if (packets < 56) { + itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC; + if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { + itr &= I40E_ITR_ADAPTIVE_LATENCY; + itr += I40E_ITR_ADAPTIVE_MAX_USECS; + } + goto clear_counts; } - /* simple throttlerate management - * 0-10MB/s lowest (50000 ints/s) - * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (18000 ints/s) + if (packets <= 256) { + itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); + itr &= I40E_ITR_MASK; + + /* Between 56 and 112 is our "goldilocks" zone where we are + * working out "just right". Just report that our current + * ITR is good for us. + */ + if (packets <= 112) + goto clear_counts; + + /* If packet count is 128 or greater we are likely looking + * at a slight overrun of the delay we want. Try halving + * our delay to see if that will cut the number of packets + * in half per interrupt. + */ + itr /= 2; + itr &= I40E_ITR_MASK; + if (itr < I40E_ITR_ADAPTIVE_MIN_USECS) + itr = I40E_ITR_ADAPTIVE_MIN_USECS; + + goto clear_counts; + } + + /* The paths below assume we are dealing with a bulk ITR since + * number of packets is greater than 256. We are just going to have + * to compute a value and try to bring the count under control, + * though for smaller packet sizes there isn't much we can do as + * NAPI polling will likely be kicking in sooner rather than later. + */ + itr = I40E_ITR_ADAPTIVE_BULK; + +adjust_by_size: + /* If packet counts are 256 or greater we can assume we have a gross + * overestimation of what the rate should be. Instead of trying to fine + * tune it just use the formula below to try and dial in an exact value + * give the current packet size of the frame. + */ + avg_wire_size = bytes / packets; + + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to * - * The math works out because the divisor is in 10^(-6) which - * turns the bytes/us input value into MB/s values, but - * make sure to use usecs, as the register values written - * are in 2 usec increments in the ITR registers, and make sure - * to use the smoothed values that the countdown timer gives us. + * (170 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. 
We also have to account for PCIe link speed + * difference as ITR scales based on this. */ - switch (new_latency_range) { - case I40E_LOWEST_LATENCY: - if (bytes_per_usec > 10) - new_latency_range = I40E_LOW_LATENCY; - break; - case I40E_LOW_LATENCY: - if (bytes_per_usec > 20) - new_latency_range = I40E_BULK_LATENCY; - else if (bytes_per_usec <= 10) - new_latency_range = I40E_LOWEST_LATENCY; - break; - case I40E_BULK_LATENCY: - default: - if (bytes_per_usec <= 20) - new_latency_range = I40E_LOW_LATENCY; - break; + if (avg_wire_size <= 60) { + /* Start at 250k ints/sec */ + avg_wire_size = 4096; + } else if (avg_wire_size <= 380) { + /* 250K ints/sec to 60K ints/sec */ + avg_wire_size *= 40; + avg_wire_size += 1696; + } else if (avg_wire_size <= 1084) { + /* 60K ints/sec to 36K ints/sec */ + avg_wire_size *= 15; + avg_wire_size += 11452; + } else if (avg_wire_size <= 1980) { + /* 36K ints/sec to 30K ints/sec */ + avg_wire_size *= 5; + avg_wire_size += 22420; + } else { + /* plateau at a limit of 30K ints/sec */ + avg_wire_size = 32256; } -reset_latency: - rc->latency_range = new_latency_range; + /* If we are in low latency mode halve our delay which doubles the + * rate to somewhere between 100K to 16K ints/sec + */ + if (itr & I40E_ITR_ADAPTIVE_LATENCY) + avg_wire_size /= 2; - switch (new_latency_range) { - case I40E_LOWEST_LATENCY: - new_itr = I40E_ITR_50K; - break; - case I40E_LOW_LATENCY: - new_itr = I40E_ITR_20K; - break; - case I40E_BULK_LATENCY: - new_itr = I40E_ITR_18K; - break; - default: - break; + /* Resultant value is 256 times larger than it needs to be. This + * gives us room to adjust the value as needed to either increase + * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. + * + * Use addition as we have already recorded the new latency flag + * for the ITR value. + */ + itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) * + I40E_ITR_ADAPTIVE_MIN_INC; + + if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { + itr &= I40E_ITR_ADAPTIVE_LATENCY; + itr += I40E_ITR_ADAPTIVE_MAX_USECS; } +clear_counts: + /* write back value */ + rc->target_itr = itr; + + /* next update should occur within next jiffy */ + rc->next_update = next_update + 1; + rc->total_bytes = 0; rc->total_packets = 0; - rc->last_itr_update = jiffies; - - if (new_itr != rc->itr) { - rc->itr = new_itr; - return true; - } - return false; } /** @@ -1434,9 +1588,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, bi->dma = dma; bi->page = page; bi->page_offset = i40e_rx_offset(rx_ring); - - /* initialize pagecnt_bias to 1 representing we fully own page */ - bi->pagecnt_bias = 1; + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; return true; } @@ -1802,8 +1955,8 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. */ - if (unlikely(!pagecnt_bias)) { - page_ref_add(page, USHRT_MAX); + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); rx_buffer->pagecnt_bias = USHRT_MAX; } @@ -1991,7 +2144,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, * @rx_buffer: rx buffer to pull data from * * This function will clean up the contents of the rx_buffer. It will - * either recycle the bufer or unmap it and free the associated resources. + * either recycle the buffer or unmap it and free the associated resources. 
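The size-based fallback under adjust_by_size above is worth working through once; this standalone copy of the piecewise approximation returns a value 256 times the target ITR, before the link-speed divisor and latency halving are applied (function name invented for illustration):

static unsigned int itr_scaled_from_wire_size(unsigned int avg_wire_size)
{
	if (avg_wire_size <= 60)
		return 4096;				/* ~250K ints/sec */
	if (avg_wire_size <= 380)
		return avg_wire_size * 40 + 1696;	/* 250K down to 60K */
	if (avg_wire_size <= 1084)
		return avg_wire_size * 15 + 11452;	/* 60K down to 36K */
	if (avg_wire_size <= 1980)
		return avg_wire_size * 5 + 22420;	/* 36K down to 30K */
	return 32256;					/* plateau near 30K ints/sec */
}

For 1500-byte frames this gives 1500 * 5 + 22420 = 29920, i.e. about 29920 / 256 ≈ 117 usecs of delay before scaling.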
*/ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer) @@ -2061,7 +2214,7 @@ static int i40e_xmit_xdp_ring(struct xdp_buff *xdp, static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp) { - int result = I40E_XDP_PASS; + int err, result = I40E_XDP_PASS; struct i40e_ring *xdp_ring; struct bpf_prog *xdp_prog; u32 act; @@ -2080,6 +2233,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; result = i40e_xmit_xdp_ring(xdp, xdp_ring); break; + case XDP_REDIRECT: + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED; + break; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: @@ -2115,6 +2272,15 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring, #endif } +static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) +{ + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. + */ + wmb(); + writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); +} + /** * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: rx descriptor ring to transact packets on @@ -2249,16 +2415,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) } if (xdp_xmit) { - struct i40e_ring *xdp_ring; + struct i40e_ring *xdp_ring = + rx_ring->vsi->xdp_rings[rx_ring->queue_index]; - xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. - */ - wmb(); - - writel(xdp_ring->next_to_use, xdp_ring->tail); + i40e_xdp_ring_update_tail(xdp_ring); + xdp_do_flush_map(); } rx_ring->skb = skb; @@ -2274,29 +2435,45 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) return failure ? budget : (int)total_rx_packets; } -static u32 i40e_buildreg_itr(const int type, const u16 itr) +static inline u32 i40e_buildreg_itr(const int type, u16 itr) { u32 val; + /* We don't bother with setting the CLEARPBA bit as the data sheet + * points out doing so is "meaningless since it was already + * auto-cleared". The auto-clearing happens when the interrupt is + * asserted. + * + * Hardware errata 28 for also indicates that writing to a + * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear + * an event in the PBA anyway so we need to rely on the automask + * to hold pending events for us until the interrupt is re-enabled + * + * The itr value is reported in microseconds, and the register + * value is recorded in 2 microsecond units. For this reason we + * only need to shift by the interval shift - 1 instead of the + * full value. + */ + itr &= I40E_ITR_MASK; + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); + (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1)); return val; } /* a small macro to shorten up some long lines */ #define INTREG I40E_PFINT_DYN_CTLN -static inline int get_rx_itr(struct i40e_vsi *vsi, int idx) -{ - return vsi->rx_rings[idx]->rx_itr_setting; -} -static inline int get_tx_itr(struct i40e_vsi *vsi, int idx) -{ - return vsi->tx_rings[idx]->tx_itr_setting; -} +/* The act of updating the ITR will cause it to immediately trigger. In order + * to prevent this from throwing off adaptive update statistics we defer the + * update so that it can only happen so often. 
So after either Tx or Rx are + * updated we make the adaptive scheme wait until either the ITR completely + * expires via the next_update expiration or we have been through at least + * 3 interrupts. + */ +#define ITR_COUNTDOWN_START 3 /** * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt @@ -2308,10 +2485,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { struct i40e_hw *hw = &vsi->back->hw; - bool rx = false, tx = false; - u32 rxval, txval; - int idx = q_vector->v_idx; - int rx_itr_setting, tx_itr_setting; + u32 intval; /* If we don't have MSIX, then we only need to re-enable icr0 */ if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) { @@ -2319,65 +2493,49 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, return; } - /* avoid dynamic calculation if in countdown mode OR if - * all dynamic is disabled - */ - rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); - - rx_itr_setting = get_rx_itr(vsi, idx); - tx_itr_setting = get_tx_itr(vsi, idx); - - if (q_vector->itr_countdown > 0 || - (!ITR_IS_DYNAMIC(rx_itr_setting) && - !ITR_IS_DYNAMIC(tx_itr_setting))) { - goto enable_int; - } - - if (ITR_IS_DYNAMIC(rx_itr_setting)) { - rx = i40e_set_new_dynamic_itr(&q_vector->rx); - rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); - } - - if (ITR_IS_DYNAMIC(tx_itr_setting)) { - tx = i40e_set_new_dynamic_itr(&q_vector->tx); - txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); - } - - if (rx || tx) { - /* get the higher of the two ITR adjustments and - * use the same value for both ITR registers - * when in adaptive mode (Rx and/or Tx) - */ - u16 itr = max(q_vector->tx.itr, q_vector->rx.itr); - - q_vector->tx.itr = q_vector->rx.itr = itr; - txval = i40e_buildreg_itr(I40E_TX_ITR, itr); - tx = true; - rxval = i40e_buildreg_itr(I40E_RX_ITR, itr); - rx = true; - } + /* These will do nothing if dynamic updates are not enabled */ + i40e_update_itr(q_vector, &q_vector->tx); + i40e_update_itr(q_vector, &q_vector->rx); - /* only need to enable the interrupt once, but need - * to possibly update both ITR values + /* This block of logic allows us to get away with only updating + * one ITR value with each interrupt. The idea is to perform a + * pseudo-lazy update with the following criteria. + * + * 1. Rx is given higher priority than Tx if both are in same state + * 2. If we must reduce an ITR that is given highest priority. + * 3. We then give priority to increasing ITR based on amount. 
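Those three rules reduce to programming at most one ITR register per interrupt; a compact standalone restatement (struct and helper names invented, register writes stubbed out):

struct itr_pair { unsigned int target, current; };

static void pick_itr_update(struct itr_pair *rx, struct itr_pair *tx)
{
	if (rx->target < rx->current)
		rx->current = rx->target;	/* 1: reduce Rx, highest priority */
	else if (tx->target < tx->current ||
		 rx->target - rx->current < tx->target - tx->current)
		tx->current = tx->target;	/* 2: reduce Tx, or Tx must grow more */
	else if (rx->current != rx->target)
		rx->current = rx->target;	/* 3: increase Rx */
	/* otherwise no update; only re-enable the interrupt */
}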
*/ - if (rx) { - /* set the INTENA_MSK_MASK so that this first write - * won't actually enable the interrupt, instead just - * updating the ITR (it's bit 31 PF and VF) + if (q_vector->rx.target_itr < q_vector->rx.current_itr) { + /* Rx ITR needs to be reduced, this is highest priority */ + intval = i40e_buildreg_itr(I40E_RX_ITR, + q_vector->rx.target_itr); + q_vector->rx.current_itr = q_vector->rx.target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || + ((q_vector->rx.target_itr - q_vector->rx.current_itr) < + (q_vector->tx.target_itr - q_vector->tx.current_itr))) { + /* Tx ITR needs to be reduced, this is second priority + * Tx ITR needs to be increased more than Rx, fourth priority */ - rxval |= BIT(31); - /* don't check _DOWN because interrupt isn't being enabled */ - wr32(hw, INTREG(q_vector->reg_idx), rxval); + intval = i40e_buildreg_itr(I40E_TX_ITR, + q_vector->tx.target_itr); + q_vector->tx.current_itr = q_vector->tx.target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { + /* Rx ITR needs to be increased, third priority */ + intval = i40e_buildreg_itr(I40E_RX_ITR, + q_vector->rx.target_itr); + q_vector->rx.current_itr = q_vector->rx.target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else { + /* No ITR update, lowest priority */ + intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); + if (q_vector->itr_countdown) + q_vector->itr_countdown--; } -enable_int: if (!test_bit(__I40E_VSI_DOWN, vsi->state)) - wr32(hw, INTREG(q_vector->reg_idx), txval); - - if (q_vector->itr_countdown) - q_vector->itr_countdown--; - else - q_vector->itr_countdown = ITR_COUNTDOWN_START; + wr32(hw, INTREG(q_vector->reg_idx), intval); } /** @@ -2501,7 +2659,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) return; - if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) + if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) return; /* if sampling is disabled do nothing */ @@ -2541,7 +2699,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, th = (struct tcphdr *)(hdr.network + hlen); /* Due to lack of space, no more new filters can be programmed */ - if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) + if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) return; if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { /* HW ATR eviction will take care of removing filters on FIN @@ -3509,3 +3667,49 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) return i40e_xmit_frame_ring(skb, tx_ring); } + +/** + * i40e_xdp_xmit - Implements ndo_xdp_xmit + * @dev: netdev + * @xdp: XDP buffer + * + * Returns Zero if sent, else an error code + **/ +int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + unsigned int queue_index = smp_processor_id(); + struct i40e_vsi *vsi = np->vsi; + int err; + + if (test_bit(__I40E_VSI_DOWN, vsi->state)) + return -ENETDOWN; + + if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) + return -ENXIO; + + err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]); + if (err != I40E_XDP_TX) + return -ENOSPC; + + return 0; +} + +/** + * i40e_xdp_flush - Implements ndo_xdp_flush + * @dev: netdev + **/ +void i40e_xdp_flush(struct net_device *dev) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + unsigned int queue_index = 
smp_processor_id(); + struct i40e_vsi *vsi = np->vsi; + + if (test_bit(__I40E_VSI_DOWN, vsi->state)) + return; + + if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) + return; + + i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]); +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 701b708628b0..3043483ec426 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver @@ -30,32 +31,37 @@ #include <net/xdp.h> /* Interrupt Throttling and Rate Limiting Goodies */ - -#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ -#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ -#define I40E_ITR_100K 0x0005 -#define I40E_ITR_50K 0x000A -#define I40E_ITR_20K 0x0019 -#define I40E_ITR_18K 0x001B -#define I40E_ITR_8K 0x003E -#define I40E_ITR_4K 0x007A -#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ -#define I40E_ITR_RX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \ - I40E_ITR_DYNAMIC) -#define I40E_ITR_TX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \ - I40E_ITR_DYNAMIC) -#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ -#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ -#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ #define I40E_DEFAULT_IRQ_WORK 256 -#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1) -#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC)) -#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1) + +/* The datasheet for the X710 and XL710 indicate that the maximum value for + * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec + * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing + * the register value which is divided by 2 lets use the actual values and + * avoid an excessive amount of translation. + */ +#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ +#define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */ +#define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */ +#define I40E_ITR_100K 10 /* all values below must be even */ +#define I40E_ITR_50K 20 +#define I40E_ITR_20K 50 +#define I40E_ITR_18K 60 +#define I40E_ITR_8K 122 +#define I40E_MAX_ITR 8160 /* maximum value as per datasheet */ +#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC) +#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK) +#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC)) + +#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) +#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) + /* 0x40 is the enable bit for interrupt rate limiting, and must be set if * the value of the rate limit is non-zero */ #define INTRL_ENA BIT(6) +#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) + /** * i40e_intrl_usec_to_reg - convert interrupt rate limit to register * @intrl: interrupt rate limit to convert @@ -382,8 +388,7 @@ struct i40e_ring { * these values always store the USER setting, and must be converted * before programming to a register. 
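Since the stored setting is now plain microseconds, the conversion to a register value is just a mask and a shift; a sketch under that assumption, with constants copied from the hunk and helper names invented:

#define ITR_DYNAMIC	0x8000	/* top bit flags adaptive mode */
#define ITR_MASK	0x1FFE	/* even usec values up to 8160 */

static unsigned int itr_to_reg(unsigned int setting)
{
	return setting & ~ITR_DYNAMIC;		/* usecs with the flag stripped */
}

static unsigned int itr_reg_field(unsigned int usecs, unsigned int interval_shift)
{
	/* registers count in 2-usec units, so shift one bit less
	 * rather than dividing the usec value by two first
	 */
	return (usecs & ITR_MASK) << (interval_shift - 1);
}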
*/ - u16 rx_itr_setting; - u16 tx_itr_setting; + u16 itr_setting; u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ @@ -459,21 +464,21 @@ static inline void set_ring_xdp(struct i40e_ring *ring) ring->flags |= I40E_TXR_FLAGS_XDP; } -enum i40e_latency_range { - I40E_LOWEST_LATENCY = 0, - I40E_LOW_LATENCY = 1, - I40E_BULK_LATENCY = 2, -}; +#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002 +#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002 +#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e +#define I40E_ITR_ADAPTIVE_LATENCY 0x8000 +#define I40E_ITR_ADAPTIVE_BULK 0x0000 +#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY)) struct i40e_ring_container { - /* array of pointers to rings */ - struct i40e_ring *ring; + struct i40e_ring *ring; /* pointer to linked list of ring(s) */ + unsigned long next_update; /* jiffies value of next update */ unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_packets; /* total packets processed this int */ - unsigned long last_itr_update; /* jiffies of last ITR update */ u16 count; - enum i40e_latency_range latency_range; - u16 itr; + u16 target_itr; /* target ITR setting for ring(s) */ + u16 current_itr; /* current ITR setting for ring(s) */ }; /* iterator for handling rings in ring container */ @@ -501,10 +506,12 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring); void i40e_free_rx_resources(struct i40e_ring *rx_ring); int i40e_napi_poll(struct napi_struct *napi, int budget); void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); -u32 i40e_get_tx_pending(struct i40e_ring *ring); +u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); void i40e_detect_recover_hung(struct i40e_vsi *vsi); int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); +int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp); +void i40e_xdp_flush(struct net_device *dev); /** * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index cd294e6a8587..bfb80092b352 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver @@ -39,7 +40,7 @@ #define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) #define I40E_MAX_VSI_QP 16 -#define I40E_MAX_VF_VSI 3 +#define I40E_MAX_VF_VSI 4 #define I40E_MAX_CHAINED_RX_BUFFERS 5 #define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 @@ -1336,6 +1337,9 @@ struct i40e_hw_port_stats { #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 #define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 #define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) +#define I40E_PTR_TYPE BIT(15) +#define I40E_SR_OCP_CFG_WORD0 0x2B +#define I40E_SR_OCP_ENABLED BIT(15) /* Shadow RAM related */ #define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index e9309fb9084b..35173cbe80f7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver @@ -258,6 +259,38 @@ static u16 
i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id, } /** + * i40e_get_real_pf_qid + * @vf: pointer to the VF info + * @vsi_id: vsi id + * @queue_id: queue number + * + * wrapper function to get pf_queue_id handling ADq code as well + **/ +static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id) +{ + int i; + + if (vf->adq_enabled) { + /* Although VF considers all the queues(can be 1 to 16) as its + * own but they may actually belong to different VSIs(up to 4). + * We need to find which queues belongs to which VSI. + */ + for (i = 0; i < vf->num_tc; i++) { + if (queue_id < vf->ch[i].num_qps) { + vsi_id = vf->ch[i].vsi_id; + break; + } + /* find right queue id which is relative to a + * given VSI. + */ + queue_id -= vf->ch[i].num_qps; + } + } + + return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id); +} + +/** * i40e_config_irq_link_list * @vf: pointer to the VF info * @vsi_id: id of VSI as given by the FW @@ -310,7 +343,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); + pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id); reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); wr32(hw, reg_idx, reg); @@ -333,8 +366,9 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, if (next_q < size) { vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, - vsi_queue_id); + pf_queue_id = i40e_get_real_pf_qid(vf, + vsi_id, + vsi_queue_id); } else { pf_queue_id = I40E_QUEUE_END_OF_LIST; qtype = 0; @@ -669,18 +703,20 @@ error_param: /** * i40e_alloc_vsi_res * @vf: pointer to the VF info - * @type: type of VSI to allocate + * @idx: VSI index, applies only for ADq mode, zero otherwise * * alloc VF vsi context & resources **/ -static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) +static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) { struct i40e_mac_filter *f = NULL; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi; + u64 max_tx_rate = 0; int ret = 0; - vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id); + vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid, + vf->vf_id); if (!vsi) { dev_err(&pf->pdev->dev, @@ -689,7 +725,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) ret = -ENOENT; goto error_alloc_vsi_res; } - if (type == I40E_VSI_SRIOV) { + + if (!idx) { u64 hena = i40e_pf_get_default_rss_hena(pf); u8 broadcast[ETH_ALEN]; @@ -721,17 +758,29 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) spin_unlock_bh(&vsi->mac_filter_hash_lock); wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena); wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32)); + /* program mac filter only for VF VSI */ + ret = i40e_sync_vsi_filters(vsi); + if (ret) + dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); } - /* program mac filter */ - ret = i40e_sync_vsi_filters(vsi); - if (ret) - dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); + /* storing VSI index and id for ADq and don't apply the mac filter */ + if (vf->adq_enabled) { + vf->ch[idx].vsi_idx = vsi->idx; + vf->ch[idx].vsi_id = vsi->id; + } /* Set VF bandwidth if specified */ if (vf->tx_rate) { + max_tx_rate = vf->tx_rate; + } else if 
(vf->ch[idx].max_tx_rate) { + max_tx_rate = vf->ch[idx].max_tx_rate; + } + + if (max_tx_rate) { + max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR); ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, - vf->tx_rate / 50, 0, NULL); + max_tx_rate, 0, NULL); if (ret) dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", vf->vf_id, ret); @@ -742,6 +791,92 @@ error_alloc_vsi_res: } /** + * i40e_map_pf_queues_to_vsi + * @vf: pointer to the VF info + * + * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This + * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI. + **/ +static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + u32 reg, num_tc = 1; /* VF has at least one traffic class */ + u16 vsi_id, qps; + int i, j; + + if (vf->adq_enabled) + num_tc = vf->num_tc; + + for (i = 0; i < num_tc; i++) { + if (vf->adq_enabled) { + qps = vf->ch[i].num_qps; + vsi_id = vf->ch[i].vsi_id; + } else { + qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; + vsi_id = vf->lan_vsi_id; + } + + for (j = 0; j < 7; j++) { + if (j * 2 >= qps) { + /* end of list */ + reg = 0x07FF07FF; + } else { + u16 qid = i40e_vc_get_pf_queue_id(vf, + vsi_id, + j * 2); + reg = qid; + qid = i40e_vc_get_pf_queue_id(vf, vsi_id, + (j * 2) + 1); + reg |= qid << 16; + } + i40e_write_rx_ctl(hw, + I40E_VSILAN_QTABLE(j, vsi_id), + reg); + } + } +} + +/** + * i40e_map_pf_to_vf_queues + * @vf: pointer to the VF info + * + * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This + * function takes care of the second part VPLAN_QTABLE & completes VF mappings. + **/ +static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + u32 reg, total_qps = 0; + u32 qps, num_tc = 1; /* VF has at least one traffic class */ + u16 vsi_id, qid; + int i, j; + + if (vf->adq_enabled) + num_tc = vf->num_tc; + + for (i = 0; i < num_tc; i++) { + if (vf->adq_enabled) { + qps = vf->ch[i].num_qps; + vsi_id = vf->ch[i].vsi_id; + } else { + qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; + vsi_id = vf->lan_vsi_id; + } + + for (j = 0; j < qps; j++) { + qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j); + + reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); + wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id), + reg); + total_qps++; + } + } +} + +/** * i40e_enable_vf_mappings * @vf: pointer to the VF info * @@ -751,8 +886,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; - u32 reg, total_queue_pairs = 0; - int j; + u32 reg; /* Tell the hardware we're using noncontiguous mapping. 
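Each VSILAN_QTABLE register packs two PF queue ids, which is what the j-loop above is doing; a standalone sketch of just the packing, where get_qid() is an invented stand-in for i40e_vc_get_pf_queue_id():

#include <stdint.h>

#define QTABLE_END_OF_LIST 0x07FF07FF	/* both halves marked unused */

static uint32_t pack_qtable_entry(unsigned int j, unsigned int qps,
				  uint16_t (*get_qid)(unsigned int))
{
	if (j * 2 >= qps)
		return QTABLE_END_OF_LIST;	/* past the last queue pair */
	return (uint32_t)get_qid(j * 2) |		/* even queue, low half */
	       ((uint32_t)get_qid(j * 2 + 1) << 16);	/* odd queue, high half */
}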
HW requires * that VF queues be mapped using this method, even when they are @@ -765,30 +899,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); - /* map PF queues to VF queues */ - for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) { - u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j); - - reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); - wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); - total_queue_pairs++; - } - - /* map PF queues to VSI */ - for (j = 0; j < 7; j++) { - if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) { - reg = 0x07FF07FF; /* unused */ - } else { - u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, - j * 2); - reg = qid; - qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, - (j * 2) + 1); - reg |= qid << 16; - } - i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), - reg); - } + i40e_map_pf_to_vf_queues(vf); + i40e_map_pf_queues_to_vsi(vf); i40e_flush(hw); } @@ -824,7 +936,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf) struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; u32 reg_idx, reg; - int i, msix_vf; + int i, j, msix_vf; /* Start by disabling VF's configuration API to prevent the OS from * accessing the VF's VSI after it's freed / invalidated. @@ -846,6 +958,20 @@ static void i40e_free_vf_res(struct i40e_vf *vf) vf->lan_vsi_id = 0; vf->num_mac = 0; } + + /* do the accounting and remove additional ADq VSI's */ + if (vf->adq_enabled && vf->ch[0].vsi_idx) { + for (j = 0; j < vf->num_tc; j++) { + /* At this point VSI0 is already released so don't + * release it again and only clear their values in + * structure variables + */ + if (j) + i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]); + vf->ch[j].vsi_idx = 0; + vf->ch[j].vsi_id = 0; + } + } msix_vf = pf->hw.func_caps.num_msix_vectors_vf; /* disable interrupts so the VF starts in a known state */ @@ -891,7 +1017,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; int total_queue_pairs = 0; - int ret; + int ret, idx; if (vf->num_req_queues && vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF) @@ -900,11 +1026,30 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf) pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; /* allocate hw vsi context & associated resources */ - ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); + ret = i40e_alloc_vsi_res(vf, 0); if (ret) goto error_alloc; total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; + /* allocate additional VSIs based on tc information for ADq */ + if (vf->adq_enabled) { + if (pf->queues_left >= + (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) { + /* TC 0 always belongs to VF VSI */ + for (idx = 1; idx < vf->num_tc; idx++) { + ret = i40e_alloc_vsi_res(vf, idx); + if (ret) + goto error_alloc; + } + /* send correct number of queues */ + total_queue_pairs = I40E_MAX_VF_QUEUES; + } else { + dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n", + vf->vf_id); + vf->adq_enabled = false; + } + } + /* We account for each VF to get a default number of queue pairs. 
If * the VF has now requested more, we need to account for that to make * certain we never request more queues than we actually have left in @@ -1537,6 +1682,27 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) } /** + * i40e_del_qch - delete all the additional VSIs created as a part of ADq + * @vf: pointer to VF structure + **/ +static void i40e_del_qch(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + int i; + + /* The first element in the array belongs to the primary VF VSI and we + * shouldn't delete it. We should, however, delete the rest of the VSIs created + */ + for (i = 1; i < vf->num_tc; i++) { + if (vf->ch[i].vsi_idx) { + i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]); + vf->ch[i].vsi_idx = 0; + vf->ch[i].vsi_id = 0; + } + } +} + +/** * i40e_vc_get_vf_resources_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1631,6 +1797,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ; + vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; @@ -1855,27 +2024,37 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; struct i40e_pf *pf = vf->pf; - u16 vsi_id, vsi_queue_id; + u16 vsi_id, vsi_queue_id = 0; i40e_status aq_ret = 0; - int i; + int i, j = 0, idx = 0; + + vsi_id = qci->vsi_id; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } - vsi_id = qci->vsi_id; if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; } + for (i = 0; i < qci->num_queue_pairs; i++) { qpi = &qci->qpair[i]; - vsi_queue_id = qpi->txq.queue_id; - if ((qpi->txq.vsi_id != vsi_id) || - (qpi->rxq.vsi_id != vsi_id) || - (qpi->rxq.queue_id != vsi_queue_id) || - !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { + + if (!vf->adq_enabled) { + vsi_queue_id = qpi->txq.queue_id; + + if (qpi->txq.vsi_id != qci->vsi_id || + qpi->rxq.vsi_id != qci->vsi_id || + qpi->rxq.queue_id != vsi_queue_id) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + } + + if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -1887,9 +2066,33 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) aq_ret = I40E_ERR_PARAM; goto error_param; } + + /* For ADq there can be up to 4 VSIs with max 4 queues each. + * The VF does not know about these additional VSIs and all + * it cares about is its own queues.
PF configures these queues + * to its appropriate VSIs based on TC mapping + **/ + if (vf->adq_enabled) { + if (j == (vf->ch[idx].num_qps - 1)) { + idx++; + j = 0; /* resetting the queue count */ + vsi_queue_id = 0; + } else { + j++; + vsi_queue_id++; + } + vsi_id = vf->ch[idx].vsi_id; + } } /* set vsi num_queue_pairs in use to num configured by VF */ - pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs; + if (!vf->adq_enabled) { + pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = + qci->num_queue_pairs; + } else { + for (i = 0; i < vf->num_tc; i++) + pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs = + vf->ch[i].num_qps; + } error_param: /* send the response to the VF */ @@ -1898,6 +2101,33 @@ error_param: } /** + * i40e_validate_queue_map + * @vsi_id: vsi id + * @queuemap: Tx or Rx queue map + * + * check if Tx or Rx queue map is valid + **/ +static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, + unsigned long queuemap) +{ + u16 vsi_queue_id, queue_id; + + for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { + if (vf->adq_enabled) { + vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; + queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); + } else { + queue_id = vsi_queue_id; + } + + if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) + return -EINVAL; + } + + return 0; +} + +/** * i40e_vc_config_irq_map_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1911,9 +2141,8 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) struct virtchnl_irq_map_info *irqmap_info = (struct virtchnl_irq_map_info *)msg; struct virtchnl_vector_map *map; - u16 vsi_id, vsi_queue_id, vector_id; + u16 vsi_id, vector_id; i40e_status aq_ret = 0; - unsigned long tempmap; int i; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { @@ -1923,7 +2152,6 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) for (i = 0; i < irqmap_info->num_vectors; i++) { map = &irqmap_info->vecmap[i]; - vector_id = map->vector_id; vsi_id = map->vsi_id; /* validate msg params */ @@ -1933,23 +2161,14 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } - /* lookout for the invalid queue index */ - tempmap = map->rxq_map; - for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { - if (!i40e_vc_isvalid_queue_id(vf, vsi_id, - vsi_queue_id)) { - aq_ret = I40E_ERR_PARAM; - goto error_param; - } + if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; } - tempmap = map->txq_map; - for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { - if (!i40e_vc_isvalid_queue_id(vf, vsi_id, - vsi_queue_id)) { - aq_ret = I40E_ERR_PARAM; - goto error_param; - } + if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; } i40e_config_irq_link_list(vf, vsi_id, map); @@ -1975,6 +2194,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_pf *pf = vf->pf; u16 vsi_id = vqs->vsi_id; i40e_status aq_ret = 0; + int i; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; @@ -1993,6 +2213,16 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx])) aq_ret = I40E_ERR_TIMEOUT; + + /* need to start the rings for additional ADq VSI's as well */ + if (vf->adq_enabled) { + /* zero belongs to LAN VSI */ + for (i = 1; i < vf->num_tc; i++) { + if 
(i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) + aq_ret = I40E_ERR_TIMEOUT; + } + } + error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, @@ -2139,25 +2369,47 @@ error_param: /** * i40e_check_vf_permission * @vf: pointer to the VF info - * @macaddr: pointer to the MAC Address being checked + * @al: MAC address list from virtchnl * - * Check if the VF has permission to add or delete unicast MAC address - * filters and return error code -EPERM if not. Then check if the - * address filter requested is broadcast or zero and if so return - * an invalid MAC address error code. + * Check that the given list of MAC addresses is allowed. Will return -EPERM + * if any address in the list is not valid. Checks the following conditions: + * + * 1) broadcast and zero addresses are never valid + * 2) unicast addresses are not allowed if the VMM has administratively set + * the VF MAC address, unless the VF is marked as privileged. + * 3) There is enough space to add all the addresses. + * + * Note that to guarantee consistency, it is expected this function be called + * while holding the mac_filter_hash_lock, as otherwise the current number of + * addresses might not be accurate. **/ -static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) +static inline int i40e_check_vf_permission(struct i40e_vf *vf, + struct virtchnl_ether_addr_list *al) { struct i40e_pf *pf = vf->pf; - int ret = 0; + int i; + + /* If this VF is not privileged, then we can't add more than a limited + * number of addresses. Check to make sure that the additions do not + * push us over the limit. + */ + if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && + (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) { + dev_err(&pf->pdev->dev, + "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); + return -EPERM; + } + + for (i = 0; i < al->num_elements; i++) { + u8 *addr = al->list[i].addr; + + if (is_broadcast_ether_addr(addr) || + is_zero_ether_addr(addr)) { + dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", + addr); + return I40E_ERR_INVALID_MAC_ADDR; + } - if (is_broadcast_ether_addr(macaddr) || - is_zero_ether_addr(macaddr)) { - dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr); - ret = I40E_ERR_INVALID_MAC_ADDR; - } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) && - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && - !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) { /* If the host VMM administrator has set the VF MAC address * administratively via the ndo_set_vf_mac command then deny * permission to the VF to add or delete unicast MAC addresses. @@ -2165,16 +2417,16 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) * The VF may request to set the MAC address filter already * assigned to it so do not return an error in that case. 
*/ - dev_err(&pf->pdev->dev, - "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n"); - ret = -EPERM; - } else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) && - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { - dev_err(&pf->pdev->dev, - "VF is not trusted, switch the VF to trusted to add more functionality\n"); - ret = -EPERM; + if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && + !is_multicast_ether_addr(addr) && vf->pf_set_mac && + !ether_addr_equal(addr, vf->default_lan_addr.addr)) { + dev_err(&pf->pdev->dev, + "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n"); + return -EPERM; + } } - return ret; + + return 0; } /** @@ -2201,11 +2453,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } - for (i = 0; i < al->num_elements; i++) { - ret = i40e_check_vf_permission(vf, al->list[i].addr); - if (ret) - goto error_param; - } vsi = pf->vsi[vf->lan_vsi_idx]; /* Lock once, because all functions inside the for loop access the VSI's MAC filter list and can change it */ spin_lock_bh(&vsi->mac_filter_hash_lock); + ret = i40e_check_vf_permission(vf, al); + if (ret) { + spin_unlock_bh(&vsi->mac_filter_hash_lock); + goto error_param; + } + /* add new addresses to the list */ for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; @@ -2688,6 +2941,618 @@ err: } /** + * i40e_validate_cloud_filter + * @vf: pointer to the VF info + * @tc_filter: pointer to the TC filter being validated + * + * This function validates a cloud filter programmed as a TC filter for ADq + **/ +static int i40e_validate_cloud_filter(struct i40e_vf *vf, + struct virtchnl_filter *tc_filter) +{ + struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; + struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + struct i40e_mac_filter *f; + struct hlist_node *h; + bool found = false; + int bkt; + + if (!tc_filter->action) { + dev_info(&pf->pdev->dev, + "VF %d: Currently ADq doesn't support Drop Action\n", + vf->vf_id); + goto err; + } + + /* action_meta here is the TC number to which the filter is applied */ + if (!tc_filter->action_meta || + tc_filter->action_meta > I40E_MAX_VF_VSI) { + dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", + vf->vf_id, tc_filter->action_meta); + goto err; + } + + /* Check whether the filter is programmed for advanced mode or basic mode. + * There are two ADq modes (for VF only), + * 1. Basic mode: intended to allow as many filter options as possible + * to be added to a VF in Non-trusted mode. Main goal is + * to add filters to its own MAC and VLAN id. + * 2. Advanced mode: allows filters to be applied to traffic other than + * its own MAC or VLAN. This mode requires the VF to be + * Trusted.
+ */ + if (mask.dst_mac[0] && !mask.dst_ip[0]) { + vsi = pf->vsi[vf->lan_vsi_idx]; + f = i40e_find_mac(vsi, data.dst_mac); + + if (!f) { + dev_info(&pf->pdev->dev, + "Destination MAC %pM doesn't belong to VF %d\n", + data.dst_mac, vf->vf_id); + goto err; + } + + if (mask.vlan_id) { + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, + hlist) { + if (f->vlan == ntohs(data.vlan_id)) { + found = true; + break; + } + } + if (!found) { + dev_info(&pf->pdev->dev, + "VF %d doesn't have any VLAN id %u\n", + vf->vf_id, ntohs(data.vlan_id)); + goto err; + } + } + } else { + /* Check if VF is trusted */ + if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + dev_err(&pf->pdev->dev, + "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", + vf->vf_id); + return I40E_ERR_CONFIG; + } + } + + if (mask.dst_mac[0] & data.dst_mac[0]) { + if (is_broadcast_ether_addr(data.dst_mac) || + is_zero_ether_addr(data.dst_mac)) { + dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", + vf->vf_id, data.dst_mac); + goto err; + } + } + + if (mask.src_mac[0] & data.src_mac[0]) { + if (is_broadcast_ether_addr(data.src_mac) || + is_zero_ether_addr(data.src_mac)) { + dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", + vf->vf_id, data.src_mac); + goto err; + } + } + + if (mask.dst_port & data.dst_port) { + if (!data.dst_port || be16_to_cpu(data.dst_port) > 0xFFFF) { + dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", + vf->vf_id); + goto err; + } + } + + if (mask.src_port & data.src_port) { + if (!data.src_port || be16_to_cpu(data.src_port) > 0xFFFF) { + dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", + vf->vf_id); + goto err; + } + } + + if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && + tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { + dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", + vf->vf_id); + goto err; + } + + if (mask.vlan_id & data.vlan_id) { + if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { + dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", + vf->vf_id); + goto err; + } + } + + return I40E_SUCCESS; +err: + return I40E_ERR_CONFIG; +} + +/** + * i40e_find_vsi_from_seid - searches for the vsi with the given seid + * @vf: pointer to the VF info + * @seid - seid of the vsi it is searching for + **/ +static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + int i; + + for (i = 0; i < vf->num_tc ; i++) { + vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id); + if (vsi && vsi->seid == seid) + return vsi; + } + return NULL; +} + +/** + * i40e_del_all_cloud_filters + * @vf: pointer to the VF info + * + * This function deletes all cloud filters + **/ +static void i40e_del_all_cloud_filters(struct i40e_vf *vf) +{ + struct i40e_cloud_filter *cfilter = NULL; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + struct hlist_node *node; + int ret; + + hlist_for_each_entry_safe(cfilter, node, + &vf->cloud_filter_list, cloud_node) { + vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); + + if (!vsi) { + dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", + vf->vf_id, cfilter->seid); + continue; + } + + if (cfilter->dst_port) + ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, + false); + else + ret = i40e_add_del_cloud_filter(vsi, cfilter, false); + if (ret) + dev_err(&pf->pdev->dev, + "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", + vf->vf_id, i40e_stat_str(&pf->hw, ret), + 
i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + + hlist_del(&cfilter->cloud_node); + kfree(cfilter); + vf->num_cloud_filters--; + } +} + +/** + * i40e_vc_del_cloud_filter + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * This function deletes a cloud filter programmed as TC filter for ADq + **/ +static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) +{ + struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; + struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; + struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; + struct i40e_cloud_filter cfilter, *cf = NULL; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + struct hlist_node *node; + i40e_status aq_ret = 0; + int i, ret; + + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + if (!vf->adq_enabled) { + dev_info(&pf->pdev->dev, + "VF %d: ADq not enabled, can't apply cloud filter\n", + vf->vf_id); + aq_ret = I40E_ERR_PARAM; + goto err; + } + + if (i40e_validate_cloud_filter(vf, vcf)) { + dev_info(&pf->pdev->dev, + "VF %d: Invalid input, can't apply cloud filter\n", + vf->vf_id); + aq_ret = I40E_ERR_PARAM; + goto err; + } + + memset(&cfilter, 0, sizeof(cfilter)); + /* parse destination mac address */ + for (i = 0; i < ETH_ALEN; i++) + cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; + + /* parse source mac address */ + for (i = 0; i < ETH_ALEN; i++) + cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; + + cfilter.vlan_id = mask.vlan_id & tcf.vlan_id; + cfilter.dst_port = mask.dst_port & tcf.dst_port; + cfilter.src_port = mask.src_port & tcf.src_port; + + switch (vcf->flow_type) { + case VIRTCHNL_TCP_V4_FLOW: + cfilter.n_proto = ETH_P_IP; + if (mask.dst_ip[0] & tcf.dst_ip[0]) + memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip, + ARRAY_SIZE(tcf.dst_ip)); + else if (mask.src_ip[0] & tcf.dst_ip[0]) + memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip, + ARRAY_SIZE(tcf.dst_ip)); + break; + case VIRTCHNL_TCP_V6_FLOW: + cfilter.n_proto = ETH_P_IPV6; + if (mask.dst_ip[3] & tcf.dst_ip[3]) + memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip, + sizeof(cfilter.ip.v6.dst_ip6)); + if (mask.src_ip[3] & tcf.src_ip[3]) + memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip, + sizeof(cfilter.ip.v6.src_ip6)); + break; + default: + /* TC filter can be configured based on different combinations + * and in this case IP is not a part of filter config + */ + dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", + vf->vf_id); + } + + /* get the vsi to which the tc belongs to */ + vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; + cfilter.seid = vsi->seid; + cfilter.flags = vcf->field_flags; + + /* Deleting TC filter */ + if (tcf.dst_port) + ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false); + else + ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); + if (ret) { + dev_err(&pf->pdev->dev, + "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", + vf->vf_id, i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + goto err; + } + + hlist_for_each_entry_safe(cf, node, + &vf->cloud_filter_list, cloud_node) { + if (cf->seid != cfilter.seid) + continue; + if (mask.dst_port) + if (cfilter.dst_port != cf->dst_port) + continue; + if (mask.dst_mac[0]) + if (!ether_addr_equal(cf->src_mac, cfilter.src_mac)) + continue; + /* for ipv4 data to be valid, only first byte of mask is set */ + if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0]) + if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip, + ARRAY_SIZE(tcf.dst_ip))) + continue; + /* for ipv6, 
mask is set for all sixteen bytes (4 words) */ + if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) + if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, + sizeof(cfilter.ip.v6.src_ip6))) + continue; + if (mask.vlan_id) + if (cfilter.vlan_id != cf->vlan_id) + continue; + + hlist_del(&cf->cloud_node); + kfree(cf); + vf->num_cloud_filters--; + } + +err: + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, + aq_ret); +} + +/** + * i40e_vc_add_cloud_filter + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * This function adds a cloud filter programmed as TC filter for ADq + **/ +static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) +{ + struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; + struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; + struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; + struct i40e_cloud_filter *cfilter = NULL; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + i40e_status aq_ret = 0; + int i, ret; + + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + if (!vf->adq_enabled) { + dev_info(&pf->pdev->dev, + "VF %d: ADq is not enabled, can't apply cloud filter\n", + vf->vf_id); + aq_ret = I40E_ERR_PARAM; + goto err; + } + + if (i40e_validate_cloud_filter(vf, vcf)) { + dev_info(&pf->pdev->dev, + "VF %d: Invalid input/s, can't apply cloud filter\n", + vf->vf_id); + aq_ret = I40E_ERR_PARAM; + goto err; + } + + cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); + if (!cfilter) + return -ENOMEM; + + /* parse destination mac address */ + for (i = 0; i < ETH_ALEN; i++) + cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; + + /* parse source mac address */ + for (i = 0; i < ETH_ALEN; i++) + cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; + + cfilter->vlan_id = mask.vlan_id & tcf.vlan_id; + cfilter->dst_port = mask.dst_port & tcf.dst_port; + cfilter->src_port = mask.src_port & tcf.src_port; + + switch (vcf->flow_type) { + case VIRTCHNL_TCP_V4_FLOW: + cfilter->n_proto = ETH_P_IP; + if (mask.dst_ip[0] & tcf.dst_ip[0]) + memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip, + ARRAY_SIZE(tcf.dst_ip)); + else if (mask.src_ip[0] & tcf.dst_ip[0]) + memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip, + ARRAY_SIZE(tcf.dst_ip)); + break; + case VIRTCHNL_TCP_V6_FLOW: + cfilter->n_proto = ETH_P_IPV6; + if (mask.dst_ip[3] & tcf.dst_ip[3]) + memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip, + sizeof(cfilter->ip.v6.dst_ip6)); + if (mask.src_ip[3] & tcf.src_ip[3]) + memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip, + sizeof(cfilter->ip.v6.src_ip6)); + break; + default: + /* TC filter can be configured based on different combinations + * and in this case IP is not a part of filter config + */ + dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", + vf->vf_id); + } + + /* get the VSI to which the TC belongs to */ + vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; + cfilter->seid = vsi->seid; + cfilter->flags = vcf->field_flags; + + /* Adding cloud filter programmed as TC filter */ + if (tcf.dst_port) + ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); + else + ret = i40e_add_del_cloud_filter(vsi, cfilter, true); + if (ret) { + dev_err(&pf->pdev->dev, + "VF %d: Failed to add cloud filter, err %s aq_err %s\n", + vf->vf_id, i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + goto err; + } + + INIT_HLIST_NODE(&cfilter->cloud_node); + hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); + vf->num_cloud_filters++; +err: + return 
i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, + aq_ret); +} + +/** + * i40e_vc_add_qch_msg: Add queue channel and enable ADq + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + **/ +static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) +{ + struct virtchnl_tc_info *tci = + (struct virtchnl_tc_info *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_link_status *ls = &pf->hw.phy.link_info; + int i, adq_request_qps = 0, speed = 0; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + /* ADq cannot be applied if spoof check is ON */ + if (vf->spoofchk) { + dev_err(&pf->pdev->dev, + "Spoof check is ON, turn it OFF to enable ADq\n"); + aq_ret = I40E_ERR_PARAM; + goto err; + } + + if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { + dev_err(&pf->pdev->dev, + "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n", + vf->vf_id); + aq_ret = I40E_ERR_PARAM; + goto err; + } + + /* max number of traffic classes for VF currently capped at 4 */ + if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { + dev_err(&pf->pdev->dev, + "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n", + vf->vf_id, tci->num_tc); + aq_ret = I40E_ERR_PARAM; + goto err; + } + + /* validate queues for each TC */ + for (i = 0; i < tci->num_tc; i++) + if (!tci->list[i].count || + tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) { + dev_err(&pf->pdev->dev, + "VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n", + vf->vf_id, i, tci->list[i].count); + aq_ret = I40E_ERR_PARAM; + goto err; + } + + /* need Max VF queues but already have default number of queues */ + adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF; + + if (pf->queues_left < adq_request_qps) { + dev_err(&pf->pdev->dev, + "No queues left to allocate to VF %d\n", + vf->vf_id); + aq_ret = I40E_ERR_PARAM; + goto err; + } else { + /* we need to allocate max VF queues to enable ADq so as to + * make sure ADq enabled VF always gets back queues when it + * goes through a reset. + */ + vf->num_queue_pairs = I40E_MAX_VF_QUEUES; + } + + /* get link speed in MB to validate rate limit */ + switch (ls->link_speed) { + case VIRTCHNL_LINK_SPEED_100MB: + speed = SPEED_100; + break; + case VIRTCHNL_LINK_SPEED_1GB: + speed = SPEED_1000; + break; + case VIRTCHNL_LINK_SPEED_10GB: + speed = SPEED_10000; + break; + case VIRTCHNL_LINK_SPEED_20GB: + speed = SPEED_20000; + break; + case VIRTCHNL_LINK_SPEED_25GB: + speed = SPEED_25000; + break; + case VIRTCHNL_LINK_SPEED_40GB: + speed = SPEED_40000; + break; + default: + dev_err(&pf->pdev->dev, + "Cannot detect link speed\n"); + aq_ret = I40E_ERR_PARAM; + goto err; + } + + /* parse data from the queue channel info */ + vf->num_tc = tci->num_tc; + for (i = 0; i < vf->num_tc; i++) { + if (tci->list[i].max_tx_rate) { + if (tci->list[i].max_tx_rate > speed) { + dev_err(&pf->pdev->dev, + "Invalid max tx rate %llu specified for VF %d.", + tci->list[i].max_tx_rate, + vf->vf_id); + aq_ret = I40E_ERR_PARAM; + goto err; + } else { + vf->ch[i].max_tx_rate = + tci->list[i].max_tx_rate; + } + } + vf->ch[i].num_qps = tci->list[i].count; + } + + /* set this flag only after making sure all inputs are sane */ + vf->adq_enabled = true; + /* num_req_queues is set when user changes number of queues via ethtool + * and this causes issue for default VSI(which depends on this variable) + * when ADq is enabled, hence reset it. 
+ */ + vf->num_req_queues = 0; + + /* reset the VF in order to allocate resources */ + i40e_vc_notify_vf_reset(vf); + i40e_reset_vf(vf, false); + + return I40E_SUCCESS; + + /* send the response to the VF */ +err: + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, + aq_ret); +} + +/** + * i40e_vc_del_qch_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + **/ +static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) +{ + struct i40e_pf *pf = vf->pf; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + if (vf->adq_enabled) { + i40e_del_all_cloud_filters(vf); + i40e_del_qch(vf); + vf->adq_enabled = false; + vf->num_tc = 0; + dev_info(&pf->pdev->dev, + "Deleting Queue Channels and cloud filters for ADq on VF %d\n", + vf->vf_id); + } else { + dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", + vf->vf_id); + aq_ret = I40E_ERR_PARAM; + } + + /* reset the VF in order to allocate resources */ + i40e_vc_notify_vf_reset(vf); + i40e_reset_vf(vf, false); + + return I40E_SUCCESS; + +err: + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, + aq_ret); +} + +/** * i40e_vc_process_vf_msg * @pf: pointer to the PF structure * @vf_id: source VF id @@ -2816,7 +3681,18 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, case VIRTCHNL_OP_REQUEST_QUEUES: ret = i40e_vc_request_queues_msg(vf, msg, msglen); break; - + case VIRTCHNL_OP_ENABLE_CHANNELS: + ret = i40e_vc_add_qch_msg(vf, msg); + break; + case VIRTCHNL_OP_DISABLE_CHANNELS: + ret = i40e_vc_del_qch_msg(vf, msg); + break; + case VIRTCHNL_OP_ADD_CLOUD_FILTER: + ret = i40e_vc_add_cloud_filter(vf, msg); + break; + case VIRTCHNL_OP_DEL_CLOUD_FILTER: + ret = i40e_vc_del_cloud_filter(vf, msg); + break; case VIRTCHNL_OP_UNKNOWN: default: dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", @@ -2889,6 +3765,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) int ret = 0; struct hlist_node *h; int bkt; + u8 i; /* validate the request */ if (vf_id >= pf->num_alloc_vfs) { @@ -2900,6 +3777,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; + + /* When the VF is resetting wait until it is done. + * It can take up to 200 milliseconds, + * but wait for up to 300 milliseconds to be safe. + */ + for (i = 0; i < 15; i++) { + if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) + break; + msleep(20); + } if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", vf_id); @@ -3382,6 +4269,16 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) i40e_vc_disable_vf(vf); dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", vf_id, setting ? 
"" : "un"); + + if (vf->adq_enabled) { + if (!vf->trusted) { + dev_info(&pf->pdev->dev, + "VF %u no longer Trusted, deleting all cloud filters\n", + vf_id); + i40e_del_all_cloud_filters(vf); + } + } + out: return ret; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 5efc4f92bb37..57f727bb9e36 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver @@ -69,6 +70,19 @@ enum i40e_vf_capabilities { I40E_VIRTCHNL_VF_CAP_IWARP, }; +/* In ADq, max 4 VSI's can be allocated per VF including primary VF VSI. + * These variables are used to store indices, id's and number of queues + * for each VSI including that of primary VF VSI. Each Traffic class is + * termed as channel and each channel can in-turn have 4 queues which + * means max 16 queues overall per VF. + */ +struct i40evf_channel { + u16 vsi_idx; /* index in PF struct for all channel VSIs */ + u16 vsi_id; /* VSI ID used by firmware */ + u16 num_qps; /* number of queue pairs requested by user */ + u64 max_tx_rate; /* bandwidth rate allocation for VSIs */ +}; + /* VF information structure */ struct i40e_vf { struct i40e_pf *pf; @@ -111,6 +125,13 @@ struct i40e_vf { u16 num_mac; u16 num_vlan; + /* ADq related variables */ + bool adq_enabled; /* flag to enable adq */ + u8 num_tc; + struct i40evf_channel ch[I40E_MAX_VF_VSI]; + struct hlist_head cloud_filter_list; + u16 num_cloud_filters; + /* RDMA Client */ struct virtchnl_iwarp_qvlist_info *qvlist_info; }; diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile index a393f4a07f06..1e89c5487676 100644 --- a/drivers/net/ethernet/intel/i40evf/Makefile +++ b/drivers/net/ethernet/intel/i40evf/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 ################################################################################ # # Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index d1aab6b8bfb1..6fd677efa9da 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index e0bfaa3d4a21..a7137c165256 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 815de8d9c3fb..439e71882049 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * 
Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h index 8e6a6dd9212b..7e0fddd8af36 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 67bf5cebb76f..67140cdbcd7a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index 0469e4bfd3ec..352dd3f3eb6a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h index 00ed24bfce13..7432596164f4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h index a5d79877354c..ddac0e4908d3 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h index a90737786c34..8668ad6c1a65 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 47c429931a57..72501bd0f1a9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h index 10febcfd7cd8..c9c935659758 100644 --- 
a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h index 5b222246e08b..0d7993ecb99a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_status.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/i40evf/i40e_trace.h index 9a5100b2b7c7..ece01dd12a3c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_trace.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_trace.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel(R) 40-10 Gigabit Ethernet Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 357d6051281f..12bd937861e7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver @@ -196,7 +197,7 @@ void i40evf_detect_recover_hung(struct i40e_vsi *vsi) */ smp_rmb(); tx_ring->tx_stats.prev_pkt_ctr = - i40evf_get_tx_pending(tx_ring, false) ? packets : -1; + i40evf_get_tx_pending(tx_ring, true) ? packets : -1; } } } @@ -392,99 +393,241 @@ void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) val); } +static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector, + struct i40e_ring_container *rc) +{ + return &q_vector->rx == rc; +} + +static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) +{ + unsigned int divisor; + + switch (q_vector->adapter->link_speed) { + case I40E_LINK_SPEED_40GB: + divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024; + break; + case I40E_LINK_SPEED_25GB: + case I40E_LINK_SPEED_20GB: + divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512; + break; + default: + case I40E_LINK_SPEED_10GB: + divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256; + break; + case I40E_LINK_SPEED_1GB: + case I40E_LINK_SPEED_100MB: + divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32; + break; + } + + return divisor; +} + /** - * i40e_set_new_dynamic_itr - Find new ITR level + * i40e_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information * @rc: structure containing ring performance data * - * Returns true if ITR changed, false if not - * - * Stores a new ITR value based on packets and byte counts during - * the last interrupt. The advantage of per interrupt computation - * is faster updates and more accurate ITR for the current traffic - * pattern. Constants in this function were computed based on - * theoretical maximum wire speed and thresholds were set based on - * testing data as well as attempting to minimize response time + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. 
The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time * while increasing bulk throughput. **/ -static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +static void i40e_update_itr(struct i40e_q_vector *q_vector, + struct i40e_ring_container *rc) { - enum i40e_latency_range new_latency_range = rc->latency_range; - u32 new_itr = rc->itr; - int bytes_per_usec; - unsigned int usecs, estimated_usecs; + unsigned int avg_wire_size, packets, bytes, itr; + unsigned long next_update = jiffies; - if (rc->total_packets == 0 || !rc->itr) - return false; + /* If we don't have any rings just leave ourselves set for maximum + * possible latency so we take ourselves out of the equation. + */ + if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting)) + return; + + /* For Rx we want to push the delay up and default to low latency. + * for Tx we want to pull the delay down and default to high latency. + */ + itr = i40e_container_is_rx(q_vector, rc) ? + I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY : + I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY; + + /* If we didn't update within up to 1 - 2 jiffies we can assume + * that either packets are coming in so slow there hasn't been + * any work, or that there is so much work that NAPI is dealing + * with interrupt moderation and we don't need to do anything. + */ + if (time_after(next_update, rc->next_update)) + goto clear_counts; + + /* If itr_countdown is set it means we programmed an ITR within + * the last 4 interrupt cycles. This has a side effect of us + * potentially firing an early interrupt. In order to work around + * this we need to throw out any data received for a few + * interrupts following the update. + */ + if (q_vector->itr_countdown) { + itr = rc->target_itr; + goto clear_counts; + } + + packets = rc->total_packets; + bytes = rc->total_bytes; - usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; - bytes_per_usec = rc->total_bytes / usecs; + if (i40e_container_is_rx(q_vector, rc)) { + /* If Rx there are 1 to 4 packets and bytes are less than + * 9000 assume insufficient data to use bulk rate limiting + * approach unless Tx is already in bulk rate limiting. We + * are likely latency driven. + */ + if (packets && packets < 4 && bytes < 9000 && + (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) { + itr = I40E_ITR_ADAPTIVE_LATENCY; + goto adjust_by_size; + } + } else if (packets < 4) { + /* If we have Tx and Rx ITR maxed and Tx ITR is running in + * bulk mode and we are receiving 4 or fewer packets just + * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so + * that the Rx can relax. + */ + if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS && + (q_vector->rx.target_itr & I40E_ITR_MASK) == + I40E_ITR_ADAPTIVE_MAX_USECS) + goto clear_counts; + } else if (packets > 32) { + /* If we have processed over 32 packets in a single interrupt + * for Tx assume we need to switch over to "bulk" mode. + */ + rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY; + } - /* The calculations in this algorithm depend on interrupts actually - * firing at the ITR rate. This may not happen if the packet rate is - * really low, or if we've been napi polling. Check to make sure - * that's not the case before we continue. + /* We have no packets to actually measure against. 
This means + * either one of the other queues on this vector is active or + * we are a Tx queue doing TSO with too high of an interrupt rate. + * + * Between 4 and 56 we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. */ - estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update); - if (estimated_usecs > usecs) { - new_latency_range = I40E_LOW_LATENCY; - goto reset_latency; + if (packets < 56) { + itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC; + if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { + itr &= I40E_ITR_ADAPTIVE_LATENCY; + itr += I40E_ITR_ADAPTIVE_MAX_USECS; + } + goto clear_counts; + } + + if (packets <= 256) { + itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); + itr &= I40E_ITR_MASK; + + /* Between 56 and 112 is our "goldilocks" zone where we are + * working out "just right". Just report that our current + * ITR is good for us. + */ + if (packets <= 112) + goto clear_counts; + + /* If packet count is 128 or greater we are likely looking + * at a slight overrun of the delay we want. Try halving + * our delay to see if that will cut the number of packets + * in half per interrupt. + */ + itr /= 2; + itr &= I40E_ITR_MASK; + if (itr < I40E_ITR_ADAPTIVE_MIN_USECS) + itr = I40E_ITR_ADAPTIVE_MIN_USECS; + + goto clear_counts; } - /* simple throttlerate management - * 0-10MB/s lowest (50000 ints/s) - * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (18000 ints/s) + /* The paths below assume we are dealing with a bulk ITR since the + * number of packets is greater than 256. We are just going to have + * to compute a value and try to bring the count under control, + * though for smaller packet sizes there isn't much we can do as + * NAPI polling will likely be kicking in sooner rather than later. + */ + itr = I40E_ITR_ADAPTIVE_BULK; + +adjust_by_size: + /* If packet counts are 256 or greater we can assume we have a gross + * overestimation of what the rate should be. Instead of trying to fine + * tune it just use the formula below to try and dial in an exact value + * given the current packet size of the frame. + */ + avg_wire_size = bytes / packets; + + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value * - * The math works out because the divisor is in 10^(-6) which - * turns the bytes/us input value into MB/s values, but - * make sure to use usecs, as the register values written - * are in 2 usec increments in the ITR registers, and make sure - * to use the smoothed values that the countdown timer gives us. + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (170 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this.
*/ - switch (new_latency_range) { - case I40E_LOWEST_LATENCY: - if (bytes_per_usec > 10) - new_latency_range = I40E_LOW_LATENCY; - break; - case I40E_LOW_LATENCY: - if (bytes_per_usec > 20) - new_latency_range = I40E_BULK_LATENCY; - else if (bytes_per_usec <= 10) - new_latency_range = I40E_LOWEST_LATENCY; - break; - case I40E_BULK_LATENCY: - default: - if (bytes_per_usec <= 20) - new_latency_range = I40E_LOW_LATENCY; - break; + if (avg_wire_size <= 60) { + /* Start at 250k ints/sec */ + avg_wire_size = 4096; + } else if (avg_wire_size <= 380) { + /* 250K ints/sec to 60K ints/sec */ + avg_wire_size *= 40; + avg_wire_size += 1696; + } else if (avg_wire_size <= 1084) { + /* 60K ints/sec to 36K ints/sec */ + avg_wire_size *= 15; + avg_wire_size += 11452; + } else if (avg_wire_size <= 1980) { + /* 36K ints/sec to 30K ints/sec */ + avg_wire_size *= 5; + avg_wire_size += 22420; + } else { + /* plateau at a limit of 30K ints/sec */ + avg_wire_size = 32256; } -reset_latency: - rc->latency_range = new_latency_range; + /* If we are in low latency mode halve our delay which doubles the + * rate to somewhere between 100K to 16K ints/sec + */ + if (itr & I40E_ITR_ADAPTIVE_LATENCY) + avg_wire_size /= 2; - switch (new_latency_range) { - case I40E_LOWEST_LATENCY: - new_itr = I40E_ITR_50K; - break; - case I40E_LOW_LATENCY: - new_itr = I40E_ITR_20K; - break; - case I40E_BULK_LATENCY: - new_itr = I40E_ITR_18K; - break; - default: - break; + /* Resultant value is 256 times larger than it needs to be. This + * gives us room to adjust the value as needed to either increase + * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. + * + * Use addition as we have already recorded the new latency flag + * for the ITR value. + */ + itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) * + I40E_ITR_ADAPTIVE_MIN_INC; + + if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { + itr &= I40E_ITR_ADAPTIVE_LATENCY; + itr += I40E_ITR_ADAPTIVE_MAX_USECS; } +clear_counts: + /* write back value */ + rc->target_itr = itr; + + /* next update should occur within next jiffy */ + rc->next_update = next_update + 1; + rc->total_bytes = 0; rc->total_packets = 0; - rc->last_itr_update = jiffies; - - if (new_itr != rc->itr) { - rc->itr = new_itr; - return true; - } - return false; } /** @@ -1273,7 +1416,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, * @rx_buffer: rx buffer to pull data from * * This function will clean up the contents of the rx_buffer. It will - * either recycle the bufer or unmap it and free the associated resources. + * either recycle the buffer or unmap it and free the associated resources. */ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer) @@ -1457,33 +1600,45 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) return failure ? budget : (int)total_rx_packets; } -static u32 i40e_buildreg_itr(const int type, const u16 itr) +static inline u32 i40e_buildreg_itr(const int type, u16 itr) { u32 val; + /* We don't bother with setting the CLEARPBA bit as the data sheet + * points out doing so is "meaningless since it was already + * auto-cleared". The auto-clearing happens when the interrupt is + * asserted. 
+ * + * Hardware errata 28 also indicates that writing to a + * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear + * an event in the PBA anyway, so we need to rely on the automask + * to hold pending events for us until the interrupt is re-enabled. + * + * The itr value is reported in microseconds, and the register + * value is recorded in 2 microsecond units. For this reason we + * only need to shift by the interval shift - 1 instead of the + * full value. + */ + itr &= I40E_ITR_MASK; + val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | - (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); + (itr << (I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1)); return val; } /* a small macro to shorten up some long lines */ #define INTREG I40E_VFINT_DYN_CTLN1 -static inline int get_rx_itr(struct i40e_vsi *vsi, int idx) -{ - struct i40evf_adapter *adapter = vsi->back; - return adapter->rx_rings[idx].rx_itr_setting; -} - -static inline int get_tx_itr(struct i40e_vsi *vsi, int idx) -{ - struct i40evf_adapter *adapter = vsi->back; - - return adapter->tx_rings[idx].tx_itr_setting; -} +/* The act of updating the ITR will cause it to immediately trigger. In order + * to prevent this from throwing off adaptive update statistics we defer the + * update so that it can only happen so often. So after either Tx or Rx are + * updated we make the adaptive scheme wait until either the ITR completely + * expires via the next_update expiration or we have been through at least + * 3 interrupts. + */ +#define ITR_COUNTDOWN_START 3 /** * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt @@ -1495,70 +1650,51 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { struct i40e_hw *hw = &vsi->back->hw; - bool rx = false, tx = false; - u32 rxval, txval; - int idx = q_vector->v_idx; - int rx_itr_setting, tx_itr_setting; - - /* avoid dynamic calculation if in countdown mode OR if - * all dynamic is disabled - */ - rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); - - rx_itr_setting = get_rx_itr(vsi, idx); - tx_itr_setting = get_tx_itr(vsi, idx); + u32 intval; - if (q_vector->itr_countdown > 0 || - (!ITR_IS_DYNAMIC(rx_itr_setting) && - !ITR_IS_DYNAMIC(tx_itr_setting))) { - goto enable_int; - } - - if (ITR_IS_DYNAMIC(rx_itr_setting)) { - rx = i40e_set_new_dynamic_itr(&q_vector->rx); - rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); - } + /* These will do nothing if dynamic updates are not enabled */ + i40e_update_itr(q_vector, &q_vector->tx); + i40e_update_itr(q_vector, &q_vector->rx); - if (ITR_IS_DYNAMIC(tx_itr_setting)) { - tx = i40e_set_new_dynamic_itr(&q_vector->tx); - txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); - } - - if (rx || tx) { - /* get the higher of the two ITR adjustments and - * use the same value for both ITR registers - * when in adaptive mode (Rx and/or Tx) - */ - u16 itr = max(q_vector->tx.itr, q_vector->rx.itr); - - q_vector->tx.itr = q_vector->rx.itr = itr; - txval = i40e_buildreg_itr(I40E_TX_ITR, itr); - tx = true; - rxval = i40e_buildreg_itr(I40E_RX_ITR, itr); - rx = true; - } - - /* only need to enable the interrupt once, but need - * to possibly update both ITR values + /* This block of logic allows us to get away with only updating + * one ITR value with each interrupt. The idea is to perform a + * pseudo-lazy update with the following criteria. + * + * 1. Rx is given higher priority than Tx if both are in the same state + * 2.
If we must reduce an ITR that is given highest priority. + * 3. We then give priority to increasing ITR based on amount. */ - if (rx) { - /* set the INTENA_MSK_MASK so that this first write - * won't actually enable the interrupt, instead just - * updating the ITR (it's bit 31 PF and VF) + if (q_vector->rx.target_itr < q_vector->rx.current_itr) { + /* Rx ITR needs to be reduced, this is highest priority */ + intval = i40e_buildreg_itr(I40E_RX_ITR, + q_vector->rx.target_itr); + q_vector->rx.current_itr = q_vector->rx.target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || + ((q_vector->rx.target_itr - q_vector->rx.current_itr) < + (q_vector->tx.target_itr - q_vector->tx.current_itr))) { + /* Tx ITR needs to be reduced, this is second priority + * Tx ITR needs to be increased more than Rx, fourth priority */ - rxval |= BIT(31); - /* don't check _DOWN because interrupt isn't being enabled */ - wr32(hw, INTREG(q_vector->reg_idx), rxval); + intval = i40e_buildreg_itr(I40E_TX_ITR, + q_vector->tx.target_itr); + q_vector->tx.current_itr = q_vector->tx.target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { + /* Rx ITR needs to be increased, third priority */ + intval = i40e_buildreg_itr(I40E_RX_ITR, + q_vector->rx.target_itr); + q_vector->rx.current_itr = q_vector->rx.target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else { + /* No ITR update, lowest priority */ + intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); + if (q_vector->itr_countdown) + q_vector->itr_countdown--; } -enable_int: if (!test_bit(__I40E_VSI_DOWN, vsi->state)) - wr32(hw, INTREG(q_vector->reg_idx), txval); - - if (q_vector->itr_countdown) - q_vector->itr_countdown--; - else - q_vector->itr_countdown = ITR_COUNTDOWN_START; + wr32(hw, INTREG(q_vector->reg_idx), intval); } /** diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 7798a6645c3f..5790897eae2e 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver @@ -28,31 +29,35 @@ #define _I40E_TXRX_H_ /* Interrupt Throttling and Rate Limiting Goodies */ - -#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ -#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ -#define I40E_ITR_100K 0x0005 -#define I40E_ITR_50K 0x000A -#define I40E_ITR_20K 0x0019 -#define I40E_ITR_18K 0x001B -#define I40E_ITR_8K 0x003E -#define I40E_ITR_4K 0x007A -#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ -#define I40E_ITR_RX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \ - I40E_ITR_DYNAMIC) -#define I40E_ITR_TX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \ - I40E_ITR_DYNAMIC) -#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ -#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ -#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ #define I40E_DEFAULT_IRQ_WORK 256 -#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1) -#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC)) -#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1) + +/* The datasheet for the X710 and XL710 indicate that the maximum value for + * the ITR is 8160usec which is then called out as 0xFF0 
with a 2usec + * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing + * the register value which is divided by 2 lets use the actual values and + * avoid an excessive amount of translation. + */ +#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ +#define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */ +#define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */ +#define I40E_ITR_100K 10 /* all values below must be even */ +#define I40E_ITR_50K 20 +#define I40E_ITR_20K 50 +#define I40E_ITR_18K 60 +#define I40E_ITR_8K 122 +#define I40E_MAX_ITR 8160 /* maximum value as per datasheet */ +#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC) +#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK) +#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC)) + +#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) +#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) + /* 0x40 is the enable bit for interrupt rate limiting, and must be set if * the value of the rate limit is non-zero */ #define INTRL_ENA BIT(6) +#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) #define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0) #define I40E_INTRL_8K 125 /* 8000 ints/sec */ @@ -362,8 +367,7 @@ struct i40e_ring { * these values always store the USER setting, and must be converted * before programming to a register. */ - u16 rx_itr_setting; - u16 tx_itr_setting; + u16 itr_setting; u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ @@ -425,21 +429,21 @@ static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring) ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED; } -enum i40e_latency_range { - I40E_LOWEST_LATENCY = 0, - I40E_LOW_LATENCY = 1, - I40E_BULK_LATENCY = 2, -}; +#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002 +#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002 +#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e +#define I40E_ITR_ADAPTIVE_LATENCY 0x8000 +#define I40E_ITR_ADAPTIVE_BULK 0x0000 +#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY)) struct i40e_ring_container { - /* array of pointers to rings */ - struct i40e_ring *ring; + struct i40e_ring *ring; /* pointer to linked list of ring(s) */ + unsigned long next_update; /* jiffies value of next update */ unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_packets; /* total packets processed this int */ - unsigned long last_itr_update; /* jiffies of last ITR update */ u16 count; - enum i40e_latency_range latency_range; - u16 itr; + u16 target_itr; /* target ITR setting for ring(s) */ + u16 current_itr; /* current ITR setting for ring(s) */ }; /* iterator for handling rings in ring container */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 54951c84a481..449de4b0058e 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 9690c1ea019e..3a7a1e77bf39 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ 
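An aside on the i40e_txrx.h hunk above, before the i40evf.h changes: with ITR values now stored directly in microseconds, the only remaining translation is masking off the dynamic flag and rounding to the register's 2 usec grid. A standalone sketch of that arithmetic, with the kernel's __ALIGN_MASK() expanded inline and a made-up input value (illustrative only, not part of the patch):

/* Standalone sketch, not driver code: ITR_* below mirror I40E_ITR_DYNAMIC,
 * I40E_ITR_MASK, ITR_TO_REG() and ITR_REG_ALIGN() from the hunk above.
 */
#include <stdio.h>

#define ITR_DYNAMIC	0x8000	/* top bit flags adaptive mode */
#define ITR_MASK	0x1FFE	/* usable register bits, 2 usec steps */
#define ITR_TO_REG(s)	((s) & ~ITR_DYNAMIC)	/* value is already usecs */
#define ITR_REG_ALIGN(s) (((s) + ~ITR_MASK) & ITR_MASK)	/* round up to 2 us */

int main(void)
{
	unsigned int setting = 37 | ITR_DYNAMIC;	/* odd usec request */

	/* prints "usecs=37 aligned=38 dynamic=1" */
	printf("usecs=%u aligned=%u dynamic=%d\n",
	       ITR_TO_REG(setting),
	       ITR_REG_ALIGN(ITR_TO_REG(setting)),
	       !!(setting & ITR_DYNAMIC));
	return 0;
}

This is also why the ethtool bounds checks later in this patch lose their << 1 shifts: rx_coalesce_usecs and tx_coalesce_usecs can be compared against I40E_MIN_ITR and I40E_MAX_ITR directly.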
/******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver @@ -52,7 +53,10 @@ #include <linux/socket.h> #include <linux/jiffies.h> #include <net/ip6_checksum.h> +#include <net/pkt_cls.h> #include <net/udp.h> +#include <net/tc_act/tc_gact.h> +#include <net/tc_act/tc_mirred.h> #include "i40e_type.h" #include <linux/avf/virtchnl.h> @@ -106,6 +110,7 @@ struct i40e_vsi { #define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4) #define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4) +#define I40EVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */ /* MAX_MSIX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. @@ -117,9 +122,8 @@ struct i40e_q_vector { struct i40e_ring_container rx; struct i40e_ring_container tx; u32 ring_mask; + u8 itr_countdown; /* when 0 should adjust adaptive ITR */ u8 num_ringpairs; /* total number of ring pairs in vector */ -#define ITR_COUNTDOWN_START 100 - u8 itr_countdown; /* when 0 or 1 update ITR */ u16 v_idx; /* index in the vsi->q_vector array. */ u16 reg_idx; /* register index of the interrupt */ char name[IFNAMSIZ + 15]; @@ -169,6 +173,28 @@ struct i40evf_vlan_filter { bool add; /* filter needs to be added */ }; +#define I40EVF_MAX_TRAFFIC_CLASS 4 +/* State of traffic class creation */ +enum i40evf_tc_state_t { + __I40EVF_TC_INVALID, /* no traffic class, default state */ + __I40EVF_TC_RUNNING, /* traffic classes have been created */ +}; + +/* channel info */ +struct i40evf_channel_config { + struct virtchnl_channel_info ch_info[I40EVF_MAX_TRAFFIC_CLASS]; + enum i40evf_tc_state_t state; + u8 total_qps; +}; + +/* State of cloud filter */ +enum i40evf_cloud_filter_state_t { + __I40EVF_CF_INVALID, /* cloud filter not added */ + __I40EVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */ + __I40EVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */ + __I40EVF_CF_ACTIVE, /* cloud filter is active */ +}; + /* Driver state. The order of these is important! 
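 * States are compared with <= elsewhere in the driver (for example,
 * i40evf_down() returns early when adapter->state is at or below
 * __I40EVF_DOWN_PENDING), so the enum must stay in bring-up order.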
 */
 enum i40evf_state_t {
	__I40EVF_STARTUP,		/* driver loaded, probe complete */
@@ -190,6 +216,36 @@ enum i40evf_critical_section_t {
	__I40EVF_IN_REMOVE_TASK,	/* device being removed */
 };
 
+#define I40EVF_CLOUD_FIELD_OMAC 0x01
+#define I40EVF_CLOUD_FIELD_IMAC 0x02
+#define I40EVF_CLOUD_FIELD_IVLAN 0x04
+#define I40EVF_CLOUD_FIELD_TEN_ID 0x08
+#define I40EVF_CLOUD_FIELD_IIP 0x10
+
+#define I40EVF_CF_FLAGS_OMAC I40EVF_CLOUD_FIELD_OMAC
+#define I40EVF_CF_FLAGS_IMAC I40EVF_CLOUD_FIELD_IMAC
+#define I40EVF_CF_FLAGS_IMAC_IVLAN (I40EVF_CLOUD_FIELD_IMAC |\
+				    I40EVF_CLOUD_FIELD_IVLAN)
+#define I40EVF_CF_FLAGS_IMAC_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\
+				     I40EVF_CLOUD_FIELD_TEN_ID)
+#define I40EVF_CF_FLAGS_OMAC_TEN_ID_IMAC (I40EVF_CLOUD_FIELD_OMAC |\
+					  I40EVF_CLOUD_FIELD_IMAC |\
+					  I40EVF_CLOUD_FIELD_TEN_ID)
+#define I40EVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\
+					   I40EVF_CLOUD_FIELD_IVLAN |\
+					   I40EVF_CLOUD_FIELD_TEN_ID)
+#define I40EVF_CF_FLAGS_IIP I40EVF_CLOUD_FIELD_IIP
+
+/* bookkeeping of cloud filters */
+struct i40evf_cloud_filter {
+	enum i40evf_cloud_filter_state_t state;
+	struct list_head list;
+	struct virtchnl_filter f;
+	unsigned long cookie;
+	bool del;		/* filter needs to be deleted */
+	bool add;		/* filter needs to be added */
+};
+
 /* board specific private data structure */
 struct i40evf_adapter {
	struct timer_list watchdog_timer;
@@ -225,13 +281,10 @@ struct i40evf_adapter {
 
	u32 flags;
 #define I40EVF_FLAG_RX_CSUM_ENABLED		BIT(0)
-#define I40EVF_FLAG_IMIR_ENABLED		BIT(1)
-#define I40EVF_FLAG_MQ_CAPABLE			BIT(2)
 #define I40EVF_FLAG_PF_COMMS_FAILED		BIT(3)
 #define I40EVF_FLAG_RESET_PENDING		BIT(4)
 #define I40EVF_FLAG_RESET_NEEDED		BIT(5)
 #define I40EVF_FLAG_WB_ON_ITR_CAPABLE		BIT(6)
-#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE	BIT(7)
 #define I40EVF_FLAG_ADDR_SET_BY_PF		BIT(8)
 #define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED	BIT(9)
 #define I40EVF_FLAG_CLIENT_NEEDS_OPEN		BIT(10)
@@ -241,6 +294,7 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_ALLMULTI_ON			BIT(14)
 #define I40EVF_FLAG_LEGACY_RX			BIT(15)
 #define I40EVF_FLAG_REINIT_ITR_NEEDED		BIT(16)
+#define I40EVF_FLAG_QUEUES_DISABLED		BIT(17)
 /* duplicates for common code */
 #define I40E_FLAG_DCB_ENABLED			0
 #define I40E_FLAG_RX_CSUM_ENABLED		I40EVF_FLAG_RX_CSUM_ENABLED
@@ -269,6 +323,10 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_AQ_RELEASE_ALLMULTI		BIT(18)
 #define I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING	BIT(19)
 #define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING	BIT(20)
+#define I40EVF_FLAG_AQ_ENABLE_CHANNELS		BIT(21)
+#define I40EVF_FLAG_AQ_DISABLE_CHANNELS		BIT(22)
+#define I40EVF_FLAG_AQ_ADD_CLOUD_FILTER		BIT(23)
+#define I40EVF_FLAG_AQ_DEL_CLOUD_FILTER		BIT(24)
 
	/* OS defined structs */
	struct net_device *netdev;
@@ -314,6 +372,13 @@ struct i40evf_adapter {
	u16 rss_lut_size;
	u8 *rss_key;
	u8 *rss_lut;
+	/* ADQ related members */
+	struct i40evf_channel_config ch_config;
+	u8 num_tc;
+	struct list_head cloud_filter_list;
+	/* lock to protect access to the cloud filter list */
+	spinlock_t cloud_filter_list_lock;
+	u16 num_cloud_filters;
 };
 
@@ -380,4 +445,8 @@ void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
 void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
 void i40evf_notify_client_open(struct i40e_vsi *vsi);
 void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
+void i40evf_enable_channels(struct i40evf_adapter *adapter);
+void i40evf_disable_channels(struct i40evf_adapter *adapter);
+void i40evf_add_cloud_filter(struct i40evf_adapter *adapter);
+void i40evf_del_cloud_filter(struct i40evf_adapter *adapter);
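The four prototypes above are not called directly from the ndo/ethtool paths; callers set one of the new I40EVF_FLAG_AQ_* bits and let the watchdog issue the admin-queue request. A simplified rendering of that dispatch ("example_watchdog_pass" is a made-up name; the real logic is in i40evf_watchdog_task() later in this patch):

/* Illustrative sketch only: the watchdog issues at most one admin-queue
 * request per pass, and each helper clears its own aq_required bit once
 * the request has been sent to the PF.
 */
static void example_watchdog_pass(struct i40evf_adapter *adapter)
{
	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) {
		i40evf_enable_channels(adapter);
		return;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) {
		i40evf_disable_channels(adapter);
		return;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		i40evf_add_cloud_filter(adapter);
		return;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		i40evf_del_cloud_filter(adapter);
		return;
	}
}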
#endif /* _I40EVF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index e2d8aa19d205..dc4cde274fb8 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver @@ -457,14 +458,14 @@ static int __i40evf_get_coalesce(struct net_device *netdev, rx_ring = &adapter->rx_rings[queue]; tx_ring = &adapter->tx_rings[queue]; - if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting)) + if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) ec->use_adaptive_rx_coalesce = 1; - if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting)) + if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) ec->use_adaptive_tx_coalesce = 1; - ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC; - ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC; + ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC; + ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC; return 0; } @@ -502,7 +503,7 @@ static int i40evf_get_per_queue_coalesce(struct net_device *netdev, /** * i40evf_set_itr_per_queue - set ITR values for specific queue - * @vsi: the VSI to set values for + * @adapter: the VF adapter struct to set values for * @ec: coalesce settings from ethtool * @queue: the queue to modify * @@ -514,33 +515,29 @@ static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter, { struct i40e_ring *rx_ring = &adapter->rx_rings[queue]; struct i40e_ring *tx_ring = &adapter->tx_rings[queue]; - struct i40e_vsi *vsi = &adapter->vsi; - struct i40e_hw *hw = &adapter->hw; struct i40e_q_vector *q_vector; - u16 vector; - rx_ring->rx_itr_setting = ec->rx_coalesce_usecs; - tx_ring->tx_itr_setting = ec->tx_coalesce_usecs; + rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); + tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); - rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC; + rx_ring->itr_setting |= I40E_ITR_DYNAMIC; if (!ec->use_adaptive_rx_coalesce) - rx_ring->rx_itr_setting ^= I40E_ITR_DYNAMIC; + rx_ring->itr_setting ^= I40E_ITR_DYNAMIC; - tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC; + tx_ring->itr_setting |= I40E_ITR_DYNAMIC; if (!ec->use_adaptive_tx_coalesce) - tx_ring->tx_itr_setting ^= I40E_ITR_DYNAMIC; + tx_ring->itr_setting ^= I40E_ITR_DYNAMIC; q_vector = rx_ring->q_vector; - q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting); - vector = vsi->base_vector + q_vector->v_idx; - wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr); + q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); q_vector = tx_ring->q_vector; - q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting); - vector = vsi->base_vector + q_vector->v_idx; - wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr); + q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); - i40e_flush(hw); + /* The interrupt handler itself will take care of programming + * the Tx and Rx ITR values based on the values we have entered + * into the q_vector, no need to write the values now. 
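+	 *
+	 * (The handler compares target_itr against current_itr and only
+	 * writes the register when they differ, so the new setting takes
+	 * effect on the next interrupt for this vector.)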
+ */ } /** @@ -565,8 +562,8 @@ static int __i40evf_set_coalesce(struct net_device *netdev, if (ec->rx_coalesce_usecs == 0) { if (ec->use_adaptive_rx_coalesce) netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); - } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) || - (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) { + } else if ((ec->rx_coalesce_usecs < I40E_MIN_ITR) || + (ec->rx_coalesce_usecs > I40E_MAX_ITR)) { netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); return -EINVAL; } @@ -575,8 +572,8 @@ static int __i40evf_set_coalesce(struct net_device *netdev, if (ec->tx_coalesce_usecs == 0) { if (ec->use_adaptive_tx_coalesce) netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); - } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) || - (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) { + } else if ((ec->tx_coalesce_usecs < I40E_MIN_ITR) || + (ec->tx_coalesce_usecs > I40E_MAX_ITR)) { netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); return -EINVAL; } @@ -699,6 +696,12 @@ static int i40evf_set_channels(struct net_device *netdev, return -EINVAL; } + if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && + adapter->num_tc) { + dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n"); + return -EINVAL; + } + /* All of these should have already been checked by ethtool before this * even gets to us, but just to be sure. */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 16989ad2ca90..5f71532be7f1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver @@ -353,11 +354,12 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) rx_ring->vsi = &adapter->vsi; q_vector->rx.ring = rx_ring; q_vector->rx.count++; - q_vector->rx.latency_range = I40E_LOW_LATENCY; - q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting); + q_vector->rx.next_update = jiffies + 1; + q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); q_vector->ring_mask |= BIT(r_idx); - q_vector->itr_countdown = ITR_COUNTDOWN_START; - wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, v_idx - 1), q_vector->rx.itr); + wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx), + q_vector->rx.current_itr); + q_vector->rx.current_itr = q_vector->rx.target_itr; } /** @@ -378,11 +380,12 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) tx_ring->vsi = &adapter->vsi; q_vector->tx.ring = tx_ring; q_vector->tx.count++; - q_vector->tx.latency_range = I40E_LOW_LATENCY; - q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting); - q_vector->itr_countdown = ITR_COUNTDOWN_START; + q_vector->tx.next_update = jiffies + 1; + q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); q_vector->num_ringpairs++; - wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, v_idx - 1), q_vector->tx.itr); + wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx), + q_vector->tx.target_itr); + q_vector->tx.current_itr = q_vector->tx.target_itr; } /** @@ -783,7 +786,7 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, **/ static struct i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter 
*adapter, - u8 *macaddr) + const u8 *macaddr) { struct i40evf_mac_filter *f; @@ -806,20 +809,18 @@ i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter, **/ static struct i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, - u8 *macaddr) + const u8 *macaddr) { struct i40evf_mac_filter *f; if (!macaddr) return NULL; - spin_lock_bh(&adapter->mac_vlan_list_lock); - f = i40evf_find_filter(adapter, macaddr); if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) - goto clearout; + return f; ether_addr_copy(f->macaddr, macaddr); @@ -830,8 +831,6 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, f->remove = false; } -clearout: - spin_unlock_bh(&adapter->mac_vlan_list_lock); return f; } @@ -866,9 +865,10 @@ static int i40evf_set_mac(struct net_device *netdev, void *p) adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; } + f = i40evf_add_filter(adapter, addr->sa_data); + spin_unlock_bh(&adapter->mac_vlan_list_lock); - f = i40evf_add_filter(adapter, addr->sa_data); if (f) { ether_addr_copy(hw->mac.addr, addr->sa_data); ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); @@ -878,50 +878,64 @@ static int i40evf_set_mac(struct net_device *netdev, void *p) } /** - * i40evf_set_rx_mode - NDO callback to set the netdev filters - * @netdev: network interface device structure - **/ -static void i40evf_set_rx_mode(struct net_device *netdev) + * i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address + * @netdev: the netdevice + * @addr: address to add + * + * Called by __dev_(mc|uc)_sync when an address needs to be added. We call + * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. + */ +static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40evf_mac_filter *f, *ftmp; - struct netdev_hw_addr *uca; - struct netdev_hw_addr *mca; - struct netdev_hw_addr *ha; - - /* add addr if not already in the filter list */ - netdev_for_each_uc_addr(uca, netdev) { - i40evf_add_filter(adapter, uca->addr); - } - netdev_for_each_mc_addr(mca, netdev) { - i40evf_add_filter(adapter, mca->addr); - } - spin_lock_bh(&adapter->mac_vlan_list_lock); - - list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { - netdev_for_each_mc_addr(mca, netdev) - if (ether_addr_equal(mca->addr, f->macaddr)) - goto bottom_of_search_loop; - - netdev_for_each_uc_addr(uca, netdev) - if (ether_addr_equal(uca->addr, f->macaddr)) - goto bottom_of_search_loop; + if (i40evf_add_filter(adapter, addr)) + return 0; + else + return -ENOMEM; +} - for_each_dev_addr(netdev, ha) - if (ether_addr_equal(ha->addr, f->macaddr)) - goto bottom_of_search_loop; +/** + * i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address + * @netdev: the netdevice + * @addr: address to add + * + * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call + * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. + */ +static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40evf_mac_filter *f; - if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) - goto bottom_of_search_loop; + /* Under some circumstances, we might receive a request to delete + * our own device address from our uc list. Because we store the + * device address in the VSI's MAC/VLAN filter list, we need to ignore + * such requests and not delete our device address from this list. 
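+	 *
+	 * (The device address is re-added explicitly elsewhere, e.g.
+	 * i40evf_open() calls i40evf_add_filter() with hw.mac.addr, but
+	 * honoring the unsync here could briefly leave the VF without a
+	 * filter for its own MAC.)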
+ */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; - /* f->macaddr wasn't found in uc, mc, or ha list so delete it */ + f = i40evf_find_filter(adapter, addr); + if (f) { f->remove = true; adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; - -bottom_of_search_loop: - continue; } + return 0; +} + +/** + * i40evf_set_rx_mode - NDO callback to set the netdev filters + * @netdev: network interface device structure + **/ +static void i40evf_set_rx_mode(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + spin_lock_bh(&adapter->mac_vlan_list_lock); + __dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync); + __dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync); + spin_unlock_bh(&adapter->mac_vlan_list_lock); if (netdev->flags & IFF_PROMISC && !(adapter->flags & I40EVF_FLAG_PROMISC_ON)) @@ -936,8 +950,6 @@ bottom_of_search_loop: else if (!(netdev->flags & IFF_ALLMULTI) && adapter->flags & I40EVF_FLAG_ALLMULTI_ON) adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI; - - spin_unlock_bh(&adapter->mac_vlan_list_lock); } /** @@ -1025,7 +1037,9 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter) void i40evf_down(struct i40evf_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct i40evf_vlan_filter *vlf; struct i40evf_mac_filter *f; + struct i40evf_cloud_filter *cf; if (adapter->state <= __I40EVF_DOWN_PENDING) return; @@ -1038,17 +1052,29 @@ void i40evf_down(struct i40evf_adapter *adapter) spin_lock_bh(&adapter->mac_vlan_list_lock); + /* clear the sync flag on all filters */ + __dev_uc_unsync(adapter->netdev, NULL); + __dev_mc_unsync(adapter->netdev, NULL); + /* remove all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { f->remove = true; } + /* remove all VLAN filters */ - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - f->remove = true; + list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { + vlf->remove = true; } spin_unlock_bh(&adapter->mac_vlan_list_lock); + /* remove all cloud filters */ + spin_lock_bh(&adapter->cloud_filter_list_lock); + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { + cf->del = true; + } + spin_unlock_bh(&adapter->cloud_filter_list_lock); + if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && adapter->state != __I40EVF_RESETTING) { /* cancel any current operation */ @@ -1059,6 +1085,7 @@ void i40evf_down(struct i40evf_adapter *adapter) */ adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; } @@ -1144,6 +1171,9 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) */ if (adapter->num_req_queues) num_active_queues = adapter->num_req_queues; + else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && + adapter->num_tc) + num_active_queues = adapter->ch_config.total_qps; else num_active_queues = min_t(int, adapter->vsi_res->num_queue_pairs, @@ -1169,7 +1199,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) tx_ring->netdev = adapter->netdev; tx_ring->dev = &adapter->pdev->dev; tx_ring->count = adapter->tx_desc_count; - tx_ring->tx_itr_setting = I40E_ITR_TX_DEF; + tx_ring->itr_setting = I40E_ITR_TX_DEF; if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE) tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; @@ -1178,7 +1208,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) rx_ring->netdev = 
adapter->netdev; rx_ring->dev = &adapter->pdev->dev; rx_ring->count = adapter->rx_desc_count; - rx_ring->rx_itr_setting = I40E_ITR_RX_DEF; + rx_ring->itr_setting = I40E_ITR_RX_DEF; } adapter->num_active_queues = num_active_queues; @@ -1471,6 +1501,16 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) goto err_alloc_q_vectors; } + /* If we've made it so far while ADq flag being ON, then we haven't + * bailed out anywhere in middle. And ADq isn't just enabled but actual + * resources have been allocated in the reset path. + * Now we can truly claim that ADq is enabled. + */ + if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && + adapter->num_tc) + dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", + adapter->num_tc); + dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", adapter->num_active_queues); @@ -1712,6 +1752,27 @@ static void i40evf_watchdog_task(struct work_struct *work) i40evf_set_promiscuous(adapter, 0); goto watchdog_done; } + + if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) { + i40evf_enable_channels(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) { + i40evf_disable_channels(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) { + i40evf_add_cloud_filter(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) { + i40evf_del_cloud_filter(adapter); + goto watchdog_done; + } + schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); if (adapter->state == __I40EVF_RUNNING) @@ -1735,6 +1796,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) { struct i40evf_mac_filter *f, *ftmp; struct i40evf_vlan_filter *fv, *fvtmp; + struct i40evf_cloud_filter *cf, *cftmp; adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; @@ -1756,7 +1818,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) spin_lock_bh(&adapter->mac_vlan_list_lock); - /* Delete all of the filters, both MAC and VLAN. 
*/ + /* Delete all of the filters */ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { list_del(&f->list); kfree(f); @@ -1769,6 +1831,14 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) spin_unlock_bh(&adapter->mac_vlan_list_lock); + spin_lock_bh(&adapter->cloud_filter_list_lock); + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { + list_del(&cf->list); + kfree(cf); + adapter->num_cloud_filters--; + } + spin_unlock_bh(&adapter->cloud_filter_list_lock); + i40evf_free_misc_irq(adapter); i40evf_reset_interrupt_capability(adapter); i40evf_free_queues(adapter); @@ -1798,9 +1868,11 @@ static void i40evf_reset_task(struct work_struct *work) struct i40evf_adapter *adapter = container_of(work, struct i40evf_adapter, reset_task); + struct virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; struct i40e_hw *hw = &adapter->hw; struct i40evf_vlan_filter *vlf; + struct i40evf_cloud_filter *cf; struct i40evf_mac_filter *f; u32 reg_val; int i = 0, err; @@ -1893,6 +1965,7 @@ continue_reset: i40evf_free_all_rx_resources(adapter); i40evf_free_all_tx_resources(adapter); + adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED; /* kill and reinit the admin queue */ i40evf_shutdown_adminq(hw); adapter->current_op = VIRTCHNL_OP_UNKNOWN; @@ -1924,8 +1997,19 @@ continue_reset: spin_unlock_bh(&adapter->mac_vlan_list_lock); + /* check if TCs are running and re-add all cloud filters */ + spin_lock_bh(&adapter->cloud_filter_list_lock); + if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && + adapter->num_tc) { + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { + cf->add = true; + } + } + spin_unlock_bh(&adapter->cloud_filter_list_lock); + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; i40evf_misc_irq_enable(adapter); mod_timer(&adapter->watchdog_timer, jiffies + 2); @@ -2191,6 +2275,712 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) } /** + * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth + * @adapter: board private structure + * @max_tx_rate: max Tx bw for a tc + **/ +static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter, + u64 max_tx_rate) +{ + int speed = 0, ret = 0; + + switch (adapter->link_speed) { + case I40E_LINK_SPEED_40GB: + speed = 40000; + break; + case I40E_LINK_SPEED_25GB: + speed = 25000; + break; + case I40E_LINK_SPEED_20GB: + speed = 20000; + break; + case I40E_LINK_SPEED_10GB: + speed = 10000; + break; + case I40E_LINK_SPEED_1GB: + speed = 1000; + break; + case I40E_LINK_SPEED_100MB: + speed = 100; + break; + default: + break; + } + + if (max_tx_rate > speed) { + dev_err(&adapter->pdev->dev, + "Invalid tx rate specified\n"); + ret = -EINVAL; + } + + return ret; +} + +/** + * i40evf_validate_channel_config - validate queue mapping info + * @adapter: board private structure + * @mqprio_qopt: queue parameters + * + * This function validates if the config provided by the user to + * configure queue channels is valid or not. Returns 0 on a valid + * config. 
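+ *
+ * For example, "tc qdisc add dev $VF root mqprio num_tc 2 map 0 1
+ * queues 2@0 2@2 hw 1 mode channel" arrives here as num_tc = 2,
+ * count[] = {2, 2}, offset[] = {0, 2}, which satisfies the
+ * contiguous-offset check below.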
+ **/ +static int i40evf_validate_ch_config(struct i40evf_adapter *adapter, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + u64 total_max_rate = 0; + int i, num_qps = 0; + u64 tx_rate = 0; + int ret = 0; + + if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS || + mqprio_qopt->qopt.num_tc < 1) + return -EINVAL; + + for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { + if (!mqprio_qopt->qopt.count[i] || + mqprio_qopt->qopt.offset[i] != num_qps) + return -EINVAL; + if (mqprio_qopt->min_rate[i]) { + dev_err(&adapter->pdev->dev, + "Invalid min tx rate (greater than 0) specified\n"); + return -EINVAL; + } + /*convert to Mbps */ + tx_rate = div_u64(mqprio_qopt->max_rate[i], + I40EVF_MBPS_DIVISOR); + total_max_rate += tx_rate; + num_qps += mqprio_qopt->qopt.count[i]; + } + if (num_qps > MAX_QUEUES) + return -EINVAL; + + ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate); + return ret; +} + +/** + * i40evf_del_all_cloud_filters - delete all cloud filters + * on the traffic classes + **/ +static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter) +{ + struct i40evf_cloud_filter *cf, *cftmp; + + spin_lock_bh(&adapter->cloud_filter_list_lock); + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, + list) { + list_del(&cf->list); + kfree(cf); + adapter->num_cloud_filters--; + } + spin_unlock_bh(&adapter->cloud_filter_list_lock); +} + +/** + * __i40evf_setup_tc - configure multiple traffic classes + * @netdev: network interface device structure + * @type_date: tc offload data + * + * This function processes the config information provided by the + * user to configure traffic classes/queue channels and packages the + * information to request the PF to setup traffic classes. + * + * Returns 0 on success. + **/ +static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) +{ + struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct virtchnl_vf_resource *vfres = adapter->vf_res; + u8 num_tc = 0, total_qps = 0; + int ret = 0, netdev_tc = 0; + u64 max_tx_rate; + u16 mode; + int i; + + num_tc = mqprio_qopt->qopt.num_tc; + mode = mqprio_qopt->mode; + + /* delete queue_channel */ + if (!mqprio_qopt->qopt.hw) { + if (adapter->ch_config.state == __I40EVF_TC_RUNNING) { + /* reset the tc configuration */ + netdev_reset_tc(netdev); + adapter->num_tc = 0; + netif_tx_stop_all_queues(netdev); + netif_tx_disable(netdev); + i40evf_del_all_cloud_filters(adapter); + adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS; + goto exit; + } else { + return -EINVAL; + } + } + + /* add queue channel */ + if (mode == TC_MQPRIO_MODE_CHANNEL) { + if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { + dev_err(&adapter->pdev->dev, "ADq not supported\n"); + return -EOPNOTSUPP; + } + if (adapter->ch_config.state != __I40EVF_TC_INVALID) { + dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); + return -EINVAL; + } + + ret = i40evf_validate_ch_config(adapter, mqprio_qopt); + if (ret) + return ret; + /* Return if same TC config is requested */ + if (adapter->num_tc == num_tc) + return 0; + adapter->num_tc = num_tc; + + for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) { + if (i < num_tc) { + adapter->ch_config.ch_info[i].count = + mqprio_qopt->qopt.count[i]; + adapter->ch_config.ch_info[i].offset = + mqprio_qopt->qopt.offset[i]; + total_qps += mqprio_qopt->qopt.count[i]; + max_tx_rate = mqprio_qopt->max_rate[i]; + /* convert to Mbps */ + max_tx_rate = div_u64(max_tx_rate, + I40EVF_MBPS_DIVISOR); + 
adapter->ch_config.ch_info[i].max_tx_rate = + max_tx_rate; + } else { + adapter->ch_config.ch_info[i].count = 1; + adapter->ch_config.ch_info[i].offset = 0; + } + } + adapter->ch_config.total_qps = total_qps; + netif_tx_stop_all_queues(netdev); + netif_tx_disable(netdev); + adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS; + netdev_reset_tc(netdev); + /* Report the tc mapping up the stack */ + netdev_set_num_tc(adapter->netdev, num_tc); + for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) { + u16 qcount = mqprio_qopt->qopt.count[i]; + u16 qoffset = mqprio_qopt->qopt.offset[i]; + + if (i < num_tc) + netdev_set_tc_queue(netdev, netdev_tc++, qcount, + qoffset); + } + } +exit: + return ret; +} + +/** + * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel + * @adapter: board private structure + * @cls_flower: pointer to struct tc_cls_flower_offload + * @filter: pointer to cloud filter structure + */ +static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, + struct tc_cls_flower_offload *f, + struct i40evf_cloud_filter *filter) +{ + u16 n_proto_mask = 0; + u16 n_proto_key = 0; + u8 field_flags = 0; + u16 addr_type = 0; + u16 n_proto = 0; + int i = 0; + struct virtchnl_filter *vf = &filter->f; + + if (f->dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { + dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", + f->dissector->used_keys); + return -EOPNOTSUPP; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_dissector_key_keyid *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + f->mask); + + if (mask->keyid != 0) + field_flags |= I40EVF_CLOUD_FIELD_TEN_ID; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + n_proto_key = ntohs(key->n_proto); + n_proto_mask = ntohs(mask->n_proto); + + if (n_proto_key == ETH_P_ALL) { + n_proto_key = 0; + n_proto_mask = 0; + } + n_proto = n_proto_key & n_proto_mask; + if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) + return -EINVAL; + if (n_proto == ETH_P_IPV6) { + /* specify flow type as TCP IPv6 */ + vf->flow_type = VIRTCHNL_TCP_V6_FLOW; + } + + if (key->ip_proto != IPPROTO_TCP) { + dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); + return -EINVAL; + } + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->key); + + struct flow_dissector_key_eth_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->mask); + /* use is_broadcast and is_zero to check for all 0xf or 0 */ + if (!is_zero_ether_addr(mask->dst)) { + if (is_broadcast_ether_addr(mask->dst)) { + field_flags |= I40EVF_CLOUD_FIELD_OMAC; + } else { + dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", + mask->dst); + return I40E_ERR_CONFIG; + } + } + + if (!is_zero_ether_addr(mask->src)) { + if (is_broadcast_ether_addr(mask->src)) { + field_flags |= 
I40EVF_CLOUD_FIELD_IMAC; + } else { + dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", + mask->src); + return I40E_ERR_CONFIG; + } + } + + if (!is_zero_ether_addr(key->dst)) + if (is_valid_ether_addr(key->dst) || + is_multicast_ether_addr(key->dst)) { + /* set the mask if a valid dst_mac address */ + for (i = 0; i < ETH_ALEN; i++) + vf->mask.tcp_spec.dst_mac[i] |= 0xff; + ether_addr_copy(vf->data.tcp_spec.dst_mac, + key->dst); + } + + if (!is_zero_ether_addr(key->src)) + if (is_valid_ether_addr(key->src) || + is_multicast_ether_addr(key->src)) { + /* set the mask if a valid dst_mac address */ + for (i = 0; i < ETH_ALEN; i++) + vf->mask.tcp_spec.src_mac[i] |= 0xff; + ether_addr_copy(vf->data.tcp_spec.src_mac, + key->src); + } + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_dissector_key_vlan *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_VLAN, + f->key); + struct flow_dissector_key_vlan *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_VLAN, + f->mask); + + if (mask->vlan_id) { + if (mask->vlan_id == VLAN_VID_MASK) { + field_flags |= I40EVF_CLOUD_FIELD_IVLAN; + } else { + dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", + mask->vlan_id); + return I40E_ERR_CONFIG; + } + } + vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); + vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id); + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + f->key); + + addr_type = key->addr_type; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_dissector_key_ipv4_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->key); + struct flow_dissector_key_ipv4_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->mask); + + if (mask->dst) { + if (mask->dst == cpu_to_be32(0xffffffff)) { + field_flags |= I40EVF_CLOUD_FIELD_IIP; + } else { + dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", + be32_to_cpu(mask->dst)); + return I40E_ERR_CONFIG; + } + } + + if (mask->src) { + if (mask->src == cpu_to_be32(0xffffffff)) { + field_flags |= I40EVF_CLOUD_FIELD_IIP; + } else { + dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", + be32_to_cpu(mask->dst)); + return I40E_ERR_CONFIG; + } + } + + if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) { + dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); + return I40E_ERR_CONFIG; + } + if (key->dst) { + vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); + vf->data.tcp_spec.dst_ip[0] = key->dst; + } + if (key->src) { + vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); + vf->data.tcp_spec.src_ip[0] = key->src; + } + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_dissector_key_ipv6_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->key); + struct flow_dissector_key_ipv6_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->mask); + + /* validate mask, make sure it is not IPV6_ADDR_ANY */ + if (ipv6_addr_any(&mask->dst)) { + dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", + IPV6_ADDR_ANY); + return I40E_ERR_CONFIG; + } + + /* src and dest IPv6 address should not be LOOPBACK + * (0:0:0:0:0:0:0:1) which can be represented as ::1 + */ + if (ipv6_addr_loopback(&key->dst) || + ipv6_addr_loopback(&key->src)) { + 
dev_err(&adapter->pdev->dev, + "ipv6 addr should not be loopback\n"); + return I40E_ERR_CONFIG; + } + if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src)) + field_flags |= I40EVF_CLOUD_FIELD_IIP; + + for (i = 0; i < 4; i++) + vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); + memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32, + sizeof(vf->data.tcp_spec.dst_ip)); + for (i = 0; i < 4; i++) + vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); + memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32, + sizeof(vf->data.tcp_spec.src_ip)); + } + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_dissector_key_ports *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->key); + struct flow_dissector_key_ports *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->mask); + + if (mask->src) { + if (mask->src == cpu_to_be16(0xffff)) { + field_flags |= I40EVF_CLOUD_FIELD_IIP; + } else { + dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", + be16_to_cpu(mask->src)); + return I40E_ERR_CONFIG; + } + } + + if (mask->dst) { + if (mask->dst == cpu_to_be16(0xffff)) { + field_flags |= I40EVF_CLOUD_FIELD_IIP; + } else { + dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", + be16_to_cpu(mask->dst)); + return I40E_ERR_CONFIG; + } + } + if (key->dst) { + vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); + vf->data.tcp_spec.dst_port = key->dst; + } + + if (key->src) { + vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); + vf->data.tcp_spec.src_port = key->src; + } + } + vf->field_flags = field_flags; + + return 0; +} + +/** + * i40evf_handle_tclass - Forward to a traffic class on the device + * @adapter: board private structure + * @tc: traffic class index on the device + * @filter: pointer to cloud filter structure + */ +static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc, + struct i40evf_cloud_filter *filter) +{ + if (tc == 0) + return 0; + if (tc < adapter->num_tc) { + if (!filter->f.data.tcp_spec.dst_port) { + dev_err(&adapter->pdev->dev, + "Specify destination port to redirect to traffic class other than TC0\n"); + return -EINVAL; + } + } + /* redirect to a traffic class on the same device */ + filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; + filter->f.action_meta = tc; + return 0; +} + +/** + * i40evf_configure_clsflower - Add tc flower filters + * @adapter: board private structure + * @cls_flower: Pointer to struct tc_cls_flower_offload + */ +static int i40evf_configure_clsflower(struct i40evf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) +{ + int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); + struct i40evf_cloud_filter *filter = NULL; + int err = -EINVAL, count = 50; + + if (tc < 0) { + dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); + return -EINVAL; + } + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) { + if (--count == 0) + goto err; + udelay(1); + } + + filter->cookie = cls_flower->cookie; + + /* set the mask to all zeroes to begin with */ + memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); + /* start out with flow type and eth type IPv4 to begin with */ + filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; + err = i40evf_parse_cls_flower(adapter, cls_flower, filter); + if (err < 0) + goto err; + + err = i40evf_handle_tclass(adapter, tc, filter); + if (err < 0) + goto err; + + /* add filter to the 
list */ + spin_lock_bh(&adapter->cloud_filter_list_lock); + list_add_tail(&filter->list, &adapter->cloud_filter_list); + adapter->num_cloud_filters++; + filter->add = true; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; + spin_unlock_bh(&adapter->cloud_filter_list_lock); +err: + if (err) + kfree(filter); + + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + return err; +} + +/* i40evf_find_cf - Find the cloud filter in the list + * @adapter: Board private structure + * @cookie: filter specific cookie + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * cloud_filter_list_lock. + */ +static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter, + unsigned long *cookie) +{ + struct i40evf_cloud_filter *filter = NULL; + + if (!cookie) + return NULL; + + list_for_each_entry(filter, &adapter->cloud_filter_list, list) { + if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) + return filter; + } + return NULL; +} + +/** + * i40evf_delete_clsflower - Remove tc flower filters + * @adapter: board private structure + * @cls_flower: Pointer to struct tc_cls_flower_offload + */ +static int i40evf_delete_clsflower(struct i40evf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) +{ + struct i40evf_cloud_filter *filter = NULL; + int err = 0; + + spin_lock_bh(&adapter->cloud_filter_list_lock); + filter = i40evf_find_cf(adapter, &cls_flower->cookie); + if (filter) { + filter->del = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; + } else { + err = -EINVAL; + } + spin_unlock_bh(&adapter->cloud_filter_list_lock); + + return err; +} + +/** + * i40evf_setup_tc_cls_flower - flower classifier offloads + * @netdev: net device to configure + * @type_data: offload data + */ +static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) +{ + if (cls_flower->common.chain_index) + return -EOPNOTSUPP; + + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return i40evf_configure_clsflower(adapter, cls_flower); + case TC_CLSFLOWER_DESTROY: + return i40evf_delete_clsflower(adapter, cls_flower); + case TC_CLSFLOWER_STATS: + return -EOPNOTSUPP; + default: + return -EINVAL; + } +} + +/** + * i40evf_setup_tc_block_cb - block callback for tc + * @type: type of offload + * @type_data: offload data + * @cb_priv: + * + * This function is the block callback for traffic classes + **/ +static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + switch (type) { + case TC_SETUP_CLSFLOWER: + return i40evf_setup_tc_cls_flower(cb_priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +/** + * i40evf_setup_tc_block - register callbacks for tc + * @netdev: network interface device structure + * @f: tc offload data + * + * This function registers block callbacks for tc + * offloads + **/ +static int i40evf_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) +{ + struct i40evf_adapter *adapter = netdev_priv(dev); + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb, + adapter, adapter); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb, + adapter); + return 0; + default: + return -EOPNOTSUPP; + } +} + +/** + * i40evf_setup_tc - configure multiple traffic classes + * @netdev: network interface device structure + * @type: type 
of offload + * @type_date: tc offload data + * + * This function is the callback to ndo_setup_tc in the + * netdev_ops. + * + * Returns 0 on success + **/ +static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + return __i40evf_setup_tc(netdev, type_data); + case TC_SETUP_BLOCK: + return i40evf_setup_tc_block(netdev, type_data); + default: + return -EOPNOTSUPP; + } +} + +/** * i40evf_open - Called when a network interface is made active * @netdev: network interface device structure * @@ -2236,7 +3026,12 @@ static int i40evf_open(struct net_device *netdev) if (err) goto err_req_irq; + spin_lock_bh(&adapter->mac_vlan_list_lock); + i40evf_add_filter(adapter, adapter->hw.mac.addr); + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + i40evf_configure(adapter); i40evf_up_complete(adapter); @@ -2457,6 +3252,7 @@ static const struct net_device_ops i40evf_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40evf_netpoll, #endif + .ndo_setup_tc = i40evf_setup_tc, }; /** @@ -2571,6 +3367,9 @@ int i40evf_process_config(struct i40evf_adapter *adapter) if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); + /* Enable cloud filter if ADQ is supported */ + if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) + hw_features |= NETIF_F_HW_TC; netdev->hw_features |= hw_features; @@ -2938,9 +3737,11 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mutex_init(&hw->aq.arq_mutex); spin_lock_init(&adapter->mac_vlan_list_lock); + spin_lock_init(&adapter->cloud_filter_list_lock); INIT_LIST_HEAD(&adapter->mac_filter_list); INIT_LIST_HEAD(&adapter->vlan_filter_list); + INIT_LIST_HEAD(&adapter->cloud_filter_list); INIT_WORK(&adapter->reset_task, i40evf_reset_task); INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); @@ -3065,7 +3866,9 @@ static void i40evf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40evf_vlan_filter *vlf, *vlftmp; struct i40evf_mac_filter *f, *ftmp; + struct i40evf_cloud_filter *cf, *cftmp; struct i40e_hw *hw = &adapter->hw; int err; /* Indicate we are in remove and not to run reset_task */ @@ -3087,6 +3890,7 @@ static void i40evf_remove(struct pci_dev *pdev) /* Shut down all the garbage mashers on the detention level */ adapter->state = __I40EVF_REMOVE; adapter->aq_required = 0; + adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; i40evf_request_reset(adapter); msleep(50); /* If the FW isn't responding, kick it once, but only once. 
*/ @@ -3127,13 +3931,21 @@ static void i40evf_remove(struct pci_dev *pdev) list_del(&f->list); kfree(f); } - list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { - list_del(&f->list); - kfree(f); + list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, + list) { + list_del(&vlf->list); + kfree(vlf); } spin_unlock_bh(&adapter->mac_vlan_list_lock); + spin_lock_bh(&adapter->cloud_filter_list_lock); + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { + list_del(&cf->list); + kfree(cf); + } + spin_unlock_bh(&adapter->cloud_filter_list_lock); + free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 50ce0d6c09ef..26a59890532f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver @@ -161,7 +162,8 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP | VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | - VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | + VIRTCHNL_VF_OFFLOAD_ADQ; adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; @@ -344,6 +346,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) void i40evf_map_queues(struct i40evf_adapter *adapter) { struct virtchnl_irq_map_info *vimi; + struct virtchnl_vector_map *vecmap; int v_idx, q_vectors, len; struct i40e_q_vector *q_vector; @@ -367,17 +370,22 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) vimi->num_vectors = adapter->num_msix_vectors; /* Queue vectors first */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { - q_vector = adapter->q_vectors + v_idx; - vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id; - vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS; - vimi->vecmap[v_idx].txq_map = q_vector->ring_mask; - vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask; + q_vector = &adapter->q_vectors[v_idx]; + vecmap = &vimi->vecmap[v_idx]; + + vecmap->vsi_id = adapter->vsi_res->vsi_id; + vecmap->vector_id = v_idx + NONQ_VECS; + vecmap->txq_map = q_vector->ring_mask; + vecmap->rxq_map = q_vector->ring_mask; + vecmap->rxitr_idx = I40E_RX_ITR; + vecmap->txitr_idx = I40E_TX_ITR; } /* Misc vector last - this is only for AdminQ messages */ - vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id; - vimi->vecmap[v_idx].vector_id = 0; - vimi->vecmap[v_idx].txq_map = 0; - vimi->vecmap[v_idx].rxq_map = 0; + vecmap = &vimi->vecmap[v_idx]; + vecmap->vsi_id = adapter->vsi_res->vsi_id; + vecmap->vector_id = 0; + vecmap->txq_map = 0; + vecmap->rxq_map = 0; adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP, @@ -459,7 +467,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) more = true; } - veal = kzalloc(len, GFP_KERNEL); + veal = kzalloc(len, GFP_ATOMIC); if (!veal) { spin_unlock_bh(&adapter->mac_vlan_list_lock); return; @@ -532,7 +540,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) (count * sizeof(struct virtchnl_ether_addr)); more = true; } - veal = kzalloc(len, GFP_KERNEL); + veal = kzalloc(len, GFP_ATOMIC); if (!veal) { spin_unlock_bh(&adapter->mac_vlan_list_lock); return; @@ 
-606,7 +614,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) (count * sizeof(u16)); more = true; } - vvfl = kzalloc(len, GFP_KERNEL); + vvfl = kzalloc(len, GFP_ATOMIC); if (!vvfl) { spin_unlock_bh(&adapter->mac_vlan_list_lock); return; @@ -678,7 +686,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) (count * sizeof(u16)); more = true; } - vvfl = kzalloc(len, GFP_KERNEL); + vvfl = kzalloc(len, GFP_ATOMIC); if (!vvfl) { spin_unlock_bh(&adapter->mac_vlan_list_lock); return; @@ -967,6 +975,205 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter) } /** + * i40evf_enable_channel + * @adapter: adapter structure + * + * Request that the PF enable channels as specified by + * the user via tc tool. + **/ +void i40evf_enable_channels(struct i40evf_adapter *adapter) +{ + struct virtchnl_tc_info *vti = NULL; + u16 len; + int i; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", + adapter->current_op); + return; + } + + len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) + + sizeof(struct virtchnl_tc_info); + + vti = kzalloc(len, GFP_KERNEL); + if (!vti) + return; + vti->num_tc = adapter->num_tc; + for (i = 0; i < vti->num_tc; i++) { + vti->list[i].count = adapter->ch_config.ch_info[i].count; + vti->list[i].offset = adapter->ch_config.ch_info[i].offset; + vti->list[i].pad = 0; + vti->list[i].max_tx_rate = + adapter->ch_config.ch_info[i].max_tx_rate; + } + + adapter->ch_config.state = __I40EVF_TC_RUNNING; + adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS; + adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS; + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, + (u8 *)vti, len); + kfree(vti); +} + +/** + * i40evf_disable_channel + * @adapter: adapter structure + * + * Request that the PF disable channels that are configured + **/ +void i40evf_disable_channels(struct i40evf_adapter *adapter) +{ + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", + adapter->current_op); + return; + } + + adapter->ch_config.state = __I40EVF_TC_INVALID; + adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS; + adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS; + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, + NULL, 0); +} + +/** + * i40evf_print_cloud_filter + * @adapter: adapter structure + * @f: cloud filter to print + * + * Print the cloud filter + **/ +static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter, + struct virtchnl_filter *f) +{ + switch (f->flow_type) { + case VIRTCHNL_TCP_V4_FLOW: + dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n", + &f->data.tcp_spec.dst_mac, + &f->data.tcp_spec.src_mac, + ntohs(f->data.tcp_spec.vlan_id), + &f->data.tcp_spec.dst_ip[0], + &f->data.tcp_spec.src_ip[0], + ntohs(f->data.tcp_spec.dst_port), + ntohs(f->data.tcp_spec.src_port)); + break; + case VIRTCHNL_TCP_V6_FLOW: + dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n", + &f->data.tcp_spec.dst_mac, + &f->data.tcp_spec.src_mac, + ntohs(f->data.tcp_spec.vlan_id), + &f->data.tcp_spec.dst_ip, + &f->data.tcp_spec.src_ip, + 
ntohs(f->data.tcp_spec.dst_port),
+			 ntohs(f->data.tcp_spec.src_port));
+		break;
+	}
+}
+
+/**
+ * i40evf_add_cloud_filter
+ * @adapter: adapter structure
+ *
+ * Request that the PF add cloud filters as specified
+ * by the user via the tc tool.
+ **/
+void i40evf_add_cloud_filter(struct i40evf_adapter *adapter)
+{
+	struct i40evf_cloud_filter *cf;
+	struct virtchnl_filter *f;
+	int len = 0, count = 0;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+		if (cf->add) {
+			count++;
+			break;
+		}
+	}
+	if (!count) {
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
+
+	len = sizeof(struct virtchnl_filter);
+	f = kzalloc(len, GFP_KERNEL);
+	if (!f)
+		return;
+
+	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+		if (cf->add) {
+			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
+			cf->add = false;
+			cf->state = __I40EVF_CF_ADD_PENDING;
+			i40evf_send_pf_msg(adapter,
+					   VIRTCHNL_OP_ADD_CLOUD_FILTER,
+					   (u8 *)f, len);
+		}
+	}
+	kfree(f);
+}
+
+/**
+ * i40evf_del_cloud_filter
+ * @adapter: adapter structure
+ *
+ * Request that the PF delete cloud filters as specified
+ * by the user via the tc tool.
+ **/
+void i40evf_del_cloud_filter(struct i40evf_adapter *adapter)
+{
+	struct i40evf_cloud_filter *cf, *cftmp;
+	struct virtchnl_filter *f;
+	int len = 0, count = 0;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+		if (cf->del) {
+			count++;
+			break;
+		}
+	}
+	if (!count) {
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
+		return;
+	}
+	adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
+
+	len = sizeof(struct virtchnl_filter);
+	f = kzalloc(len, GFP_KERNEL);
+	if (!f)
+		return;
+
+	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
+		if (cf->del) {
+			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
+			cf->del = false;
+			cf->state = __I40EVF_CF_DEL_PENDING;
+			i40evf_send_pf_msg(adapter,
+					   VIRTCHNL_OP_DEL_CLOUD_FILTER,
+					   (u8 *)f, len);
+		}
+	}
+	kfree(f);
+}
+
+/**
  * i40evf_request_reset
  * @adapter: adapter structure
  *
@@ -1011,14 +1218,25 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
 		if (adapter->link_up == link_up)
 			break;
 
-		/* If we get link up message and start queues before
-		 * our queues are configured it will trigger a TX hang.
-		 * In that case, just ignore the link status message,
-		 * we'll get another one after we enable queues and
-		 * actually prepared to send traffic.
-		 */
-		if (link_up && adapter->state != __I40EVF_RUNNING)
-			break;
+		if (link_up) {
+			/* If we get a link-up message and start queues
+			 * before our queues are configured it will
+			 * trigger a TX hang. In that case, just ignore
			 * the link status message; we'll get another one
+			 * after we enable queues and are actually
+			 * prepared to send traffic.
+			 */
+			if (adapter->state != __I40EVF_RUNNING)
+				break;
+
+			/* For an ADq-enabled VF, we reconfigure VSIs and
+			 * re-allocate queues. Hence wait until all
+			 * queues are enabled.
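+			 *
+			 * (I40EVF_FLAG_QUEUES_DISABLED is set in the reset
+			 * path and cleared when VIRTCHNL_OP_ENABLE_QUEUES
+			 * completes, at which point the next link message
+			 * is processed normally.)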
 + +/** + * i40evf_request_reset + * @adapter: adapter structure + * + @@ -1011,14 +1218,25 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, if (adapter->link_up == link_up) break; - /* If we get link up message and start queues before - * our queues are configured it will trigger a TX hang. - * In that case, just ignore the link status message, - * we'll get another one after we enable queues and - * actually prepared to send traffic. - */ - if (link_up && adapter->state != __I40EVF_RUNNING) - break; + if (link_up) { + /* If we get link up message and start queues + * before our queues are configured it will + * trigger a TX hang. In that case, just ignore + * the link status message, we'll get another one + * after we enable queues and are actually prepared + * to send traffic. + */ + if (adapter->state != __I40EVF_RUNNING) + break; + + /* For ADq enabled VF, we reconfigure VSIs and + * re-allocate queues. Hence wait till all + * queues are enabled. + */ + if (adapter->flags & + I40EVF_FLAG_QUEUES_DISABLED) + break; + } adapter->link_up = link_up; if (link_up) { @@ -1031,7 +1249,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, i40evf_print_link_message(adapter); break; case VIRTCHNL_EVENT_RESET_IMPENDING: - dev_info(&adapter->pdev->dev, "PF reset warning received\n"); + dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n"); if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { adapter->flags |= I40EVF_FLAG_RESET_PENDING; dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); @@ -1063,6 +1281,57 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", i40evf_stat_str(&adapter->hw, v_retval)); break; + case VIRTCHNL_OP_ENABLE_CHANNELS: + dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __I40EVF_TC_INVALID; + netdev_reset_tc(netdev); + netif_tx_start_all_queues(netdev); + break; + case VIRTCHNL_OP_DISABLE_CHANNELS: + dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", + i40evf_stat_str(&adapter->hw, v_retval)); + adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __I40EVF_TC_RUNNING; + netif_tx_start_all_queues(netdev); + break; + case VIRTCHNL_OP_ADD_CLOUD_FILTER: { + struct i40evf_cloud_filter *cf, *cftmp; + + list_for_each_entry_safe(cf, cftmp, + &adapter->cloud_filter_list, + list) { + if (cf->state == __I40EVF_CF_ADD_PENDING) { + cf->state = __I40EVF_CF_INVALID; + dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", + i40evf_stat_str(&adapter->hw, + v_retval)); + i40evf_print_cloud_filter(adapter, + &cf->f); + list_del(&cf->list); + kfree(cf); + adapter->num_cloud_filters--; + } + } + } + break; + case VIRTCHNL_OP_DEL_CLOUD_FILTER: { + struct i40evf_cloud_filter *cf; + + list_for_each_entry(cf, &adapter->cloud_filter_list, + list) { + if (cf->state == __I40EVF_CF_DEL_PENDING) { + cf->state = __I40EVF_CF_ACTIVE; + dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", + i40evf_stat_str(&adapter->hw, + v_retval)); + i40evf_print_cloud_filter(adapter, + &cf->f); + } + } + } + break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", v_retval, @@ -1102,6 +1371,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, case VIRTCHNL_OP_ENABLE_QUEUES: /* enable transmits */ i40evf_irq_enable(adapter, true); + adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED; break; case VIRTCHNL_OP_DISABLE_QUEUES: i40evf_free_all_tx_resources(adapter); @@ -1156,6 +1426,29 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, } } break; + case VIRTCHNL_OP_ADD_CLOUD_FILTER: { + struct i40evf_cloud_filter *cf; + + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { + if (cf->state == __I40EVF_CF_ADD_PENDING) + cf->state = __I40EVF_CF_ACTIVE; + } + } + break; + case VIRTCHNL_OP_DEL_CLOUD_FILTER: { + struct i40evf_cloud_filter *cf, *cftmp; + + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, + list) { + if (cf->state == __I40EVF_CF_DEL_PENDING) { + cf->state = __I40EVF_CF_INVALID; + list_del(&cf->list); + kfree(cf); + adapter->num_cloud_filters--; + } + } + } + break; default: if (adapter->current_op && (v_opcode != adapter->current_op)) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", adapter->current_op, v_opcode);
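[Editorial aside] The completion handler above closes the loop on I40EVF_FLAG_QUEUES_DISABLED: the flag is set while ADq reconfiguration tears queues down, link-up events are ignored while it is set, and VIRTCHNL_OP_ENABLE_QUEUES clears it. A self-contained sketch of that gating decision, distilled from the hunks above with the flag and state names restated locally:

#include <stdbool.h>

enum vf_state { VF_DOWN, VF_RUNNING };
#define FLAG_QUEUES_DISABLED (1u << 0) /* mirrors I40EVF_FLAG_QUEUES_DISABLED */

/* Decide whether a link-up event may be acted on right now. */
static bool should_handle_link_up(enum vf_state state, unsigned int flags)
{
	if (state != VF_RUNNING)
		return false;  /* queues not configured yet: acting would TX hang */
	if (flags & FLAG_QUEUES_DISABLED)
		return false;  /* ADq reinit in flight: wait for ENABLE_QUEUES */
	return true;
}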
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile new file mode 100644 index 000000000000..4058673fd853 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2018, Intel Corporation. + +# +# Makefile for the Intel(R) Ethernet Connection E800 Series Linux Driver +# + +obj-$(CONFIG_ICE) += ice.o + +ice-y := ice_main.o \ + ice_controlq.o \ + ice_common.o \ + ice_nvm.o \ + ice_switch.o \ + ice_sched.o \ + ice_txrx.o \ + ice_ethtool.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h new file mode 100644 index 000000000000..d8b5fff581e7 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -0,0 +1,312 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_H_ +#define _ICE_H_ + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/compiler.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/cpumask.h> +#include <linux/rtnetlink.h> +#include <linux/if_vlan.h> +#include <linux/dma-mapping.h> +#include <linux/pci.h> +#include <linux/workqueue.h> +#include <linux/aer.h> +#include <linux/interrupt.h> +#include <linux/ethtool.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <linux/bitmap.h> +#include <linux/log2.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/if_bridge.h> +#include <net/ipv6.h> +#include "ice_devids.h" +#include "ice_type.h" +#include "ice_txrx.h" +#include "ice_switch.h" +#include "ice_common.h" +#include "ice_sched.h" + +extern const char ice_drv_ver[]; +#define ICE_BAR0 0 +#define ICE_DFLT_NUM_DESC 128 +#define ICE_MIN_NUM_DESC 8 +#define ICE_MAX_NUM_DESC 8160 +#define ICE_REQ_DESC_MULTIPLE 32 +#define ICE_DFLT_TRAFFIC_CLASS BIT(0) +#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) +#define ICE_ETHTOOL_FWVER_LEN 32 +#define ICE_AQ_LEN 64 +#define ICE_MIN_MSIX 2 +#define ICE_NO_VSI 0xffff +#define ICE_MAX_VSI_ALLOC 130 +#define ICE_MAX_TXQS 2048 +#define ICE_MAX_RXQS 2048 +#define ICE_VSI_MAP_CONTIG 0 +#define ICE_VSI_MAP_SCATTER 1 +#define ICE_MAX_SCATTER_TXQS 16 +#define ICE_MAX_SCATTER_RXQS 16 +#define ICE_Q_WAIT_RETRY_LIMIT 10 +#define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT) +#define ICE_MAX_LG_RSS_QS 256 +#define ICE_MAX_SMALL_RSS_QS 8 +#define ICE_RES_VALID_BIT 0x8000 +#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) +#define ICE_INVAL_Q_INDEX 0xffff + +#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4) + +#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \ + (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)) + +#define ICE_UP_TABLE_TRANSLATE(val, i) \ + (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \ + ICE_AQ_VSI_UP_TABLE_UP##i##_M) + +#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i])) +#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i])) +#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i])) + +/* Macro for each VSI in a PF */ +#define ice_for_each_vsi(pf, i) \ + for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++) + +/* Macros for each tx/rx ring in a VSI */ +#define ice_for_each_txq(vsi, i) \ + for ((i) = 0; (i) < (vsi)->num_txq; (i)++) + +#define ice_for_each_rxq(vsi, i) \ + for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
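[Editorial aside] A quick arithmetic check on the ICE_MAX_MTU definition above (the framing overhead has to be subtracted as a whole, which is why the parentheses matter): with the 9728-byte maximum frame size from ice_adminq_cmd.h and the standard Ethernet constants, the usable MTU comes out to 9706 bytes. The constants are restated locally so the snippet stands alone:

#include <stdio.h>

#define FRAME_SIZE_MAX 9728 /* ICE_AQ_SET_MAC_FRAME_SIZE_MAX */
#define ETH_HDR 14          /* ETH_HLEN: two MACs + ethertype */
#define ETH_FCS 4           /* ETH_FCS_LEN: trailing CRC */
#define VLAN_TAG 4          /* VLAN_HLEN: one 802.1Q tag */

int main(void)
{
	/* subtract all L2 overhead from the max frame size: 9728 - 22 = 9706 */
	printf("ICE_MAX_MTU = %d\n",
	       FRAME_SIZE_MAX - (ETH_HDR + ETH_FCS + VLAN_TAG));
	return 0;
}

 + +struct ice_tc_info { + u16 qoffset; +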
u16 qcount; +}; + +struct ice_tc_cfg { + u8 numtc; /* Total number of enabled TCs */ + u8 ena_tc; /* TX map */ + struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS]; +}; + +struct ice_res_tracker { + u16 num_entries; + u16 search_hint; + u16 list[1]; +}; + +struct ice_sw { + struct ice_pf *pf; + u16 sw_id; /* switch ID for this switch */ + u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */ +}; + +enum ice_state { + __ICE_DOWN, + __ICE_NEEDS_RESTART, + __ICE_RESET_RECOVERY_PENDING, /* set by driver when reset starts */ + __ICE_PFR_REQ, /* set by driver and peers */ + __ICE_CORER_REQ, /* set by driver and peers */ + __ICE_GLOBR_REQ, /* set by driver and peers */ + __ICE_CORER_RECV, /* set by OICR handler */ + __ICE_GLOBR_RECV, /* set by OICR handler */ + __ICE_EMPR_RECV, /* set by OICR handler */ + __ICE_SUSPENDED, /* set on module remove path */ + __ICE_RESET_FAILED, /* set by reset/rebuild */ + __ICE_ADMINQ_EVENT_PENDING, + __ICE_FLTR_OVERFLOW_PROMISC, + __ICE_CFG_BUSY, + __ICE_SERVICE_SCHED, + __ICE_STATE_NBITS /* must be last */ +}; + +enum ice_vsi_flags { + ICE_VSI_FLAG_UMAC_FLTR_CHANGED, + ICE_VSI_FLAG_MMAC_FLTR_CHANGED, + ICE_VSI_FLAG_VLAN_FLTR_CHANGED, + ICE_VSI_FLAG_PROMISC_CHANGED, + ICE_VSI_FLAG_NBITS /* must be last */ +}; + +/* struct that defines a VSI, associated with a dev */ +struct ice_vsi { + struct net_device *netdev; + struct ice_sw *vsw; /* switch this VSI is on */ + struct ice_pf *back; /* back pointer to PF */ + struct ice_port_info *port_info; /* back pointer to port_info */ + struct ice_ring **rx_rings; /* rx ring array */ + struct ice_ring **tx_rings; /* tx ring array */ + struct ice_q_vector **q_vectors; /* q_vector array */ + + irqreturn_t (*irq_handler)(int irq, void *data); + + u64 tx_linearize; + DECLARE_BITMAP(state, __ICE_STATE_NBITS); + DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS); + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned int current_netdev_flags; + u32 tx_restart; + u32 tx_busy; + u32 rx_buf_failed; + u32 rx_page_failed; + int num_q_vectors; + int base_vector; + enum ice_vsi_type type; + u16 vsi_num; /* HW (absolute) index of this VSI */ + u16 idx; /* software index in pf->vsi[] */ + + /* Interrupt thresholds */ + u16 work_lmt; + + /* RSS config */ + u16 rss_table_size; /* HW RSS table size */ + u16 rss_size; /* Allocated RSS queues */ + u8 *rss_hkey_user; /* User configured hash keys */ + u8 *rss_lut_user; /* User configured lookup table entries */ + u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */ + + u16 max_frame; + u16 rx_buf_len; + + struct ice_aqc_vsi_props info; /* VSI properties */ + + /* VSI stats */ + struct rtnl_link_stats64 net_stats; + struct ice_eth_stats eth_stats; + struct ice_eth_stats eth_stats_prev; + + struct list_head tmp_sync_list; /* MAC filters to be synced */ + struct list_head tmp_unsync_list; /* MAC filters to be unsynced */ + + bool irqs_ready; + bool current_isup; /* Sync 'link up' logging */ + bool stat_offsets_loaded; + + /* queue information */ + u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ + u8 rx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ + u16 txq_map[ICE_MAX_TXQS]; /* index in pf->avail_txqs */ + u16 rxq_map[ICE_MAX_RXQS]; /* index in pf->avail_rxqs */ + u16 alloc_txq; /* Allocated Tx queues */ + u16 num_txq; /* Used Tx queues */ + u16 alloc_rxq; /* Allocated Rx queues */ + u16 num_rxq; /* Used Rx queues */ + u16 num_desc; + struct ice_tc_cfg tc_cfg; +} ____cacheline_internodealigned_in_smp; + +/* struct that defines an interrupt vector */ +struct ice_q_vector { + 
struct ice_vsi *vsi; + cpumask_t affinity_mask; + struct napi_struct napi; + struct ice_ring_container rx; + struct ice_ring_container tx; + struct irq_affinity_notify affinity_notify; + u16 v_idx; /* index in the vsi->q_vector array. */ + u8 num_ring_tx; /* total number of tx rings in vector */ + u8 num_ring_rx; /* total number of rx rings in vector */ + char name[ICE_INT_NAME_STR_LEN]; +} ____cacheline_internodealigned_in_smp; + +enum ice_pf_flags { + ICE_FLAG_MSIX_ENA, + ICE_FLAG_FLTR_SYNC, + ICE_FLAG_RSS_ENA, + ICE_PF_FLAGS_NBITS /* must be last */ +}; + +struct ice_pf { + struct pci_dev *pdev; + struct msix_entry *msix_entries; + struct ice_res_tracker *irq_tracker; + struct ice_vsi **vsi; /* VSIs created by the driver */ + struct ice_sw *first_sw; /* first switch created by firmware */ + DECLARE_BITMAP(state, __ICE_STATE_NBITS); + DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS); + DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS); + DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); + unsigned long serv_tmr_period; + unsigned long serv_tmr_prev; + struct timer_list serv_tmr; + struct work_struct serv_task; + struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */ + struct mutex sw_mutex; /* lock for protecting VSI alloc flow */ + u32 msg_enable; + u32 hw_csum_rx_error; + u32 oicr_idx; /* Other interrupt cause vector index */ + u32 num_lan_msix; /* Total MSIX vectors for base driver */ + u32 num_avail_msix; /* remaining MSIX vectors left unclaimed */ + u16 num_lan_tx; /* num lan tx queues setup */ + u16 num_lan_rx; /* num lan rx queues setup */ + u16 q_left_tx; /* remaining num tx queues left unclaimed */ + u16 q_left_rx; /* remaining num rx queues left unclaimed */ + u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */ + u16 num_alloc_vsi; + u16 corer_count; /* Core reset count */ + u16 globr_count; /* Global reset count */ + u16 empr_count; /* EMP reset count */ + u16 pfr_count; /* PF reset count */ + + struct ice_hw_port_stats stats; + struct ice_hw_port_stats stats_prev; + struct ice_hw hw; + bool stat_prev_loaded; /* has previous stats been loaded */ + char int_name[ICE_INT_NAME_STR_LEN]; +}; + +struct ice_netdev_priv { + struct ice_vsi *vsi; +}; + +/** + * ice_irq_dynamic_ena - Enable default interrupt generation settings + * @hw: pointer to hw struct + * @vsi: pointer to vsi struct, can be NULL + * @q_vector: pointer to q_vector, can be NULL + */ +static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, + struct ice_q_vector *q_vector) +{ + u32 vector = (vsi && q_vector) ? 
vsi->base_vector + q_vector->v_idx : + ((struct ice_pf *)hw->back)->oicr_idx; + int itr = ICE_ITR_NONE; + u32 val; + + /* clear the PBA here, as this function is meant to clean out all + * previous interrupts and enable the interrupt + */ + val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | + (itr << GLINT_DYN_CTL_ITR_INDX_S); + if (vsi) + if (test_bit(__ICE_DOWN, vsi->state)) + return; + wr32(hw, GLINT_DYN_CTL(vector), val); +} + +static inline void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) +{ + vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; + vsi->tc_cfg.numtc = 1; +} + +void ice_set_ethtool_ops(struct net_device *netdev); +int ice_up(struct ice_vsi *vsi); +int ice_down(struct ice_vsi *vsi); +int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); +int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); +void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); +void ice_print_link_msg(struct ice_vsi *vsi, bool isup); + +#endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h new file mode 100644 index 000000000000..5b13ca1bd85f --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -0,0 +1,1352 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_ADMINQ_CMD_H_ +#define _ICE_ADMINQ_CMD_H_ + +/* This header file defines the Admin Queue commands, error codes and + * descriptor format. It is shared between Firmware and Software. + */ + +#define ICE_MAX_VSI 768 +#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9 +#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728 + +struct ice_aqc_generic { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get version (direct 0x0001) */ +struct ice_aqc_get_ver { + __le32 rom_ver; + __le32 fw_build; + u8 fw_branch; + u8 fw_major; + u8 fw_minor; + u8 fw_patch; + u8 api_branch; + u8 api_major; + u8 api_minor; + u8 api_patch; +}; + +/* Queue Shutdown (direct 0x0003) */ +struct ice_aqc_q_shutdown { +#define ICE_AQC_DRIVER_UNLOADING BIT(0) + __le32 driver_unloading; + u8 reserved[12]; +}; + +/* Request resource ownership (direct 0x0008) + * Release resource ownership (direct 0x0009) + */ +struct ice_aqc_req_res { + __le16 res_id; +#define ICE_AQC_RES_ID_NVM 1 +#define ICE_AQC_RES_ID_SDP 2 +#define ICE_AQC_RES_ID_CHNG_LOCK 3 +#define ICE_AQC_RES_ID_GLBL_LOCK 4 + __le16 access_type; +#define ICE_AQC_RES_ACCESS_READ 1 +#define ICE_AQC_RES_ACCESS_WRITE 2 + + /* Upon successful completion, FW writes this value and driver is + * expected to release resource before timeout. This value is provided + * in milliseconds. 
+ */ + __le32 timeout; +#define ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000 +#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000 +#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000 +#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000 + /* For SDP: pin id of the SDP */ + __le32 res_number; + /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */ + __le16 status; +#define ICE_AQ_RES_GLBL_SUCCESS 0 +#define ICE_AQ_RES_GLBL_IN_PROG 1 +#define ICE_AQ_RES_GLBL_DONE 2 + u8 reserved[2]; +}; + +/* Get function capabilities (indirect 0x000A) + * Get device capabilities (indirect 0x000B) + */ +struct ice_aqc_list_caps { + u8 cmd_flags; + u8 pf_index; + u8 reserved[2]; + __le32 count; + __le32 addr_high; + __le32 addr_low; +}; + +/* Device/Function buffer entry, repeated per reported capability */ +struct ice_aqc_list_caps_elem { + __le16 cap; +#define ICE_AQC_CAPS_VSI 0x0017 +#define ICE_AQC_CAPS_RSS 0x0040 +#define ICE_AQC_CAPS_RXQS 0x0041 +#define ICE_AQC_CAPS_TXQS 0x0042 +#define ICE_AQC_CAPS_MSIX 0x0043 +#define ICE_AQC_CAPS_MAX_MTU 0x0047 + + u8 major_ver; + u8 minor_ver; + /* Number of resources described by this capability */ + __le32 number; + /* Only meaningful for some types of resources */ + __le32 logical_id; + /* Only meaningful for some types of resources */ + __le32 phys_id; + __le64 rsvd1; + __le64 rsvd2; +}; + +/* Manage MAC address, read command - indirect (0x0107) + * This struct is also used for the response + */ +struct ice_aqc_manage_mac_read { + __le16 flags; /* Zeroed by device driver */ +#define ICE_AQC_MAN_MAC_LAN_ADDR_VALID BIT(4) +#define ICE_AQC_MAN_MAC_SAN_ADDR_VALID BIT(5) +#define ICE_AQC_MAN_MAC_PORT_ADDR_VALID BIT(6) +#define ICE_AQC_MAN_MAC_WOL_ADDR_VALID BIT(7) +#define ICE_AQC_MAN_MAC_READ_S 4 +#define ICE_AQC_MAN_MAC_READ_M (0xF << ICE_AQC_MAN_MAC_READ_S) + u8 lport_num; + u8 lport_num_valid; +#define ICE_AQC_MAN_MAC_PORT_NUM_IS_VALID BIT(0) + u8 num_addr; /* Used in response */ + u8 reserved[3]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Response buffer format for manage MAC read command */ +struct ice_aqc_manage_mac_read_resp { + u8 lport_num; + u8 addr_type; +#define ICE_AQC_MAN_MAC_ADDR_TYPE_LAN 0 +#define ICE_AQC_MAN_MAC_ADDR_TYPE_WOL 1 + u8 mac_addr[ETH_ALEN]; +}; + +/* Manage MAC address, write command - direct (0x0108) */ +struct ice_aqc_manage_mac_write { + u8 port_num; + u8 flags; +#define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0) +#define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1) +#define ICE_AQC_MAN_MAC_WR_S 6 +#define ICE_AQC_MAN_MAC_WR_M (3 << ICE_AQC_MAN_MAC_WR_S) +#define ICE_AQC_MAN_MAC_UPDATE_LAA 0 +#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL (BIT(0) << ICE_AQC_MAN_MAC_WR_S) + /* High 16 bits of MAC address in big endian order */ + __be16 sah; + /* Low 32 bits of MAC address in big endian order */ + __be32 sal; + __le32 addr_high; + __le32 addr_low; +}; + +/* Clear PXE Command and response (direct 0x0110) */ +struct ice_aqc_clear_pxe { + u8 rx_cnt; +#define ICE_AQC_CLEAR_PXE_RX_CNT 0x2 + u8 reserved[15]; +}; + +/* Get switch configuration (0x0200) */ +struct ice_aqc_get_sw_cfg { + /* Reserved for command and copy of request flags for response */ + __le16 flags; + /* First desc in case of command and next_elem in case of response + * In case of response, if it is not zero, means all the configuration + * was not returned and new command shall be sent with this value in + * the 'first desc' field + */ + __le16 element; + /* Reserved for command, only used for response */ + __le16 num_elems; + __le16 rsvd; + __le32 addr_high; + __le32 addr_low; +}; + +/* 
Each entry in the response buffer is of the following type: */ +struct ice_aqc_get_sw_cfg_resp_elem { + /* VSI/Port Number */ + __le16 vsi_port_num; +#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S 0 +#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M \ + (0x3FF << ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S) +#define ICE_AQC_GET_SW_CONF_RESP_TYPE_S 14 +#define ICE_AQC_GET_SW_CONF_RESP_TYPE_M (0x3 << ICE_AQC_GET_SW_CONF_RESP_TYPE_S) +#define ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT 0 +#define ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT 1 +#define ICE_AQC_GET_SW_CONF_RESP_VSI 2 + + /* SWID VSI/Port belongs to */ + __le16 swid; + + /* Bit 14..0 : PF/VF number VSI belongs to + * Bit 15 : VF indication bit + */ + __le16 pf_vf_num; +#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S 0 +#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M \ + (0x7FFF << ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S) +#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15) +}; + +/* The response buffer is as follows. Note that the length of the + * elements array varies with the length of the command response. + */ +struct ice_aqc_get_sw_cfg_resp { + struct ice_aqc_get_sw_cfg_resp_elem elements[1]; +}; + +/* These resource type defines are used for all switch resource + * commands where a resource type is required, such as: + * Get Resource Allocation command (indirect 0x0204) + * Allocate Resources command (indirect 0x0208) + * Free Resources command (indirect 0x0209) + * Get Allocated Resource Descriptors Command (indirect 0x020A) + */ +#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03 +#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04 + +/* Allocate Resources command (indirect 0x0208) + * Free Resources command (indirect 0x0209) + */ +struct ice_aqc_alloc_free_res_cmd { + __le16 num_entries; /* Number of Resource entries */ + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Resource descriptor */ +struct ice_aqc_res_elem { + union { + __le16 sw_resp; + __le16 flu_resp; + } e; +}; + +/* Buffer for Allocate/Free Resources commands */ +struct ice_aqc_alloc_free_res_elem { + __le16 res_type; /* Types defined above cmd 0x0204 */ +#define ICE_AQC_RES_TYPE_SHARED_S 7 +#define ICE_AQC_RES_TYPE_SHARED_M (0x1 << ICE_AQC_RES_TYPE_SHARED_S) +#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S 8 +#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M \ + (0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S) + __le16 num_elems; + struct ice_aqc_res_elem elem[1]; +}; + +/* Add VSI (indirect 0x0210) + * Update VSI (indirect 0x0211) + * Get VSI (indirect 0x0212) + * Free VSI (indirect 0x0213) + */ +struct ice_aqc_add_get_update_free_vsi { + __le16 vsi_num; +#define ICE_AQ_VSI_NUM_S 0 +#define ICE_AQ_VSI_NUM_M (0x03FF << ICE_AQ_VSI_NUM_S) +#define ICE_AQ_VSI_IS_VALID BIT(15) + __le16 cmd_flags; +#define ICE_AQ_VSI_KEEP_ALLOC 0x1 + u8 vf_id; + u8 reserved; + __le16 vsi_flags; +#define ICE_AQ_VSI_TYPE_S 0 +#define ICE_AQ_VSI_TYPE_M (0x3 << ICE_AQ_VSI_TYPE_S) +#define ICE_AQ_VSI_TYPE_VF 0x0 +#define ICE_AQ_VSI_TYPE_VMDQ2 0x1 +#define ICE_AQ_VSI_TYPE_PF 0x2 +#define ICE_AQ_VSI_TYPE_EMP_MNG 0x3 + __le32 addr_high; + __le32 addr_low; +}; + +/* Response descriptor for: + * Add VSI (indirect 0x0210) + * Update VSI (indirect 0x0211) + * Free VSI (indirect 0x0213) + */ +struct ice_aqc_add_update_free_vsi_resp { + __le16 vsi_num; + __le16 ext_status; + __le16 vsi_used; + __le16 vsi_free; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_vsi_props { + __le16 valid_sections; +#define ICE_AQ_VSI_PROP_SW_VALID BIT(0) +#define ICE_AQ_VSI_PROP_SECURITY_VALID BIT(1) +#define ICE_AQ_VSI_PROP_VLAN_VALID BIT(2) +#define 
ICE_AQ_VSI_PROP_OUTER_TAG_VALID BIT(3) +#define ICE_AQ_VSI_PROP_INGRESS_UP_VALID BIT(4) +#define ICE_AQ_VSI_PROP_EGRESS_UP_VALID BIT(5) +#define ICE_AQ_VSI_PROP_RXQ_MAP_VALID BIT(6) +#define ICE_AQ_VSI_PROP_Q_OPT_VALID BIT(7) +#define ICE_AQ_VSI_PROP_OUTER_UP_VALID BIT(8) +#define ICE_AQ_VSI_PROP_FLOW_DIR_VALID BIT(11) +#define ICE_AQ_VSI_PROP_PASID_VALID BIT(12) + /* switch section */ + u8 sw_id; + u8 sw_flags; +#define ICE_AQ_VSI_SW_FLAG_ALLOW_LB BIT(5) +#define ICE_AQ_VSI_SW_FLAG_LOCAL_LB BIT(6) +#define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE BIT(7) + u8 sw_flags2; +#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0 +#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M \ + (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S) +#define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0) +#define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4) + u8 veb_stat_id; +#define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0 +#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S) +#define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID BIT(5) + /* security section */ + u8 sec_flags; +#define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD BIT(0) +#define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF BIT(2) +#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4 +#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S) +#define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA BIT(0) + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + u8 pvlan_reserved[2]; + u8 port_vlan_flags; +#define ICE_AQ_VSI_PVLAN_MODE_S 0 +#define ICE_AQ_VSI_PVLAN_MODE_M (0x3 << ICE_AQ_VSI_PVLAN_MODE_S) +#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1 +#define ICE_AQ_VSI_PVLAN_MODE_TAGGED 0x2 +#define ICE_AQ_VSI_PVLAN_MODE_ALL 0x3 +#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2) +#define ICE_AQ_VSI_PVLAN_EMOD_S 3 +#define ICE_AQ_VSI_PVLAN_EMOD_M (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) +#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S) +#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S) +#define ICE_AQ_VSI_PVLAN_EMOD_STR (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S) +#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) + u8 pvlan_reserved2[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define ICE_AQ_VSI_UP_TABLE_UP0_S 0 +#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S) +#define ICE_AQ_VSI_UP_TABLE_UP1_S 3 +#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S) +#define ICE_AQ_VSI_UP_TABLE_UP2_S 6 +#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S) +#define ICE_AQ_VSI_UP_TABLE_UP3_S 9 +#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S) +#define ICE_AQ_VSI_UP_TABLE_UP4_S 12 +#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S) +#define ICE_AQ_VSI_UP_TABLE_UP5_S 15 +#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S) +#define ICE_AQ_VSI_UP_TABLE_UP6_S 18 +#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S) +#define ICE_AQ_VSI_UP_TABLE_UP7_S 21 +#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S) + __le32 egress_table; /* same defines as for ingress table */ + /* outer tags section */ + __le16 outer_tag; + u8 outer_tag_flags; +#define ICE_AQ_VSI_OUTER_TAG_MODE_S 0 +#define ICE_AQ_VSI_OUTER_TAG_MODE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_MODE_S) +#define ICE_AQ_VSI_OUTER_TAG_NOTHING 0x0 +#define ICE_AQ_VSI_OUTER_TAG_REMOVE 0x1 +#define ICE_AQ_VSI_OUTER_TAG_COPY 0x2 +#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2 +#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S) +#define 
ICE_AQ_VSI_OUTER_TAG_NONE 0x0 +#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1 +#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2 +#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3 +#define ICE_AQ_VSI_OUTER_TAG_INSERT BIT(4) +#define ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST BIT(6) + u8 outer_tag_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0 +#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0) + __le16 q_mapping[16]; +#define ICE_AQ_VSI_Q_S 0 +#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S) + __le16 tc_mapping[8]; +#define ICE_AQ_VSI_TC_Q_OFFSET_S 0 +#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S) +#define ICE_AQ_VSI_TC_Q_NUM_S 11 +#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S) + /* queueing option section */ + u8 q_opt_rss; +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0 +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0 +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2 +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3 +#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2 +#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S) +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6 +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) + u8 q_opt_tc; +#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0 +#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S) +#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7) + u8 q_opt_flags; +#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0) + u8 q_opt_reserved[3]; + /* outer up section */ + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + /* section 10 */ + __le16 sect_10_reserved; + /* flow director section */ + __le16 fd_options; +#define ICE_AQ_VSI_FD_ENABLE BIT(0) +#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1) +#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3) + __le16 max_fd_fltr_dedicated; + __le16 max_fd_fltr_shared; + __le16 fd_def_q; +#define ICE_AQ_VSI_FD_DEF_Q_S 0 +#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S) +#define ICE_AQ_VSI_FD_DEF_GRP_S 12 +#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S) + __le16 fd_report_opt; +#define ICE_AQ_VSI_FD_REPORT_Q_S 0 +#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S) +#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12 +#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S) +#define ICE_AQ_VSI_FD_DEF_DROP BIT(15) + /* PASID section */ + __le32 pasid_id; +#define ICE_AQ_VSI_PASID_ID_S 0 +#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S) +#define ICE_AQ_VSI_PASID_ID_VALID BIT(31) + u8 reserved[24]; +}; + +/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3) + */ +struct ice_aqc_sw_rules { + /* ops: add switch rules, referring to the number of rules. + * ops: update switch rules, referring to the number of filters + * ops: remove switch rules, referring to the entry index. + * ops: get switch rules, referring to the number of filters. + */ + __le16 num_rules_fltr_entry_index; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +};
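[Editorial aside] The ice_aqc_sw_rules descriptor above only carries a count and a buffer address; each entry in that buffer is the lookup-rule structure defined next, which packs its forwarding action into a 32-bit act word. A sketch of composing a "forward to VSI" action from the ICE_SINGLE_ACT_* layout that follows; the masks are restated locally, and setting LAN-enable alongside the valid bit is this sketch's assumption, not something the header mandates:

#include <stdint.h>

#define ACT_TYPE_S 0
#define ACT_TYPE_M (0x3u << ACT_TYPE_S)   /* ICE_SINGLE_ACT_TYPE_* */
#define ACT_VSI_FORWARDING 0x0
#define ACT_VSI_ID_S 4
#define ACT_VSI_ID_M (0x3FFu << ACT_VSI_ID_S)
#define ACT_LAN_ENABLE (1u << 3)          /* ICE_SINGLE_ACT_LAN_ENABLE */
#define ACT_VALID_BIT (1u << 17)          /* ICE_SINGLE_ACT_VALID_BIT */

/* Build the act word for "forward this match to VSI vsi_num". */
static uint32_t fwd_to_vsi_act(uint16_t vsi_num)
{
	uint32_t act = (ACT_VSI_FORWARDING << ACT_TYPE_S) & ACT_TYPE_M;

	act |= ((uint32_t)vsi_num << ACT_VSI_ID_S) & ACT_VSI_ID_M;
	act |= ACT_VALID_BIT | ACT_LAN_ENABLE;
	return act; /* the kernel would store this with cpu_to_le32() */
}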
"index" + * is returned as part of a response to a successful Add command, and can be + * used to identify the rule for Update/Get/Remove commands. + */ +struct ice_sw_rule_lkup_rx_tx { + __le16 recipe_id; +#define ICE_SW_RECIPE_LOGICAL_PORT_FWD 10 + /* Source port for LOOKUP_RX and source VSI in case of LOOKUP_TX */ + __le16 src; + __le32 act; + + /* Bit 0:1 - Action type */ +#define ICE_SINGLE_ACT_TYPE_S 0x00 +#define ICE_SINGLE_ACT_TYPE_M (0x3 << ICE_SINGLE_ACT_TYPE_S) + + /* Bit 2 - Loop back enable + * Bit 3 - LAN enable + */ +#define ICE_SINGLE_ACT_LB_ENABLE BIT(2) +#define ICE_SINGLE_ACT_LAN_ENABLE BIT(3) + + /* Action type = 0 - Forward to VSI or VSI list */ +#define ICE_SINGLE_ACT_VSI_FORWARDING 0x0 + +#define ICE_SINGLE_ACT_VSI_ID_S 4 +#define ICE_SINGLE_ACT_VSI_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_ID_S) +#define ICE_SINGLE_ACT_VSI_LIST_ID_S 4 +#define ICE_SINGLE_ACT_VSI_LIST_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_LIST_ID_S) + /* This bit needs to be set if action is forward to VSI list */ +#define ICE_SINGLE_ACT_VSI_LIST BIT(14) +#define ICE_SINGLE_ACT_VALID_BIT BIT(17) +#define ICE_SINGLE_ACT_DROP BIT(18) + + /* Action type = 1 - Forward to Queue of Queue group */ +#define ICE_SINGLE_ACT_TO_Q 0x1 +#define ICE_SINGLE_ACT_Q_INDEX_S 4 +#define ICE_SINGLE_ACT_Q_INDEX_M (0x7FF << ICE_SINGLE_ACT_Q_INDEX_S) +#define ICE_SINGLE_ACT_Q_REGION_S 15 +#define ICE_SINGLE_ACT_Q_REGION_M (0x7 << ICE_SINGLE_ACT_Q_REGION_S) +#define ICE_SINGLE_ACT_Q_PRIORITY BIT(18) + + /* Action type = 2 - Prune */ +#define ICE_SINGLE_ACT_PRUNE 0x2 +#define ICE_SINGLE_ACT_EGRESS BIT(15) +#define ICE_SINGLE_ACT_INGRESS BIT(16) +#define ICE_SINGLE_ACT_PRUNET BIT(17) + /* Bit 18 should be set to 0 for this action */ + + /* Action type = 2 - Pointer */ +#define ICE_SINGLE_ACT_PTR 0x2 +#define ICE_SINGLE_ACT_PTR_VAL_S 4 +#define ICE_SINGLE_ACT_PTR_VAL_M (0x1FFF << ICE_SINGLE_ACT_PTR_VAL_S) + /* Bit 18 should be set to 1 */ +#define ICE_SINGLE_ACT_PTR_BIT BIT(18) + + /* Action type = 3 - Other actions. Last two bits + * are other action identifier + */ +#define ICE_SINGLE_ACT_OTHER_ACTS 0x3 +#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_S 17 +#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_M \ + (0x3 << \ ICE_SINGLE_OTHER_ACT_IDENTIFIER_S) + + /* Bit 17:18 - Defines other actions */ + /* Other action = 0 - Mirror VSI */ +#define ICE_SINGLE_OTHER_ACT_MIRROR 0 +#define ICE_SINGLE_ACT_MIRROR_VSI_ID_S 4 +#define ICE_SINGLE_ACT_MIRROR_VSI_ID_M \ + (0x3FF << ICE_SINGLE_ACT_MIRROR_VSI_ID_S) + + /* Other action = 3 - Set Stat count */ +#define ICE_SINGLE_OTHER_ACT_STAT_COUNT 3 +#define ICE_SINGLE_ACT_STAT_COUNT_INDEX_S 4 +#define ICE_SINGLE_ACT_STAT_COUNT_INDEX_M \ + (0x7F << ICE_SINGLE_ACT_STAT_COUNT_INDEX_S) + + __le16 index; /* The index of the rule in the lookup table */ + /* Length and values of the header to be matched per recipe or + * lookup-type + */ + __le16 hdr_len; + u8 hdr[1]; +} __packed; + +/* Add/Update/Remove large action command/response entry + * "index" is returned as part of a response to a successful Add command, and + * can be used to identify the action for Update/Get/Remove commands. 
+ */ +struct ice_sw_rule_lg_act { + __le16 index; /* Index in large action table */ + __le16 size; + __le32 act[1]; /* array of size for actions */ + /* Max number of large actions */ +#define ICE_MAX_LG_ACT 4 + /* Bit 0:1 - Action type */ +#define ICE_LG_ACT_TYPE_S 0 +#define ICE_LG_ACT_TYPE_M (0x7 << ICE_LG_ACT_TYPE_S) + + /* Action type = 0 - Forward to VSI or VSI list */ +#define ICE_LG_ACT_VSI_FORWARDING 0 +#define ICE_LG_ACT_VSI_ID_S 3 +#define ICE_LG_ACT_VSI_ID_M (0x3FF << ICE_LG_ACT_VSI_ID_S) +#define ICE_LG_ACT_VSI_LIST_ID_S 3 +#define ICE_LG_ACT_VSI_LIST_ID_M (0x3FF << ICE_LG_ACT_VSI_LIST_ID_S) + /* This bit needs to be set if action is forward to VSI list */ +#define ICE_LG_ACT_VSI_LIST BIT(13) + +#define ICE_LG_ACT_VALID_BIT BIT(16) + + /* Action type = 1 - Forward to Queue of Queue group */ +#define ICE_LG_ACT_TO_Q 0x1 +#define ICE_LG_ACT_Q_INDEX_S 3 +#define ICE_LG_ACT_Q_INDEX_M (0x7FF << ICE_LG_ACT_Q_INDEX_S) +#define ICE_LG_ACT_Q_REGION_S 14 +#define ICE_LG_ACT_Q_REGION_M (0x7 << ICE_LG_ACT_Q_REGION_S) +#define ICE_LG_ACT_Q_PRIORITY_SET BIT(17) + + /* Action type = 2 - Prune */ +#define ICE_LG_ACT_PRUNE 0x2 +#define ICE_LG_ACT_EGRESS BIT(14) +#define ICE_LG_ACT_INGRESS BIT(15) +#define ICE_LG_ACT_PRUNET BIT(16) + + /* Action type = 3 - Mirror VSI */ +#define ICE_LG_OTHER_ACT_MIRROR 0x3 +#define ICE_LG_ACT_MIRROR_VSI_ID_S 3 +#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S) + + /* Action type = 5 - Large Action */ +#define ICE_LG_ACT_GENERIC 0x5 +#define ICE_LG_ACT_GENERIC_VALUE_S 3 +#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S) +#define ICE_LG_ACT_GENERIC_OFFSET_S 19 +#define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S) +#define ICE_LG_ACT_GENERIC_PRIORITY_S 22 +#define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S) + + /* Action = 7 - Set Stat count */ +#define ICE_LG_ACT_STAT_COUNT 0x7 +#define ICE_LG_ACT_STAT_COUNT_S 3 +#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S) +}; + +/* Add/Update/Remove VSI list command/response entry + * "index" is returned as part of a response to a successful Add command, and + * can be used to identify the VSI list for Update/Get/Remove commands. + */ +struct ice_sw_rule_vsi_list { + __le16 index; /* Index of VSI/Prune list */ + __le16 number_vsi; + __le16 vsi[1]; /* Array of number_vsi VSI numbers */ +}; + +/* Query VSI list command/response entry */ +struct ice_sw_rule_vsi_list_query { + __le16 index; + DECLARE_BITMAP(vsi_list, ICE_MAX_VSI); +} __packed; + +/* Add switch rule response: + * Content of return buffer is same as the input buffer. The status field and + * LUT index are updated as part of the response + */ +struct ice_aqc_sw_rules_elem { + __le16 type; /* Switch rule type, one of T_... 
*/ +#define ICE_AQC_SW_RULES_T_LKUP_RX 0x0 +#define ICE_AQC_SW_RULES_T_LKUP_TX 0x1 +#define ICE_AQC_SW_RULES_T_LG_ACT 0x2 +#define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3 +#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4 +#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5 +#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6 + __le16 status; + union { + struct ice_sw_rule_lkup_rx_tx lkup_tx_rx; + struct ice_sw_rule_lg_act lg_act; + struct ice_sw_rule_vsi_list vsi_list; + struct ice_sw_rule_vsi_list_query vsi_list_query; + } __packed pdata; +}; + +/* Get Default Topology (indirect 0x0400) */ +struct ice_aqc_get_topo { + u8 port_num; + u8 num_branches; + __le16 reserved1; + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + +/* Update TSE (indirect 0x0403) + * Get TSE (indirect 0x0404) + */ +struct ice_aqc_get_cfg_elem { + __le16 num_elem_req; /* Used by commands */ + __le16 num_elem_resp; /* Used by responses */ + __le32 reserved; + __le32 addr_high; + __le32 addr_low; +}; + +/* This is the buffer for: + * Suspend Nodes (indirect 0x0409) + * Resume Nodes (indirect 0x040A) + */ +struct ice_aqc_suspend_resume_elem { + __le32 teid[1]; +}; + +/* Add TSE (indirect 0x0401) + * Delete TSE (indirect 0x040F) + * Move TSE (indirect 0x0408) + */ +struct ice_aqc_add_move_delete_elem { + __le16 num_grps_req; + __le16 num_grps_updated; + __le32 reserved; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_elem_info_bw { + __le16 bw_profile_idx; + __le16 bw_alloc; +}; + +struct ice_aqc_txsched_elem { + u8 elem_type; /* Special field, reserved for some aq calls */ +#define ICE_AQC_ELEM_TYPE_UNDEFINED 0x0 +#define ICE_AQC_ELEM_TYPE_ROOT_PORT 0x1 +#define ICE_AQC_ELEM_TYPE_TC 0x2 +#define ICE_AQC_ELEM_TYPE_SE_GENERIC 0x3 +#define ICE_AQC_ELEM_TYPE_ENTRY_POINT 0x4 +#define ICE_AQC_ELEM_TYPE_LEAF 0x5 +#define ICE_AQC_ELEM_TYPE_SE_PADDED 0x6 + u8 valid_sections; +#define ICE_AQC_ELEM_VALID_GENERIC BIT(0) +#define ICE_AQC_ELEM_VALID_CIR BIT(1) +#define ICE_AQC_ELEM_VALID_EIR BIT(2) +#define ICE_AQC_ELEM_VALID_SHARED BIT(3) + u8 generic; +#define ICE_AQC_ELEM_GENERIC_MODE_M 0x1 +#define ICE_AQC_ELEM_GENERIC_PRIO_S 0x1 +#define ICE_AQC_ELEM_GENERIC_PRIO_M (0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S) +#define ICE_AQC_ELEM_GENERIC_SP_S 0x4 +#define ICE_AQC_ELEM_GENERIC_SP_M (0x1 << ICE_AQC_ELEM_GENERIC_SP_S) +#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S 0x5 +#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M \ + (0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S) + u8 flags; /* Special field, reserved for some aq calls */ +#define ICE_AQC_ELEM_FLAG_SUSPEND_M 0x1 + struct ice_aqc_elem_info_bw cir_bw; + struct ice_aqc_elem_info_bw eir_bw; + __le16 srl_id; + __le16 reserved2; +}; + +struct ice_aqc_txsched_elem_data { + __le32 parent_teid; + __le32 node_teid; + struct ice_aqc_txsched_elem data; +}; + +struct ice_aqc_txsched_topo_grp_info_hdr { + __le32 parent_teid; + __le16 num_elems; + __le16 reserved2; +}; + +struct ice_aqc_add_elem { + struct ice_aqc_txsched_topo_grp_info_hdr hdr; + struct ice_aqc_txsched_elem_data generic[1]; +}; + +struct ice_aqc_get_topo_elem { + struct ice_aqc_txsched_topo_grp_info_hdr hdr; + struct ice_aqc_txsched_elem_data + generic[ICE_AQC_TOPO_MAX_LEVEL_NUM]; +}; + +struct ice_aqc_delete_elem { + struct ice_aqc_txsched_topo_grp_info_hdr hdr; + __le32 teid[1]; +}; + +/* Query Scheduler Resource Allocation (indirect 0x0412) + * This indirect command retrieves the scheduler resources allocated by + * EMP Firmware to the given PF. 
+ */ +struct ice_aqc_query_txsched_res { + u8 reserved[8]; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_generic_sched_props { + __le16 phys_levels; + __le16 logical_levels; + u8 flattening_bitmap; + u8 max_device_cgds; + u8 max_pf_cgds; + u8 rsvd0; + __le16 rdma_qsets; + u8 rsvd1[22]; +}; + +struct ice_aqc_layer_props { + u8 logical_layer; + u8 chunk_size; + __le16 max_device_nodes; + __le16 max_pf_nodes; + u8 rsvd0[2]; + __le16 max_shared_rate_lmtr; + __le16 max_children; + __le16 max_cir_rl_profiles; + __le16 max_eir_rl_profiles; + __le16 max_srl_profiles; + u8 rsvd1[14]; +}; + +struct ice_aqc_query_txsched_res_resp { + struct ice_aqc_generic_sched_props sched_props; + struct ice_aqc_layer_props layer_props[ICE_AQC_TOPO_MAX_LEVEL_NUM]; +}; + +/* Get PHY capabilities (indirect 0x0600) */ +struct ice_aqc_get_phy_caps { + u8 lport_num; + u8 reserved; + __le16 param0; + /* 18.0 - Report qualified modules */ +#define ICE_AQC_GET_PHY_RQM BIT(0) + /* 18.1 - 18.2 : Report mode + * 00b - Report NVM capabilities + * 01b - Report topology capabilities + * 10b - Report SW configured + */ +#define ICE_AQC_REPORT_MODE_S 1 +#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S) +#define ICE_AQC_REPORT_NVM_CAP 0 +#define ICE_AQC_REPORT_TOPO_CAP BIT(1) +#define ICE_AQC_REPORT_SW_CFG BIT(2) + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +/* These are the #defines for PHY type (extended). + * The first set of defines is for phy_type_low. + */ +#define ICE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0) +#define ICE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1) +#define ICE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2) +#define ICE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3) +#define ICE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4) +#define ICE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5) +#define ICE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6) +#define ICE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7) +#define ICE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8) +#define ICE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9) +#define ICE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10) +#define ICE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11) +#define ICE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12) +#define ICE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13) +#define ICE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14) +#define ICE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15) +#define ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16) +#define ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17) +#define ICE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18) +#define ICE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19) +#define ICE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20) +#define ICE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21) +#define ICE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22) +#define ICE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23) +#define ICE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24) +#define ICE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25) +#define ICE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26) +#define ICE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27) +#define ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28) +#define ICE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29) +#define ICE_PHY_TYPE_LOW_40GBASE_CR4 BIT_ULL(30) +#define ICE_PHY_TYPE_LOW_40GBASE_SR4 BIT_ULL(31) +#define ICE_PHY_TYPE_LOW_40GBASE_LR4 BIT_ULL(32) +#define ICE_PHY_TYPE_LOW_40GBASE_KR4 BIT_ULL(33) +#define ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC BIT_ULL(34) +#define ICE_PHY_TYPE_LOW_40G_XLAUI BIT_ULL(35) +#define ICE_PHY_TYPE_LOW_MAX_INDEX 63
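[Editorial aside] phy_type_low is a 64-bit capability bitmap with one BIT_ULL() per PHY type, so querying support for a given type (or intersecting the NVM-reported and topology-reported sets) is plain mask arithmetic. A minimal sketch with one constant restated locally:

#include <stdint.h>
#include <stdbool.h>

#define PHY_TYPE_LOW_25GBASE_SR (1ULL << 23) /* ICE_PHY_TYPE_LOW_25GBASE_SR */

/* True when the reported capability bitmap includes 25GBase-SR. */
static bool supports_25g_sr(uint64_t phy_type_low)
{
	return (phy_type_low & PHY_TYPE_LOW_25GBASE_SR) != 0;
}

 + +struct ice_aqc_get_phy_caps_data { + __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ + __le64 reserved; + u8 caps; +#define ICE_AQC_PHY_EN_TX_LINK_PAUSE BIT(0) +#define ICE_AQC_PHY_EN_RX_LINK_PAUSE BIT(1) +#define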
ICE_AQC_PHY_LOW_POWER_MODE BIT(2) +#define ICE_AQC_PHY_EN_LINK BIT(3) +#define ICE_AQC_PHY_AN_MODE BIT(4) +#define ICE_AQC_GET_PHY_EN_MOD_QUAL BIT(5) + u8 low_power_ctrl; +#define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0) + __le16 eee_cap; +#define ICE_AQC_PHY_EEE_EN_100BASE_TX BIT(0) +#define ICE_AQC_PHY_EEE_EN_1000BASE_T BIT(1) +#define ICE_AQC_PHY_EEE_EN_10GBASE_T BIT(2) +#define ICE_AQC_PHY_EEE_EN_1000BASE_KX BIT(3) +#define ICE_AQC_PHY_EEE_EN_10GBASE_KR BIT(4) +#define ICE_AQC_PHY_EEE_EN_25GBASE_KR BIT(5) +#define ICE_AQC_PHY_EEE_EN_40GBASE_KR4 BIT(6) + __le16 eeer_value; + u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */ + u8 link_fec_options; +#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN BIT(0) +#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1) +#define ICE_AQC_PHY_FEC_25G_RS_528_REQ BIT(2) +#define ICE_AQC_PHY_FEC_25G_KR_REQ BIT(3) +#define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4) +#define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6) +#define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7) + u8 extended_compliance_code; +#define ICE_MODULE_TYPE_TOTAL_BYTE 3 + u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; +#define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS 0xA0 +#define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS 0x80 +#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0) +#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1) +#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4) +#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5) +#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6) +#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7) +#define ICE_AQC_MOD_TYPE_BYTE2_SFP_PLUS 0xA0 +#define ICE_AQC_MOD_TYPE_BYTE2_QSFP_PLUS 0x86 + u8 qualified_module_count; +#define ICE_AQC_QUAL_MOD_COUNT_MAX 16 + struct { + u8 v_oui[3]; + u8 rsvd1; + u8 v_part[16]; + __le32 v_rev; + __le64 rsvd8; + } qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX]; +}; + +/* Set PHY capabilities (direct 0x0601) + * NOTE: This command must be followed by setup link and restart auto-neg + */ +struct ice_aqc_set_phy_cfg { + u8 lport_num; + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Set PHY config command data structure */ +struct ice_aqc_set_phy_cfg_data { + __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ + __le64 rsvd0; + u8 caps; +#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0) +#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1) +#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2) +#define ICE_AQ_PHY_ENA_LINK BIT(3) +#define ICE_AQ_PHY_ENA_ATOMIC_LINK BIT(5) + u8 low_power_ctrl; + __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */ + __le16 eeer_value; + u8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */ + u8 rsvd1; +}; + +/* Restart AN command data structure (direct 0x0605) + * Also used for response, with only the lport_num field present. 
+ */ +struct ice_aqc_restart_an { + u8 lport_num; + u8 reserved; + u8 cmd_flags; +#define ICE_AQC_RESTART_AN_LINK_RESTART BIT(1) +#define ICE_AQC_RESTART_AN_LINK_ENABLE BIT(2) + u8 reserved2[13]; +}; + +/* Get link status (indirect 0x0607), also used for Link Status Event */ +struct ice_aqc_get_link_status { + u8 lport_num; + u8 reserved; + __le16 cmd_flags; +#define ICE_AQ_LSE_M 0x3 +#define ICE_AQ_LSE_NOP 0x0 +#define ICE_AQ_LSE_DIS 0x2 +#define ICE_AQ_LSE_ENA 0x3 + /* only response uses this flag */ +#define ICE_AQ_LSE_IS_ENABLED 0x1 + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get link status response data structure, also used for Link Status Event */ +struct ice_aqc_get_link_status_data { + u8 topo_media_conflict; +#define ICE_AQ_LINK_TOPO_CONFLICT BIT(0) +#define ICE_AQ_LINK_MEDIA_CONFLICT BIT(1) +#define ICE_AQ_LINK_TOPO_CORRUPT BIT(2) + u8 reserved1; + u8 link_info; +#define ICE_AQ_LINK_UP BIT(0) /* Link Status */ +#define ICE_AQ_LINK_FAULT BIT(1) +#define ICE_AQ_LINK_FAULT_TX BIT(2) +#define ICE_AQ_LINK_FAULT_RX BIT(3) +#define ICE_AQ_LINK_FAULT_REMOTE BIT(4) +#define ICE_AQ_LINK_UP_PORT BIT(5) /* External Port Link Status */ +#define ICE_AQ_MEDIA_AVAILABLE BIT(6) +#define ICE_AQ_SIGNAL_DETECT BIT(7) + u8 an_info; +#define ICE_AQ_AN_COMPLETED BIT(0) +#define ICE_AQ_LP_AN_ABILITY BIT(1) +#define ICE_AQ_PD_FAULT BIT(2) /* Parallel Detection Fault */ +#define ICE_AQ_FEC_EN BIT(3) +#define ICE_AQ_PHY_LOW_POWER BIT(4) /* Low Power State */ +#define ICE_AQ_LINK_PAUSE_TX BIT(5) +#define ICE_AQ_LINK_PAUSE_RX BIT(6) +#define ICE_AQ_QUALIFIED_MODULE BIT(7) + u8 ext_info; +#define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0) +#define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */ + /* Port TX Suspended */ +#define ICE_AQ_LINK_TX_S 2 +#define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S) +#define ICE_AQ_LINK_TX_ACTIVE 0 +#define ICE_AQ_LINK_TX_DRAINED 1 +#define ICE_AQ_LINK_TX_FLUSHED 3 + u8 reserved2; + __le16 max_frame_size; + u8 cfg; +#define ICE_AQ_LINK_25G_KR_FEC_EN BIT(0) +#define ICE_AQ_LINK_25G_RS_528_FEC_EN BIT(1) +#define ICE_AQ_LINK_25G_RS_544_FEC_EN BIT(2) + /* Pacing Config */ +#define ICE_AQ_CFG_PACING_S 3 +#define ICE_AQ_CFG_PACING_M (0xF << ICE_AQ_CFG_PACING_S) +#define ICE_AQ_CFG_PACING_TYPE_M BIT(7) +#define ICE_AQ_CFG_PACING_TYPE_AVG 0 +#define ICE_AQ_CFG_PACING_TYPE_FIXED ICE_AQ_CFG_PACING_TYPE_M + /* External Device Power Ability */ + u8 power_desc; +#define ICE_AQ_PWR_CLASS_M 0x3 +#define ICE_AQ_LINK_PWR_BASET_LOW_HIGH 0 +#define ICE_AQ_LINK_PWR_BASET_HIGH 1 +#define ICE_AQ_LINK_PWR_QSFP_CLASS_1 0 +#define ICE_AQ_LINK_PWR_QSFP_CLASS_2 1 +#define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2 +#define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3 + __le16 link_speed; +#define ICE_AQ_LINK_SPEED_10MB BIT(0) +#define ICE_AQ_LINK_SPEED_100MB BIT(1) +#define ICE_AQ_LINK_SPEED_1000MB BIT(2) +#define ICE_AQ_LINK_SPEED_2500MB BIT(3) +#define ICE_AQ_LINK_SPEED_5GB BIT(4) +#define ICE_AQ_LINK_SPEED_10GB BIT(5) +#define ICE_AQ_LINK_SPEED_20GB BIT(6) +#define ICE_AQ_LINK_SPEED_25GB BIT(7) +#define ICE_AQ_LINK_SPEED_40GB BIT(8) +#define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15) + __le32 reserved3; /* Aligns next field to 8-byte boundary */ + __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ + __le64 reserved4; +}; + +/* Set event mask command (direct 0x0613) */ +struct ice_aqc_set_event_mask { + u8 lport_num; + u8 reserved[7]; + __le16 event_mask; +#define ICE_AQ_LINK_EVENT_UPDOWN BIT(1) +#define ICE_AQ_LINK_EVENT_MEDIA_NA BIT(2) +#define ICE_AQ_LINK_EVENT_LINK_FAULT BIT(3) +#define 
ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM BIT(4) +#define ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS BIT(5) +#define ICE_AQ_LINK_EVENT_SIGNAL_DETECT BIT(6) +#define ICE_AQ_LINK_EVENT_AN_COMPLETED BIT(7) +#define ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL BIT(8) +#define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED BIT(9) + u8 reserved1[6]; +}; + +/* NVM Read command (indirect 0x0701) + * NVM Erase commands (direct 0x0702) + * NVM Update commands (indirect 0x0703) + */ +struct ice_aqc_nvm { + u8 cmd_flags; +#define ICE_AQC_NVM_LAST_CMD BIT(0) +#define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */ +#define ICE_AQC_NVM_PRESERVATION_S 1 +#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_PRESERVE_ALL BIT(1) +#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_FLASH_ONLY BIT(7) + u8 module_typeid; + __le16 length; +#define ICE_AQC_NVM_ERASE_LEN 0xFFFF + __le32 offset; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get/Set RSS key (indirect 0x0B04/0x0B02) */ +struct ice_aqc_get_set_rss_key { +#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15) +#define ICE_AQC_GSET_RSS_KEY_VSI_ID_S 0 +#define ICE_AQC_GSET_RSS_KEY_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_KEY_VSI_ID_S) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +#define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28 +#define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC + +struct ice_aqc_get_set_rss_keys { + u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; + u8 extended_hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE]; +}; + +/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */ +struct ice_aqc_get_set_rss_lut { +#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15) +#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0 +#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) + __le16 vsi_id; +#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0 +#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \ + (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) + +#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI 0 +#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF 1 +#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL 2 + +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S 2 +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M \ + (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) + +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 128 +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG 0 +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 512 +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG 1 +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K 2048 +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG 2 + +#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S 4 +#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M \ + (0xF << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) + + __le16 flags; + __le32 reserved; + __le32 addr_high; + __le32 addr_low; +}; + +/* Add TX LAN Queues (indirect 0x0C30) */ +struct ice_aqc_add_txqs { + u8 num_qgrps; + u8 reserved[3]; + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +/* This is the descriptor of each queue entry for the Add TX LAN Queues + * command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp. + */ +struct ice_aqc_add_txqs_perq { + __le16 txq_id; + u8 rsvd[2]; + __le32 q_teid; + u8 txq_ctx[22]; + u8 rsvd2[2]; + struct ice_aqc_txsched_elem info; +};
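[Editorial aside] The Add TX LAN Queues buffer handed to firmware is variable-length: each ice_aqc_add_tx_qgrp (defined just below) ends in a one-element txqs[] array, the common pre-flexible-array-member idiom, so an allocation must account for num_txqs - 1 extra per-queue entries. A small sketch of the size computation with pared-down stand-in structs (the real ones carry more fields):

#include <stddef.h>
#include <stdint.h>

struct perq {                 /* stand-in for struct ice_aqc_add_txqs_perq */
	uint16_t txq_id;
	uint8_t ctx[28];      /* placeholder for the remaining fields */
};

struct qgrp {                 /* stand-in for struct ice_aqc_add_tx_qgrp */
	uint32_t parent_teid;
	uint8_t num_txqs;
	uint8_t rsvd[3];
	struct perq txqs[1];  /* first entry; the rest follow in memory */
};

static size_t qgrp_buf_len(unsigned int num_txqs)
{
	/* one group header plus (num_txqs - 1) extra entries; num_txqs >= 1 */
	return sizeof(struct qgrp) + (num_txqs - 1) * sizeof(struct perq);
}

 + +/* The format of the command buffer for Add TX LAN Queues (0x0C30) + * is an array of the following structs. 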
Please note that the length of + * each struct ice_aqc_add_tx_qgrp is variable due + * to the variable number of queues in each group! + */ +struct ice_aqc_add_tx_qgrp { + __le32 parent_teid; + u8 num_txqs; + u8 rsvd[3]; + struct ice_aqc_add_txqs_perq txqs[1]; +}; + +/* Disable TX LAN Queues (indirect 0x0C31) */ +struct ice_aqc_dis_txqs { + u8 cmd_type; +#define ICE_AQC_Q_DIS_CMD_S 0 +#define ICE_AQC_Q_DIS_CMD_M (0x3 << ICE_AQC_Q_DIS_CMD_S) +#define ICE_AQC_Q_DIS_CMD_NO_FUNC_RESET (0 << ICE_AQC_Q_DIS_CMD_S) +#define ICE_AQC_Q_DIS_CMD_VM_RESET BIT(ICE_AQC_Q_DIS_CMD_S) +#define ICE_AQC_Q_DIS_CMD_VF_RESET (2 << ICE_AQC_Q_DIS_CMD_S) +#define ICE_AQC_Q_DIS_CMD_PF_RESET (3 << ICE_AQC_Q_DIS_CMD_S) +#define ICE_AQC_Q_DIS_CMD_SUBSEQ_CALL BIT(2) +#define ICE_AQC_Q_DIS_CMD_FLUSH_PIPE BIT(3) + u8 num_entries; + __le16 vmvf_and_timeout; +#define ICE_AQC_Q_DIS_VMVF_NUM_S 0 +#define ICE_AQC_Q_DIS_VMVF_NUM_M (0x3FF << ICE_AQC_Q_DIS_VMVF_NUM_S) +#define ICE_AQC_Q_DIS_TIMEOUT_S 10 +#define ICE_AQC_Q_DIS_TIMEOUT_M (0x3F << ICE_AQC_Q_DIS_TIMEOUT_S) + __le32 blocked_cgds; + __le32 addr_high; + __le32 addr_low; +}; + +/* The buffer for Disable TX LAN Queues (indirect 0x0C31) + * contains the following structures, arrayed one after the + * other. + * Note: Since the q_id is 16 bits wide, if the + * number of queues is even, then 2 bytes of alignment MUST be + * added before the start of the next group, to allow correct + * alignment of the parent_teid field. + */ +struct ice_aqc_dis_txq_item { + __le32 parent_teid; + u8 num_qs; + u8 rsvd; + /* The length of the q_id array varies according to num_qs */ + __le16 q_id[1]; + /* This only applies from F8 onward */ +#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S 15 +#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q \ + (0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S) +#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \ + (1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S) +}; + +struct ice_aqc_dis_txq { + struct ice_aqc_dis_txq_item qgrps[1]; +}; + +/** + * struct ice_aq_desc - Admin Queue (AQ) descriptor + * @flags: ICE_AQ_FLAG_* flags + * @opcode: AQ command opcode + * @datalen: length in bytes of indirect/external data buffer + * @retval: return value from firmware + * @cookie_h: opaque data high-half + * @cookie_l: opaque data low-half + * @params: command-specific parameters + * + * Descriptor format for commands the driver posts on the Admin Transmit Queue + * (ATQ). The firmware writes back onto the command descriptor and returns + * the result of the command. Asynchronous events that are not an immediate + * result of the command are written to the Admin Receive Queue (ARQ) using + * the same descriptor format. Descriptors are in little-endian notation with + * 32-bit words. 
+ */ +struct ice_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + u8 raw[16]; + struct ice_aqc_generic generic; + struct ice_aqc_get_ver get_ver; + struct ice_aqc_q_shutdown q_shutdown; + struct ice_aqc_req_res res_owner; + struct ice_aqc_manage_mac_read mac_read; + struct ice_aqc_manage_mac_write mac_write; + struct ice_aqc_clear_pxe clear_pxe; + struct ice_aqc_list_caps get_cap; + struct ice_aqc_get_phy_caps get_phy; + struct ice_aqc_set_phy_cfg set_phy; + struct ice_aqc_restart_an restart_an; + struct ice_aqc_get_sw_cfg get_sw_conf; + struct ice_aqc_sw_rules sw_rules; + struct ice_aqc_get_topo get_topo; + struct ice_aqc_get_cfg_elem get_update_elem; + struct ice_aqc_query_txsched_res query_sched_res; + struct ice_aqc_add_move_delete_elem add_move_delete_elem; + struct ice_aqc_nvm nvm; + struct ice_aqc_get_set_rss_lut get_set_rss_lut; + struct ice_aqc_get_set_rss_key get_set_rss_key; + struct ice_aqc_add_txqs add_txqs; + struct ice_aqc_dis_txqs dis_txqs; + struct ice_aqc_add_get_update_free_vsi vsi_cmd; + struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; + struct ice_aqc_set_event_mask set_event_mask; + struct ice_aqc_get_link_status get_link_status; + } params; +}; + +/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */ +#define ICE_AQ_LG_BUF 512 + +#define ICE_AQ_FLAG_ERR_S 2 +#define ICE_AQ_FLAG_LB_S 9 +#define ICE_AQ_FLAG_RD_S 10 +#define ICE_AQ_FLAG_BUF_S 12 +#define ICE_AQ_FLAG_SI_S 13 + +#define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */ +#define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */ +#define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */ +#define ICE_AQ_FLAG_BUF BIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */ +#define ICE_AQ_FLAG_SI BIT(ICE_AQ_FLAG_SI_S) /* 0x2000 */ + +/* error codes */ +enum ice_aq_err { + ICE_AQ_RC_OK = 0, /* success */ + ICE_AQ_RC_ENOMEM = 9, /* Out of memory */ + ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */ + ICE_AQ_RC_EEXIST = 13, /* object already exists */ + ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */ +}; + +/* Admin Queue command opcodes */ +enum ice_adminq_opc { + /* AQ commands */ + ice_aqc_opc_get_ver = 0x0001, + ice_aqc_opc_q_shutdown = 0x0003, + + /* resource ownership */ + ice_aqc_opc_req_res = 0x0008, + ice_aqc_opc_release_res = 0x0009, + + /* device/function capabilities */ + ice_aqc_opc_list_func_caps = 0x000A, + ice_aqc_opc_list_dev_caps = 0x000B, + + /* manage MAC address */ + ice_aqc_opc_manage_mac_read = 0x0107, + ice_aqc_opc_manage_mac_write = 0x0108, + + /* PXE */ + ice_aqc_opc_clear_pxe_mode = 0x0110, + + /* internal switch commands */ + ice_aqc_opc_get_sw_cfg = 0x0200, + + /* Alloc/Free/Get Resources */ + ice_aqc_opc_alloc_res = 0x0208, + ice_aqc_opc_free_res = 0x0209, + + /* VSI commands */ + ice_aqc_opc_add_vsi = 0x0210, + ice_aqc_opc_update_vsi = 0x0211, + ice_aqc_opc_free_vsi = 0x0213, + + /* switch rules population commands */ + ice_aqc_opc_add_sw_rules = 0x02A0, + ice_aqc_opc_update_sw_rules = 0x02A1, + ice_aqc_opc_remove_sw_rules = 0x02A2, + + ice_aqc_opc_clear_pf_cfg = 0x02A4, + + /* transmit scheduler commands */ + ice_aqc_opc_get_dflt_topo = 0x0400, + ice_aqc_opc_add_sched_elems = 0x0401, + ice_aqc_opc_suspend_sched_elems = 0x0409, + ice_aqc_opc_resume_sched_elems = 0x040A, + ice_aqc_opc_delete_sched_elems = 0x040F, + ice_aqc_opc_query_sched_res = 0x0412, + + /* PHY commands */ + ice_aqc_opc_get_phy_caps = 0x0600, + ice_aqc_opc_set_phy_cfg = 0x0601, + ice_aqc_opc_restart_an = 0x0605, + 
ice_aqc_opc_get_link_status = 0x0607, + ice_aqc_opc_set_event_mask = 0x0613, + + /* NVM commands */ + ice_aqc_opc_nvm_read = 0x0701, + + /* RSS commands */ + ice_aqc_opc_set_rss_key = 0x0B02, + ice_aqc_opc_set_rss_lut = 0x0B03, + ice_aqc_opc_get_rss_key = 0x0B04, + ice_aqc_opc_get_rss_lut = 0x0B05, + + /* TX queue handling commands/events */ + ice_aqc_opc_add_txqs = 0x0C30, + ice_aqc_opc_dis_txqs = 0x0C31, +}; + +#endif /* _ICE_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c new file mode 100644 index 000000000000..385f5d425d19 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -0,0 +1,2233 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +#include "ice_common.h" +#include "ice_sched.h" +#include "ice_adminq_cmd.h" + +#define ICE_PF_RESET_WAIT_COUNT 200 + +#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \ + wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \ + ((ICE_RX_OPC_MDID << \ + GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \ + GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \ + (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \ + GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M)) + +#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \ + wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \ + (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \ + GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \ + (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \ + GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \ + (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \ + GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \ + (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \ + GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M)) + +/** + * ice_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the MAC type of the adapter based on the + * vendor ID and device ID stored in the hw structure. + */ +static enum ice_status ice_set_mac_type(struct ice_hw *hw) +{ + if (hw->vendor_id != PCI_VENDOR_ID_INTEL) + return ICE_ERR_DEVICE_NOT_SUPPORTED; + + hw->mac_type = ICE_MAC_GENERIC; + return 0; +} + +/** + * ice_clear_pf_cfg - Clear PF configuration + * @hw: pointer to the hardware structure + */ +enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_manage_mac_read - manage MAC address read command + * @hw: pointer to the hw struct + * @buf: a virtual buffer to hold the manage MAC read response + * @buf_size: Size of the virtual buffer + * @cd: pointer to command details structure or NULL + * + * This function is used to return per PF station MAC address (0x0107). + * NOTE: Upon successful completion of this command, MAC address information + * is returned in user specified buffer. Please interpret user specified + * buffer as "manage_mac_read" response. + * Response such as various MAC addresses are stored in HW struct (port.mac) + * ice_aq_discover_caps is expected to be called before this function is called. 
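+ *
+ * Minimal caller sketch (mirroring the ice_init_hw() flow in ice_common.c;
+ * the buffer must hold at least one manage_mac_read response):
+ *
+ *	u16 len = sizeof(struct ice_aqc_manage_mac_read_resp);
+ *	void *buf = devm_kzalloc(ice_hw_to_dev(hw), len, GFP_KERNEL);
+ *
+ *	if (buf)
+ *		status = ice_aq_manage_mac_read(hw, buf, len, NULL);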
+ */ +static enum ice_status +ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_manage_mac_read_resp *resp; + struct ice_aqc_manage_mac_read *cmd; + struct ice_aq_desc desc; + enum ice_status status; + u16 flags; + + cmd = &desc.params.mac_read; + + if (buf_size < sizeof(*resp)) + return ICE_ERR_BUF_TOO_SHORT; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (status) + return status; + + resp = (struct ice_aqc_manage_mac_read_resp *)buf; + flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M; + + if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) { + ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n"); + return ICE_ERR_CFG; + } + + ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr); + ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr); + return 0; +} + +/** + * ice_aq_get_phy_caps - returns PHY capabilities + * @pi: port information structure + * @qual_mods: report qualified modules + * @report_mode: report mode capabilities + * @pcaps: structure for PHY capabilities to be filled + * @cd: pointer to command details structure or NULL + * + * Returns the various PHY capabilities supported on the Port (0x0600) + */ +static enum ice_status +ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, + struct ice_aqc_get_phy_caps_data *pcaps, + struct ice_sq_cd *cd) +{ + struct ice_aqc_get_phy_caps *cmd; + u16 pcaps_size = sizeof(*pcaps); + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.get_phy; + + if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); + + if (qual_mods) + cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM); + + cmd->param0 |= cpu_to_le16(report_mode); + status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd); + + if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) + pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); + + return status; +} + +/** + * ice_get_media_type - Gets media type + * @pi: port information structure + */ +static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) +{ + struct ice_link_status *hw_link_info; + + if (!pi) + return ICE_MEDIA_UNKNOWN; + + hw_link_info = &pi->phy.link_info; + + if (hw_link_info->phy_type_low) { + switch (hw_link_info->phy_type_low) { + case ICE_PHY_TYPE_LOW_1000BASE_SX: + case ICE_PHY_TYPE_LOW_1000BASE_LX: + case ICE_PHY_TYPE_LOW_10GBASE_SR: + case ICE_PHY_TYPE_LOW_10GBASE_LR: + case ICE_PHY_TYPE_LOW_10G_SFI_C2C: + case ICE_PHY_TYPE_LOW_25GBASE_SR: + case ICE_PHY_TYPE_LOW_25GBASE_LR: + case ICE_PHY_TYPE_LOW_25G_AUI_C2C: + case ICE_PHY_TYPE_LOW_40GBASE_SR4: + case ICE_PHY_TYPE_LOW_40GBASE_LR4: + return ICE_MEDIA_FIBER; + case ICE_PHY_TYPE_LOW_100BASE_TX: + case ICE_PHY_TYPE_LOW_1000BASE_T: + case ICE_PHY_TYPE_LOW_2500BASE_T: + case ICE_PHY_TYPE_LOW_5GBASE_T: + case ICE_PHY_TYPE_LOW_10GBASE_T: + case ICE_PHY_TYPE_LOW_25GBASE_T: + return ICE_MEDIA_BASET; + case ICE_PHY_TYPE_LOW_10G_SFI_DA: + case ICE_PHY_TYPE_LOW_25GBASE_CR: + case ICE_PHY_TYPE_LOW_25GBASE_CR_S: + case ICE_PHY_TYPE_LOW_25GBASE_CR1: + case ICE_PHY_TYPE_LOW_40GBASE_CR4: + return ICE_MEDIA_DA; + case ICE_PHY_TYPE_LOW_1000BASE_KX: + case ICE_PHY_TYPE_LOW_2500BASE_KX: + case ICE_PHY_TYPE_LOW_2500BASE_X: + case ICE_PHY_TYPE_LOW_5GBASE_KR: + case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: + case ICE_PHY_TYPE_LOW_25GBASE_KR: + case 
ICE_PHY_TYPE_LOW_25GBASE_KR1: + case ICE_PHY_TYPE_LOW_25GBASE_KR_S: + case ICE_PHY_TYPE_LOW_40GBASE_KR4: + return ICE_MEDIA_BACKPLANE; + } + } + + return ICE_MEDIA_UNKNOWN; +} + +/** + * ice_aq_get_link_info + * @pi: port information structure + * @ena_lse: enable/disable LinkStatusEvent reporting + * @link: pointer to link status structure - optional + * @cd: pointer to command details structure or NULL + * + * Get Link Status (0x607). Returns the link status of the adapter. + */ +enum ice_status +ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, + struct ice_link_status *link, struct ice_sq_cd *cd) +{ + struct ice_link_status *hw_link_info_old, *hw_link_info; + struct ice_aqc_get_link_status_data link_data = { 0 }; + struct ice_aqc_get_link_status *resp; + enum ice_media_type *hw_media_type; + struct ice_fc_info *hw_fc_info; + bool tx_pause, rx_pause; + struct ice_aq_desc desc; + enum ice_status status; + u16 cmd_flags; + + if (!pi) + return ICE_ERR_PARAM; + hw_link_info_old = &pi->phy.link_info_old; + hw_media_type = &pi->phy.media_type; + hw_link_info = &pi->phy.link_info; + hw_fc_info = &pi->fc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); + cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS; + resp = &desc.params.get_link_status; + resp->cmd_flags = cpu_to_le16(cmd_flags); + resp->lport_num = pi->lport; + + status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data), + cd); + + if (status) + return status; + + /* save off old link status information */ + *hw_link_info_old = *hw_link_info; + + /* update current link status information */ + hw_link_info->link_speed = le16_to_cpu(link_data.link_speed); + hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low); + *hw_media_type = ice_get_media_type(pi); + hw_link_info->link_info = link_data.link_info; + hw_link_info->an_info = link_data.an_info; + hw_link_info->ext_info = link_data.ext_info; + hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size); + hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M; + + /* update fc info */ + tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX); + rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX); + if (tx_pause && rx_pause) + hw_fc_info->current_mode = ICE_FC_FULL; + else if (tx_pause) + hw_fc_info->current_mode = ICE_FC_TX_PAUSE; + else if (rx_pause) + hw_fc_info->current_mode = ICE_FC_RX_PAUSE; + else + hw_fc_info->current_mode = ICE_FC_NONE; + + hw_link_info->lse_ena = + !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED)); + + /* save link status information */ + if (link) + *link = *hw_link_info; + + /* flag cleared so calling functions don't call AQ again */ + pi->phy.get_link_info = false; + + return status; +} + +/** + * ice_init_flex_parser - initialize rx flex parser + * @hw: pointer to the hardware structure + * + * Function to initialize flex descriptors + */ +static void ice_init_flex_parser(struct ice_hw *hw) +{ + u8 idx = 0; + + ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0); + ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1); + ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2); + ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3); + ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE, + ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++); + ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST, + ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++); + ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, + ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100, + idx++); + 
ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN, + ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++); + ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2, + ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx); +} + +/** + * ice_init_fltr_mgmt_struct - initializes filter management list and locks + * @hw: pointer to the hw struct + */ +static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) +{ + struct ice_switch_info *sw; + + hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*hw->switch_info), GFP_KERNEL); + sw = hw->switch_info; + + if (!sw) + return ICE_ERR_NO_MEMORY; + + INIT_LIST_HEAD(&sw->vsi_list_map_head); + + mutex_init(&sw->mac_list_lock); + INIT_LIST_HEAD(&sw->mac_list_head); + + mutex_init(&sw->vlan_list_lock); + INIT_LIST_HEAD(&sw->vlan_list_head); + + mutex_init(&sw->eth_m_list_lock); + INIT_LIST_HEAD(&sw->eth_m_list_head); + + mutex_init(&sw->promisc_list_lock); + INIT_LIST_HEAD(&sw->promisc_list_head); + + mutex_init(&sw->mac_vlan_list_lock); + INIT_LIST_HEAD(&sw->mac_vlan_list_head); + + return 0; +} + +/** + * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks + * @hw: pointer to the hw struct + */ +static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_vsi_list_map_info *v_pos_map; + struct ice_vsi_list_map_info *v_tmp_map; + + list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head, + list_entry) { + list_del(&v_pos_map->list_entry); + devm_kfree(ice_hw_to_dev(hw), v_pos_map); + } + + mutex_destroy(&sw->mac_list_lock); + mutex_destroy(&sw->vlan_list_lock); + mutex_destroy(&sw->eth_m_list_lock); + mutex_destroy(&sw->promisc_list_lock); + mutex_destroy(&sw->mac_vlan_list_lock); + + devm_kfree(ice_hw_to_dev(hw), sw); +} + +/** + * ice_init_hw - main hardware initialization routine + * @hw: pointer to the hardware structure + */ +enum ice_status ice_init_hw(struct ice_hw *hw) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + enum ice_status status; + u16 mac_buf_len; + void *mac_buf; + + /* Set MAC type based on DeviceID */ + status = ice_set_mac_type(hw); + if (status) + return status; + + hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) & + PF_FUNC_RID_FUNC_NUM_M) >> + PF_FUNC_RID_FUNC_NUM_S; + + status = ice_reset(hw, ICE_RESET_PFR); + if (status) + return status; + + /* set these values to minimum allowed */ + hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200; + hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100; + hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50; + hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25; + + status = ice_init_all_ctrlq(hw); + if (status) + goto err_unroll_cqinit; + + status = ice_clear_pf_cfg(hw); + if (status) + goto err_unroll_cqinit; + + ice_clear_pxe_mode(hw); + + status = ice_init_nvm(hw); + if (status) + goto err_unroll_cqinit; + + status = ice_get_caps(hw); + if (status) + goto err_unroll_cqinit; + + hw->port_info = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*hw->port_info), GFP_KERNEL); + if (!hw->port_info) { + status = ICE_ERR_NO_MEMORY; + goto err_unroll_cqinit; + } + + /* set the back pointer to hw */ + hw->port_info->hw = hw; + + /* Initialize port_info struct with switch configuration data */ + status = ice_get_initial_sw_cfg(hw); + if (status) + goto err_unroll_alloc; + + hw->evb_veb = true; + + /* Query the allocated resources for tx scheduler */ + status = ice_sched_query_res_alloc(hw); + if (status) { + ice_debug(hw, ICE_DBG_SCHED, + "Failed to get scheduler allocated resources\n"); + goto err_unroll_alloc; + } + + /* Initialize port_info struct with 
scheduler data */ + status = ice_sched_init_port(hw->port_info); + if (status) + goto err_unroll_sched; + + pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) { + status = ICE_ERR_NO_MEMORY; + goto err_unroll_sched; + } + + /* Initialize port_info struct with PHY capabilities */ + status = ice_aq_get_phy_caps(hw->port_info, false, + ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); + devm_kfree(ice_hw_to_dev(hw), pcaps); + if (status) + goto err_unroll_sched; + + /* Initialize port_info struct with link information */ + status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL); + if (status) + goto err_unroll_sched; + + status = ice_init_fltr_mgmt_struct(hw); + if (status) + goto err_unroll_sched; + + /* Get port MAC information */ + mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp); + mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL); + + if (!mac_buf) + goto err_unroll_fltr_mgmt_struct; + + status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL); + devm_kfree(ice_hw_to_dev(hw), mac_buf); + + if (status) + goto err_unroll_fltr_mgmt_struct; + + ice_init_flex_parser(hw); + + return 0; + +err_unroll_fltr_mgmt_struct: + ice_cleanup_fltr_mgmt_struct(hw); +err_unroll_sched: + ice_sched_cleanup_all(hw); +err_unroll_alloc: + devm_kfree(ice_hw_to_dev(hw), hw->port_info); +err_unroll_cqinit: + ice_shutdown_all_ctrlq(hw); + return status; +} + +/** + * ice_deinit_hw - unroll initialization operations done by ice_init_hw + * @hw: pointer to the hardware structure + */ +void ice_deinit_hw(struct ice_hw *hw) +{ + ice_sched_cleanup_all(hw); + ice_shutdown_all_ctrlq(hw); + + if (hw->port_info) { + devm_kfree(ice_hw_to_dev(hw), hw->port_info); + hw->port_info = NULL; + } + + ice_cleanup_fltr_mgmt_struct(hw); +} + +/** + * ice_check_reset - Check to see if a global reset is complete + * @hw: pointer to the hardware structure + */ +enum ice_status ice_check_reset(struct ice_hw *hw) +{ + u32 cnt, reg = 0, grst_delay; + + /* Poll for Device Active state in case a recent CORER, GLOBR, + * or EMPR has occurred. The grst delay value is in 100ms units. + * Add 1sec for outstanding AQ commands that can take a long time. + */ + grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> + GLGEN_RSTCTL_GRSTDEL_S) + 10; + + for (cnt = 0; cnt < grst_delay; cnt++) { + mdelay(100); + reg = rd32(hw, GLGEN_RSTAT); + if (!(reg & GLGEN_RSTAT_DEVSTATE_M)) + break; + } + + if (cnt == grst_delay) { + ice_debug(hw, ICE_DBG_INIT, + "Global reset polling failed to complete.\n"); + return ICE_ERR_RESET_FAILED; + } + +#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \ + GLNVM_ULD_GLOBR_DONE_M) + + /* Device is Active; check Global Reset processes are done */ + for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { + reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK; + if (reg == ICE_RESET_DONE_MASK) { + ice_debug(hw, ICE_DBG_INIT, + "Global reset processes done. %d\n", cnt); + break; + } + mdelay(10); + } + + if (cnt == ICE_PF_RESET_WAIT_COUNT) { + ice_debug(hw, ICE_DBG_INIT, + "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n", + reg); + return ICE_ERR_RESET_FAILED; + } + + return 0; +} + +/** + * ice_pf_reset - Reset the PF + * @hw: pointer to the hardware structure + * + * If a global reset has been triggered, this function checks + * for its completion and then issues the PF reset + */ +static enum ice_status ice_pf_reset(struct ice_hw *hw) +{ + u32 cnt, reg; + + /* If at function entry a global reset was already in progress, i.e. 
+ * state is not 'device active' or any of the reset done bits are not + * set in GLNVM_ULD, there is no need for a PF Reset; poll until the + * global reset is done. + */ + if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) || + (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { + /* poll on global reset currently in progress until done */ + if (ice_check_reset(hw)) + return ICE_ERR_RESET_FAILED; + + return 0; + } + + /* Reset the PF */ + reg = rd32(hw, PFGEN_CTRL); + + wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M)); + + for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { + reg = rd32(hw, PFGEN_CTRL); + if (!(reg & PFGEN_CTRL_PFSWR_M)) + break; + + mdelay(1); + } + + if (cnt == ICE_PF_RESET_WAIT_COUNT) { + ice_debug(hw, ICE_DBG_INIT, + "PF reset polling failed to complete.\n"); + return ICE_ERR_RESET_FAILED; + } + + return 0; +} + +/** + * ice_reset - Perform different types of reset + * @hw: pointer to the hardware structure + * @req: reset request + * + * This function triggers a reset as specified by the req parameter. + * + * Note: + * If anything other than a PF reset is triggered, PXE mode is restored. + * This has to be cleared using ice_clear_pxe_mode again, once the AQ + * interface has been restored in the rebuild flow. + */ +enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) +{ + u32 val = 0; + + switch (req) { + case ICE_RESET_PFR: + return ice_pf_reset(hw); + case ICE_RESET_CORER: + ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n"); + val = GLGEN_RTRIG_CORER_M; + break; + case ICE_RESET_GLOBR: + ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n"); + val = GLGEN_RTRIG_GLOBR_M; + break; + } + + val |= rd32(hw, GLGEN_RTRIG); + wr32(hw, GLGEN_RTRIG, val); + ice_flush(hw); + + /* wait for the FW to be ready */ + return ice_check_reset(hw); +} + +/** + * ice_copy_rxq_ctx_to_hw + * @hw: pointer to the hardware structure + * @ice_rxq_ctx: pointer to the rxq context + * @rxq_index: the index of the rx queue + * + * Copies rxq context from dense structure to hw register space + */ +static enum ice_status +ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) +{ + u8 i; + + if (!ice_rxq_ctx) + return ICE_ERR_BAD_PTR; + + if (rxq_index > QRX_CTRL_MAX_INDEX) + return ICE_ERR_PARAM; + + /* Copy each dword separately to hw */ + for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { + wr32(hw, QRX_CONTEXT(i, rxq_index), + *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); + + ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, + *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); + } + + return 0; +} + +/* LAN Rx Queue Context */ +static const struct ice_ctx_ele ice_rlan_ctx_info[] = { + /* Field Width LSB */ + ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0), + ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13), + ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32), + ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89), + ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102), + ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109), + ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114), + ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116), + ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117), + ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119), + ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120), + ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124), + ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127), + ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174), + ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193), + ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194), + ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195), + ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 
196), + ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), + { 0 } +}; + +/** + * ice_write_rxq_ctx + * @hw: pointer to the hardware structure + * @rlan_ctx: pointer to the rxq context + * @rxq_index: the index of the rx queue + * + * Converts rxq context from sparse to dense structure and then writes + * it to hw register space + */ +enum ice_status +ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, + u32 rxq_index) +{ + u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; + + ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); + return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); +} + +/* LAN Tx Queue Context */ +const struct ice_ctx_ele ice_tlan_ctx_info[] = { + /* Field Width LSB */ + ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0), + ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57), + ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60), + ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65), + ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68), + ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78), + ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80), + ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90), + ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92), + ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93), + ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101), + ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102), + ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103), + ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104), + ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105), + ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114), + ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128), + ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129), + ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135), + ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148), + ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152), + ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153), + ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164), + ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165), + ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166), + ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168), + ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 110, 171), + { 0 } +}; + +/** + * ice_debug_cq + * @hw: pointer to the hardware structure + * @mask: debug mask + * @desc: pointer to control queue descriptor + * @buf: pointer to command buffer + * @buf_len: max length of buf + * + * Dumps debug log about control command with descriptor contents. 
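+ *
+ * Illustrative call-site sketch; the mask name is assumed from the
+ * driver's debug-mask conventions:
+ *
+ *	ice_debug_cq(hw, ICE_DBG_AQ_CMD, &desc, buf, buf_size);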
+ */ +void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, + void *buf, u16 buf_len) +{ + struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc; + u16 len; + +#ifndef CONFIG_DYNAMIC_DEBUG + if (!(mask & hw->debug_mask)) + return; +#endif + + if (!desc) + return; + + len = le16_to_cpu(cq_desc->datalen); + + ice_debug(hw, mask, + "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + le16_to_cpu(cq_desc->opcode), + le16_to_cpu(cq_desc->flags), + le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval)); + ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(cq_desc->cookie_high), + le32_to_cpu(cq_desc->cookie_low)); + ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", + le32_to_cpu(cq_desc->params.generic.param0), + le32_to_cpu(cq_desc->params.generic.param1)); + ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(cq_desc->params.generic.addr_high), + le32_to_cpu(cq_desc->params.generic.addr_low)); + if (buf && cq_desc->datalen != 0) { + ice_debug(hw, mask, "Buffer:\n"); + if (buf_len < len) + len = buf_len; + + ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len); + } +} + +/* FW Admin Queue command wrappers */ + +/** + * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue + * @hw: pointer to the hw struct + * @desc: descriptor describing the command + * @buf: buffer to use for indirect commands (NULL for direct commands) + * @buf_size: size of buffer for indirect commands (0 for direct commands) + * @cd: pointer to command details structure + * + * Helper function to send FW Admin Queue commands to the FW Admin Queue. + */ +enum ice_status +ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, + u16 buf_size, struct ice_sq_cd *cd) +{ + return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); +} + +/** + * ice_aq_get_fw_ver + * @hw: pointer to the hw struct + * @cd: pointer to command details structure or NULL + * + * Get the firmware version (0x0001) from the admin queue commands + */ +enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) +{ + struct ice_aqc_get_ver *resp; + struct ice_aq_desc desc; + enum ice_status status; + + resp = &desc.params.get_ver; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + + if (!status) { + hw->fw_branch = resp->fw_branch; + hw->fw_maj_ver = resp->fw_major; + hw->fw_min_ver = resp->fw_minor; + hw->fw_patch = resp->fw_patch; + hw->fw_build = le32_to_cpu(resp->fw_build); + hw->api_branch = resp->api_branch; + hw->api_maj_ver = resp->api_major; + hw->api_min_ver = resp->api_minor; + hw->api_patch = resp->api_patch; + } + + return status; +} + +/** + * ice_aq_q_shutdown + * @hw: pointer to the hw struct + * @unloading: is the driver unloading itself + * + * Tell the Firmware that we're shutting down the AdminQ and whether + * or not the driver is unloading as well (0x0003). 
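+ *
+ * Usage sketch (illustrative): on driver unload a caller would issue
+ *
+ *	status = ice_aq_q_shutdown(hw, true);
+ *
+ * before shutting down the control queues.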
+ */ +enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) +{ + struct ice_aqc_q_shutdown *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.q_shutdown; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); + + if (unloading) + cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_req_res + * @hw: pointer to the hw struct + * @res: resource id + * @access: access type + * @sdp_number: resource number + * @timeout: the maximum time in ms that the driver may hold the resource + * @cd: pointer to command details structure or NULL + * + * requests common resource using the admin queue commands (0x0008) + */ +static enum ice_status +ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, + enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, + struct ice_sq_cd *cd) +{ + struct ice_aqc_req_res *cmd_resp; + struct ice_aq_desc desc; + enum ice_status status; + + cmd_resp = &desc.params.res_owner; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); + + cmd_resp->res_id = cpu_to_le16(res); + cmd_resp->access_type = cpu_to_le16(access); + cmd_resp->res_number = cpu_to_le32(sdp_number); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + /* The completion specifies the maximum time in ms that the driver + * may hold the resource in the Timeout field. + * If the resource is held by someone else, the command completes with + * busy return value and the timeout field indicates the maximum time + * the current owner of the resource has to free it. + */ + if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) + *timeout = le32_to_cpu(cmd_resp->timeout); + + return status; +} + +/** + * ice_aq_release_res + * @hw: pointer to the hw struct + * @res: resource id + * @sdp_number: resource number + * @cd: pointer to command details structure or NULL + * + * release common resource using the admin queue commands (0x0009) + */ +static enum ice_status +ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, + struct ice_sq_cd *cd) +{ + struct ice_aqc_req_res *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.res_owner; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res); + + cmd->res_id = cpu_to_le16(res); + cmd->res_number = cpu_to_le32(sdp_number); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_acquire_res + * @hw: pointer to the HW structure + * @res: resource id + * @access: access type (read or write) + * + * This function will attempt to acquire the ownership of a resource. + */ +enum ice_status +ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, + enum ice_aq_res_access_type access) +{ +#define ICE_RES_POLLING_DELAY_MS 10 + u32 delay = ICE_RES_POLLING_DELAY_MS; + enum ice_status status; + u32 time_left = 0; + u32 timeout; + + status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); + + /* An admin queue return code of ICE_AQ_RC_EEXIST means that another + * driver has previously acquired the resource and performed any + * necessary updates; in this case the caller does not obtain the + * resource and has no further work to do. 
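+ *
+ * Hypothetical caller sketch showing the expected handling of that case:
+ *
+ *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ);
+ *	if (status == ICE_ERR_AQ_NO_WORK)
+ *		return 0;
+ *	if (status)
+ *		return status;
+ *
+ * Returning 0 treats "someone else already did the work" as success; a
+ * successful acquire is paired with ice_release_res(hw, ICE_NVM_RES_ID).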
+ */
+	if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
+		status = ICE_ERR_AQ_NO_WORK;
+		goto ice_acquire_res_exit;
+	}
+
+	if (status)
+		ice_debug(hw, ICE_DBG_RES,
+			  "resource %d acquire type %d failed.\n", res, access);
+
+	/* If necessary, poll until the current lock owner times out */
+	timeout = time_left;
+	while (status && timeout && time_left) {
+		mdelay(delay);
+		timeout = (timeout > delay) ? timeout - delay : 0;
+		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
+
+		if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
+			/* lock free, but no work to do */
+			status = ICE_ERR_AQ_NO_WORK;
+			break;
+		}
+
+		if (!status)
+			/* lock acquired */
+			break;
+	}
+	if (status && status != ICE_ERR_AQ_NO_WORK)
+		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
+
+ice_acquire_res_exit:
+	if (status == ICE_ERR_AQ_NO_WORK) {
+		if (access == ICE_RES_WRITE)
+			ice_debug(hw, ICE_DBG_RES,
+				  "resource indicates no work to do.\n");
+		else
+			ice_debug(hw, ICE_DBG_RES,
+				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
+	}
+	return status;
+}
+
+/**
+ * ice_release_res
+ * @hw: pointer to the HW structure
+ * @res: resource id
+ *
+ * This function will release a resource using the proper Admin Command.
+ */
+void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
+{
+	enum ice_status status;
+	u32 total_delay = 0;
+
+	status = ice_aq_release_res(hw, res, 0, NULL);
+
+	/* there are some rare cases where trying to release the resource
+	 * results in an admin Q timeout, so handle them correctly
+	 */
+	while ((status == ICE_ERR_AQ_TIMEOUT) &&
+	       (total_delay < hw->adminq.sq_cmd_timeout)) {
+		mdelay(1);
+		status = ice_aq_release_res(hw, res, 0, NULL);
+		total_delay++;
+	}
+}
+
+/**
+ * ice_parse_caps - parse function/device capabilities
+ * @hw: pointer to the hw struct
+ * @buf: pointer to a buffer containing function/device capability records
+ * @cap_count: number of capability records in the list
+ * @opc: type of capabilities list to parse
+ *
+ * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
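+ *
+ * Conceptually, each record is an ice_aqc_list_caps_elem; for example
+ * (illustrative), an ICE_AQC_CAPS_RXQS record carries:
+ *
+ *	cap     = ICE_AQC_CAPS_RXQS
+ *	number  = count of Rx queues granted
+ *	phys_id = first absolute Rx queue ID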
+ */ +static void +ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, + enum ice_adminq_opc opc) +{ + struct ice_aqc_list_caps_elem *cap_resp; + struct ice_hw_func_caps *func_p = NULL; + struct ice_hw_dev_caps *dev_p = NULL; + struct ice_hw_common_caps *caps; + u32 i; + + if (!buf) + return; + + cap_resp = (struct ice_aqc_list_caps_elem *)buf; + + if (opc == ice_aqc_opc_list_dev_caps) { + dev_p = &hw->dev_caps; + caps = &dev_p->common_cap; + } else if (opc == ice_aqc_opc_list_func_caps) { + func_p = &hw->func_caps; + caps = &func_p->common_cap; + } else { + ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n"); + return; + } + + for (i = 0; caps && i < cap_count; i++, cap_resp++) { + u32 logical_id = le32_to_cpu(cap_resp->logical_id); + u32 phys_id = le32_to_cpu(cap_resp->phys_id); + u32 number = le32_to_cpu(cap_resp->number); + u16 cap = le16_to_cpu(cap_resp->cap); + + switch (cap) { + case ICE_AQC_CAPS_VSI: + if (dev_p) { + dev_p->num_vsi_allocd_to_host = number; + ice_debug(hw, ICE_DBG_INIT, + "HW caps: Dev.VSI cnt = %d\n", + dev_p->num_vsi_allocd_to_host); + } else if (func_p) { + func_p->guaranteed_num_vsi = number; + ice_debug(hw, ICE_DBG_INIT, + "HW caps: Func.VSI cnt = %d\n", + func_p->guaranteed_num_vsi); + } + break; + case ICE_AQC_CAPS_RSS: + caps->rss_table_size = number; + caps->rss_table_entry_width = logical_id; + ice_debug(hw, ICE_DBG_INIT, + "HW caps: RSS table size = %d\n", + caps->rss_table_size); + ice_debug(hw, ICE_DBG_INIT, + "HW caps: RSS table width = %d\n", + caps->rss_table_entry_width); + break; + case ICE_AQC_CAPS_RXQS: + caps->num_rxq = number; + caps->rxq_first_id = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "HW caps: Num Rx Qs = %d\n", caps->num_rxq); + ice_debug(hw, ICE_DBG_INIT, + "HW caps: Rx first queue ID = %d\n", + caps->rxq_first_id); + break; + case ICE_AQC_CAPS_TXQS: + caps->num_txq = number; + caps->txq_first_id = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "HW caps: Num Tx Qs = %d\n", caps->num_txq); + ice_debug(hw, ICE_DBG_INIT, + "HW caps: Tx first queue ID = %d\n", + caps->txq_first_id); + break; + case ICE_AQC_CAPS_MSIX: + caps->num_msix_vectors = number; + caps->msix_vector_first_id = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "HW caps: MSIX vector count = %d\n", + caps->num_msix_vectors); + ice_debug(hw, ICE_DBG_INIT, + "HW caps: MSIX first vector index = %d\n", + caps->msix_vector_first_id); + break; + case ICE_AQC_CAPS_MAX_MTU: + caps->max_mtu = number; + if (dev_p) + ice_debug(hw, ICE_DBG_INIT, + "HW caps: Dev.MaxMTU = %d\n", + caps->max_mtu); + else if (func_p) + ice_debug(hw, ICE_DBG_INIT, + "HW caps: func.MaxMTU = %d\n", + caps->max_mtu); + break; + default: + ice_debug(hw, ICE_DBG_INIT, + "HW caps: Unknown capability[%d]: 0x%x\n", i, + cap); + break; + } + } +} + +/** + * ice_aq_discover_caps - query function/device capabilities + * @hw: pointer to the hw struct + * @buf: a virtual buffer to hold the capabilities + * @buf_size: Size of the virtual buffer + * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM + * @opc: capabilities type to discover - pass in the command opcode + * @cd: pointer to command details structure or NULL + * + * Get the function(0x000a)/device(0x000b) capabilities description from + * the firmware. 
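+ *
+ * Retry contract sketch (illustrative; ice_get_caps() below implements it):
+ *
+ *	status = ice_aq_discover_caps(hw, buf, len, &needed, opc, NULL);
+ *	if (status && hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
+ *		status = ice_aq_discover_caps(hw, big, needed, &needed,
+ *					      opc, NULL);
+ *
+ * where 'big' is a freshly allocated buffer of 'needed' bytes ('buf',
+ * 'big' and 'needed' are hypothetical names).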
+ */ +static enum ice_status +ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size, + enum ice_adminq_opc opc, struct ice_sq_cd *cd) +{ + struct ice_aqc_list_caps *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.get_cap; + + if (opc != ice_aqc_opc_list_func_caps && + opc != ice_aqc_opc_list_dev_caps) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, opc); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status) + ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc); + *data_size = le16_to_cpu(desc.datalen); + + return status; +} + +/** + * ice_get_caps - get info about the HW + * @hw: pointer to the hardware structure + */ +enum ice_status ice_get_caps(struct ice_hw *hw) +{ + enum ice_status status; + u16 data_size = 0; + u16 cbuf_len; + u8 retries; + + /* The driver doesn't know how many capabilities the device will return + * so the buffer size required isn't known ahead of time. The driver + * starts with cbuf_len and if this turns out to be insufficient, the + * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs. + * The driver then allocates the buffer of this size and retries the + * operation. So it follows that the retry count is 2. + */ +#define ICE_GET_CAP_BUF_COUNT 40 +#define ICE_GET_CAP_RETRY_COUNT 2 + + cbuf_len = ICE_GET_CAP_BUF_COUNT * + sizeof(struct ice_aqc_list_caps_elem); + + retries = ICE_GET_CAP_RETRY_COUNT; + + do { + void *cbuf; + + cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL); + if (!cbuf) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size, + ice_aqc_opc_list_func_caps, NULL); + devm_kfree(ice_hw_to_dev(hw), cbuf); + + if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM) + break; + + /* If ENOMEM is returned, try again with bigger buffer */ + cbuf_len = data_size; + } while (--retries); + + return status; +} + +/** + * ice_aq_manage_mac_write - manage MAC address write command + * @hw: pointer to the hw struct + * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address + * @flags: flags to control write behavior + * @cd: pointer to command details structure or NULL + * + * This function is used to write MAC address to the NVM (0x0108). + */ +enum ice_status +ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags, + struct ice_sq_cd *cd) +{ + struct ice_aqc_manage_mac_write *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.mac_write; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); + + cmd->flags = flags; + + /* Prep values for flags, sah, sal */ + cmd->sah = htons(*((u16 *)mac_addr)); + cmd->sal = htonl(*((u32 *)(mac_addr + 2))); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_clear_pxe_mode + * @hw: pointer to the hw struct + * + * Tell the firmware that the driver is taking over from PXE (0x0110). + */ +static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); + desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_clear_pxe_mode - clear pxe operations mode + * @hw: pointer to the hw struct + * + * Make sure all PXE mode settings are cleared, including things + * like descriptor fetch/write-back mode. 
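+ *
+ * Called from ice_init_hw() once the control queues are up, e.g. (sketch):
+ *
+ *	status = ice_init_all_ctrlq(hw);
+ *	...
+ *	ice_clear_pxe_mode(hw);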
+ */ +void ice_clear_pxe_mode(struct ice_hw *hw) +{ + if (ice_check_sq_alive(hw, &hw->adminq)) + ice_aq_clear_pxe_mode(hw); +} + +/** + * ice_aq_set_phy_cfg + * @hw: pointer to the hw struct + * @lport: logical port number + * @cfg: structure with PHY configuration data to be set + * @cd: pointer to command details structure or NULL + * + * Set the various PHY configuration parameters supported on the Port. + * One or more of the Set PHY config parameters may be ignored in an MFP + * mode as the PF may not have the privilege to set some of the PHY Config + * parameters. This status will be indicated by the command response (0x0601). + */ +static enum ice_status +ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, + struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) +{ + struct ice_aqc_set_phy_cfg *cmd; + struct ice_aq_desc desc; + + if (!cfg) + return ICE_ERR_PARAM; + + cmd = &desc.params.set_phy; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); + cmd->lport_num = lport; + + return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); +} + +/** + * ice_update_link_info - update status of the HW network link + * @pi: port info structure of the interested logical port + */ +static enum ice_status +ice_update_link_info(struct ice_port_info *pi) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_phy_info *phy_info; + enum ice_status status; + struct ice_hw *hw; + + if (!pi) + return ICE_ERR_PARAM; + + hw = pi->hw; + + pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return ICE_ERR_NO_MEMORY; + + phy_info = &pi->phy; + status = ice_aq_get_link_info(pi, true, NULL, NULL); + if (status) + goto out; + + if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, + pcaps, NULL); + if (status) + goto out; + + memcpy(phy_info->link_info.module_type, &pcaps->module_type, + sizeof(phy_info->link_info.module_type)); + } +out: + devm_kfree(ice_hw_to_dev(hw), pcaps); + return status; +} + +/** + * ice_set_fc + * @pi: port information structure + * @aq_failures: pointer to status code, specific to ice_set_fc routine + * @atomic_restart: enable automatic link update + * + * Set the requested flow control mode. 
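+ *
+ * Illustrative caller sketch (request full flow control, letting the
+ * firmware restart the link so the change takes effect):
+ *
+ *	u8 aq_failures;
+ *
+ *	pi->fc.req_mode = ICE_FC_FULL;
+ *	status = ice_set_fc(pi, &aq_failures, true);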
+ */ +enum ice_status +ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart) +{ + struct ice_aqc_set_phy_cfg_data cfg = { 0 }; + struct ice_aqc_get_phy_caps_data *pcaps; + enum ice_status status; + u8 pause_mask = 0x0; + struct ice_hw *hw; + + if (!pi) + return ICE_ERR_PARAM; + hw = pi->hw; + *aq_failures = ICE_SET_FC_AQ_FAIL_NONE; + + switch (pi->fc.req_mode) { + case ICE_FC_FULL: + pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; + pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; + break; + case ICE_FC_RX_PAUSE: + pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; + break; + case ICE_FC_TX_PAUSE: + pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; + break; + default: + break; + } + + pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return ICE_ERR_NO_MEMORY; + + /* Get the current phy config */ + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (status) { + *aq_failures = ICE_SET_FC_AQ_FAIL_GET; + goto out; + } + + /* clear the old pause settings */ + cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | + ICE_AQC_PHY_EN_RX_LINK_PAUSE); + /* set the new capabilities */ + cfg.caps |= pause_mask; + /* If the capabilities have changed, then set the new config */ + if (cfg.caps != pcaps->caps) { + int retry_count, retry_max = 10; + + /* Auto restart link so settings take effect */ + if (atomic_restart) + cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK; + /* Copy over all the old settings */ + cfg.phy_type_low = pcaps->phy_type_low; + cfg.low_power_ctrl = pcaps->low_power_ctrl; + cfg.eee_cap = pcaps->eee_cap; + cfg.eeer_value = pcaps->eeer_value; + cfg.link_fec_opt = pcaps->link_fec_options; + + status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL); + if (status) { + *aq_failures = ICE_SET_FC_AQ_FAIL_SET; + goto out; + } + + /* Update the link info + * It sometimes takes a really long time for link to + * come back from the atomic reset. Thus, we wait a + * little bit. + */ + for (retry_count = 0; retry_count < retry_max; retry_count++) { + status = ice_update_link_info(pi); + + if (!status) + break; + + mdelay(100); + } + + if (status) + *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; + } + +out: + devm_kfree(ice_hw_to_dev(hw), pcaps); + return status; +} + +/** + * ice_get_link_status - get status of the HW network link + * @pi: port information structure + * @link_up: pointer to bool (true/false = linkup/linkdown) + * + * Variable link_up is true if link is up, false if link is down. + * The variable link_up is invalid if status is non zero. As a + * result of this call, link status reporting becomes enabled + */ +enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) +{ + struct ice_phy_info *phy_info; + enum ice_status status = 0; + + if (!pi) + return ICE_ERR_PARAM; + + phy_info = &pi->phy; + + if (phy_info->get_link_info) { + status = ice_update_link_info(pi); + + if (status) + ice_debug(pi->hw, ICE_DBG_LINK, + "get link status error, status = %d\n", + status); + } + + *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; + + return status; +} + +/** + * ice_aq_set_link_restart_an + * @pi: pointer to the port information structure + * @ena_link: if true: enable link, if false: disable link + * @cd: pointer to command details structure or NULL + * + * Sets up the link and restarts the Auto-Negotiation over the link. 
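+ *
+ * Sketch (illustrative): to force a link bring-up with renegotiation,
+ *
+ *	status = ice_aq_set_link_restart_an(pi, true, NULL);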
+ */ +enum ice_status +ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, + struct ice_sq_cd *cd) +{ + struct ice_aqc_restart_an *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.restart_an; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); + + cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; + cmd->lport_num = pi->lport; + if (ena_link) + cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; + else + cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; + + return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_set_event_mask + * @hw: pointer to the hw struct + * @port_num: port number of the physical function + * @mask: event mask to be set + * @cd: pointer to command details structure or NULL + * + * Set event mask (0x0613) + */ +enum ice_status +ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, + struct ice_sq_cd *cd) +{ + struct ice_aqc_set_event_mask *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.set_event_mask; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); + + cmd->lport_num = port_num; + + cmd->event_mask = cpu_to_le16(mask); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * __ice_aq_get_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: VSI FW index + * @lut_type: LUT table type + * @lut: pointer to the LUT buffer provided by the caller + * @lut_size: size of the LUT buffer + * @glob_lut_idx: global LUT index + * @set: set true to set the table, false to get the table + * + * Internal function to get (0x0B05) or set (0x0B03) RSS look up table + */ +static enum ice_status +__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, + u16 lut_size, u8 glob_lut_idx, bool set) +{ + struct ice_aqc_get_set_rss_lut *cmd_resp; + struct ice_aq_desc desc; + enum ice_status status; + u16 flags = 0; + + cmd_resp = &desc.params.get_set_rss_lut; + + if (set) { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + } else { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); + } + + cmd_resp->vsi_id = cpu_to_le16(((vsi_id << + ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & + ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | + ICE_AQC_GSET_RSS_LUT_VSI_VALID); + + switch (lut_type) { + case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: + case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: + case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: + flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & + ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); + break; + default: + status = ICE_ERR_PARAM; + goto ice_aq_get_set_rss_lut_exit; + } + + if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { + flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & + ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); + + if (!set) + goto ice_aq_get_set_rss_lut_send; + } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { + if (!set) + goto ice_aq_get_set_rss_lut_send; + } else { + goto ice_aq_get_set_rss_lut_send; + } + + /* LUT size is only valid for Global and PF table types */ + if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) { + flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG << + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; + } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) { + flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; + } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) && + (lut_type == 
ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) { + flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; + } else { + status = ICE_ERR_PARAM; + goto ice_aq_get_set_rss_lut_exit; + } + +ice_aq_get_set_rss_lut_send: + cmd_resp->flags = cpu_to_le16(flags); + status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); + +ice_aq_get_set_rss_lut_exit: + return status; +} + +/** + * ice_aq_get_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: VSI FW index + * @lut_type: LUT table type + * @lut: pointer to the LUT buffer provided by the caller + * @lut_size: size of the LUT buffer + * + * get the RSS lookup table, PF or VSI type + */ +enum ice_status +ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, + u16 lut_size) +{ + return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0, + false); +} + +/** + * ice_aq_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: VSI FW index + * @lut_type: LUT table type + * @lut: pointer to the LUT buffer provided by the caller + * @lut_size: size of the LUT buffer + * + * set the RSS lookup table, PF or VSI type + */ +enum ice_status +ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, + u16 lut_size) +{ + return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0, + true); +} + +/** + * __ice_aq_get_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: VSI FW index + * @key: pointer to key info struct + * @set: set true to set the key, false to get the key + * + * get (0x0B04) or set (0x0B02) the RSS key per VSI + */ +static enum +ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, + struct ice_aqc_get_set_rss_keys *key, + bool set) +{ + struct ice_aqc_get_set_rss_key *cmd_resp; + u16 key_size = sizeof(*key); + struct ice_aq_desc desc; + + cmd_resp = &desc.params.get_set_rss_key; + + if (set) { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + } else { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); + } + + cmd_resp->vsi_id = cpu_to_le16(((vsi_id << + ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & + ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | + ICE_AQC_GSET_RSS_KEY_VSI_VALID); + + return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); +} + +/** + * ice_aq_get_rss_key + * @hw: pointer to the hw struct + * @vsi_id: VSI FW index + * @key: pointer to key info struct + * + * get the RSS key per VSI + */ +enum ice_status +ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id, + struct ice_aqc_get_set_rss_keys *key) +{ + return __ice_aq_get_set_rss_key(hw, vsi_id, key, false); +} + +/** + * ice_aq_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: VSI FW index + * @keys: pointer to key info struct + * + * set the RSS key per VSI + */ +enum ice_status +ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id, + struct ice_aqc_get_set_rss_keys *keys) +{ + return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true); +} + +/** + * ice_aq_add_lan_txq + * @hw: pointer to the hardware structure + * @num_qgrps: Number of added queue groups + * @qg_list: list of queue groups to be added + * @buf_size: size of buffer for indirect command + * @cd: pointer to command details structure or NULL + * + * Add Tx LAN queue (0x0C30) + * + * NOTE: + * Prior to calling add Tx LAN queue: + * Initialize the following as part of the Tx queue context: + * Completion queue ID if the queue uses Completion queue, Quanta profile, + * Cache profile and Packet shaper profile. 
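+ *
+ * Buffer sizing sketch (illustrative): for a single group carrying one
+ * queue, buf_size must equal sizeof(struct ice_aqc_add_tx_qgrp), since
+ * that structure already embeds one ice_aqc_add_txqs_perq entry.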
+ * + * After add Tx LAN queue AQ command is completed: + * Interrupts should be associated with specific queues, + * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue + * flow. + */ +static enum ice_status +ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, + struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, + struct ice_sq_cd *cd) +{ + u16 i, sum_header_size, sum_q_size = 0; + struct ice_aqc_add_tx_qgrp *list; + struct ice_aqc_add_txqs *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.add_txqs; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); + + if (!qg_list) + return ICE_ERR_PARAM; + + if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) + return ICE_ERR_PARAM; + + sum_header_size = num_qgrps * + (sizeof(*qg_list) - sizeof(*qg_list->txqs)); + + list = qg_list; + for (i = 0; i < num_qgrps; i++) { + struct ice_aqc_add_txqs_perq *q = list->txqs; + + sum_q_size += list->num_txqs * sizeof(*q); + list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs); + } + + if (buf_size != (sum_header_size + sum_q_size)) + return ICE_ERR_PARAM; + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + cmd->num_qgrps = num_qgrps; + + return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); +} + +/** + * ice_aq_dis_lan_txq + * @hw: pointer to the hardware structure + * @num_qgrps: number of groups in the list + * @qg_list: the list of groups to disable + * @buf_size: the total size of the qg_list buffer in bytes + * @cd: pointer to command details structure or NULL + * + * Disable LAN Tx queue (0x0C31) + */ +static enum ice_status +ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, + struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_dis_txqs *cmd; + struct ice_aq_desc desc; + u16 i, sz = 0; + + cmd = &desc.params.dis_txqs; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); + + if (!qg_list) + return ICE_ERR_PARAM; + + if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) + return ICE_ERR_PARAM; + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + cmd->num_entries = num_qgrps; + + for (i = 0; i < num_qgrps; ++i) { + /* Calculate the size taken up by the queue IDs in this group */ + sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id); + + /* Add the size of the group header */ + sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id); + + /* If the num of queues is even, add 2 bytes of padding */ + if ((qg_list[i].num_qs % 2) == 0) + sz += 2; + } + + if (buf_size != sz) + return ICE_ERR_PARAM; + + return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); +} + +/* End of FW Admin Queue command wrappers */ + +/** + * ice_write_byte - write a byte to a packed context structure + * @src_ctx: the context structure to read from + * @dest_ctx: the context to be written to + * @ce_info: a description of the struct to be filled + */ +static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx, + const struct ice_ctx_ele *ce_info) +{ + u8 src_byte, dest_byte, mask; + u8 *from, *dest; + u16 shift_width; + + /* copy from the next struct field */ + from = src_ctx + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = (u8)(BIT(ce_info->width) - 1); + + src_byte = *from; + src_byte &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_byte <<= shift_width; + + /* get the current bits from the target bit string */ + dest = dest_ctx + (ce_info->lsb / 8); + + memcpy(&dest_byte, dest, sizeof(dest_byte)); + + dest_byte &= ~mask; /* get the bits not changing */ + dest_byte |= src_byte; /* add in the new bits */ + + 
/* put it all back */ + memcpy(dest, &dest_byte, sizeof(dest_byte)); +} + +/** + * ice_write_word - write a word to a packed context structure + * @src_ctx: the context structure to read from + * @dest_ctx: the context to be written to + * @ce_info: a description of the struct to be filled + */ +static void ice_write_word(u8 *src_ctx, u8 *dest_ctx, + const struct ice_ctx_ele *ce_info) +{ + u16 src_word, mask; + __le16 dest_word; + u8 *from, *dest; + u16 shift_width; + + /* copy from the next struct field */ + from = src_ctx + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = BIT(ce_info->width) - 1; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_word = *(u16 *)from; + src_word &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_word <<= shift_width; + + /* get the current bits from the target bit string */ + dest = dest_ctx + (ce_info->lsb / 8); + + memcpy(&dest_word, dest, sizeof(dest_word)); + + dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ + dest_word |= cpu_to_le16(src_word); /* add in the new bits */ + + /* put it all back */ + memcpy(dest, &dest_word, sizeof(dest_word)); +} + +/** + * ice_write_dword - write a dword to a packed context structure + * @src_ctx: the context structure to read from + * @dest_ctx: the context to be written to + * @ce_info: a description of the struct to be filled + */ +static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx, + const struct ice_ctx_ele *ce_info) +{ + u32 src_dword, mask; + __le32 dest_dword; + u8 *from, *dest; + u16 shift_width; + + /* copy from the next struct field */ + from = src_ctx + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 32 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 5 bits so the shift will do nothing + */ + if (ce_info->width < 32) + mask = BIT(ce_info->width) - 1; + else + mask = (u32)~0; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_dword = *(u32 *)from; + src_dword &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_dword <<= shift_width; + + /* get the current bits from the target bit string */ + dest = dest_ctx + (ce_info->lsb / 8); + + memcpy(&dest_dword, dest, sizeof(dest_dword)); + + dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ + dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ + + /* put it all back */ + memcpy(dest, &dest_dword, sizeof(dest_dword)); +} + +/** + * ice_write_qword - write a qword to a packed context structure + * @src_ctx: the context structure to read from + * @dest_ctx: the context to be written to + * @ce_info: a description of the struct to be filled + */ +static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx, + const struct ice_ctx_ele *ce_info) +{ + u64 src_qword, mask; + __le64 dest_qword; + u8 *from, *dest; + u16 shift_width; + + /* copy from the next struct field */ + from = src_ctx + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 64 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 6 bits so the shift will do nothing + */ + if (ce_info->width < 64) + mask = 
BIT_ULL(ce_info->width) - 1; + else + mask = (u64)~0; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_qword = *(u64 *)from; + src_qword &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_qword <<= shift_width; + + /* get the current bits from the target bit string */ + dest = dest_ctx + (ce_info->lsb / 8); + + memcpy(&dest_qword, dest, sizeof(dest_qword)); + + dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ + dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ + + /* put it all back */ + memcpy(dest, &dest_qword, sizeof(dest_qword)); +} + +/** + * ice_set_ctx - set context bits in packed structure + * @src_ctx: pointer to a generic non-packed context structure + * @dest_ctx: pointer to memory for the packed structure + * @ce_info: a description of the structure to be transformed + */ +enum ice_status +ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) +{ + int f; + + for (f = 0; ce_info[f].width; f++) { + /* We have to deal with each element of the FW response + * using the correct size so that we are correct regardless + * of the endianness of the machine. + */ + switch (ce_info[f].size_of) { + case sizeof(u8): + ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); + break; + case sizeof(u16): + ice_write_word(src_ctx, dest_ctx, &ce_info[f]); + break; + case sizeof(u32): + ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); + break; + case sizeof(u64): + ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); + break; + default: + return ICE_ERR_INVAL_SIZE; + } + } + + return 0; +} + +/** + * ice_ena_vsi_txq + * @pi: port information structure + * @vsi_id: VSI id + * @tc: tc number + * @num_qgrps: Number of added queue groups + * @buf: list of queue groups to be added + * @buf_size: size of buffer for indirect command + * @cd: pointer to command details structure or NULL + * + * This function adds one LAN Tx queue + */ +enum ice_status +ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps, + struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_txsched_elem_data node = { 0 }; + struct ice_sched_node *parent; + enum ice_status status; + struct ice_hw *hw; + + if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) + return ICE_ERR_CFG; + + if (num_qgrps > 1 || buf->num_txqs > 1) + return ICE_ERR_MAX_LIMIT; + + hw = pi->hw; + + mutex_lock(&pi->sched_lock); + + /* find a parent node */ + parent = ice_sched_get_free_qparent(pi, vsi_id, tc, + ICE_SCHED_NODE_OWNER_LAN); + if (!parent) { + status = ICE_ERR_PARAM; + goto ena_txq_exit; + } + buf->parent_teid = parent->info.node_teid; + node.parent_teid = parent->info.node_teid; + /* Mark the values in the "generic" section as valid. The default + * value in the "generic" section is zero. This means that: + * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. + * - 0 priority among siblings, indicated by Bits 1-3. + * - WFQ, indicated by Bit 4. + * - 0 Adjustment value is used in PSM credit update flow, indicated by + * Bits 5-6. + * - Bit 7 is reserved. + * Without setting the generic section as valid in valid_sections, the + * Admin Q command will fail with error code ICE_AQ_RC_EINVAL. 
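+ * Setting valid_sections to ICE_AQC_ELEM_VALID_GENERIC below, while + * leaving the generic byte itself at zero, therefore requests exactly + * these defaults.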
+ */ + buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC; + + /* add the LAN queue */ + status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); + if (status) + goto ena_txq_exit; + + node.node_teid = buf->txqs[0].q_teid; + node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; + + /* add a leaf node into the scheduler tree queue layer */ + status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); + +ena_txq_exit: + mutex_unlock(&pi->sched_lock); + return status; +} + +/** + * ice_dis_vsi_txq + * @pi: port information structure + * @num_queues: number of queues + * @q_ids: pointer to the q_id array + * @q_teids: pointer to queue node teids + * @cd: pointer to command details structure or NULL + * + * This function removes queues and their corresponding nodes in the SW DB + */ +enum ice_status +ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, + u32 *q_teids, struct ice_sq_cd *cd) +{ + enum ice_status status = ICE_ERR_DOES_NOT_EXIST; + struct ice_aqc_dis_txq_item qg_list; + u16 i; + + if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) + return ICE_ERR_CFG; + + mutex_lock(&pi->sched_lock); + + for (i = 0; i < num_queues; i++) { + struct ice_sched_node *node; + + node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); + if (!node) + continue; + qg_list.parent_teid = node->info.parent_teid; + qg_list.num_qs = 1; + qg_list.q_id[0] = cpu_to_le16(q_ids[i]); + status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list, + sizeof(qg_list), cd); + + if (status) + break; + ice_free_sched_node(pi, node); + } + mutex_unlock(&pi->sched_lock); + return status; +} + +/** + * ice_cfg_vsi_qs - configure the new/existing VSI queues + * @pi: port information structure + * @vsi_id: VSI Id + * @tc_bitmap: TC bitmap + * @maxqs: max queues array per TC + * @owner: lan or rdma + * + * This function adds/updates the VSI queues per TC. + */ +static enum ice_status +ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, + u16 *maxqs, u8 owner) +{ + enum ice_status status = 0; + u8 i; + + if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) + return ICE_ERR_CFG; + + mutex_lock(&pi->sched_lock); + + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + /* configuration is possible only if TC node is present */ + if (!ice_sched_get_tc_node(pi, i)) + continue; + + status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner, + ice_is_tc_ena(tc_bitmap, i)); + if (status) + break; + } + + mutex_unlock(&pi->sched_lock); + return status; +} + +/** + * ice_cfg_vsi_lan - configure VSI lan queues + * @pi: port information structure + * @vsi_id: VSI Id + * @tc_bitmap: TC bitmap + * @max_lanqs: max lan queues array per TC + * + * This function adds/updates the VSI lan queues per TC. + */ +enum ice_status +ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, + u16 *max_lanqs) +{ + return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs, + ICE_SCHED_NODE_OWNER_LAN); +} diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h new file mode 100644 index 000000000000..9a5519130af1 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. 
*/ + +#ifndef _ICE_COMMON_H_ +#define _ICE_COMMON_H_ + +#include "ice.h" +#include "ice_type.h" +#include "ice_switch.h" + +void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, + u16 buf_len); +enum ice_status ice_init_hw(struct ice_hw *hw); +void ice_deinit_hw(struct ice_hw *hw); +enum ice_status ice_check_reset(struct ice_hw *hw); +enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); +enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); +void ice_shutdown_all_ctrlq(struct ice_hw *hw); +enum ice_status +ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_rq_event_info *e, u16 *pending); +enum ice_status +ice_get_link_status(struct ice_port_info *pi, bool *link_up); +enum ice_status +ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, + enum ice_aq_res_access_type access); +void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); +enum ice_status ice_init_nvm(struct ice_hw *hw); +enum ice_status +ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_aq_desc *desc, void *buf, u16 buf_size, + struct ice_sq_cd *cd); +void ice_clear_pxe_mode(struct ice_hw *hw); +enum ice_status ice_get_caps(struct ice_hw *hw); +enum ice_status +ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, + u32 rxq_index); + +enum ice_status +ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, + u16 lut_size); +enum ice_status +ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, + u16 lut_size); +enum ice_status +ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id, + struct ice_aqc_get_set_rss_keys *keys); +enum ice_status +ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id, + struct ice_aqc_get_set_rss_keys *keys); +bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); +enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); +void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); +extern const struct ice_ctx_ele ice_tlan_ctx_info[]; +enum ice_status +ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info); +enum ice_status +ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, + void *buf, u16 buf_size, struct ice_sq_cd *cd); +enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); +enum ice_status +ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags, + struct ice_sq_cd *cd); +enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); +enum ice_status +ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart); +enum ice_status +ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, + struct ice_link_status *link, struct ice_sq_cd *cd); +enum ice_status +ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, + struct ice_sq_cd *cd); +enum ice_status +ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, + u32 *q_teids, struct ice_sq_cd *cmd_details); +enum ice_status +ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, + u16 *max_lanqs); +enum ice_status +ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps, + struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, + struct ice_sq_cd *cd); +#endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c new file mode 100644 index 000000000000..5909a4407e38 --- /dev/null 
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -0,0 +1,1066 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +#include "ice_common.h" + +/** + * ice_adminq_init_regs - Initialize AdminQ registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_sq and alloc_rq functions have already been called + */ +static void ice_adminq_init_regs(struct ice_hw *hw) +{ + struct ice_ctl_q_info *cq = &hw->adminq; + + cq->sq.head = PF_FW_ATQH; + cq->sq.tail = PF_FW_ATQT; + cq->sq.len = PF_FW_ATQLEN; + cq->sq.bah = PF_FW_ATQBAH; + cq->sq.bal = PF_FW_ATQBAL; + cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M; + cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M; + cq->sq.head_mask = PF_FW_ATQH_ATQH_M; + + cq->rq.head = PF_FW_ARQH; + cq->rq.tail = PF_FW_ARQT; + cq->rq.len = PF_FW_ARQLEN; + cq->rq.bah = PF_FW_ARQBAH; + cq->rq.bal = PF_FW_ARQBAL; + cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M; + cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M; + cq->rq.head_mask = PF_FW_ARQH_ARQH_M; +} + +/** + * ice_check_sq_alive + * @hw: pointer to the hw struct + * @cq: pointer to the specific Control queue + * + * Returns true if Queue is enabled else false. + */ +bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + /* check both queue-length and queue-enable fields */ + if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) + return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | + cq->sq.len_ena_mask)) == + (cq->num_sq_entries | cq->sq.len_ena_mask); + + return false; +} + +/** + * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static enum ice_status +ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc); + + cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size, + &cq->sq.desc_buf.pa, + GFP_KERNEL | __GFP_ZERO); + if (!cq->sq.desc_buf.va) + return ICE_ERR_NO_MEMORY; + cq->sq.desc_buf.size = size; + + cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, + sizeof(struct ice_sq_cd), GFP_KERNEL); + if (!cq->sq.cmd_buf) { + dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size, + cq->sq.desc_buf.va, cq->sq.desc_buf.pa); + cq->sq.desc_buf.va = NULL; + cq->sq.desc_buf.pa = 0; + cq->sq.desc_buf.size = 0; + return ICE_ERR_NO_MEMORY; + } + + return 0; +} + +/** + * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static enum ice_status +ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc); + + cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size, + &cq->rq.desc_buf.pa, + GFP_KERNEL | __GFP_ZERO); + if (!cq->rq.desc_buf.va) + return ICE_ERR_NO_MEMORY; + cq->rq.desc_buf.size = size; + return 0; +} + +/** + * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * This assumes the posted send buffers have already been cleaned + * and de-allocated + */ +static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size, + cq->sq.desc_buf.va, cq->sq.desc_buf.pa); + cq->sq.desc_buf.va = NULL; + cq->sq.desc_buf.pa = 0; + cq->sq.desc_buf.size = 0; 
+} + +/** + * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * This assumes the posted receive buffers have already been cleaned + * and de-allocated + */ +static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size, + cq->rq.desc_buf.va, cq->rq.desc_buf.pa); + cq->rq.desc_buf.va = NULL; + cq->rq.desc_buf.pa = 0; + cq->rq.desc_buf.size = 0; +} + +/** + * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static enum ice_status +ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + int i; + + /* We'll be allocating the buffer info memory first, then we can + * allocate the mapped buffers for the event processing + */ + cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries, + sizeof(cq->rq.desc_buf), GFP_KERNEL); + if (!cq->rq.dma_head) + return ICE_ERR_NO_MEMORY; + cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head; + + /* allocate the mapped buffers */ + for (i = 0; i < cq->num_rq_entries; i++) { + struct ice_aq_desc *desc; + struct ice_dma_mem *bi; + + bi = &cq->rq.r.rq_bi[i]; + bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw), + cq->rq_buf_size, &bi->pa, + GFP_KERNEL | __GFP_ZERO); + if (!bi->va) + goto unwind_alloc_rq_bufs; + bi->size = cq->rq_buf_size; + + /* now configure the descriptors for use */ + desc = ICE_CTL_Q_DESC(cq->rq, i); + + desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF); + if (cq->rq_buf_size > ICE_AQ_LG_BUF) + desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB); + desc->opcode = 0; + /* This is in accordance with Admin queue design, there is no + * register for buffer size configuration + */ + desc->datalen = cpu_to_le16(bi->size); + desc->retval = 0; + desc->cookie_high = 0; + desc->cookie_low = 0; + desc->params.generic.addr_high = + cpu_to_le32(upper_32_bits(bi->pa)); + desc->params.generic.addr_low = + cpu_to_le32(lower_32_bits(bi->pa)); + desc->params.generic.param0 = 0; + desc->params.generic.param1 = 0; + } + return 0; + +unwind_alloc_rq_bufs: + /* don't try to free the one that failed... 
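(dmam_alloc_coherent() returned NULL for that entry, so there is nothing to unmap) 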
*/ + i--; + for (; i >= 0; i--) { + dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size, + cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa); + cq->rq.r.rq_bi[i].va = NULL; + cq->rq.r.rq_bi[i].pa = 0; + cq->rq.r.rq_bi[i].size = 0; + } + devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); + + return ICE_ERR_NO_MEMORY; +} + +/** + * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static enum ice_status +ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + int i; + + /* No mapped memory needed yet, just the buffer info structures */ + cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, + sizeof(cq->sq.desc_buf), GFP_KERNEL); + if (!cq->sq.dma_head) + return ICE_ERR_NO_MEMORY; + cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; + + /* allocate the mapped buffers */ + for (i = 0; i < cq->num_sq_entries; i++) { + struct ice_dma_mem *bi; + + bi = &cq->sq.r.sq_bi[i]; + bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw), + cq->sq_buf_size, &bi->pa, + GFP_KERNEL | __GFP_ZERO); + if (!bi->va) + goto unwind_alloc_sq_bufs; + bi->size = cq->sq_buf_size; + } + return 0; + +unwind_alloc_sq_bufs: + /* don't try to free the one that failed... */ + i--; + for (; i >= 0; i--) { + dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size, + cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa); + cq->sq.r.sq_bi[i].va = NULL; + cq->sq.r.sq_bi[i].pa = 0; + cq->sq.r.sq_bi[i].size = 0; + } + devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); + + return ICE_ERR_NO_MEMORY; +} + +/** + * ice_free_rq_bufs - Free ARQ buffer info elements + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + int i; + + /* free descriptors */ + for (i = 0; i < cq->num_rq_entries; i++) { + dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size, + cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa); + cq->rq.r.rq_bi[i].va = NULL; + cq->rq.r.rq_bi[i].pa = 0; + cq->rq.r.rq_bi[i].size = 0; + } + + /* free the dma header */ + devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); +} + +/** + * ice_free_sq_bufs - Free ATQ buffer info elements + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + int i; + + /* only unmap if the address is non-NULL */ + for (i = 0; i < cq->num_sq_entries; i++) + if (cq->sq.r.sq_bi[i].pa) { + dmam_free_coherent(ice_hw_to_dev(hw), + cq->sq.r.sq_bi[i].size, + cq->sq.r.sq_bi[i].va, + cq->sq.r.sq_bi[i].pa); + cq->sq.r.sq_bi[i].va = NULL; + cq->sq.r.sq_bi[i].pa = 0; + cq->sq.r.sq_bi[i].size = 0; + } + + /* free the buffer info list */ + devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf); + + /* free the dma header */ + devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); +} + +/** + * ice_cfg_sq_regs - configure Control ATQ registers + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * Configure base address and length registers for the transmit queue + */ +static enum ice_status +ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, cq->sq.head, 0); + wr32(hw, cq->sq.tail, 0); + + /* set starting point */ + wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask)); + wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa)); + wr32(hw, cq->sq.bah, 
upper_32_bits(cq->sq.desc_buf.pa)); + + /* Check one register to verify that config was applied */ + reg = rd32(hw, cq->sq.bal); + if (reg != lower_32_bits(cq->sq.desc_buf.pa)) + return ICE_ERR_AQ_ERROR; + + return 0; +} + +/** + * ice_cfg_rq_regs - configure Control ARQ register + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * Configure base address and length registers for the receive (event q) + */ +static enum ice_status +ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, cq->rq.head, 0); + wr32(hw, cq->rq.tail, 0); + + /* set starting point */ + wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask)); + wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa)); + wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa)); + + /* Update tail in the HW to post pre-allocated buffers */ + wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1)); + + /* Check one register to verify that config was applied */ + reg = rd32(hw, cq->rq.bal); + if (reg != lower_32_bits(cq->rq.desc_buf.pa)) + return ICE_ERR_AQ_ERROR; + + return 0; +} + +/** + * ice_init_sq - main initialization routine for Control ATQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * This is the main initialization routine for the Control Send Queue + * Prior to calling this function, drivers *MUST* set the following fields + * in the cq->structure: + * - cq->num_sq_entries + * - cq->sq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + */ +static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status ret_code; + + if (cq->sq.count > 0) { + /* queue already initialized */ + ret_code = ICE_ERR_NOT_READY; + goto init_ctrlq_exit; + } + + /* verify input for valid configuration */ + if (!cq->num_sq_entries || !cq->sq_buf_size) { + ret_code = ICE_ERR_CFG; + goto init_ctrlq_exit; + } + + cq->sq.next_to_use = 0; + cq->sq.next_to_clean = 0; + + /* allocate the ring memory */ + ret_code = ice_alloc_ctrlq_sq_ring(hw, cq); + if (ret_code) + goto init_ctrlq_exit; + + /* allocate buffers in the rings */ + ret_code = ice_alloc_sq_bufs(hw, cq); + if (ret_code) + goto init_ctrlq_free_rings; + + /* initialize base registers */ + ret_code = ice_cfg_sq_regs(hw, cq); + if (ret_code) + goto init_ctrlq_free_rings; + + /* success! */ + cq->sq.count = cq->num_sq_entries; + goto init_ctrlq_exit; + +init_ctrlq_free_rings: + ice_free_ctrlq_sq_ring(hw, cq); + +init_ctrlq_exit: + return ret_code; +} + +/** + * ice_init_rq - initialize ARQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * The main initialization routine for the Admin Receive (Event) Queue. 
+ * Prior to calling this function, drivers *MUST* set the following fields + * in the cq->structure: + * - cq->num_rq_entries + * - cq->rq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + */ +static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status ret_code; + + if (cq->rq.count > 0) { + /* queue already initialized */ + ret_code = ICE_ERR_NOT_READY; + goto init_ctrlq_exit; + } + + /* verify input for valid configuration */ + if (!cq->num_rq_entries || !cq->rq_buf_size) { + ret_code = ICE_ERR_CFG; + goto init_ctrlq_exit; + } + + cq->rq.next_to_use = 0; + cq->rq.next_to_clean = 0; + + /* allocate the ring memory */ + ret_code = ice_alloc_ctrlq_rq_ring(hw, cq); + if (ret_code) + goto init_ctrlq_exit; + + /* allocate buffers in the rings */ + ret_code = ice_alloc_rq_bufs(hw, cq); + if (ret_code) + goto init_ctrlq_free_rings; + + /* initialize base registers */ + ret_code = ice_cfg_rq_regs(hw, cq); + if (ret_code) + goto init_ctrlq_free_rings; + + /* success! */ + cq->rq.count = cq->num_rq_entries; + goto init_ctrlq_exit; + +init_ctrlq_free_rings: + ice_free_ctrlq_rq_ring(hw, cq); + +init_ctrlq_exit: + return ret_code; +} + +/** + * ice_shutdown_sq - shutdown the Control ATQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * The main shutdown routine for the Control Transmit Queue + */ +static enum ice_status +ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status ret_code = 0; + + mutex_lock(&cq->sq_lock); + + if (!cq->sq.count) { + ret_code = ICE_ERR_NOT_READY; + goto shutdown_sq_out; + } + + /* Stop firmware AdminQ processing */ + wr32(hw, cq->sq.head, 0); + wr32(hw, cq->sq.tail, 0); + wr32(hw, cq->sq.len, 0); + wr32(hw, cq->sq.bal, 0); + wr32(hw, cq->sq.bah, 0); + + cq->sq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers and the ring itself */ + ice_free_sq_bufs(hw, cq); + ice_free_ctrlq_sq_ring(hw, cq); + +shutdown_sq_out: + mutex_unlock(&cq->sq_lock); + return ret_code; +} + +/** + * ice_aq_ver_check - Check the reported AQ API version. + * @fw_branch: The "branch" of FW, typically describes the device type + * @fw_major: The major version of the FW API + * @fw_minor: The minor version increment of the FW API + * + * Checks if the driver should load on a given AQ API version. + * + * Return: 'true' iff the driver should attempt to load. 'false' otherwise. 
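+ * + * For example, with the expected values currently defined in + * ice_controlq.h (0x00/0x00/0x01), only firmware reporting API version + * 0.0.1 loads; any other triple makes ice_init_check_adminq() fail + * with ICE_ERR_FW_API_VER.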
+ */ +static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor) +{ + if (fw_branch != EXP_FW_API_VER_BRANCH) + return false; + if (fw_major != EXP_FW_API_VER_MAJOR) + return false; + if (fw_minor != EXP_FW_API_VER_MINOR) + return false; + return true; +} + +/** + * ice_shutdown_rq - shutdown Control ARQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * The main shutdown routine for the Control Receive Queue + */ +static enum ice_status +ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status ret_code = 0; + + mutex_lock(&cq->rq_lock); + + if (!cq->rq.count) { + ret_code = ICE_ERR_NOT_READY; + goto shutdown_rq_out; + } + + /* Stop Control Queue processing */ + wr32(hw, cq->rq.head, 0); + wr32(hw, cq->rq.tail, 0); + wr32(hw, cq->rq.len, 0); + wr32(hw, cq->rq.bal, 0); + wr32(hw, cq->rq.bah, 0); + + /* set rq.count to 0 to indicate uninitialized queue */ + cq->rq.count = 0; + + /* free ring buffers and the ring itself */ + ice_free_rq_bufs(hw, cq); + ice_free_ctrlq_rq_ring(hw, cq); + +shutdown_rq_out: + mutex_unlock(&cq->rq_lock); + return ret_code; +} + +/** + * ice_init_check_adminq - Check version for Admin Queue to know if it's alive + * @hw: pointer to the hardware structure + */ +static enum ice_status ice_init_check_adminq(struct ice_hw *hw) +{ + struct ice_ctl_q_info *cq = &hw->adminq; + enum ice_status status; + + status = ice_aq_get_fw_ver(hw, NULL); + if (status) + goto init_ctrlq_free_rq; + + if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver, + hw->api_min_ver)) { + status = ICE_ERR_FW_API_VER; + goto init_ctrlq_free_rq; + } + + return 0; + +init_ctrlq_free_rq: + ice_shutdown_rq(hw, cq); + ice_shutdown_sq(hw, cq); + mutex_destroy(&cq->sq_lock); + mutex_destroy(&cq->rq_lock); + return status; +} + +/** + * ice_init_ctrlq - main initialization routine for any control queue + * @hw: pointer to the hardware structure + * @q_type: specific Control queue type + * + * Prior to calling this function, drivers *MUST* set the following fields + * in the cq->structure: + * - cq->num_sq_entries + * - cq->num_rq_entries + * - cq->rq_buf_size + * - cq->sq_buf_size + * + */ +static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +{ + struct ice_ctl_q_info *cq; + enum ice_status ret_code; + + switch (q_type) { + case ICE_CTL_Q_ADMIN: + ice_adminq_init_regs(hw); + cq = &hw->adminq; + break; + default: + return ICE_ERR_PARAM; + } + cq->qtype = q_type; + + /* verify input for valid configuration */ + if (!cq->num_rq_entries || !cq->num_sq_entries || + !cq->rq_buf_size || !cq->sq_buf_size) { + return ICE_ERR_CFG; + } + mutex_init(&cq->sq_lock); + mutex_init(&cq->rq_lock); + + /* setup SQ command write back timeout */ + cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT; + + /* allocate the ATQ */ + ret_code = ice_init_sq(hw, cq); + if (ret_code) + goto init_ctrlq_destroy_locks; + + /* allocate the ARQ */ + ret_code = ice_init_rq(hw, cq); + if (ret_code) + goto init_ctrlq_free_sq; + + /* success! 
*/ + return 0; + +init_ctrlq_free_sq: + ice_shutdown_sq(hw, cq); +init_ctrlq_destroy_locks: + mutex_destroy(&cq->sq_lock); + mutex_destroy(&cq->rq_lock); + return ret_code; +} + +/** + * ice_init_all_ctrlq - main initialization routine for all control queues + * @hw: pointer to the hardware structure + * + * Prior to calling this function, drivers *MUST* set the following fields + * in the cq->structure for all control queues: + * - cq->num_sq_entries + * - cq->num_rq_entries + * - cq->rq_buf_size + * - cq->sq_buf_size + */ +enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) +{ + enum ice_status ret_code; + + /* Init FW admin queue */ + ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN); + if (ret_code) + return ret_code; + + return ice_init_check_adminq(hw); +} + +/** + * ice_shutdown_ctrlq - shutdown routine for any control queue + * @hw: pointer to the hardware structure + * @q_type: specific Control queue type + */ +static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +{ + struct ice_ctl_q_info *cq; + + switch (q_type) { + case ICE_CTL_Q_ADMIN: + cq = &hw->adminq; + if (ice_check_sq_alive(hw, cq)) + ice_aq_q_shutdown(hw, true); + break; + default: + return; + } + + ice_shutdown_sq(hw, cq); + ice_shutdown_rq(hw, cq); + mutex_destroy(&cq->sq_lock); + mutex_destroy(&cq->rq_lock); +} + +/** + * ice_shutdown_all_ctrlq - shutdown routine for all control queues + * @hw: pointer to the hardware structure + */ +void ice_shutdown_all_ctrlq(struct ice_hw *hw) +{ + /* Shutdown FW admin queue */ + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); +} + +/** + * ice_clean_sq - cleans Admin send queue (ATQ) + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * returns the number of free desc + */ +static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + struct ice_ctl_q_ring *sq = &cq->sq; + u16 ntc = sq->next_to_clean; + struct ice_sq_cd *details; + struct ice_aq_desc *desc; + + desc = ICE_CTL_Q_DESC(*sq, ntc); + details = ICE_CTL_Q_DETAILS(*sq, ntc); + + while (rd32(hw, cq->sq.head) != ntc) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); + memset(desc, 0, sizeof(*desc)); + memset(details, 0, sizeof(*details)); + ntc++; + if (ntc == sq->count) + ntc = 0; + desc = ICE_CTL_Q_DESC(*sq, ntc); + details = ICE_CTL_Q_DETAILS(*sq, ntc); + } + + sq->next_to_clean = ntc; + + return ICE_CTL_Q_DESC_UNUSED(sq); +} + +/** + * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ) + * @hw: pointer to the hw struct + * @cq: pointer to the specific Control queue + * + * Returns true if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. + */ +static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + return rd32(hw, cq->sq.head) == cq->sq.next_to_use; +} + +/** + * ice_sq_send_cmd - send command to Control Queue (ATQ) + * @hw: pointer to the hw struct + * @cq: pointer to the specific Control queue + * @desc: prefilled descriptor describing the command (non DMA mem) + * @buf: buffer to use for indirect commands (or NULL for direct commands) + * @buf_size: size of buffer for indirect commands (or 0 for direct commands) + * @cd: pointer to command details structure + * + * This is the main send command routine for the ATQ. It runs the q, + * cleans the queue, etc. 
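+ * + * Minimal caller-side sketch for a direct (bufferless) command, + * assuming an already-initialized admin queue: + * + *	struct ice_aq_desc desc; + * + *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver); + *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);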
+ */ +enum ice_status +ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_aq_desc *desc, void *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_dma_mem *dma_buf = NULL; + struct ice_aq_desc *desc_on_ring; + bool cmd_completed = false; + enum ice_status status = 0; + struct ice_sq_cd *details; + u32 total_delay = 0; + u16 retval = 0; + u32 val = 0; + + mutex_lock(&cq->sq_lock); + + cq->sq_last_status = ICE_AQ_RC_OK; + + if (!cq->sq.count) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Send queue not initialized.\n"); + status = ICE_ERR_AQ_EMPTY; + goto sq_send_command_error; + } + + if ((buf && !buf_size) || (!buf && buf_size)) { + status = ICE_ERR_PARAM; + goto sq_send_command_error; + } + + if (buf) { + if (buf_size > cq->sq_buf_size) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Invalid buffer size for Control Send queue: %d.\n", + buf_size); + status = ICE_ERR_INVAL_SIZE; + goto sq_send_command_error; + } + + desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF); + if (buf_size > ICE_AQ_LG_BUF) + desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB); + } + + val = rd32(hw, cq->sq.head); + if (val >= cq->num_sq_entries) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "head overrun at %d in the Control Send Queue ring\n", + val); + status = ICE_ERR_AQ_EMPTY; + goto sq_send_command_error; + } + + details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use); + if (cd) + memcpy(details, cd, sizeof(*details)); + else + memset(details, 0, sizeof(*details)); + + /* Call clean and check queue available function to reclaim the + * descriptors that were processed by FW/MBX; the function returns the + * number of desc available. The clean function called here could be + * called in a separate thread in case of asynchronous completions. + */ + if (ice_clean_sq(hw, cq) == 0) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Error: Control Send Queue is full.\n"); + status = ICE_ERR_AQ_FULL; + goto sq_send_command_error; + } + + /* initialize the temp desc pointer with the right desc */ + desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use); + + /* if the desc is available copy the temp desc to the right place */ + memcpy(desc_on_ring, desc, sizeof(*desc_on_ring)); + + /* if buf is not NULL assume indirect command */ + if (buf) { + dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use]; + /* copy the user buf into the respective DMA buf */ + memcpy(dma_buf->va, buf, buf_size); + desc_on_ring->datalen = cpu_to_le16(buf_size); + + /* Update the address values in the desc with the pa value + * for respective buffer + */ + desc_on_ring->params.generic.addr_high = + cpu_to_le32(upper_32_bits(dma_buf->pa)); + desc_on_ring->params.generic.addr_low = + cpu_to_le32(lower_32_bits(dma_buf->pa)); + } + + /* Debug desc and buffer */ + ice_debug(hw, ICE_DBG_AQ_MSG, + "ATQ: Control Send queue desc and buffer:\n"); + + ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size); + + (cq->sq.next_to_use)++; + if (cq->sq.next_to_use == cq->sq.count) + cq->sq.next_to_use = 0; + wr32(hw, cq->sq.tail, cq->sq.next_to_use); + + do { + if (ice_sq_done(hw, cq)) + break; + + mdelay(1); + total_delay++; + } while (total_delay < cq->sq_cmd_timeout); + + /* if ready, copy the desc back to temp */ + if (ice_sq_done(hw, cq)) { + memcpy(desc, desc_on_ring, sizeof(*desc)); + if (buf) { + /* get returned length to copy */ + u16 copy_size = le16_to_cpu(desc->datalen); + + if (copy_size > buf_size) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Return len %d > than buf len %d\n", + copy_size, buf_size); + status = ICE_ERR_AQ_ERROR; + } else { + memcpy(buf, 
dma_buf->va, copy_size); + } + } + retval = le16_to_cpu(desc->retval); + if (retval) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Send Queue command completed with error 0x%x\n", + retval); + + /* strip off FW internal code */ + retval &= 0xff; + } + cmd_completed = true; + if (!status && retval != ICE_AQ_RC_OK) + status = ICE_ERR_AQ_ERROR; + cq->sq_last_status = (enum ice_aq_err)retval; + } + + ice_debug(hw, ICE_DBG_AQ_MSG, + "ATQ: desc and buffer writeback:\n"); + + ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size); + + /* save writeback AQ if requested */ + if (details->wb_desc) + memcpy(details->wb_desc, desc_on_ring, + sizeof(*details->wb_desc)); + + /* update the error if time out occurred */ + if (!cmd_completed) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Send Queue Writeback timeout.\n"); + status = ICE_ERR_AQ_TIMEOUT; + } + +sq_send_command_error: + mutex_unlock(&cq->sq_lock); + return status; +} + +/** + * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function + * @desc: pointer to the temp descriptor (non DMA mem) + * @opcode: the opcode can be used to decide which flags to turn off or on + * + * Fill the desc with default values + */ +void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode) +{ + /* zero out the desc */ + memset(desc, 0, sizeof(*desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI); +} + +/** + * ice_clean_rq_elem + * @hw: pointer to the hw struct + * @cq: pointer to the specific Control queue + * @e: event info from the receive descriptor, includes any buffers + * @pending: number of events that could be left to process + * + * This function cleans one Admin Receive Queue element and returns + * the contents through e. It can also return how many events are + * left to process through 'pending'. 
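+ * + * Caller-side sketch (process() is a hypothetical handler and buf a + * caller-owned buffer of cq->rq_buf_size bytes): + * + *	e.buf_len = cq->rq_buf_size; + *	e.msg_buf = buf; + *	while (!ice_clean_rq_elem(hw, cq, &e, &pending)) + *		process(&e);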
+ */ +enum ice_status +ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_rq_event_info *e, u16 *pending) +{ + u16 ntc = cq->rq.next_to_clean; + enum ice_status ret_code = 0; + struct ice_aq_desc *desc; + struct ice_dma_mem *bi; + u16 desc_idx; + u16 datalen; + u16 flags; + u16 ntu; + + /* pre-clean the event info */ + memset(&e->desc, 0, sizeof(e->desc)); + + /* take the lock before we start messing with the ring */ + mutex_lock(&cq->rq_lock); + + if (!cq->rq.count) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Receive queue not initialized.\n"); + ret_code = ICE_ERR_AQ_EMPTY; + goto clean_rq_elem_err; + } + + /* set next_to_use to head */ + ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); + + if (ntu == ntc) { + /* nothing to do - shouldn't need to update ring's values */ + ret_code = ICE_ERR_AQ_NO_WORK; + goto clean_rq_elem_out; + } + + /* now clean the next descriptor */ + desc = ICE_CTL_Q_DESC(cq->rq, ntc); + desc_idx = ntc; + + flags = le16_to_cpu(desc->flags); + if (flags & ICE_AQ_FLAG_ERR) { + ret_code = ICE_ERR_AQ_ERROR; + cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Receive Queue Event received with error 0x%x\n", + cq->rq_last_status); + } + memcpy(&e->desc, desc, sizeof(e->desc)); + datalen = le16_to_cpu(desc->datalen); + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf && e->msg_len) + memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len); + + ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n"); + + ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf, + cq->rq_buf_size); + + /* Restore the original datalen and buffer address in the desc, + * FW updates datalen to indicate the event message size + */ + bi = &cq->rq.r.rq_bi[ntc]; + memset(desc, 0, sizeof(*desc)); + + desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF); + if (cq->rq_buf_size > ICE_AQ_LG_BUF) + desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB); + desc->datalen = cpu_to_le16(bi->size); + desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); + desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); + + /* set tail = the last cleaned desc index. */ + wr32(hw, cq->rq.tail, ntc); + /* ntc is updated to tail + 1 */ + ntc++; + if (ntc == cq->num_rq_entries) + ntc = 0; + cq->rq.next_to_clean = ntc; + cq->rq.next_to_use = ntu; + +clean_rq_elem_out: + /* Set pending if needed, unlock and return */ + if (pending) + *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc)); +clean_rq_elem_err: + mutex_unlock(&cq->rq_lock); + + return ret_code; +} diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h new file mode 100644 index 000000000000..ea02b89243e2 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_CONTROLQ_H_ +#define _ICE_CONTROLQ_H_ + +#include "ice_adminq_cmd.h" + +/* Maximum buffer lengths for all control queue types */ +#define ICE_AQ_MAX_BUF_LEN 4096 + +#define ICE_CTL_Q_DESC(R, i) \ + (&(((struct ice_aq_desc *)((R).desc_buf.va))[i])) + +#define ICE_CTL_Q_DESC_UNUSED(R) \ + (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* Defines that help manage the driver vs FW API checks. + * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage. 
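+ * Note that ice_aq_ver_check requires an exact match on all three + * values, not a minimum version.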
+ * + */ +#define EXP_FW_API_VER_BRANCH 0x00 +#define EXP_FW_API_VER_MAJOR 0x00 +#define EXP_FW_API_VER_MINOR 0x01 + +/* Different control queue types: These are mainly for SW consumption. */ +enum ice_ctl_q { + ICE_CTL_Q_UNKNOWN = 0, + ICE_CTL_Q_ADMIN, +}; + +/* Control Queue default settings */ +#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */ + +struct ice_ctl_q_ring { + void *dma_head; /* Virtual address to dma head */ + struct ice_dma_mem desc_buf; /* descriptor ring memory */ + void *cmd_buf; /* command buffer memory */ + + union { + struct ice_dma_mem *sq_bi; + struct ice_dma_mem *rq_bi; + } r; + + u16 count; /* Number of descriptors */ + + /* used for interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + /* used for queue tracking */ + u32 head; + u32 tail; + u32 len; + u32 bah; + u32 bal; + u32 len_mask; + u32 len_ena_mask; + u32 head_mask; +}; + +/* sq transaction details */ +struct ice_sq_cd { + struct ice_aq_desc *wb_desc; +}; + +#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i])) + +/* rq event information */ +struct ice_rq_event_info { + struct ice_aq_desc desc; + u16 msg_len; + u16 buf_len; + u8 *msg_buf; +}; + +/* Control Queue information */ +struct ice_ctl_q_info { + enum ice_ctl_q qtype; + struct ice_ctl_q_ring rq; /* receive queue */ + struct ice_ctl_q_ring sq; /* send queue */ + u32 sq_cmd_timeout; /* send queue cmd write back timeout */ + u16 num_rq_entries; /* receive queue depth */ + u16 num_sq_entries; /* send queue depth */ + u16 rq_buf_size; /* receive queue buffer size */ + u16 sq_buf_size; /* send queue buffer size */ + struct mutex sq_lock; /* Send queue lock */ + struct mutex rq_lock; /* Receive queue lock */ + enum ice_aq_err sq_last_status; /* last status on send queue */ + enum ice_aq_err rq_last_status; /* last status on receive queue */ +}; + +#endif /* _ICE_CONTROLQ_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h new file mode 100644 index 000000000000..0e14d7215a6e --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_DEVIDS_H_ +#define _ICE_DEVIDS_H_ + +/* Device IDs */ +/* Intel(R) Ethernet Controller C810 for backplane */ +#define ICE_DEV_ID_C810_BACKPLANE 0x1591 +/* Intel(R) Ethernet Controller C810 for QSFP */ +#define ICE_DEV_ID_C810_QSFP 0x1592 +/* Intel(R) Ethernet Controller C810 for SFP */ +#define ICE_DEV_ID_C810_SFP 0x1593 +/* Intel(R) Ethernet Controller C810/X557-AT 10GBASE-T */ +#define ICE_DEV_ID_C810_10G_BASE_T 0x1594 +/* Intel(R) Ethernet Controller C810 1GbE */ +#define ICE_DEV_ID_C810_SGMII 0x1595 + +#endif /* _ICE_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c new file mode 100644 index 000000000000..186764a5c263 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -0,0 +1,940 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. 
*/ + +/* ethtool support for ice */ + +#include "ice.h" + +struct ice_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define ICE_STAT(_type, _name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +#define ICE_VSI_STAT(_name, _stat) \ + ICE_STAT(struct ice_vsi, _name, _stat) +#define ICE_PF_STAT(_name, _stat) \ + ICE_STAT(struct ice_pf, _name, _stat) + +static int ice_q_stats_len(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + return ((np->vsi->num_txq + np->vsi->num_rxq) * + (sizeof(struct ice_q_stats) / sizeof(u64))); +} + +#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats) +#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats) + +#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \ + ice_q_stats_len(n)) + +static const struct ice_stats ice_gstrings_vsi_stats[] = { + ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast), + ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast), + ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast), + ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast), + ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast), + ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast), + ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes), + ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes), + ICE_VSI_STAT("rx_discards", eth_stats.rx_discards), + ICE_VSI_STAT("tx_errors", eth_stats.tx_errors), + ICE_VSI_STAT("tx_linearize", tx_linearize), + ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), + ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed), + ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), +}; + +/* These PF_STATs might look like duplicates of some NETDEV_STATs, + * but they aren't. This device is capable of supporting multiple + * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual + * netdevs whereas the PF_STATs are for the physical function that's + * hosting these netdevs. + * + * The PF_STATs are appended to the netdev stats only when ethtool -S + * is queried on the base PF netdev. 
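+ * For example, "ethtool -S" on the PF netdev reports both "tx_bytes" + * (the VSI counter) and "port.tx_bytes" (the PF-wide counter); the + * "port." prefix is added in ice_get_strings().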
+ */ +static struct ice_stats ice_gstrings_pf_stats[] = { + ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes), + ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes), + ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast), + ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast), + ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast), + ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast), + ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), + ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast), + ICE_PF_STAT("tx_errors", stats.eth.tx_errors), + ICE_PF_STAT("tx_size_64", stats.tx_size_64), + ICE_PF_STAT("rx_size_64", stats.rx_size_64), + ICE_PF_STAT("tx_size_127", stats.tx_size_127), + ICE_PF_STAT("rx_size_127", stats.rx_size_127), + ICE_PF_STAT("tx_size_255", stats.tx_size_255), + ICE_PF_STAT("rx_size_255", stats.rx_size_255), + ICE_PF_STAT("tx_size_511", stats.tx_size_511), + ICE_PF_STAT("rx_size_511", stats.rx_size_511), + ICE_PF_STAT("tx_size_1023", stats.tx_size_1023), + ICE_PF_STAT("rx_size_1023", stats.rx_size_1023), + ICE_PF_STAT("tx_size_1522", stats.tx_size_1522), + ICE_PF_STAT("rx_size_1522", stats.rx_size_1522), + ICE_PF_STAT("tx_size_big", stats.tx_size_big), + ICE_PF_STAT("rx_size_big", stats.rx_size_big), + ICE_PF_STAT("link_xon_tx", stats.link_xon_tx), + ICE_PF_STAT("link_xon_rx", stats.link_xon_rx), + ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx), + ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx), + ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), + ICE_PF_STAT("rx_undersize", stats.rx_undersize), + ICE_PF_STAT("rx_fragments", stats.rx_fragments), + ICE_PF_STAT("rx_oversize", stats.rx_oversize), + ICE_PF_STAT("rx_jabber", stats.rx_jabber), + ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error), + ICE_PF_STAT("rx_length_errors", stats.rx_len_errors), + ICE_PF_STAT("rx_dropped", stats.eth.rx_discards), + ICE_PF_STAT("rx_crc_errors", stats.crc_errors), + ICE_PF_STAT("illegal_bytes", stats.illegal_bytes), + ICE_PF_STAT("mac_local_faults", stats.mac_local_faults), + ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults), +}; + +static u32 ice_regs_dump_list[] = { + PFGEN_STATE, + PRTGEN_STATUS, + QRX_CTRL(0), + QINT_TQCTL(0), + QINT_RQCTL(0), + PFINT_OICR_ENA, + QRX_ITR(0), +}; + +/** + * ice_nvm_version_str - format the NVM version strings + * @hw: ptr to the hardware info + */ +static char *ice_nvm_version_str(struct ice_hw *hw) +{ + static char buf[ICE_ETHTOOL_FWVER_LEN]; + u8 ver, patch; + u32 full_ver; + u16 build; + + full_ver = hw->nvm.oem_ver; + ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT); + build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >> + ICE_OEM_VER_BUILD_SHIFT); + patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK); + + snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", + (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT, + (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT, + hw->nvm.eetrack, ver, build, patch); + + return buf; +} + +static void +ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw), + sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(pf->pdev), + sizeof(drvinfo->bus_info)); +} + +static int ice_get_regs_len(struct net_device __always_unused *netdev) +{ + return 
sizeof(ice_regs_dump_list); +} + +static void +ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + struct ice_hw *hw = &pf->hw; + u32 *regs_buf = (u32 *)p; + int i; + + regs->version = 1; + + for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i) + regs_buf[i] = rd32(hw, ice_regs_dump_list[i]); +} + +static u32 ice_get_msglevel(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + +#ifndef CONFIG_DYNAMIC_DEBUG + if (pf->hw.debug_mask) + netdev_info(netdev, "hw debug_mask: 0x%llX\n", + pf->hw.debug_mask); +#endif /* !CONFIG_DYNAMIC_DEBUG */ + + return pf->msg_enable; +} + +static void ice_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + +#ifndef CONFIG_DYNAMIC_DEBUG + if (ICE_DBG_USER & data) + pf->hw.debug_mask = data; + else + pf->msg_enable = data; +#else + pf->msg_enable = data; +#endif /* !CONFIG_DYNAMIC_DEBUG */ +} + +static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + char *p = (char *)data; + unsigned int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ICE_VSI_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + ice_gstrings_vsi_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + + ice_for_each_txq(vsi, i) { + snprintf(p, ETH_GSTRING_LEN, + "tx-queue-%u.tx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i); + p += ETH_GSTRING_LEN; + } + + ice_for_each_rxq(vsi, i) { + snprintf(p, ETH_GSTRING_LEN, + "rx-queue-%u.rx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i); + p += ETH_GSTRING_LEN; + } + + if (vsi->type != ICE_VSI_PF) + return; + + for (i = 0; i < ICE_PF_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "port.%s", + ice_gstrings_pf_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + + break; + default: + break; + } +} + +static int ice_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ICE_ALL_STATS_LEN(netdev); + default: + return -EOPNOTSUPP; + } +} + +static void +ice_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, u64 *data) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_ring *ring; + unsigned int j = 0; + int i = 0; + char *p; + + for (j = 0; j < ICE_VSI_STATS_LEN; j++) { + p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset; + data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + /* populate per queue stats */ + rcu_read_lock(); + + ice_for_each_txq(vsi, j) { + ring = READ_ONCE(vsi->tx_rings[j]); + if (!ring) + continue; + data[i++] = ring->stats.pkts; + data[i++] = ring->stats.bytes; + } + + ice_for_each_rxq(vsi, j) { + ring = READ_ONCE(vsi->rx_rings[j]); + if (!ring) + continue; + data[i++] = ring->stats.pkts; + data[i++] = ring->stats.bytes; + } + + rcu_read_unlock(); + + if (vsi->type != ICE_VSI_PF) + return; + + for (j = 0; j < ICE_PF_STATS_LEN; j++) { + p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset; + data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } +} + +static int +ice_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_link_status *hw_link_info; + struct ice_vsi *vsi = np->vsi; + bool link_up; + + hw_link_info = &vsi->port_info->phy.link_info; + link_up = hw_link_info->link_info & ICE_AQ_LINK_UP; + + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + + /* set speed and duplex */ + if (link_up) { + switch (hw_link_info->link_speed) { + case ICE_AQ_LINK_SPEED_100MB: + ks->base.speed = SPEED_100; + break; + case ICE_AQ_LINK_SPEED_2500MB: + ks->base.speed = SPEED_2500; + break; + case ICE_AQ_LINK_SPEED_5GB: + ks->base.speed = SPEED_5000; + break; + case ICE_AQ_LINK_SPEED_10GB: + ks->base.speed = SPEED_10000; + break; + case ICE_AQ_LINK_SPEED_25GB: + ks->base.speed = SPEED_25000; + break; + case ICE_AQ_LINK_SPEED_40GB: + ks->base.speed = SPEED_40000; + break; + default: + ks->base.speed = SPEED_UNKNOWN; + break; + } + + ks->base.duplex = DUPLEX_FULL; + } else { + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; + } + + /* set autoneg settings */ + ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ? + AUTONEG_ENABLE : AUTONEG_DISABLE); + + /* set media type settings */ + switch (vsi->port_info->phy.media_type) { + case ICE_MEDIA_FIBER: + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ks->base.port = PORT_FIBRE; + break; + case ICE_MEDIA_BASET: + ethtool_link_ksettings_add_link_mode(ks, supported, TP); + ethtool_link_ksettings_add_link_mode(ks, advertising, TP); + ks->base.port = PORT_TP; + break; + case ICE_MEDIA_BACKPLANE: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, Backplane); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Backplane); + ks->base.port = PORT_NONE; + break; + case ICE_MEDIA_DA: + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + ks->base.port = PORT_DA; + break; + default: + ks->base.port = PORT_OTHER; + break; + } + + /* flow control is symmetric and always supported */ + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + + switch (vsi->port_info->fc.req_mode) { + case ICE_FC_FULL: + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + break; + case ICE_FC_TX_PAUSE: + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + break; + case ICE_FC_RX_PAUSE: + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + break; + case ICE_FC_PFC: + default: + ethtool_link_ksettings_del_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_del_link_mode(ks, advertising, + Asym_Pause); + break; + } + + return 0; +} + +/** + * ice_get_rxnfc - command to get RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * @rule_locs: buffer to return Rx flow classification rules + * + * Returns success if the command is supported. 
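+ * + * Only ETHTOOL_GRXRINGS is handled here: it reports vsi->rss_size so + * userspace can validate RSS indirection targets; all other rxnfc + * queries return -EOPNOTSUPP.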
+ */ +static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 __always_unused *rule_locs) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = vsi->rss_size; + ret = 0; + break; + default: + break; + } + + return ret; +} + +static void +ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + + ring->rx_max_pending = ICE_MAX_NUM_DESC; + ring->tx_max_pending = ICE_MAX_NUM_DESC; + ring->rx_pending = vsi->rx_rings[0]->count; + ring->tx_pending = vsi->tx_rings[0]->count; + ring->rx_mini_pending = ICE_MIN_NUM_DESC; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int +ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct ice_ring *tx_rings = NULL, *rx_rings = NULL; + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + int i, timeout = 50, err = 0; + u32 new_rx_cnt, new_tx_cnt; + + if (ring->tx_pending > ICE_MAX_NUM_DESC || + ring->tx_pending < ICE_MIN_NUM_DESC || + ring->rx_pending > ICE_MAX_NUM_DESC || + ring->rx_pending < ICE_MIN_NUM_DESC) { + netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, + ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC); + return -EINVAL; + } + + new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE); + new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE); + + /* if nothing to do return success */ + if (new_tx_cnt == vsi->tx_rings[0]->count && + new_rx_cnt == vsi->rx_rings[0]->count) { + netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); + return 0; + } + + while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(1000, 2000); + } + + /* set for the next time the netdev is started */ + if (!netif_running(vsi->netdev)) { + for (i = 0; i < vsi->alloc_txq; i++) + vsi->tx_rings[i]->count = new_tx_cnt; + for (i = 0; i < vsi->alloc_rxq; i++) + vsi->rx_rings[i]->count = new_rx_cnt; + netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n"); + goto done; + } + + if (new_tx_cnt == vsi->tx_rings[0]->count) + goto process_rx; + + /* alloc updated Tx resources */ + netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n", + vsi->tx_rings[0]->count, new_tx_cnt); + + tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, + sizeof(struct ice_ring), GFP_KERNEL); + if (!tx_rings) { + err = -ENOMEM; + goto done; + } + + for (i = 0; i < vsi->num_txq; i++) { + /* clone ring and setup updated count */ + tx_rings[i] = *vsi->tx_rings[i]; + tx_rings[i].count = new_tx_cnt; + tx_rings[i].desc = NULL; + tx_rings[i].tx_buf = NULL; + err = ice_setup_tx_ring(&tx_rings[i]); + if (err) { + while (i) { + i--; + ice_clean_tx_ring(&tx_rings[i]); + } + devm_kfree(&pf->pdev->dev, tx_rings); + goto done; + } + } + +process_rx: + if (new_rx_cnt == vsi->rx_rings[0]->count) + goto process_link; + + /* alloc updated Rx resources */ + netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n", + vsi->rx_rings[0]->count, new_rx_cnt); + + rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, + sizeof(struct ice_ring), GFP_KERNEL); + if (!rx_rings) { + err = -ENOMEM; + goto done; + } + 
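+ /* Clone each Rx ring, resize and fill the clone, and only swap the + * clones into the VSI (under ice_down() below) once every allocation + * has succeeded, so a failure here leaves the live rings untouched. + */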
+	for (i = 0; i < vsi->num_rxq; i++) {
+		/* clone ring and setup updated count */
+		rx_rings[i] = *vsi->rx_rings[i];
+		rx_rings[i].count = new_rx_cnt;
+		rx_rings[i].desc = NULL;
+		rx_rings[i].rx_buf = NULL;
+		/* this is to allow wr32 to have something to write to
+		 * during early allocation of Rx buffers
+		 */
+		rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;
+
+		err = ice_setup_rx_ring(&rx_rings[i]);
+		if (err)
+			goto rx_unwind;
+
+		/* allocate Rx buffers */
+		err = ice_alloc_rx_bufs(&rx_rings[i],
+					ICE_DESC_UNUSED(&rx_rings[i]));
+rx_unwind:
+		if (err) {
+			while (i) {
+				i--;
+				ice_free_rx_ring(&rx_rings[i]);
+			}
+			devm_kfree(&pf->pdev->dev, rx_rings);
+			err = -ENOMEM;
+			goto free_tx;
+		}
+	}
+
+process_link:
+	/* Bring interface down, copy in the new ring info, then restore the
+	 * interface. If the VSI is up, bring it down and then back up
+	 */
+	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
+		ice_down(vsi);
+
+		if (tx_rings) {
+			for (i = 0; i < vsi->alloc_txq; i++) {
+				ice_free_tx_ring(vsi->tx_rings[i]);
+				*vsi->tx_rings[i] = tx_rings[i];
+			}
+			devm_kfree(&pf->pdev->dev, tx_rings);
+		}
+
+		if (rx_rings) {
+			for (i = 0; i < vsi->alloc_rxq; i++) {
+				ice_free_rx_ring(vsi->rx_rings[i]);
+				/* copy the real tail offset */
+				rx_rings[i].tail = vsi->rx_rings[i]->tail;
+				/* this is to fake out the allocation routine
+				 * into thinking it has to realloc everything
+				 * but the recycling logic will let us re-use
+				 * the buffers allocated above
+				 */
+				rx_rings[i].next_to_use = 0;
+				rx_rings[i].next_to_clean = 0;
+				rx_rings[i].next_to_alloc = 0;
+				*vsi->rx_rings[i] = rx_rings[i];
+			}
+			devm_kfree(&pf->pdev->dev, rx_rings);
+		}
+
+		ice_up(vsi);
+	}
+	goto done;
+
+free_tx:
+	/* error cleanup if the Rx allocations failed after getting Tx */
+	if (tx_rings) {
+		for (i = 0; i < vsi->alloc_txq; i++)
+			ice_free_tx_ring(&tx_rings[i]);
+		devm_kfree(&pf->pdev->dev, tx_rings);
+	}
+
+done:
+	clear_bit(__ICE_CFG_BUSY, pf->state);
+	return err;
+}
+
+static int ice_nway_reset(struct net_device *netdev)
+{
+	/* restart autonegotiation */
+	struct ice_netdev_priv *np = netdev_priv(netdev);
+	struct ice_link_status *hw_link_info;
+	struct ice_vsi *vsi = np->vsi;
+	struct ice_port_info *pi;
+	enum ice_status status;
+	bool link_up;
+
+	pi = vsi->port_info;
+	hw_link_info = &pi->phy.link_info;
+	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+
+	status = ice_aq_set_link_restart_an(pi, link_up, NULL);
+	if (status) {
+		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
+			    status, pi->hw->adminq.sq_last_status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * ice_get_pauseparam - Get Flow Control status
+ * @netdev: network interface device structure
+ * @pause: ethernet pause (flow control) parameters
+ */
+static void
+ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+	struct ice_netdev_priv *np = netdev_priv(netdev);
+	struct ice_port_info *pi;
+
+	pi = np->vsi->port_info;
+	pause->autoneg =
+		((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ?
+		 AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+	if (pi->fc.current_mode == ICE_FC_RX_PAUSE) {
+		pause->rx_pause = 1;
+	} else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {
+		pause->tx_pause = 1;
+	} else if (pi->fc.current_mode == ICE_FC_FULL) {
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
+	}
+}
+
+/**
+ * ice_set_pauseparam - Set Flow Control parameter
+ * @netdev: network interface device structure
+ * @pause: return tx/rx flow control status
+ */
+static int
+ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+	struct ice_netdev_priv *np = netdev_priv(netdev);
+	struct ice_link_status *hw_link_info;
+	struct ice_pf *pf = np->vsi->back;
+	struct ice_vsi *vsi = np->vsi;
+	struct ice_hw *hw = &pf->hw;
+	struct ice_port_info *pi;
+	enum ice_status status;
+	u8 aq_failures;
+	bool link_up;
+	int err = 0;
+
+	pi = vsi->port_info;
+	hw_link_info = &pi->phy.link_info;
+	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+
+	/* Changing the port's flow control is not supported if this isn't the
+	 * PF VSI
+	 */
+	if (vsi->type != ICE_VSI_PF) {
+		netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
+		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* If we have link and don't have autoneg */
+	if (!test_bit(__ICE_DOWN, pf->state) &&
+	    !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
+		/* Send message that it might not necessarily work */
+		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
+	}
+
+	if (pause->rx_pause && pause->tx_pause)
+		pi->fc.req_mode = ICE_FC_FULL;
+	else if (pause->rx_pause && !pause->tx_pause)
+		pi->fc.req_mode = ICE_FC_RX_PAUSE;
+	else if (!pause->rx_pause && pause->tx_pause)
+		pi->fc.req_mode = ICE_FC_TX_PAUSE;
+	else if (!pause->rx_pause && !pause->tx_pause)
+		pi->fc.req_mode = ICE_FC_NONE;
+	else
+		return -EINVAL;
+
+	/* Tell the OS link is going down, the link will go back up when fw
+	 * says it is ready asynchronously
+	 */
+	ice_print_link_msg(vsi, false);
+	netif_carrier_off(netdev);
+	netif_tx_stop_all_queues(netdev);
+
+	/* Set the FC mode and only restart AN if link is up */
+	status = ice_set_fc(pi, &aq_failures, link_up);
+
+	if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
+		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
+			    status, hw->adminq.sq_last_status);
+		err = -EAGAIN;
+	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
+		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
+			    status, hw->adminq.sq_last_status);
+		err = -EAGAIN;
+	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
+		netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
+			    status, hw->adminq.sq_last_status);
+		err = -EAGAIN;
+	}
+
+	if (!test_bit(__ICE_DOWN, pf->state)) {
+		/* Give it a little more time to try to come back */
+		msleep(75);
+		if (!test_bit(__ICE_DOWN, pf->state))
+			return ice_nway_reset(netdev);
+	}
+
+	return err;
+}
+
+/**
+ * ice_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the hash key size.
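+ * (ICE_VSIQF_HKEY_ARRAY_SIZE is derived from VSIQF_HKEY_MAX_INDEX and
+ * works out to a 52-byte Toeplitz hash key on this hardware.)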
+ */ +static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev) +{ + return ICE_VSIQF_HKEY_ARRAY_SIZE; +} + +/** + * ice_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size. + */ +static u32 ice_get_rxfh_indir_size(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + return np->vsi->rss_table_size; +} + +/** + * ice_get_rxfh - get the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function + * + * Reads the indirection table directly from the hardware. + */ +static int +ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + int ret = 0, i; + u8 *lut; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (!indir) + return 0; + + if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + /* RSS not supported return error here */ + netdev_warn(netdev, "RSS is not configured on this VSI!\n"); + return -EIO; + } + + lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) { + ret = -EIO; + goto out; + } + + for (i = 0; i < vsi->rss_table_size; i++) + indir[i] = (u32)(lut[i]); + +out: + devm_kfree(&pf->pdev->dev, lut); + return ret; +} + +/** + * ice_set_rxfh - set the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function + * + * Returns -EINVAL if the table specifies an invalid queue id, otherwise + * returns 0 after programming the table. 
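+ *
+ * This is typically reached through the ethtool RSS configuration path,
+ * e.g.:
+ *
+ *   ethtool -X <dev> equal 8
+ *   ethtool -X <dev> hkey <52 hex bytes> hfunc toeplitz
+ *
+ * Only Toeplitz (ETH_RSS_HASH_TOP) or ETH_RSS_HASH_NO_CHANGE is accepted,
+ * and a NULL @indir repopulates the LUT with the default distribution.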
+ */ +static int ice_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u8 *seed = NULL; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + /* RSS not supported return error here */ + netdev_warn(netdev, "RSS is not configured on this VSI!\n"); + return -EIO; + } + + if (key) { + if (!vsi->rss_hkey_user) { + vsi->rss_hkey_user = + devm_kzalloc(&pf->pdev->dev, + ICE_VSIQF_HKEY_ARRAY_SIZE, + GFP_KERNEL); + if (!vsi->rss_hkey_user) + return -ENOMEM; + } + memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE); + seed = vsi->rss_hkey_user; + } + + if (!vsi->rss_lut_user) { + vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev, + vsi->rss_table_size, + GFP_KERNEL); + if (!vsi->rss_lut_user) + return -ENOMEM; + } + + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + if (indir) { + int i; + + for (i = 0; i < vsi->rss_table_size; i++) + vsi->rss_lut_user[i] = (u8)(indir[i]); + } else { + ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size, + vsi->rss_size); + } + + if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size)) + return -EIO; + + return 0; +} + +static const struct ethtool_ops ice_ethtool_ops = { + .get_link_ksettings = ice_get_link_ksettings, + .get_drvinfo = ice_get_drvinfo, + .get_regs_len = ice_get_regs_len, + .get_regs = ice_get_regs, + .get_msglevel = ice_get_msglevel, + .set_msglevel = ice_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = ice_get_strings, + .get_ethtool_stats = ice_get_ethtool_stats, + .get_sset_count = ice_get_sset_count, + .get_rxnfc = ice_get_rxnfc, + .get_ringparam = ice_get_ringparam, + .set_ringparam = ice_set_ringparam, + .nway_reset = ice_nway_reset, + .get_pauseparam = ice_get_pauseparam, + .set_pauseparam = ice_set_pauseparam, + .get_rxfh_key_size = ice_get_rxfh_key_size, + .get_rxfh_indir_size = ice_get_rxfh_indir_size, + .get_rxfh = ice_get_rxfh, + .set_rxfh = ice_set_rxfh, +}; + +/** + * ice_set_ethtool_ops - setup netdev ethtool ops + * @netdev: network interface device structure + * + * setup netdev ethtool ops with ice specific ops + */ +void ice_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ice_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h new file mode 100644 index 000000000000..1b9e2ef48a9d --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -0,0 +1,266 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. 
*/ + +/* Machine-generated file */ + +#ifndef _ICE_HW_AUTOGEN_H_ +#define _ICE_HW_AUTOGEN_H_ + +#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) +#define PF_FW_ARQBAH 0x00080180 +#define PF_FW_ARQBAL 0x00080080 +#define PF_FW_ARQH 0x00080380 +#define PF_FW_ARQH_ARQH_S 0 +#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, PF_FW_ARQH_ARQH_S) +#define PF_FW_ARQLEN 0x00080280 +#define PF_FW_ARQLEN_ARQLEN_S 0 +#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, PF_FW_ARQLEN_ARQLEN_S) +#define PF_FW_ARQLEN_ARQVFE_S 28 +#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S) +#define PF_FW_ARQLEN_ARQOVFL_S 29 +#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S) +#define PF_FW_ARQLEN_ARQCRIT_S 30 +#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S) +#define PF_FW_ARQLEN_ARQENABLE_S 31 +#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S) +#define PF_FW_ARQT 0x00080480 +#define PF_FW_ATQBAH 0x00080100 +#define PF_FW_ATQBAL 0x00080000 +#define PF_FW_ATQH 0x00080300 +#define PF_FW_ATQH_ATQH_S 0 +#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, PF_FW_ATQH_ATQH_S) +#define PF_FW_ATQLEN 0x00080200 +#define PF_FW_ATQLEN_ATQLEN_S 0 +#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S) +#define PF_FW_ATQLEN_ATQVFE_S 28 +#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S) +#define PF_FW_ATQLEN_ATQOVFL_S 29 +#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S) +#define PF_FW_ATQLEN_ATQCRIT_S 30 +#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S) +#define PF_FW_ATQLEN_ATQENABLE_S 31 +#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S) +#define PF_FW_ATQT 0x00080400 + +#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256)) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0 +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8 +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16 +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24 +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) +#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4)) +#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0 +#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S 30 +#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S) +#define GLFLXP_RXDID_FLX_WRD_1(_i) (0x0045c900 + ((_i) * 4)) +#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S 0 +#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S 30 +#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S) +#define GLFLXP_RXDID_FLX_WRD_2(_i) (0x0045ca00 + ((_i) * 4)) +#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S 0 +#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S 30 +#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S) +#define GLFLXP_RXDID_FLX_WRD_3(_i) (0x0045cb00 + ((_i) * 4)) +#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S 0 +#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S 30 
+#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S) + +#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4)) +#define QRXFLXP_CNTXT_RXDID_IDX_S 0 +#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, QRXFLXP_CNTXT_RXDID_IDX_S) +#define QRXFLXP_CNTXT_RXDID_PRIO_S 8 +#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, QRXFLXP_CNTXT_RXDID_PRIO_S) +#define QRXFLXP_CNTXT_TS_S 11 +#define QRXFLXP_CNTXT_TS_M BIT(QRXFLXP_CNTXT_TS_S) +#define GLGEN_RSTAT 0x000B8188 +#define GLGEN_RSTAT_DEVSTATE_S 0 +#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, GLGEN_RSTAT_DEVSTATE_S) +#define GLGEN_RSTCTL 0x000B8180 +#define GLGEN_RSTCTL_GRSTDEL_S 0 +#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S) +#define GLGEN_RSTAT_RESET_TYPE_S 2 +#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, GLGEN_RSTAT_RESET_TYPE_S) +#define GLGEN_RTRIG 0x000B8190 +#define GLGEN_RTRIG_CORER_S 0 +#define GLGEN_RTRIG_CORER_M BIT(GLGEN_RTRIG_CORER_S) +#define GLGEN_RTRIG_GLOBR_S 1 +#define GLGEN_RTRIG_GLOBR_M BIT(GLGEN_RTRIG_GLOBR_S) +#define GLGEN_STAT 0x000B612C +#define PFGEN_CTRL 0x00091000 +#define PFGEN_CTRL_PFSWR_S 0 +#define PFGEN_CTRL_PFSWR_M BIT(PFGEN_CTRL_PFSWR_S) +#define PFGEN_STATE 0x00088000 +#define PRTGEN_STATUS 0x000B8100 +#define PFHMC_ERRORDATA 0x00520500 +#define PFHMC_ERRORINFO 0x00520400 +#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) +#define GLINT_DYN_CTL_INTENA_S 0 +#define GLINT_DYN_CTL_INTENA_M BIT(GLINT_DYN_CTL_INTENA_S) +#define GLINT_DYN_CTL_CLEARPBA_S 1 +#define GLINT_DYN_CTL_CLEARPBA_M BIT(GLINT_DYN_CTL_CLEARPBA_S) +#define GLINT_DYN_CTL_SWINT_TRIG_S 2 +#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(GLINT_DYN_CTL_SWINT_TRIG_S) +#define GLINT_DYN_CTL_ITR_INDX_S 3 +#define GLINT_DYN_CTL_SW_ITR_INDX_S 25 +#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, GLINT_DYN_CTL_SW_ITR_INDX_S) +#define GLINT_DYN_CTL_INTENA_MSK_S 31 +#define GLINT_DYN_CTL_INTENA_MSK_M BIT(GLINT_DYN_CTL_INTENA_MSK_S) +#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) +#define PFINT_FW_CTL 0x0016C800 +#define PFINT_FW_CTL_MSIX_INDX_S 0 +#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_FW_CTL_MSIX_INDX_S) +#define PFINT_FW_CTL_ITR_INDX_S 11 +#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, PFINT_FW_CTL_ITR_INDX_S) +#define PFINT_FW_CTL_CAUSE_ENA_S 30 +#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) +#define PFINT_OICR 0x0016CA00 +#define PFINT_OICR_INTEVENT_S 0 +#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S) +#define PFINT_OICR_HLP_RDY_S 14 +#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S) +#define PFINT_OICR_CPM_RDY_S 15 +#define PFINT_OICR_CPM_RDY_M BIT(PFINT_OICR_CPM_RDY_S) +#define PFINT_OICR_ECC_ERR_S 16 +#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S) +#define PFINT_OICR_MAL_DETECT_S 19 +#define PFINT_OICR_MAL_DETECT_M BIT(PFINT_OICR_MAL_DETECT_S) +#define PFINT_OICR_GRST_S 20 +#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S) +#define PFINT_OICR_PCI_EXCEPTION_S 21 +#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S) +#define PFINT_OICR_GPIO_S 22 +#define PFINT_OICR_GPIO_M BIT(PFINT_OICR_GPIO_S) +#define PFINT_OICR_STORM_DETECT_S 24 +#define PFINT_OICR_STORM_DETECT_M BIT(PFINT_OICR_STORM_DETECT_S) +#define PFINT_OICR_HMC_ERR_S 26 +#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S) +#define PFINT_OICR_PE_CRITERR_S 28 +#define PFINT_OICR_PE_CRITERR_M BIT(PFINT_OICR_PE_CRITERR_S) +#define PFINT_OICR_CTL 0x0016CA80 +#define PFINT_OICR_CTL_MSIX_INDX_S 0 +#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_OICR_CTL_MSIX_INDX_S) 
+#define PFINT_OICR_CTL_ITR_INDX_S 11 +#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, PFINT_OICR_CTL_ITR_INDX_S) +#define PFINT_OICR_CTL_CAUSE_ENA_S 30 +#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(PFINT_OICR_CTL_CAUSE_ENA_S) +#define PFINT_OICR_ENA 0x0016C900 +#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) +#define QINT_RQCTL_MSIX_INDX_S 0 +#define QINT_RQCTL_ITR_INDX_S 11 +#define QINT_RQCTL_CAUSE_ENA_S 30 +#define QINT_RQCTL_CAUSE_ENA_M BIT(QINT_RQCTL_CAUSE_ENA_S) +#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4)) +#define QINT_TQCTL_MSIX_INDX_S 0 +#define QINT_TQCTL_ITR_INDX_S 11 +#define QINT_TQCTL_CAUSE_ENA_S 30 +#define QINT_TQCTL_CAUSE_ENA_M BIT(QINT_TQCTL_CAUSE_ENA_S) +#define GLLAN_RCTL_0 0x002941F8 +#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) +#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) +#define QRX_CTRL_MAX_INDEX 2047 +#define QRX_CTRL_QENA_REQ_S 0 +#define QRX_CTRL_QENA_REQ_M BIT(QRX_CTRL_QENA_REQ_S) +#define QRX_CTRL_QENA_STAT_S 2 +#define QRX_CTRL_QENA_STAT_M BIT(QRX_CTRL_QENA_STAT_S) +#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4)) +#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4)) +#define GLNVM_FLA 0x000B6108 +#define GLNVM_FLA_LOCKED_S 6 +#define GLNVM_FLA_LOCKED_M BIT(GLNVM_FLA_LOCKED_S) +#define GLNVM_GENS 0x000B6100 +#define GLNVM_GENS_SR_SIZE_S 5 +#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, GLNVM_GENS_SR_SIZE_S) +#define GLNVM_ULD 0x000B6008 +#define GLNVM_ULD_CORER_DONE_S 3 +#define GLNVM_ULD_CORER_DONE_M BIT(GLNVM_ULD_CORER_DONE_S) +#define GLNVM_ULD_GLOBR_DONE_S 4 +#define GLNVM_ULD_GLOBR_DONE_M BIT(GLNVM_ULD_GLOBR_DONE_S) +#define PF_FUNC_RID 0x0009E880 +#define PF_FUNC_RID_FUNC_NUM_S 0 +#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, PF_FUNC_RID_FUNC_NUM_S) +#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) +#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) +#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) +#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) +#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) +#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) +#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) +#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) +#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) +#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) +#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) +#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8)) +#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) +#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) +#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) +#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8)) +#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) +#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) +#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) +#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) +#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) +#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) +#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) +#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) +#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) +#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8)) +#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) +#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) +#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) +#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) +#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) +#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) +#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) +#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) +#define GLPRT_PTC1023H(_i) 
(0x00380C84 + ((_i) * 8)) +#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) +#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) +#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) +#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) +#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) +#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8)) +#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) +#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) +#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) +#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) +#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) +#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) +#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) +#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8)) +#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8)) +#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8)) +#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8)) +#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) +#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) +#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) +#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) +#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) +#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) +#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8)) +#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) +#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) +#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) +#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) +#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) +#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) +#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) +#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) +#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) +#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8)) +#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) +#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) +#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) +#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) +#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) +#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) +#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) +#define VSIQF_HKEY_MAX_INDEX 12 + +#endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h new file mode 100644 index 000000000000..d23a91665b46 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -0,0 +1,473 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. 
*/ + +#ifndef _ICE_LAN_TX_RX_H_ +#define _ICE_LAN_TX_RX_H_ + +union ice_32byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + struct { + struct { + __le16 mirroring_status; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow Director filter id */ + } hi_dword; + } qword0; + struct { + /* status/error/PTYPE/length */ + __le64 status_error_len; + } qword1; + struct { + __le16 ext_status; /* extended status */ + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + __le32 reserved; + __le32 fd_id; + } qword3; + } wb; /* writeback */ +}; + +struct ice_rx_ptype_decoded { + u32 ptype:10; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:2; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +enum ice_rx_ptype_outer_ip { + ICE_RX_PTYPE_OUTER_L2 = 0, + ICE_RX_PTYPE_OUTER_IP = 1, +}; + +enum ice_rx_ptype_outer_ip_ver { + ICE_RX_PTYPE_OUTER_NONE = 0, + ICE_RX_PTYPE_OUTER_IPV4 = 1, + ICE_RX_PTYPE_OUTER_IPV6 = 2, +}; + +enum ice_rx_ptype_outer_fragmented { + ICE_RX_PTYPE_NOT_FRAG = 0, + ICE_RX_PTYPE_FRAG = 1, +}; + +enum ice_rx_ptype_tunnel_type { + ICE_RX_PTYPE_TUNNEL_NONE = 0, + ICE_RX_PTYPE_TUNNEL_IP_IP = 1, + ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum ice_rx_ptype_tunnel_end_prot { + ICE_RX_PTYPE_TUNNEL_END_NONE = 0, + ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1, + ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum ice_rx_ptype_inner_prot { + ICE_RX_PTYPE_INNER_PROT_NONE = 0, + ICE_RX_PTYPE_INNER_PROT_UDP = 1, + ICE_RX_PTYPE_INNER_PROT_TCP = 2, + ICE_RX_PTYPE_INNER_PROT_SCTP = 3, + ICE_RX_PTYPE_INNER_PROT_ICMP = 4, + ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5, +}; + +enum ice_rx_ptype_payload_layer { + ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +/* RX Flex Descriptor + * This descriptor is used instead of the legacy version descriptor when + * ice_rlan_ctx.adv_desc is set + */ +union ice_32b_rx_flex_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + /* Qword 0 */ + u8 rxdid; /* descriptor builder profile id */ + u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */ + __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */ + __le16 pkt_len; /* [15:14] are reserved */ + __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */ + /* sph=[11:11] */ + /* ff1/ext=[15:12] */ + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le16 flex_meta0; + __le16 flex_meta1; + + /* Qword 2 */ + __le16 status_error1; + u8 flex_flags2; + u8 time_stamp_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le16 flex_meta2; + __le16 flex_meta3; + union { + struct { + __le16 flex_meta4; + __le16 flex_meta5; + } flex; + __le32 ts_high; + } flex_ts; + } wb; /* writeback */ +}; + +/* Rx Flex Descriptor NIC Profile + * This descriptor corresponds to RxDID 2 which contains + * metadata fields for RSS, flow id and timestamp info + */ +struct ice_32b_rx_flex_desc_nic { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flexi_flags0; + __le16 pkt_len; + __le16 
hdr_len_sph_flex_flags1; + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le32 rss_hash; + + /* Qword 2 */ + __le16 status_error1; + u8 flexi_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le32 flow_id; + union { + struct { + __le16 vlan_id; + __le16 flow_id_ipv6; + } flex; + __le32 ts_high; + } flex_ts; +}; + +/* Receive Flex Descriptor profile IDs: There are a total + * of 64 profiles where profile IDs 0/1 are for legacy; and + * profiles 2-63 are flex profiles that can be programmed + * with a specific metadata (profile 7 reserved for HW) + */ +enum ice_rxdid { + ICE_RXDID_START = 0, + ICE_RXDID_LEGACY_0 = ICE_RXDID_START, + ICE_RXDID_LEGACY_1, + ICE_RXDID_FLX_START, + ICE_RXDID_FLEX_NIC = ICE_RXDID_FLX_START, + ICE_RXDID_FLX_LAST = 63, + ICE_RXDID_LAST = ICE_RXDID_FLX_LAST +}; + +/* Receive Flex Descriptor Rx opcode values */ +#define ICE_RX_OPC_MDID 0x01 + +/* Receive Descriptor MDID values */ +#define ICE_RX_MDID_FLOW_ID_LOWER 5 +#define ICE_RX_MDID_FLOW_ID_HIGH 6 +#define ICE_RX_MDID_HASH_LOW 56 +#define ICE_RX_MDID_HASH_HIGH 57 + +/* Rx Flag64 packet flag bits */ +enum ice_rx_flg64_bits { + ICE_RXFLG_PKT_DSI = 0, + ICE_RXFLG_EVLAN_x8100 = 15, + ICE_RXFLG_EVLAN_x9100, + ICE_RXFLG_VLAN_x8100, + ICE_RXFLG_TNL_MAC = 22, + ICE_RXFLG_TNL_VLAN, + ICE_RXFLG_PKT_FRG, + ICE_RXFLG_FIN = 32, + ICE_RXFLG_SYN, + ICE_RXFLG_RST, + ICE_RXFLG_TNL0 = 38, + ICE_RXFLG_TNL1, + ICE_RXFLG_TNL2, + ICE_RXFLG_UDP_GRE, + ICE_RXFLG_RSVD = 63 +}; + +/* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */ +#define ICE_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */ + +/* for ice_32byte_rx_flex_desc.pkt_length member */ +#define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */ + +enum ice_rx_flex_desc_status_error_0_bits { + /* Note: These are predefined bit offsets */ + ICE_RX_FLEX_DESC_STATUS0_DD_S = 0, + ICE_RX_FLEX_DESC_STATUS0_EOF_S, + ICE_RX_FLEX_DESC_STATUS0_HBO_S, + ICE_RX_FLEX_DESC_STATUS0_L3L4P_S, + ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S, + ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S, + ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S, + ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S, + ICE_RX_FLEX_DESC_STATUS0_LPBK_S, + ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S, + ICE_RX_FLEX_DESC_STATUS0_RXE_S, + ICE_RX_FLEX_DESC_STATUS0_CRCP_S, + ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S, + ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S, + ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S, + ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S, + ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */ +}; + +#define ICE_RXQ_CTX_SIZE_DWORDS 8 +#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) + +/* RLAN Rx queue context data + * + * The sizes of the variables may be larger than needed due to crossing byte + * boundaries. If we do not have the width of the variable set to the correct + * size then we could end up shifting bits off the top of the variable when the + * variable is at the top of a byte and crosses over into the next byte. 
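+ *
+ * For example, a field like the queue length starts mid-byte in the
+ * packed context and spans multiple bytes; keeping it in a wider integer
+ * here lets the context-packing code shift the whole value into place
+ * without losing the top bits.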
+ */ +struct ice_rlan_ctx { + u16 head; + u16 cpuid; /* bigger than needed, see above for reason */ + u64 base; + u16 qlen; +#define ICE_RLAN_CTX_DBUF_S 7 + u16 dbuf; /* bigger than needed, see above for reason */ +#define ICE_RLAN_CTX_HBUF_S 6 + u16 hbuf; /* bigger than needed, see above for reason */ + u8 dtype; + u8 dsize; + u8 crcstrip; + u8 l2tsel; + u8 hsplit_0; + u8 hsplit_1; + u8 showiv; + u32 rxmax; /* bigger than needed, see above for reason */ + u8 tphrdesc_ena; + u8 tphwdesc_ena; + u8 tphdata_ena; + u8 tphhead_ena; + u16 lrxqthresh; /* bigger than needed, see above for reason */ +}; + +struct ice_ctx_ele { + u16 offset; + u16 size_of; + u16 width; + u16 lsb; +}; + +#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \ + .offset = offsetof(struct _struct, _ele), \ + .size_of = FIELD_SIZEOF(struct _struct, _ele), \ + .width = _width, \ + .lsb = _lsb, \ +} + +/* for hsplit_0 field of Rx RLAN context */ +enum ice_rlan_ctx_rx_hsplit_0 { + ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0, + ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 = 1, + ICE_RLAN_RX_HSPLIT_0_SPLIT_IP = 2, + ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, + ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP = 8, +}; + +/* for hsplit_1 field of Rx RLAN context */ +enum ice_rlan_ctx_rx_hsplit_1 { + ICE_RLAN_RX_HSPLIT_1_NO_SPLIT = 0, + ICE_RLAN_RX_HSPLIT_1_SPLIT_L2 = 1, + ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2, +}; + +/* TX Descriptor */ +struct ice_tx_desc { + __le64 buf_addr; /* Address of descriptor's data buf */ + __le64 cmd_type_offset_bsz; +}; + +enum ice_tx_desc_dtype_value { + ICE_TX_DESC_DTYPE_DATA = 0x0, + ICE_TX_DESC_DTYPE_CTX = 0x1, + /* DESC_DONE - HW has completed write-back of descriptor */ + ICE_TX_DESC_DTYPE_DESC_DONE = 0xF, +}; + +#define ICE_TXD_QW1_CMD_S 4 +#define ICE_TXD_QW1_CMD_M (0xFFFUL << ICE_TXD_QW1_CMD_S) + +enum ice_tx_desc_cmd_bits { + ICE_TX_DESC_CMD_EOP = 0x0001, + ICE_TX_DESC_CMD_RS = 0x0002, + ICE_TX_DESC_CMD_IL2TAG1 = 0x0008, + ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ + ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ + ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ + ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ + ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ +}; + +#define ICE_TXD_QW1_OFFSET_S 16 +#define ICE_TXD_QW1_OFFSET_M (0x3FFFFULL << ICE_TXD_QW1_OFFSET_S) + +enum ice_tx_desc_len_fields { + /* Note: These are predefined bit offsets */ + ICE_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */ + ICE_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */ + ICE_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */ +}; + +#define ICE_TXD_QW1_MACLEN_M (0x7FUL << ICE_TX_DESC_LEN_MACLEN_S) +#define ICE_TXD_QW1_IPLEN_M (0x7FUL << ICE_TX_DESC_LEN_IPLEN_S) +#define ICE_TXD_QW1_L4LEN_M (0xFUL << ICE_TX_DESC_LEN_L4_LEN_S) + +/* Tx descriptor field limits in bytes */ +#define ICE_TXD_MACLEN_MAX ((ICE_TXD_QW1_MACLEN_M >> \ + ICE_TX_DESC_LEN_MACLEN_S) * ICE_BYTES_PER_WORD) +#define ICE_TXD_IPLEN_MAX ((ICE_TXD_QW1_IPLEN_M >> \ + ICE_TX_DESC_LEN_IPLEN_S) * ICE_BYTES_PER_DWORD) +#define ICE_TXD_L4LEN_MAX ((ICE_TXD_QW1_L4LEN_M >> \ + ICE_TX_DESC_LEN_L4_LEN_S) * ICE_BYTES_PER_DWORD) + +#define ICE_TXD_QW1_TX_BUF_SZ_S 34 +#define ICE_TXD_QW1_L2TAG1_S 48 + +/* Context descriptors */ +struct ice_tx_ctx_desc { + __le32 tunneling_params; + __le16 l2tag2; + __le16 rsvd; + __le64 qw1; +}; + +#define ICE_TXD_CTX_QW1_CMD_S 4 +#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S) + +#define ICE_TXD_CTX_QW1_TSO_LEN_S 30 +#define ICE_TXD_CTX_QW1_TSO_LEN_M \ + (0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S) + +#define ICE_TXD_CTX_QW1_MSS_S 50 + +enum ice_tx_ctx_desc_cmd_bits { 
+ ICE_TX_CTX_DESC_TSO = 0x01, + ICE_TX_CTX_DESC_TSYN = 0x02, + ICE_TX_CTX_DESC_IL2TAG2 = 0x04, + ICE_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, + ICE_TX_CTX_DESC_SWTCH_NOTAG = 0x00, + ICE_TX_CTX_DESC_SWTCH_UPLINK = 0x10, + ICE_TX_CTX_DESC_SWTCH_LOCAL = 0x20, + ICE_TX_CTX_DESC_SWTCH_VSI = 0x30, + ICE_TX_CTX_DESC_RESERVED = 0x40 +}; + +#define ICE_LAN_TXQ_MAX_QGRPS 127 +#define ICE_LAN_TXQ_MAX_QDIS 1023 + +/* Tx queue context data + * + * The sizes of the variables may be larger than needed due to crossing byte + * boundaries. If we do not have the width of the variable set to the correct + * size then we could end up shifting bits off the top of the variable when the + * variable is at the top of a byte and crosses over into the next byte. + */ +struct ice_tlan_ctx { +#define ICE_TLAN_CTX_BASE_S 7 + u64 base; /* base is defined in 128-byte units */ + u8 port_num; + u16 cgd_num; /* bigger than needed, see above for reason */ + u8 pf_num; + u16 vmvf_num; + u8 vmvf_type; +#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1 +#define ICE_TLAN_CTX_VMVF_TYPE_PF 2 + u16 src_vsi; + u8 tsyn_ena; + u8 alt_vlan; + u16 cpuid; /* bigger than needed, see above for reason */ + u8 wb_mode; + u8 tphrd_desc; + u8 tphrd; + u8 tphwr_desc; + u16 cmpq_id; + u16 qnum_in_func; + u8 itr_notification_mode; + u8 adjust_prof_id; + u32 qlen; /* bigger than needed, see above for reason */ + u8 quanta_prof_idx; + u8 tso_ena; + u16 tso_qnum; + u8 legacy_int; + u8 drop_ena; + u8 cache_prof_idx; + u8 pkt_shaper_prof_idx; + u8 int_q_state; /* width not needed - internal do not write */ +}; + +/* macro to make the table lines short */ +#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + ICE_RX_PTYPE_OUTER_##OUTER_IP, \ + ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + ICE_RX_PTYPE_##OUTER_FRAG, \ + ICE_RX_PTYPE_TUNNEL_##T, \ + ICE_RX_PTYPE_TUNNEL_END_##TE, \ + ICE_RX_PTYPE_##TEF, \ + ICE_RX_PTYPE_INNER_PROT_##I, \ + ICE_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = { + /* L2 Packet types */ + ICE_PTT_UNUSED_ENTRY(0), + ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), +}; + +static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) +{ + return ice_ptype_lkup[ptype]; +} +#endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c new file mode 100644 index 000000000000..210b7910f1cd --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -0,0 +1,5495 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. 
*/ + +/* Intel(R) Ethernet Connection E800 Series Linux Driver */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "ice.h" + +#define DRV_VERSION "ice-0.7.0-k" +#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" +const char ice_drv_ver[] = DRV_VERSION; +static const char ice_driver_string[] = DRV_SUMMARY; +static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; + +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static int debug = -1; +module_param(debug, int, 0644); +#ifndef CONFIG_DYNAMIC_DEBUG +MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)"); +#else +MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); +#endif /* !CONFIG_DYNAMIC_DEBUG */ + +static struct workqueue_struct *ice_wq; +static const struct net_device_ops ice_netdev_ops; + +static void ice_pf_dis_all_vsi(struct ice_pf *pf); +static void ice_rebuild(struct ice_pf *pf); +static int ice_vsi_release(struct ice_vsi *vsi); +static void ice_update_vsi_stats(struct ice_vsi *vsi); +static void ice_update_pf_stats(struct ice_pf *pf); + +/** + * ice_get_free_slot - get the next non-NULL location index in array + * @array: array to search + * @size: size of the array + * @curr: last known occupied index to be used as a search hint + * + * void * is being used to keep the functionality generic. This lets us use this + * function on any array of pointers. + */ +static int ice_get_free_slot(void *array, int size, int curr) +{ + int **tmp_array = (int **)array; + int next; + + if (curr < (size - 1) && !tmp_array[curr + 1]) { + next = curr + 1; + } else { + int i = 0; + + while ((i < size) && (tmp_array[i])) + i++; + if (i == size) + next = ICE_NO_VSI; + else + next = i; + } + return next; +} + +/** + * ice_search_res - Search the tracker for a block of resources + * @res: pointer to the resource + * @needed: size of the block needed + * @id: identifier to track owner + * Returns the base item index of the block, or -ENOMEM for error + */ +static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) +{ + int start = res->search_hint; + int end = start; + + id |= ICE_RES_VALID_BIT; + + do { + /* skip already allocated entries */ + if (res->list[end++] & ICE_RES_VALID_BIT) { + start = end; + if ((start + needed) > res->num_entries) + break; + } + + if (end == (start + needed)) { + int i = start; + + /* there was enough, so assign it to the requestor */ + while (i != end) + res->list[i++] = id; + + if (end == res->num_entries) + end = 0; + + res->search_hint = end; + return start; + } + } while (1); + + return -ENOMEM; +} + +/** + * ice_get_res - get a block of resources + * @pf: board private structure + * @res: pointer to the resource + * @needed: size of the block needed + * @id: identifier to track owner + * + * Returns the base item index of the block, or -ENOMEM for error + * The search_hint trick and lack of advanced fit-finding only works + * because we're highly likely to have all the same sized requests. + * Linear search time and any fragmentation should be minimal. 
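+ *
+ * Example: with num_entries = 16 and two earlier 4-entry allocations,
+ * search_hint sits at index 8, so the next 4-entry request is served
+ * from there without rescanning; only if that search fails is the hint
+ * reset to 0 and the search retried from the start.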
+ */ +static int +ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) +{ + int ret; + + if (!res || !pf) + return -EINVAL; + + if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { + dev_err(&pf->pdev->dev, + "param err: needed=%d, num_entries = %d id=0x%04x\n", + needed, res->num_entries, id); + return -EINVAL; + } + + /* search based on search_hint */ + ret = ice_search_res(res, needed, id); + + if (ret < 0) { + /* previous search failed. Reset search hint and try again */ + res->search_hint = 0; + ret = ice_search_res(res, needed, id); + } + + return ret; +} + +/** + * ice_free_res - free a block of resources + * @res: pointer to the resource + * @index: starting index previously returned by ice_get_res + * @id: identifier to track owner + * Returns number of resources freed + */ +static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) +{ + int count = 0; + int i; + + if (!res || index >= res->num_entries) + return -EINVAL; + + id |= ICE_RES_VALID_BIT; + for (i = index; i < res->num_entries && res->list[i] == id; i++) { + res->list[i] = 0; + count++; + } + + return count; +} + +/** + * ice_add_mac_to_list - Add a mac address filter entry to the list + * @vsi: the VSI to be forwarded to + * @add_list: pointer to the list which contains MAC filter entries + * @macaddr: the MAC address to be added. + * + * Adds mac address filter entry to the temp list + * + * Returns 0 on success or ENOMEM on failure. + */ +static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, + const u8 *macaddr) +{ + struct ice_fltr_list_entry *tmp; + struct ice_pf *pf = vsi->back; + + tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC); + if (!tmp) + return -ENOMEM; + + tmp->fltr_info.flag = ICE_FLTR_TX; + tmp->fltr_info.src = vsi->vsi_num; + tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC; + tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num; + ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr); + + INIT_LIST_HEAD(&tmp->list_entry); + list_add(&tmp->list_entry, add_list); + + return 0; +} + +/** + * ice_add_mac_to_sync_list - creates list of mac addresses to be synced + * @netdev: the net device on which the sync is happening + * @addr: mac address to sync + * + * This is a callback function which is called by the in kernel device sync + * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only + * populates the tmp_sync_list, which is later used by ice_add_mac to add the + * mac filters from the hardware. + */ +static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + + if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr)) + return -EINVAL; + + return 0; +} + +/** + * ice_add_mac_to_unsync_list - creates list of mac addresses to be unsynced + * @netdev: the net device on which the unsync is happening + * @addr: mac address to unsync + * + * This is a callback function which is called by the in kernel device unsync + * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only + * populates the tmp_unsync_list, which is later used by ice_remove_mac to + * delete the mac filters from the hardware. 
+ */ +static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + + if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr)) + return -EINVAL; + + return 0; +} + +/** + * ice_free_fltr_list - free filter lists helper + * @dev: pointer to the device struct + * @h: pointer to the list head to be freed + * + * Helper function to free filter lists previously created using + * ice_add_mac_to_list + */ +static void ice_free_fltr_list(struct device *dev, struct list_head *h) +{ + struct ice_fltr_list_entry *e, *tmp; + + list_for_each_entry_safe(e, tmp, h, list_entry) { + list_del(&e->list_entry); + devm_kfree(dev, e); + } +} + +/** + * ice_vsi_fltr_changed - check if filter state changed + * @vsi: VSI to be checked + * + * returns true if filter state has changed, false otherwise. + */ +static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) +{ + return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) || + test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) || + test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); +} + +/** + * ice_vsi_sync_fltr - Update the VSI filter list to the HW + * @vsi: ptr to the VSI + * + * Push any outstanding VSI filter changes through the AdminQ. + */ +static int ice_vsi_sync_fltr(struct ice_vsi *vsi) +{ + struct device *dev = &vsi->back->pdev->dev; + struct net_device *netdev = vsi->netdev; + bool promisc_forced_on = false; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + enum ice_status status = 0; + u32 changed_flags = 0; + int err = 0; + + if (!vsi->netdev) + return -EINVAL; + + while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) + usleep_range(1000, 2000); + + changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; + vsi->current_netdev_flags = vsi->netdev->flags; + + INIT_LIST_HEAD(&vsi->tmp_sync_list); + INIT_LIST_HEAD(&vsi->tmp_unsync_list); + + if (ice_vsi_fltr_changed(vsi)) { + clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); + clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); + clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); + + /* grab the netdev's addr_list_lock */ + netif_addr_lock_bh(netdev); + __dev_uc_sync(netdev, ice_add_mac_to_sync_list, + ice_add_mac_to_unsync_list); + __dev_mc_sync(netdev, ice_add_mac_to_sync_list, + ice_add_mac_to_unsync_list); + /* our temp lists are populated. release lock */ + netif_addr_unlock_bh(netdev); + } + + /* Remove mac addresses in the unsync list */ + status = ice_remove_mac(hw, &vsi->tmp_unsync_list); + ice_free_fltr_list(dev, &vsi->tmp_unsync_list); + if (status) { + netdev_err(netdev, "Failed to delete MAC filters\n"); + /* if we failed because of alloc failures, just bail */ + if (status == ICE_ERR_NO_MEMORY) { + err = -ENOMEM; + goto out; + } + } + + /* Add mac addresses in the sync list */ + status = ice_add_mac(hw, &vsi->tmp_sync_list); + ice_free_fltr_list(dev, &vsi->tmp_sync_list); + if (status) { + netdev_err(netdev, "Failed to add MAC filters\n"); + /* If there is no more space for new umac filters, vsi + * should go into promiscuous mode. There should be some + * space reserved for promiscuous filters. 
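+		 * (the overflow is detected via ICE_AQ_RC_ENOSPC from the
+		 * AdminQ below rather than a driver-side filter count)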
+ */ + if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC && + !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC, + vsi->state)) { + promisc_forced_on = true; + netdev_warn(netdev, + "Reached MAC filter limit, forcing promisc mode on VSI %d\n", + vsi->vsi_num); + } else { + err = -EIO; + goto out; + } + } + /* check for changes in promiscuous modes */ + if (changed_flags & IFF_ALLMULTI) + netdev_warn(netdev, "Unsupported configuration\n"); + + if (((changed_flags & IFF_PROMISC) || promisc_forced_on) || + test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) { + clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); + if (vsi->current_netdev_flags & IFF_PROMISC) { + /* Apply TX filter rule to get traffic from VMs */ + status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true, + ICE_FLTR_TX); + if (status) { + netdev_err(netdev, "Error setting default VSI %i tx rule\n", + vsi->vsi_num); + vsi->current_netdev_flags &= ~IFF_PROMISC; + err = -EIO; + goto out_promisc; + } + /* Apply RX filter rule to get traffic from wire */ + status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true, + ICE_FLTR_RX); + if (status) { + netdev_err(netdev, "Error setting default VSI %i rx rule\n", + vsi->vsi_num); + vsi->current_netdev_flags &= ~IFF_PROMISC; + err = -EIO; + goto out_promisc; + } + } else { + /* Clear TX filter rule to stop traffic from VMs */ + status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false, + ICE_FLTR_TX); + if (status) { + netdev_err(netdev, "Error clearing default VSI %i tx rule\n", + vsi->vsi_num); + vsi->current_netdev_flags |= IFF_PROMISC; + err = -EIO; + goto out_promisc; + } + /* Clear filter RX to remove traffic from wire */ + status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false, + ICE_FLTR_RX); + if (status) { + netdev_err(netdev, "Error clearing default VSI %i rx rule\n", + vsi->vsi_num); + vsi->current_netdev_flags |= IFF_PROMISC; + err = -EIO; + goto out_promisc; + } + } + } + goto exit; + +out_promisc: + set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); + goto exit; +out: + /* if something went wrong then set the changed flag so we try again */ + set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); + set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); +exit: + clear_bit(__ICE_CFG_BUSY, vsi->state); + return err; +} + +/** + * ice_sync_fltr_subtask - Sync the VSI filter list with HW + * @pf: board private structure + */ +static void ice_sync_fltr_subtask(struct ice_pf *pf) +{ + int v; + + if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))) + return; + + clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); + + for (v = 0; v < pf->num_alloc_vsi; v++) + if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && + ice_vsi_sync_fltr(pf->vsi[v])) { + /* come back and try again later */ + set_bit(ICE_FLAG_FLTR_SYNC, pf->flags); + break; + } +} + +/** + * ice_is_reset_recovery_pending - schedule a reset + * @state: pf state field + */ +static bool ice_is_reset_recovery_pending(unsigned long int *state) +{ + return test_bit(__ICE_RESET_RECOVERY_PENDING, state); +} + +/** + * ice_prepare_for_reset - prep for the core to reset + * @pf: board private structure + * + * Inform or close all dependent features in prep for reset. 
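+ * Teardown runs in dependency order: switch filters are removed first,
+ * then the VSIs and their queues are brought down, and the control
+ * queues are shut down last.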
+ */
+static void
+ice_prepare_for_reset(struct ice_pf *pf)
+{
+	struct ice_hw *hw = &pf->hw;
+	u32 v;
+
+	ice_for_each_vsi(pf, v)
+		if (pf->vsi[v])
+			ice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num);
+
+	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+
+	/* disable the VSIs and their queues that are not already DOWN */
+	/* pf_dis_all_vsi modifies netdev structures - rtnl_lock needed */
+	ice_pf_dis_all_vsi(pf);
+
+	ice_for_each_vsi(pf, v)
+		if (pf->vsi[v])
+			pf->vsi[v]->vsi_num = 0;
+
+	ice_shutdown_all_ctrlq(hw);
+}
+
+/**
+ * ice_do_reset - Initiate one of many types of resets
+ * @pf: board private structure
+ * @reset_type: reset type requested before this function was called
+ */
+static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+{
+	struct device *dev = &pf->pdev->dev;
+	struct ice_hw *hw = &pf->hw;
+
+	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
+	WARN_ON(in_interrupt());
+
+	/* PFR is a bit of a special case because it doesn't result in an OICR
+	 * interrupt. So for PFR, we prepare for reset, issue the reset and
+	 * rebuild sequentially.
+	 */
+	if (reset_type == ICE_RESET_PFR) {
+		set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+		ice_prepare_for_reset(pf);
+	}
+
+	/* trigger the reset */
+	if (ice_reset(hw, reset_type)) {
+		dev_err(dev, "reset %d failed\n", reset_type);
+		set_bit(__ICE_RESET_FAILED, pf->state);
+		clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+		return;
+	}
+
+	if (reset_type == ICE_RESET_PFR) {
+		pf->pfr_count++;
+		ice_rebuild(pf);
+		clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+	}
+}
+
+/**
+ * ice_reset_subtask - Set up for resetting the device and driver
+ * @pf: board private structure
+ */
+static void ice_reset_subtask(struct ice_pf *pf)
+{
+	enum ice_reset_req reset_type;
+
+	rtnl_lock();
+
+	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
+	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what
+	 * type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in
+	 * pf->state. So if reset/recovery is pending (as indicated by this bit)
+	 * we do a rebuild and return.
+	 */
+	if (ice_is_reset_recovery_pending(pf->state)) {
+		clear_bit(__ICE_GLOBR_RECV, pf->state);
+		clear_bit(__ICE_CORER_RECV, pf->state);
+		ice_prepare_for_reset(pf);
+
+		/* make sure we are ready to rebuild */
+		if (ice_check_reset(&pf->hw))
+			set_bit(__ICE_RESET_FAILED, pf->state);
+		else
+			ice_rebuild(pf);
+		clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+		goto unlock;
+	}
+
+	/* No pending resets to finish processing. 
Check for new resets */ + if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state)) + reset_type = ICE_RESET_GLOBR; + else if (test_and_clear_bit(__ICE_CORER_REQ, pf->state)) + reset_type = ICE_RESET_CORER; + else if (test_and_clear_bit(__ICE_PFR_REQ, pf->state)) + reset_type = ICE_RESET_PFR; + else + goto unlock; + + /* reset if not already down or resetting */ + if (!test_bit(__ICE_DOWN, pf->state) && + !test_bit(__ICE_CFG_BUSY, pf->state)) { + ice_do_reset(pf, reset_type); + } + +unlock: + rtnl_unlock(); +} + +/** + * ice_watchdog_subtask - periodic tasks not using event driven scheduling + * @pf: board private structure + */ +static void ice_watchdog_subtask(struct ice_pf *pf) +{ + int i; + + /* if interface is down do nothing */ + if (test_bit(__ICE_DOWN, pf->state) || + test_bit(__ICE_CFG_BUSY, pf->state)) + return; + + /* make sure we don't do these things too often */ + if (time_before(jiffies, + pf->serv_tmr_prev + pf->serv_tmr_period)) + return; + + pf->serv_tmr_prev = jiffies; + + /* Update the stats for active netdevs so the network stack + * can look at updated numbers whenever it cares to + */ + ice_update_pf_stats(pf); + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && pf->vsi[i]->netdev) + ice_update_vsi_stats(pf->vsi[i]); +} + +/** + * ice_print_link_msg - print link up or down message + * @vsi: the VSI whose link status is being queried + * @isup: boolean for if the link is now up or down + */ +void ice_print_link_msg(struct ice_vsi *vsi, bool isup) +{ + const char *speed; + const char *fc; + + if (vsi->current_isup == isup) + return; + + vsi->current_isup = isup; + + if (!isup) { + netdev_info(vsi->netdev, "NIC Link is Down\n"); + return; + } + + switch (vsi->port_info->phy.link_info.link_speed) { + case ICE_AQ_LINK_SPEED_40GB: + speed = "40 G"; + break; + case ICE_AQ_LINK_SPEED_25GB: + speed = "25 G"; + break; + case ICE_AQ_LINK_SPEED_20GB: + speed = "20 G"; + break; + case ICE_AQ_LINK_SPEED_10GB: + speed = "10 G"; + break; + case ICE_AQ_LINK_SPEED_5GB: + speed = "5 G"; + break; + case ICE_AQ_LINK_SPEED_2500MB: + speed = "2.5 G"; + break; + case ICE_AQ_LINK_SPEED_1000MB: + speed = "1 G"; + break; + case ICE_AQ_LINK_SPEED_100MB: + speed = "100 M"; + break; + default: + speed = "Unknown"; + break; + } + + switch (vsi->port_info->fc.current_mode) { + case ICE_FC_FULL: + fc = "RX/TX"; + break; + case ICE_FC_TX_PAUSE: + fc = "TX"; + break; + case ICE_FC_RX_PAUSE: + fc = "RX"; + break; + default: + fc = "Unknown"; + break; + } + + netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n", + speed, fc); +} + +/** + * ice_init_link_events - enable/initialize link events + * @pi: pointer to the port_info instance + * + * Returns -EIO on failure, 0 on success + */ +static int ice_init_link_events(struct ice_port_info *pi) +{ + u16 mask; + + mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA | + ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL)); + + if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) { + dev_dbg(ice_hw_to_dev(pi->hw), + "Failed to set link event mask for port %d\n", + pi->lport); + return -EIO; + } + + if (ice_aq_get_link_info(pi, true, NULL, NULL)) { + dev_dbg(ice_hw_to_dev(pi->hw), + "Failed to enable link events for port %d\n", + pi->lport); + return -EIO; + } + + return 0; +} + +/** + * ice_vsi_link_event - update the vsi's netdev + * @vsi: the vsi on which the link event occurred + * @link_up: whether or not the vsi needs to be set up or down + */ +static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) +{ + if (!vsi || 
test_bit(__ICE_DOWN, vsi->state))
+		return;
+
+	if (vsi->type == ICE_VSI_PF) {
+		if (!vsi->netdev) {
+			dev_dbg(&vsi->back->pdev->dev,
+				"vsi->netdev is not initialized!\n");
+			return;
+		}
+		if (link_up) {
+			netif_carrier_on(vsi->netdev);
+			netif_tx_wake_all_queues(vsi->netdev);
+		} else {
+			netif_carrier_off(vsi->netdev);
+			netif_tx_stop_all_queues(vsi->netdev);
+		}
+	}
+}
+
+/**
+ * ice_link_event - process the link event
+ * @pf: pf that the link event is associated with
+ * @pi: port_info for the port that the link event is associated with
+ *
+ * Returns -EIO if ice_get_link_status() fails
+ * Returns 0 on success
+ */
+static int
+ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
+{
+	u8 new_link_speed, old_link_speed;
+	struct ice_phy_info *phy_info;
+	bool new_link_same_as_old;
+	bool new_link, old_link;
+	u8 lport;
+	u16 v;
+
+	phy_info = &pi->phy;
+	phy_info->link_info_old = phy_info->link_info;
+	/* Force ice_get_link_status() to update link info */
+	phy_info->get_link_info = true;
+
+	old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
+	old_link_speed = phy_info->link_info_old.link_speed;
+
+	lport = pi->lport;
+	if (ice_get_link_status(pi, &new_link)) {
+		dev_dbg(&pf->pdev->dev,
+			"Could not get link status for port %d\n", lport);
+		return -EIO;
+	}
+
+	new_link_speed = phy_info->link_info.link_speed;
+
+	new_link_same_as_old = (new_link == old_link &&
+				new_link_speed == old_link_speed);
+
+	ice_for_each_vsi(pf, v) {
+		struct ice_vsi *vsi = pf->vsi[v];
+
+		if (!vsi || !vsi->port_info)
+			continue;
+
+		if (new_link_same_as_old &&
+		    (test_bit(__ICE_DOWN, vsi->state) ||
+		    new_link == netif_carrier_ok(vsi->netdev)))
+			continue;
+
+		if (vsi->port_info->lport == lport) {
+			ice_print_link_msg(vsi, new_link);
+			ice_vsi_link_event(vsi, new_link);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ice_handle_link_event - handle link event via ARQ
+ * @pf: pf that the link event is associated with
+ *
+ * Return -EINVAL if port_info is null
+ * Return status on success
+ */
+static int ice_handle_link_event(struct ice_pf *pf)
+{
+	struct ice_port_info *port_info;
+	int status;
+
+	port_info = pf->hw.port_info;
+	if (!port_info)
+		return -EINVAL;
+
+	status = ice_link_event(pf, port_info);
+	if (status)
+		dev_dbg(&pf->pdev->dev,
+			"Could not process link event, error %d\n", status);
+
+	return status;
+}
+
+/**
+ * __ice_clean_ctrlq - helper function to clean controlq rings
+ * @pf: ptr to struct ice_pf
+ * @q_type: specific Control queue type
+ */
+static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
+{
+	struct ice_rq_event_info event;
+	struct ice_hw *hw = &pf->hw;
+	struct ice_ctl_q_info *cq;
+	u16 pending, i = 0;
+	const char *qtype;
+	u32 oldval, val;
+
+	/* Do not clean control queue if/when PF reset fails */
+	if (test_bit(__ICE_RESET_FAILED, pf->state))
+		return 0;
+
+	switch (q_type) {
+	case ICE_CTL_Q_ADMIN:
+		cq = &hw->adminq;
+		qtype = "Admin";
+		break;
+	default:
+		dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
+			 q_type);
+		return 0;
+	}
+
+	/* check for error indications - PF_xx_AxQLEN register layout for
+	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN. 
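+	 * Any of the VFE/OVFL/CRIT bits being set means the queue hit an
+	 * error condition; the code below logs which one and clears it by
+	 * writing the masked value back to the LEN register.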
+ */ + val = rd32(hw, cq->rq.len); + if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | + PF_FW_ARQLEN_ARQCRIT_M)) { + oldval = val; + if (val & PF_FW_ARQLEN_ARQVFE_M) + dev_dbg(&pf->pdev->dev, + "%s Receive Queue VF Error detected\n", qtype); + if (val & PF_FW_ARQLEN_ARQOVFL_M) { + dev_dbg(&pf->pdev->dev, + "%s Receive Queue Overflow Error detected\n", + qtype); + } + if (val & PF_FW_ARQLEN_ARQCRIT_M) + dev_dbg(&pf->pdev->dev, + "%s Receive Queue Critical Error detected\n", + qtype); + val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | + PF_FW_ARQLEN_ARQCRIT_M); + if (oldval != val) + wr32(hw, cq->rq.len, val); + } + + val = rd32(hw, cq->sq.len); + if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | + PF_FW_ATQLEN_ATQCRIT_M)) { + oldval = val; + if (val & PF_FW_ATQLEN_ATQVFE_M) + dev_dbg(&pf->pdev->dev, + "%s Send Queue VF Error detected\n", qtype); + if (val & PF_FW_ATQLEN_ATQOVFL_M) { + dev_dbg(&pf->pdev->dev, + "%s Send Queue Overflow Error detected\n", + qtype); + } + if (val & PF_FW_ATQLEN_ATQCRIT_M) + dev_dbg(&pf->pdev->dev, + "%s Send Queue Critical Error detected\n", + qtype); + val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | + PF_FW_ATQLEN_ATQCRIT_M); + if (oldval != val) + wr32(hw, cq->sq.len, val); + } + + event.buf_len = cq->rq_buf_size; + event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len, + GFP_KERNEL); + if (!event.msg_buf) + return 0; + + do { + enum ice_status ret; + u16 opcode; + + ret = ice_clean_rq_elem(hw, cq, &event, &pending); + if (ret == ICE_ERR_AQ_NO_WORK) + break; + if (ret) { + dev_err(&pf->pdev->dev, + "%s Receive Queue event error %d\n", qtype, + ret); + break; + } + + opcode = le16_to_cpu(event.desc.opcode); + + switch (opcode) { + case ice_aqc_opc_get_link_status: + if (ice_handle_link_event(pf)) + dev_err(&pf->pdev->dev, + "Could not handle link event"); + break; + default: + dev_dbg(&pf->pdev->dev, + "%s Receive Queue unknown event 0x%04x ignored\n", + qtype, opcode); + break; + } + } while (pending && (i++ < ICE_DFLT_IRQ_WORK)); + + devm_kfree(&pf->pdev->dev, event.msg_buf); + + return pending && (i == ICE_DFLT_IRQ_WORK); +} + +/** + * ice_clean_adminq_subtask - clean the AdminQ rings + * @pf: board private structure + */ +static void ice_clean_adminq_subtask(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + u32 val; + + if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) + return; + + if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) + return; + + clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); + + /* re-enable Admin queue interrupt causes */ + val = rd32(hw, PFINT_FW_CTL); + wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M)); + + ice_flush(hw); +} + +/** + * ice_service_task_schedule - schedule the service task to wake up + * @pf: board private structure + * + * If not already scheduled, this puts the task into the work queue. 
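+ * The __ICE_SERVICE_SCHED bit makes scheduling idempotent:
+ * test_and_set_bit() lets only one caller actually queue the work,
+ * and the bit stays set until ice_service_task_complete() clears it.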
+ */ +static void ice_service_task_schedule(struct ice_pf *pf) +{ + if (!test_bit(__ICE_DOWN, pf->state) && + !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state)) + queue_work(ice_wq, &pf->serv_task); +} + +/** + * ice_service_task_complete - finish up the service task + * @pf: board private structure + */ +static void ice_service_task_complete(struct ice_pf *pf) +{ + WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state)); + + /* force memory (pf->state) to sync before next service task */ + smp_mb__before_atomic(); + clear_bit(__ICE_SERVICE_SCHED, pf->state); +} + +/** + * ice_service_timer - timer callback to schedule service task + * @t: pointer to timer_list + */ +static void ice_service_timer(struct timer_list *t) +{ + struct ice_pf *pf = from_timer(pf, t, serv_tmr); + + mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); + ice_service_task_schedule(pf); +} + +/** + * ice_service_task - manage and run subtasks + * @work: pointer to work_struct contained by the PF struct + */ +static void ice_service_task(struct work_struct *work) +{ + struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); + unsigned long start_time = jiffies; + + /* subtasks */ + + /* process reset requests first */ + ice_reset_subtask(pf); + + /* bail if a reset/recovery cycle is pending */ + if (ice_is_reset_recovery_pending(pf->state) || + test_bit(__ICE_SUSPENDED, pf->state)) { + ice_service_task_complete(pf); + return; + } + + ice_sync_fltr_subtask(pf); + ice_watchdog_subtask(pf); + ice_clean_adminq_subtask(pf); + + /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ + ice_service_task_complete(pf); + + /* If the tasks have taken longer than one service timer period + * or there is more work to be done, reset the service timer to + * schedule the service task now. + */ + if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || + test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) + mod_timer(&pf->serv_tmr, jiffies); +} + +/** + * ice_set_ctrlq_len - helper function to set controlq length + * @hw: pointer to the hw instance + */ +static void ice_set_ctrlq_len(struct ice_hw *hw) +{ + hw->adminq.num_rq_entries = ICE_AQ_LEN; + hw->adminq.num_sq_entries = ICE_AQ_LEN; + hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; + hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; +} + +/** + * ice_irq_affinity_notify - Callback for affinity changes + * @notify: context as to what irq was changed + * @mask: the new affinity mask + * + * This is a callback function used by the irq_set_affinity_notifier function + * so that we may register to receive changes to the irq affinity masks. + */ +static void ice_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct ice_q_vector *q_vector = + container_of(notify, struct ice_q_vector, affinity_notify); + + cpumask_copy(&q_vector->affinity_mask, mask); +} + +/** + * ice_irq_affinity_release - Callback for affinity notifier release + * @ref: internal core kernel usage + * + * This is a callback function used by the irq_set_affinity_notifier function + * to inform the current notification subscriber that they will no longer + * receive notifications. 
+ */ +static void ice_irq_affinity_release(struct kref __always_unused *ref) {} + +/** + * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI + * @vsi: the VSI being un-configured + */ +static void ice_vsi_dis_irq(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + int base = vsi->base_vector; + u32 val; + int i; + + /* disable interrupt causation from each queue */ + if (vsi->tx_rings) { + ice_for_each_txq(vsi, i) { + if (vsi->tx_rings[i]) { + u16 reg; + + reg = vsi->tx_rings[i]->reg_idx; + val = rd32(hw, QINT_TQCTL(reg)); + val &= ~QINT_TQCTL_CAUSE_ENA_M; + wr32(hw, QINT_TQCTL(reg), val); + } + } + } + + if (vsi->rx_rings) { + ice_for_each_rxq(vsi, i) { + if (vsi->rx_rings[i]) { + u16 reg; + + reg = vsi->rx_rings[i]->reg_idx; + val = rd32(hw, QINT_RQCTL(reg)); + val &= ~QINT_RQCTL_CAUSE_ENA_M; + wr32(hw, QINT_RQCTL(reg), val); + } + } + } + + /* disable each interrupt */ + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { + for (i = vsi->base_vector; + i < (vsi->num_q_vectors + vsi->base_vector); i++) + wr32(hw, GLINT_DYN_CTL(i), 0); + + ice_flush(hw); + for (i = 0; i < vsi->num_q_vectors; i++) + synchronize_irq(pf->msix_entries[i + base].vector); + } +} + +/** + * ice_vsi_ena_irq - Enable IRQ for the given VSI + * @vsi: the VSI being configured + */ +static int ice_vsi_ena_irq(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { + int i; + + for (i = 0; i < vsi->num_q_vectors; i++) + ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); + } + + ice_flush(hw); + return 0; +} + +/** + * ice_vsi_delete - delete a VSI from the switch + * @vsi: pointer to VSI being removed + */ +static void ice_vsi_delete(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_vsi_ctx ctxt; + enum ice_status status; + + ctxt.vsi_num = vsi->vsi_num; + + memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props)); + + status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); + if (status) + dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", + vsi->vsi_num); +} + +/** + * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI + * @vsi: the VSI being configured + * @basename: name for the vector + */ +static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) +{ + int q_vectors = vsi->num_q_vectors; + struct ice_pf *pf = vsi->back; + int base = vsi->base_vector; + int rx_int_idx = 0; + int tx_int_idx = 0; + int vector, err; + int irq_num; + + for (vector = 0; vector < q_vectors; vector++) { + struct ice_q_vector *q_vector = vsi->q_vectors[vector]; + + irq_num = pf->msix_entries[base + vector].vector; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", basename, "TxRx", rx_int_idx++); + tx_int_idx++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", basename, "rx", rx_int_idx++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", basename, "tx", tx_int_idx++); + } else { + /* skip this unused q_vector */ + continue; + } + err = devm_request_irq(&pf->pdev->dev, + pf->msix_entries[base + vector].vector, + vsi->irq_handler, 0, q_vector->name, + q_vector); + if (err) { + netdev_err(vsi->netdev, + "MSIX request_irq failed, error: %d\n", err); + goto free_q_irqs; + } + + /* register for affinity change notifications */ + q_vector->affinity_notify.notify = ice_irq_affinity_notify; + 
+		q_vector->affinity_notify.release = ice_irq_affinity_release;
+		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+
+		/* assign the mask for this irq */
+		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+	}
+
+	vsi->irqs_ready = true;
+	return 0;
+
+free_q_irqs:
+	while (vector) {
+		vector--;
+		irq_num = pf->msix_entries[base + vector].vector;
+		irq_set_affinity_notifier(irq_num, NULL);
+		irq_set_affinity_hint(irq_num, NULL);
+		devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
+	}
+	return err;
+}
+
+/**
+ * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
+{
+	struct ice_hw_common_caps *cap;
+	struct ice_pf *pf = vsi->back;
+
+	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+		vsi->rss_size = 1;
+		return;
+	}
+
+	cap = &pf->hw.func_caps.common_cap;
+	switch (vsi->type) {
+	case ICE_VSI_PF:
+		/* PF VSI will inherit RSS instance of PF */
+		vsi->rss_table_size = cap->rss_table_size;
+		vsi->rss_size = min_t(int, num_online_cpus(),
+				      BIT(cap->rss_table_entry_width));
+		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+		break;
+	default:
+		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+		break;
+	}
+}
+
+/**
+ * ice_vsi_setup_q_map - Setup a VSI queue map
+ * @vsi: the VSI being configured
+ * @ctxt: VSI context structure
+ */
+static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+{
+	u16 offset = 0, qmap = 0, numq_tc;
+	u16 pow = 0, max_rss = 0, qcount;
+	u16 qcount_tx = vsi->alloc_txq;
+	u16 qcount_rx = vsi->alloc_rxq;
+	bool ena_tc0 = false;
+	int i;
+
+	/* at least TC0 should be enabled by default */
+	if (vsi->tc_cfg.numtc) {
+		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
+			ena_tc0 = true;
+	} else {
+		ena_tc0 = true;
+	}
+
+	if (ena_tc0) {
+		vsi->tc_cfg.numtc++;
+		vsi->tc_cfg.ena_tc |= 1;
+	}
+
+	numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+
+	/* TC mapping is a function of the number of Rx queues assigned to the
+	 * VSI for each traffic class and the offset of these queues.
+	 * The first 10 bits are the queue offset for TC0, the next 4 bits are
+	 * the number of queues allocated to TC0, and that number must be a
+	 * power-of-2.
+	 *
+	 * If a TC is not enabled, its queue offset is set to 0 and only one
+	 * queue is allocated; this way, traffic for the given TC will be
+	 * sent to the default queue.
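+	 * For example, with a single TC, qcount_rx = 5 and rss_size = 4
+	 * (and assuming the max_rss cap is not the limiter), qcount
+	 * becomes min(5, 4) = 4 and pow = ilog2(4) = 2, so the qmap word
+	 * for TC0 carries offset 0 and a power-of-2 count of 2, i.e.
+	 * four queues.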
+ * + * Setup number and offset of Rx queues for all TCs for the VSI + */ + + /* qcount will change if RSS is enabled */ + if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { + if (vsi->type == ICE_VSI_PF) + max_rss = ICE_MAX_LG_RSS_QS; + else + max_rss = ICE_MAX_SMALL_RSS_QS; + + qcount = min_t(int, numq_tc, max_rss); + qcount = min_t(int, qcount, vsi->rss_size); + } else { + qcount = numq_tc; + } + + /* find higher power-of-2 of qcount */ + pow = ilog2(qcount); + + if (!is_power_of_2(qcount)) + pow++; + + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->tc_cfg.ena_tc & BIT(i))) { + /* TC is not enabled */ + vsi->tc_cfg.tc_info[i].qoffset = 0; + vsi->tc_cfg.tc_info[i].qcount = 1; + ctxt->info.tc_mapping[i] = 0; + continue; + } + + /* TC is enabled */ + vsi->tc_cfg.tc_info[i].qoffset = offset; + vsi->tc_cfg.tc_info[i].qcount = qcount; + + qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & + ICE_AQ_VSI_TC_Q_OFFSET_M) | + ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & + ICE_AQ_VSI_TC_Q_NUM_M); + offset += qcount; + ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); + } + + vsi->num_txq = qcount_tx; + vsi->num_rxq = offset; + + /* Rx queue mapping */ + ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); + /* q_mapping buffer holds the info for the first queue allocated for + * this VSI in the PF space and also the number of queues associated + * with this VSI. + */ + ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); + ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); +} + +/** + * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI + * @ctxt: the VSI context being set + * + * This initializes a default VSI context for all sections except the Queues. + */ +static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) +{ + u32 table = 0; + + memset(&ctxt->info, 0, sizeof(ctxt->info)); + /* VSI's should be allocated from shared pool */ + ctxt->alloc_from_pool = true; + /* Src pruning enabled by default */ + ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; + /* Traffic from VSI can be sent to LAN */ + ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; + /* Allow all packets untagged/tagged */ + ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL & + ICE_AQ_VSI_PVLAN_MODE_M) >> + ICE_AQ_VSI_PVLAN_MODE_S); + /* Show VLAN/UP from packets in Rx descriptors */ + ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH & + ICE_AQ_VSI_PVLAN_EMOD_M) >> + ICE_AQ_VSI_PVLAN_EMOD_S); + /* Have 1:1 UP mapping for both ingress/egress tables */ + table |= ICE_UP_TABLE_TRANSLATE(0, 0); + table |= ICE_UP_TABLE_TRANSLATE(1, 1); + table |= ICE_UP_TABLE_TRANSLATE(2, 2); + table |= ICE_UP_TABLE_TRANSLATE(3, 3); + table |= ICE_UP_TABLE_TRANSLATE(4, 4); + table |= ICE_UP_TABLE_TRANSLATE(5, 5); + table |= ICE_UP_TABLE_TRANSLATE(6, 6); + table |= ICE_UP_TABLE_TRANSLATE(7, 7); + ctxt->info.ingress_table = cpu_to_le32(table); + ctxt->info.egress_table = cpu_to_le32(table); + /* Have 1:1 UP mapping for outer to inner UP table */ + ctxt->info.outer_up_table = cpu_to_le32(table); + /* No Outer tag support outer_tag_flags remains to zero */ +} + +/** + * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI + * @ctxt: the VSI context being set + * @vsi: the VSI being configured + */ +static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) +{ + u8 lut_type, hash_type; + + switch (vsi->type) { + case ICE_VSI_PF: + /* PF VSI will inherit RSS instance of PF */ + lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; + hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + break; + default: + 
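+		/* only the PF VSI type carries an RSS context at this
+		 * point in the driver, so any other type is unexpected
+		 */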
dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", + vsi->type); + return; + } + + ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & + ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | + ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & + ICE_AQ_VSI_Q_OPT_RSS_HASH_M); +} + +/** + * ice_vsi_add - Create a new VSI or fetch preallocated VSI + * @vsi: the VSI being configured + * + * This initializes a VSI context depending on the VSI type to be added and + * passes it down to the add_vsi aq command to create a new VSI. + */ +static int ice_vsi_add(struct ice_vsi *vsi) +{ + struct ice_vsi_ctx ctxt = { 0 }; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + int ret = 0; + + switch (vsi->type) { + case ICE_VSI_PF: + ctxt.flags = ICE_AQ_VSI_TYPE_PF; + break; + default: + return -ENODEV; + } + + ice_set_dflt_vsi_ctx(&ctxt); + /* if the switch is in VEB mode, allow VSI loopback */ + if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) + ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + + /* Set LUT type and HASH type if RSS is enabled */ + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + ice_set_rss_vsi_ctx(&ctxt, vsi); + + ctxt.info.sw_id = vsi->port_info->sw_id; + ice_vsi_setup_q_map(vsi, &ctxt); + + ret = ice_aq_add_vsi(hw, &ctxt, NULL); + if (ret) { + dev_err(&vsi->back->pdev->dev, + "Add VSI AQ call failed, err %d\n", ret); + return -EIO; + } + vsi->info = ctxt.info; + vsi->vsi_num = ctxt.vsi_num; + + return ret; +} + +/** + * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW + * @vsi: the VSI being cleaned up + */ +static void ice_vsi_release_msix(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + u16 vector = vsi->base_vector; + struct ice_hw *hw = &pf->hw; + u32 txq = 0; + u32 rxq = 0; + int i, q; + + for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + struct ice_q_vector *q_vector = vsi->q_vectors[i]; + + wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0); + wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0); + for (q = 0; q < q_vector->num_ring_tx; q++) { + wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); + txq++; + } + + for (q = 0; q < q_vector->num_ring_rx; q++) { + wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); + rxq++; + } + } + + ice_flush(hw); +} + +/** + * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI + * @vsi: the VSI having rings deallocated + */ +static void ice_vsi_clear_rings(struct ice_vsi *vsi) +{ + int i; + + if (vsi->tx_rings) { + for (i = 0; i < vsi->alloc_txq; i++) { + if (vsi->tx_rings[i]) { + kfree_rcu(vsi->tx_rings[i], rcu); + vsi->tx_rings[i] = NULL; + } + } + } + if (vsi->rx_rings) { + for (i = 0; i < vsi->alloc_rxq; i++) { + if (vsi->rx_rings[i]) { + kfree_rcu(vsi->rx_rings[i], rcu); + vsi->rx_rings[i] = NULL; + } + } + } +} + +/** + * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI + * @vsi: VSI which is having rings allocated + */ +static int ice_vsi_alloc_rings(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int i; + + /* Allocate tx_rings */ + for (i = 0; i < vsi->alloc_txq; i++) { + struct ice_ring *ring; + + /* allocate with kzalloc(), free with kfree_rcu() */ + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + + if (!ring) + goto err_out; + + ring->q_index = i; + ring->reg_idx = vsi->txq_map[i]; + ring->ring_active = false; + ring->vsi = vsi; + ring->netdev = vsi->netdev; + ring->dev = &pf->pdev->dev; + ring->count = vsi->num_desc; + + vsi->tx_rings[i] = ring; + } + + /* Allocate rx_rings */ + for (i = 0; i < vsi->alloc_rxq; i++) { + struct ice_ring *ring; + + /* allocate with kzalloc(), free with kfree_rcu() */ + 
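+		/* as for Tx above: kfree_rcu() defers the actual free
+		 * until an RCU grace period has elapsed, covering any
+		 * reader still traversing the ring pointers
+		 */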
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring)
+			goto err_out;
+
+		ring->q_index = i;
+		ring->reg_idx = vsi->rxq_map[i];
+		ring->ring_active = false;
+		ring->vsi = vsi;
+		ring->netdev = vsi->netdev;
+		ring->dev = &pf->pdev->dev;
+		ring->count = vsi->num_desc;
+		vsi->rx_rings[i] = ring;
+	}
+
+	return 0;
+
+err_out:
+	ice_vsi_clear_rings(vsi);
+	return -ENOMEM;
+}
+
+/**
+ * ice_vsi_free_irq - Free the irq association with the OS
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_free_irq(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	int base = vsi->base_vector;
+
+	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+		int i;
+
+		if (!vsi->q_vectors || !vsi->irqs_ready)
+			return;
+
+		vsi->irqs_ready = false;
+		for (i = 0; i < vsi->num_q_vectors; i++) {
+			u16 vector = i + base;
+			int irq_num;
+
+			irq_num = pf->msix_entries[vector].vector;
+
+			/* free only the irqs that were actually requested */
+			if (!vsi->q_vectors[i] ||
+			    !(vsi->q_vectors[i]->num_ring_tx ||
+			      vsi->q_vectors[i]->num_ring_rx))
+				continue;
+
+			/* clear the affinity notifier in the IRQ descriptor */
+			irq_set_affinity_notifier(irq_num, NULL);
+
+			/* clear the affinity_mask in the IRQ descriptor */
+			irq_set_affinity_hint(irq_num, NULL);
+			synchronize_irq(irq_num);
+			devm_free_irq(&pf->pdev->dev, irq_num,
+				      vsi->q_vectors[i]);
+		}
+		ice_vsi_release_msix(vsi);
+	}
+}
+
+/**
+ * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_cfg_msix(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	u16 vector = vsi->base_vector;
+	struct ice_hw *hw = &pf->hw;
+	u32 txq = 0, rxq = 0;
+	int i, q, itr;
+	u8 itr_gran;
+
+	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+		itr_gran = hw->itr_gran_200;
+
+		if (q_vector->num_ring_rx) {
+			q_vector->rx.itr =
+				ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,
+					   itr_gran);
+			q_vector->rx.latency_range = ICE_LOW_LATENCY;
+		}
+
+		if (q_vector->num_ring_tx) {
+			q_vector->tx.itr =
+				ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,
+					   itr_gran);
+			q_vector->tx.latency_range = ICE_LOW_LATENCY;
+		}
+		wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
+		wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
+
+		/* Both the Transmit Queue Interrupt Cause Control register
+		 * and the Receive Queue Interrupt Cause Control register
+		 * expect the MSIX_INDX field to be the vector index
+		 * within the function space and not the absolute
+		 * vector index across the PF or across the device.
+		 * For SR-IOV VF VSIs the queue vector index always starts
+		 * with 1 since the first vector index (0) is used for OICR
+		 * in VF space. Since VMDq and other PF VSIs are within
+		 * the PF function space, use the vector index that's
+		 * tracked for this PF.
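+		 * For example, for the first vector of this VSI the Tx
+		 * loop below writes QINT_TQCTL_CAUSE_ENA_M |
+		 * (ICE_TX_ITR << QINT_TQCTL_ITR_INDX_S) |
+		 * (vector << QINT_TQCTL_MSIX_INDX_S), with vector
+		 * starting at vsi->base_vector.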
+ */ + for (q = 0; q < q_vector->num_ring_tx; q++) { + u32 val; + + itr = ICE_TX_ITR; + val = QINT_TQCTL_CAUSE_ENA_M | + (itr << QINT_TQCTL_ITR_INDX_S) | + (vector << QINT_TQCTL_MSIX_INDX_S); + wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); + txq++; + } + + for (q = 0; q < q_vector->num_ring_rx; q++) { + u32 val; + + itr = ICE_RX_ITR; + val = QINT_RQCTL_CAUSE_ENA_M | + (itr << QINT_RQCTL_ITR_INDX_S) | + (vector << QINT_RQCTL_MSIX_INDX_S); + wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); + rxq++; + } + } + + ice_flush(hw); +} + +/** + * ice_ena_misc_vector - enable the non-queue interrupts + * @pf: board private structure + */ +static void ice_ena_misc_vector(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + u32 val; + + /* clear things first */ + wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ + rd32(hw, PFINT_OICR); /* read to clear */ + + val = (PFINT_OICR_HLP_RDY_M | + PFINT_OICR_CPM_RDY_M | + PFINT_OICR_ECC_ERR_M | + PFINT_OICR_MAL_DETECT_M | + PFINT_OICR_GRST_M | + PFINT_OICR_PCI_EXCEPTION_M | + PFINT_OICR_GPIO_M | + PFINT_OICR_STORM_DETECT_M | + PFINT_OICR_HMC_ERR_M); + + wr32(hw, PFINT_OICR_ENA, val); + + /* SW_ITR_IDX = 0, but don't change INTENA */ + wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), + GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); +} + +/** + * ice_misc_intr - misc interrupt handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) +{ + struct ice_pf *pf = (struct ice_pf *)data; + struct ice_hw *hw = &pf->hw; + irqreturn_t ret = IRQ_NONE; + u32 oicr, ena_mask; + + set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); + + oicr = rd32(hw, PFINT_OICR); + ena_mask = rd32(hw, PFINT_OICR_ENA); + + if (!(oicr & PFINT_OICR_INTEVENT_M)) + goto ena_intr; + + if (oicr & PFINT_OICR_GRST_M) { + u32 reset; + /* we have a reset warning */ + ena_mask &= ~PFINT_OICR_GRST_M; + reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> + GLGEN_RSTAT_RESET_TYPE_S; + + if (reset == ICE_RESET_CORER) + pf->corer_count++; + else if (reset == ICE_RESET_GLOBR) + pf->globr_count++; + else + pf->empr_count++; + + /* If a reset cycle isn't already in progress, we set a bit in + * pf->state so that the service task can start a reset/rebuild. + * We also make note of which reset happened so that peer + * devices/drivers can be informed. + */ + if (!test_bit(__ICE_RESET_RECOVERY_PENDING, pf->state)) { + if (reset == ICE_RESET_CORER) + set_bit(__ICE_CORER_RECV, pf->state); + else if (reset == ICE_RESET_GLOBR) + set_bit(__ICE_GLOBR_RECV, pf->state); + else + set_bit(__ICE_EMPR_RECV, pf->state); + + set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + } + } + + if (oicr & PFINT_OICR_HMC_ERR_M) { + ena_mask &= ~PFINT_OICR_HMC_ERR_M; + dev_dbg(&pf->pdev->dev, + "HMC Error interrupt - info 0x%x, data 0x%x\n", + rd32(hw, PFHMC_ERRORINFO), + rd32(hw, PFHMC_ERRORDATA)); + } + + /* Report and mask off any remaining unexpected interrupts */ + oicr &= ena_mask; + if (oicr) { + dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n", + oicr); + /* If a critical error is pending there is no choice but to + * reset the device. 
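+		 * "Critical" here means PE_CRITERR, PCI_EXCEPTION or
+		 * ECC_ERR, the causes tested just below; the PFR request
+		 * is then picked up by ice_reset_subtask() on the next
+		 * service task pass.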
+ */ + if (oicr & (PFINT_OICR_PE_CRITERR_M | + PFINT_OICR_PCI_EXCEPTION_M | + PFINT_OICR_ECC_ERR_M)) { + set_bit(__ICE_PFR_REQ, pf->state); + ice_service_task_schedule(pf); + } + ena_mask &= ~oicr; + } + ret = IRQ_HANDLED; + +ena_intr: + /* re-enable interrupt causes that are not handled during this pass */ + wr32(hw, PFINT_OICR_ENA, ena_mask); + if (!test_bit(__ICE_DOWN, pf->state)) { + ice_service_task_schedule(pf); + ice_irq_dynamic_ena(hw, NULL, NULL); + } + + return ret; +} + +/** + * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors + * @vsi: the VSI being configured + * + * This function maps descriptor rings to the queue-specific vectors allotted + * through the MSI-X enabling code. On a constrained vector budget, we map Tx + * and Rx rings to the vector as "efficiently" as possible. + */ +static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) +{ + int q_vectors = vsi->num_q_vectors; + int tx_rings_rem, rx_rings_rem; + int v_id; + + /* initially assigning remaining rings count to VSIs num queue value */ + tx_rings_rem = vsi->num_txq; + rx_rings_rem = vsi->num_rxq; + + for (v_id = 0; v_id < q_vectors; v_id++) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; + int tx_rings_per_v, rx_rings_per_v, q_id, q_base; + + /* Tx rings mapping to vector */ + tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); + q_vector->num_ring_tx = tx_rings_per_v; + q_vector->tx.ring = NULL; + q_base = vsi->num_txq - tx_rings_rem; + + for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { + struct ice_ring *tx_ring = vsi->tx_rings[q_id]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + } + tx_rings_rem -= tx_rings_per_v; + + /* Rx rings mapping to vector */ + rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); + q_vector->num_ring_rx = rx_rings_per_v; + q_vector->rx.ring = NULL; + q_base = vsi->num_rxq - rx_rings_rem; + + for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { + struct ice_ring *rx_ring = vsi->rx_rings[q_id]; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + } + rx_rings_rem -= rx_rings_per_v; + } +} + +/** + * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI + * @vsi: the VSI being configured + * + * Return 0 on success and a negative value on error + */ +static void ice_vsi_set_num_qs(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + + switch (vsi->type) { + case ICE_VSI_PF: + vsi->alloc_txq = pf->num_lan_tx; + vsi->alloc_rxq = pf->num_lan_rx; + vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE); + vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); + break; + default: + dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", + vsi->type); + break; + } +} + +/** + * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi + * @vsi: VSI pointer + * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. 
+ * + * On error: returns error code (negative) + * On success: returns 0 + */ +static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors) +{ + struct ice_pf *pf = vsi->back; + + /* allocate memory for both Tx and Rx ring pointers */ + vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, + sizeof(struct ice_ring *), GFP_KERNEL); + if (!vsi->tx_rings) + goto err_txrings; + + vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, + sizeof(struct ice_ring *), GFP_KERNEL); + if (!vsi->rx_rings) + goto err_rxrings; + + if (alloc_qvectors) { + /* allocate memory for q_vector pointers */ + vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, + vsi->num_q_vectors, + sizeof(struct ice_q_vector *), + GFP_KERNEL); + if (!vsi->q_vectors) + goto err_vectors; + } + + return 0; + +err_vectors: + devm_kfree(&pf->pdev->dev, vsi->rx_rings); +err_rxrings: + devm_kfree(&pf->pdev->dev, vsi->tx_rings); +err_txrings: + return -ENOMEM; +} + +/** + * ice_msix_clean_rings - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) +{ + struct ice_q_vector *q_vector = (struct ice_q_vector *)data; + + if (!q_vector->tx.ring && !q_vector->rx.ring) + return IRQ_HANDLED; + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * ice_vsi_alloc - Allocates the next available struct vsi in the PF + * @pf: board private structure + * @type: type of VSI + * + * returns a pointer to a VSI on success, NULL on failure. + */ +static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type) +{ + struct ice_vsi *vsi = NULL; + + /* Need to protect the allocation of the VSIs at the PF level */ + mutex_lock(&pf->sw_mutex); + + /* If we have already allocated our maximum number of VSIs, + * pf->next_vsi will be ICE_NO_VSI. 
If not, pf->next_vsi index + * is available to be populated + */ + if (pf->next_vsi == ICE_NO_VSI) { + dev_dbg(&pf->pdev->dev, "out of VSI slots!\n"); + goto unlock_pf; + } + + vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL); + if (!vsi) + goto unlock_pf; + + vsi->type = type; + vsi->back = pf; + set_bit(__ICE_DOWN, vsi->state); + vsi->idx = pf->next_vsi; + vsi->work_lmt = ICE_DFLT_IRQ_WORK; + + ice_vsi_set_num_qs(vsi); + + switch (vsi->type) { + case ICE_VSI_PF: + if (ice_vsi_alloc_arrays(vsi, true)) + goto err_rings; + + /* Setup default MSIX irq handler for VSI */ + vsi->irq_handler = ice_msix_clean_rings; + break; + default: + dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); + goto unlock_pf; + } + + /* fill VSI slot in the PF struct */ + pf->vsi[pf->next_vsi] = vsi; + + /* prepare pf->next_vsi for next use */ + pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, + pf->next_vsi); + goto unlock_pf; + +err_rings: + devm_kfree(&pf->pdev->dev, vsi); + vsi = NULL; +unlock_pf: + mutex_unlock(&pf->sw_mutex); + return vsi; +} + +/** + * ice_free_irq_msix_misc - Unroll misc vector setup + * @pf: board private structure + */ +static void ice_free_irq_msix_misc(struct ice_pf *pf) +{ + /* disable OICR interrupt */ + wr32(&pf->hw, PFINT_OICR_ENA, 0); + ice_flush(&pf->hw); + + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { + synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); + devm_free_irq(&pf->pdev->dev, + pf->msix_entries[pf->oicr_idx].vector, pf); + } + + ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); +} + +/** + * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events + * @pf: board private structure + * + * This sets up the handler for MSIX 0, which is used to manage the + * non-queue interrupts, e.g. AdminQ and errors. This is not used + * when in MSI or Legacy interrupt mode. + */ +static int ice_req_irq_msix_misc(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + int oicr_idx, err = 0; + u8 itr_gran; + u32 val; + + if (!pf->int_name[0]) + snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", + dev_driver_string(&pf->pdev->dev), + dev_name(&pf->pdev->dev)); + + /* Do not request IRQ but do enable OICR interrupt since settings are + * lost during reset. Note that this function is called only during + * rebuild path and not while reset is in progress. 
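+	 * During recovery the vector and IRQ from the original probe
+	 * are still in place, so only the register programming after
+	 * the skip_req_irq label needs to be redone.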
+ */ + if (ice_is_reset_recovery_pending(pf->state)) + goto skip_req_irq; + + /* reserve one vector in irq_tracker for misc interrupts */ + oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); + if (oicr_idx < 0) + return oicr_idx; + + pf->oicr_idx = oicr_idx; + + err = devm_request_irq(&pf->pdev->dev, + pf->msix_entries[pf->oicr_idx].vector, + ice_misc_intr, 0, pf->int_name, pf); + if (err) { + dev_err(&pf->pdev->dev, + "devm_request_irq for %s failed: %d\n", + pf->int_name, err); + ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); + return err; + } + +skip_req_irq: + ice_ena_misc_vector(pf); + + val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | + (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) | + PFINT_OICR_CTL_CAUSE_ENA_M; + wr32(hw, PFINT_OICR_CTL, val); + + /* This enables Admin queue Interrupt causes */ + val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | + (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) | + PFINT_FW_CTL_CAUSE_ENA_M; + wr32(hw, PFINT_FW_CTL, val); + + itr_gran = hw->itr_gran_200; + + wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), + ITR_TO_REG(ICE_ITR_8K, itr_gran)); + + ice_flush(hw); + ice_irq_dynamic_ena(hw, NULL, NULL); + + return 0; +} + +/** + * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI + * @vsi: the VSI getting queues + * + * Return 0 on success and a negative value on error + */ +static int ice_vsi_get_qs_contig(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int offset, ret = 0; + + mutex_lock(&pf->avail_q_mutex); + /* look for contiguous block of queues for tx */ + offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS, + 0, vsi->alloc_txq, 0); + if (offset < ICE_MAX_TXQS) { + int i; + + bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq); + for (i = 0; i < vsi->alloc_txq; i++) + vsi->txq_map[i] = i + offset; + } else { + ret = -ENOMEM; + vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER; + } + + /* look for contiguous block of queues for rx */ + offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS, + 0, vsi->alloc_rxq, 0); + if (offset < ICE_MAX_RXQS) { + int i; + + bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq); + for (i = 0; i < vsi->alloc_rxq; i++) + vsi->rxq_map[i] = i + offset; + } else { + ret = -ENOMEM; + vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER; + } + mutex_unlock(&pf->avail_q_mutex); + + return ret; +} + +/** + * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI + * @vsi: the VSI getting queues + * + * Return 0 on success and a negative value on error + */ +static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int i, index = 0; + + mutex_lock(&pf->avail_q_mutex); + + if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) { + for (i = 0; i < vsi->alloc_txq; i++) { + index = find_next_zero_bit(pf->avail_txqs, + ICE_MAX_TXQS, index); + if (index < ICE_MAX_TXQS) { + set_bit(index, pf->avail_txqs); + vsi->txq_map[i] = index; + } else { + goto err_scatter_tx; + } + } + } + + if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) { + for (i = 0; i < vsi->alloc_rxq; i++) { + index = find_next_zero_bit(pf->avail_rxqs, + ICE_MAX_RXQS, index); + if (index < ICE_MAX_RXQS) { + set_bit(index, pf->avail_rxqs); + vsi->rxq_map[i] = index; + } else { + goto err_scatter_rx; + } + } + } + + mutex_unlock(&pf->avail_q_mutex); + return 0; + +err_scatter_rx: + /* unflag any queues we have grabbed (i is failed position) */ + for (index = 0; index < i; index++) { + clear_bit(vsi->rxq_map[index], pf->avail_rxqs); + vsi->rxq_map[index] = 0; + } + i = vsi->alloc_txq; 
+err_scatter_tx: + /* i is either position of failed attempt or vsi->alloc_txq */ + for (index = 0; index < i; index++) { + clear_bit(vsi->txq_map[index], pf->avail_txqs); + vsi->txq_map[index] = 0; + } + + mutex_unlock(&pf->avail_q_mutex); + return -ENOMEM; +} + +/** + * ice_vsi_get_qs - Assign queues from PF to VSI + * @vsi: the VSI to assign queues to + * + * Returns 0 on success and a negative value on error + */ +static int ice_vsi_get_qs(struct ice_vsi *vsi) +{ + int ret = 0; + + vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG; + vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG; + + /* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping + * modes individually to scatter if assigning contiguous queues + * to rx or tx fails + */ + ret = ice_vsi_get_qs_contig(vsi); + if (ret < 0) { + if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) + vsi->alloc_txq = max_t(u16, vsi->alloc_txq, + ICE_MAX_SCATTER_TXQS); + if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) + vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq, + ICE_MAX_SCATTER_RXQS); + ret = ice_vsi_get_qs_scatter(vsi); + } + + return ret; +} + +/** + * ice_vsi_put_qs - Release queues from VSI to PF + * @vsi: the VSI thats going to release queues + */ +static void ice_vsi_put_qs(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int i; + + mutex_lock(&pf->avail_q_mutex); + + for (i = 0; i < vsi->alloc_txq; i++) { + clear_bit(vsi->txq_map[i], pf->avail_txqs); + vsi->txq_map[i] = ICE_INVAL_Q_INDEX; + } + + for (i = 0; i < vsi->alloc_rxq; i++) { + clear_bit(vsi->rxq_map[i], pf->avail_rxqs); + vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; + } + + mutex_unlock(&pf->avail_q_mutex); +} + +/** + * ice_free_q_vector - Free memory allocated for a specific interrupt vector + * @vsi: VSI having the memory freed + * @v_idx: index of the vector to be freed + */ +static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) +{ + struct ice_q_vector *q_vector; + struct ice_ring *ring; + + if (!vsi->q_vectors[v_idx]) { + dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n", + v_idx); + return; + } + q_vector = vsi->q_vectors[v_idx]; + + ice_for_each_ring(ring, q_vector->tx) + ring->q_vector = NULL; + ice_for_each_ring(ring, q_vector->rx) + ring->q_vector = NULL; + + /* only VSI with an associated netdev is set up with NAPI */ + if (vsi->netdev) + netif_napi_del(&q_vector->napi); + + devm_kfree(&vsi->back->pdev->dev, q_vector); + vsi->q_vectors[v_idx] = NULL; +} + +/** + * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors + * @vsi: the VSI having memory freed + */ +static void ice_vsi_free_q_vectors(struct ice_vsi *vsi) +{ + int v_idx; + + for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) + ice_free_q_vector(vsi, v_idx); +} + +/** + * ice_cfg_netdev - Setup the netdev flags + * @vsi: the VSI being configured + * + * Returns 0 on success, negative value on failure + */ +static int ice_cfg_netdev(struct ice_vsi *vsi) +{ + netdev_features_t csumo_features; + netdev_features_t vlano_features; + netdev_features_t dflt_features; + netdev_features_t tso_features; + struct ice_netdev_priv *np; + struct net_device *netdev; + u8 mac_addr[ETH_ALEN]; + + netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv), + vsi->alloc_txq, vsi->alloc_rxq); + if (!netdev) + return -ENOMEM; + + vsi->netdev = netdev; + np = netdev_priv(netdev); + np->vsi = vsi; + + dflt_features = NETIF_F_SG | + NETIF_F_HIGHDMA | + NETIF_F_RXHASH; + + csumo_features = NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; + + vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | + 
NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + tso_features = NETIF_F_TSO; + + /* set features that user can change */ + netdev->hw_features = dflt_features | csumo_features | + vlano_features | tso_features; + + /* enable features */ + netdev->features |= netdev->hw_features; + /* encap and VLAN devices inherit default, csumo and tso features */ + netdev->hw_enc_features |= dflt_features | csumo_features | + tso_features; + netdev->vlan_features |= dflt_features | csumo_features | + tso_features; + + if (vsi->type == ICE_VSI_PF) { + SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev); + ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); + + ether_addr_copy(netdev->dev_addr, mac_addr); + ether_addr_copy(netdev->perm_addr, mac_addr); + } + + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* assign netdev_ops */ + netdev->netdev_ops = &ice_netdev_ops; + + /* setup watchdog timeout value to be 5 second */ + netdev->watchdog_timeo = 5 * HZ; + + ice_set_ethtool_ops(netdev); + + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = ICE_MAX_MTU; + + return 0; +} + +/** + * ice_vsi_free_arrays - clean up vsi resources + * @vsi: pointer to VSI being cleared + * @free_qvectors: bool to specify if q_vectors should be deallocated + */ +static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors) +{ + struct ice_pf *pf = vsi->back; + + /* free the ring and vector containers */ + if (free_qvectors && vsi->q_vectors) { + devm_kfree(&pf->pdev->dev, vsi->q_vectors); + vsi->q_vectors = NULL; + } + if (vsi->tx_rings) { + devm_kfree(&pf->pdev->dev, vsi->tx_rings); + vsi->tx_rings = NULL; + } + if (vsi->rx_rings) { + devm_kfree(&pf->pdev->dev, vsi->rx_rings); + vsi->rx_rings = NULL; + } +} + +/** + * ice_vsi_clear - clean up and deallocate the provided vsi + * @vsi: pointer to VSI being cleared + * + * This deallocates the vsi's queue resources, removes it from the PF's + * VSI array if necessary, and deallocates the VSI + * + * Returns 0 on success, negative on failure + */ +static int ice_vsi_clear(struct ice_vsi *vsi) +{ + struct ice_pf *pf = NULL; + + if (!vsi) + return 0; + + if (!vsi->back) + return -EINVAL; + + pf = vsi->back; + + if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { + dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n", + vsi->idx); + return -EINVAL; + } + + mutex_lock(&pf->sw_mutex); + /* updates the PF for this cleared vsi */ + + pf->vsi[vsi->idx] = NULL; + if (vsi->idx < pf->next_vsi) + pf->next_vsi = vsi->idx; + + ice_vsi_free_arrays(vsi, true); + mutex_unlock(&pf->sw_mutex); + devm_kfree(&pf->pdev->dev, vsi); + + return 0; +} + +/** + * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector + * @vsi: the VSI being configured + * @v_idx: index of the vector in the vsi struct + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
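+ * The affinity mask is seeded with CPU v_idx, so when at least as many
+ * CPUs are online as there are vectors, each vector's affinity hint
+ * lands on its own CPU.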
+ */ +static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) +{ + struct ice_pf *pf = vsi->back; + struct ice_q_vector *q_vector; + + /* allocate q_vector */ + q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + q_vector->vsi = vsi; + q_vector->v_idx = v_idx; + /* only set affinity_mask if the CPU is online */ + if (cpu_online(v_idx)) + cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + + if (vsi->netdev) + netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, + NAPI_POLL_WEIGHT); + /* tie q_vector and vsi together */ + vsi->q_vectors[v_idx] = q_vector; + + return 0; +} + +/** + * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors + * @vsi: the VSI being configured + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + */ +static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int v_idx = 0, num_q_vectors; + int err; + + if (vsi->q_vectors[0]) { + dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", + vsi->vsi_num); + return -EEXIST; + } + + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { + num_q_vectors = vsi->num_q_vectors; + } else { + err = -EINVAL; + goto err_out; + } + + for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { + err = ice_vsi_alloc_q_vector(vsi, v_idx); + if (err) + goto err_out; + } + + return 0; + +err_out: + while (v_idx--) + ice_free_q_vector(vsi, v_idx); + + dev_err(&pf->pdev->dev, + "Failed to allocate %d q_vector for VSI %d, ret=%d\n", + vsi->num_q_vectors, vsi->vsi_num, err); + vsi->num_q_vectors = 0; + return err; +} + +/** + * ice_vsi_setup_vector_base - Set up the base vector for the given VSI + * @vsi: ptr to the VSI + * + * This should only be called after ice_vsi_alloc() which allocates the + * corresponding SW VSI structure and initializes num_queue_pairs for the + * newly allocated VSI. 
+ * + * Returns 0 on success or negative on failure + */ +static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int num_q_vectors = 0; + + if (vsi->base_vector) { + dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", + vsi->vsi_num, vsi->base_vector); + return -EEXIST; + } + + if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + return -ENOENT; + + switch (vsi->type) { + case ICE_VSI_PF: + num_q_vectors = vsi->num_q_vectors; + break; + default: + dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", + vsi->type); + break; + } + + if (num_q_vectors) + vsi->base_vector = ice_get_res(pf, pf->irq_tracker, + num_q_vectors, vsi->idx); + + if (vsi->base_vector < 0) { + dev_err(&pf->pdev->dev, + "Failed to get tracking for %d vectors for VSI %d, err=%d\n", + num_q_vectors, vsi->vsi_num, vsi->base_vector); + return -ENOENT; + } + + return 0; +} + +/** + * ice_fill_rss_lut - Fill the RSS lookup table with default values + * @lut: Lookup table + * @rss_table_size: Lookup table size + * @rss_size: Range of queue number for hashing + */ +void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) +{ + u16 i; + + for (i = 0; i < rss_table_size; i++) + lut[i] = i % rss_size; +} + +/** + * ice_vsi_cfg_rss - Configure RSS params for a VSI + * @vsi: VSI to be configured + */ +static int ice_vsi_cfg_rss(struct ice_vsi *vsi) +{ + u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; + struct ice_aqc_get_set_rss_keys *key; + struct ice_pf *pf = vsi->back; + enum ice_status status; + int err = 0; + u8 *lut; + + vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq); + + lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); + else + ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); + + status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type, + lut, vsi->rss_table_size); + + if (status) { + dev_err(&vsi->back->pdev->dev, + "set_rss_lut failed, error %d\n", status); + err = -EIO; + goto ice_vsi_cfg_rss_exit; + } + + key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL); + if (!key) { + err = -ENOMEM; + goto ice_vsi_cfg_rss_exit; + } + + if (vsi->rss_hkey_user) + memcpy(seed, vsi->rss_hkey_user, + ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); + else + netdev_rss_key_fill((void *)seed, + ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); + memcpy(&key->standard_rss_key, seed, + ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); + + status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key); + + if (status) { + dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n", + status); + err = -EIO; + } + + devm_kfree(&pf->pdev->dev, key); +ice_vsi_cfg_rss_exit: + devm_kfree(&pf->pdev->dev, lut); + return err; +} + +/** + * ice_vsi_reinit_setup - return resource and reallocate resource for a VSI + * @vsi: pointer to the ice_vsi + * + * This reallocates the VSIs queue resources + * + * Returns 0 on success and negative value on failure + */ +static int ice_vsi_reinit_setup(struct ice_vsi *vsi) +{ + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + int ret, i; + + if (!vsi) + return -EINVAL; + + ice_vsi_free_q_vectors(vsi); + ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx); + vsi->base_vector = 0; + ice_vsi_clear_rings(vsi); + ice_vsi_free_arrays(vsi, false); + ice_vsi_set_num_qs(vsi); + + /* Initialize VSI struct elements and create VSI in FW */ + ret = ice_vsi_add(vsi); + if (ret < 0) + goto err_vsi; + + ret 
= ice_vsi_alloc_arrays(vsi, false); + if (ret < 0) + goto err_vsi; + + switch (vsi->type) { + case ICE_VSI_PF: + if (!vsi->netdev) { + ret = ice_cfg_netdev(vsi); + if (ret) + goto err_rings; + + ret = register_netdev(vsi->netdev); + if (ret) + goto err_rings; + + netif_carrier_off(vsi->netdev); + netif_tx_stop_all_queues(vsi->netdev); + } + + ret = ice_vsi_alloc_q_vectors(vsi); + if (ret) + goto err_rings; + + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto err_vectors; + + ret = ice_vsi_alloc_rings(vsi); + if (ret) + goto err_vectors; + + ice_vsi_map_rings_to_vectors(vsi); + break; + default: + break; + } + + ice_vsi_set_tc_cfg(vsi); + + /* configure VSI nodes based on number of queues and TC's */ + for (i = 0; i < vsi->tc_cfg.numtc; i++) + max_txqs[i] = vsi->num_txq; + + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num, + vsi->tc_cfg.ena_tc, max_txqs); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Failed VSI lan queue config\n"); + goto err_vectors; + } + return 0; + +err_vectors: + ice_vsi_free_q_vectors(vsi); +err_rings: + if (vsi->netdev) { + vsi->current_netdev_flags = 0; + unregister_netdev(vsi->netdev); + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } +err_vsi: + ice_vsi_clear(vsi); + set_bit(__ICE_RESET_FAILED, vsi->back->state); + return ret; +} + +/** + * ice_vsi_setup - Set up a VSI by a given type + * @pf: board private structure + * @type: VSI type + * @pi: pointer to the port_info instance + * + * This allocates the sw VSI structure and its queue resources. + * + * Returns pointer to the successfully allocated and configure VSI sw struct on + * success, otherwise returns NULL on failure. + */ +static struct ice_vsi * +ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type, + struct ice_port_info *pi) +{ + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + struct device *dev = &pf->pdev->dev; + struct ice_vsi_ctx ctxt = { 0 }; + struct ice_vsi *vsi; + int ret, i; + + vsi = ice_vsi_alloc(pf, type); + if (!vsi) { + dev_err(dev, "could not allocate VSI\n"); + return NULL; + } + + vsi->port_info = pi; + vsi->vsw = pf->first_sw; + + if (ice_vsi_get_qs(vsi)) { + dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", + vsi->idx); + goto err_get_qs; + } + + /* set RSS capabilities */ + ice_vsi_set_rss_params(vsi); + + /* create the VSI */ + ret = ice_vsi_add(vsi); + if (ret) + goto err_vsi; + + ctxt.vsi_num = vsi->vsi_num; + + switch (vsi->type) { + case ICE_VSI_PF: + ret = ice_cfg_netdev(vsi); + if (ret) + goto err_cfg_netdev; + + ret = register_netdev(vsi->netdev); + if (ret) + goto err_register_netdev; + + netif_carrier_off(vsi->netdev); + + /* make sure transmit queues start off as stopped */ + netif_tx_stop_all_queues(vsi->netdev); + ret = ice_vsi_alloc_q_vectors(vsi); + if (ret) + goto err_msix; + + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto err_rings; + + ret = ice_vsi_alloc_rings(vsi); + if (ret) + goto err_rings; + + ice_vsi_map_rings_to_vectors(vsi); + + /* Do not exit if configuring RSS had an issue, at least + * receive traffic on first queue. 
Hence no need to capture + * return value + */ + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + ice_vsi_cfg_rss(vsi); + break; + default: + /* if vsi type is not recognized, clean up the resources and + * exit + */ + goto err_rings; + } + + ice_vsi_set_tc_cfg(vsi); + + /* configure VSI nodes based on number of queues and TC's */ + for (i = 0; i < vsi->tc_cfg.numtc; i++) + max_txqs[i] = vsi->num_txq; + + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num, + vsi->tc_cfg.ena_tc, max_txqs); + if (ret) { + dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n"); + goto err_rings; + } + + return vsi; + +err_rings: + ice_vsi_free_q_vectors(vsi); +err_msix: + if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(vsi->netdev); +err_register_netdev: + if (vsi->netdev) { + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } +err_cfg_netdev: + ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); + if (ret) + dev_err(&vsi->back->pdev->dev, + "Free VSI AQ call failed, err %d\n", ret); +err_vsi: + ice_vsi_put_qs(vsi); +err_get_qs: + pf->q_left_tx += vsi->alloc_txq; + pf->q_left_rx += vsi->alloc_rxq; + ice_vsi_clear(vsi); + + return NULL; +} + +/** + * ice_vsi_add_vlan - Add vsi membership for given vlan + * @vsi: the vsi being configured + * @vid: vlan id to be added + */ +static int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) +{ + struct ice_fltr_list_entry *tmp; + struct ice_pf *pf = vsi->back; + LIST_HEAD(tmp_add_list); + enum ice_status status; + int err = 0; + + tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; + tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp->fltr_info.flag = ICE_FLTR_TX; + tmp->fltr_info.src = vsi->vsi_num; + tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num; + tmp->fltr_info.l_data.vlan.vlan_id = vid; + + INIT_LIST_HEAD(&tmp->list_entry); + list_add(&tmp->list_entry, &tmp_add_list); + + status = ice_add_vlan(&pf->hw, &tmp_add_list); + if (status) { + err = -ENODEV; + dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n", + vid, vsi->vsi_num); + } + + ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + return err; +} + +/** + * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload + * @netdev: network interface to be adjusted + * @proto: unused protocol + * @vid: vlan id to be added + * + * net_device_ops implementation for adding vlan ids + */ +static int ice_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + int ret = 0; + + if (vid >= VLAN_N_VID) { + netdev_err(netdev, "VLAN id requested %d is out of range %d\n", + vid, VLAN_N_VID); + return -EINVAL; + } + + if (vsi->info.pvid) + return -EINVAL; + + /* Add all VLAN ids including 0 to the switch filter. 
VLAN id 0 is + * needed to continue allowing all untagged packets since VLAN prune + * list is applied to all packets by the switch + */ + ret = ice_vsi_add_vlan(vsi, vid); + + if (!ret) + set_bit(vid, vsi->active_vlans); + + return ret; +} + +/** + * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN + * @vsi: the VSI being configured + * @vid: VLAN id to be removed + */ +static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) +{ + struct ice_fltr_list_entry *list; + struct ice_pf *pf = vsi->back; + LIST_HEAD(tmp_add_list); + + list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); + if (!list) + return; + + list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; + list->fltr_info.fwd_id.vsi_id = vsi->vsi_num; + list->fltr_info.fltr_act = ICE_FWD_TO_VSI; + list->fltr_info.l_data.vlan.vlan_id = vid; + list->fltr_info.flag = ICE_FLTR_TX; + list->fltr_info.src = vsi->vsi_num; + + INIT_LIST_HEAD(&list->list_entry); + list_add(&list->list_entry, &tmp_add_list); + + if (ice_remove_vlan(&pf->hw, &tmp_add_list)) + dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n", + vid, vsi->vsi_num); + + ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); +} + +/** + * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload + * @netdev: network interface to be adjusted + * @proto: unused protocol + * @vid: vlan id to be removed + * + * net_device_ops implementation for removing vlan ids + */ +static int ice_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + + if (vsi->info.pvid) + return -EINVAL; + + /* return code is ignored as there is nothing a user + * can do about failure to remove and a log message was + * already printed from the other function + */ + ice_vsi_kill_vlan(vsi, vid); + + clear_bit(vid, vsi->active_vlans); + + return 0; +} + +/** + * ice_setup_pf_sw - Setup the HW switch on startup or after reset + * @pf: board private structure + * + * Returns 0 on success, negative value on failure + */ +static int ice_setup_pf_sw(struct ice_pf *pf) +{ + LIST_HEAD(tmp_add_list); + u8 broadcast[ETH_ALEN]; + struct ice_vsi *vsi; + int status = 0; + + if (!ice_is_reset_recovery_pending(pf->state)) { + vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info); + if (!vsi) { + status = -ENOMEM; + goto error_exit; + } + } else { + vsi = pf->vsi[0]; + status = ice_vsi_reinit_setup(vsi); + if (status < 0) + return -EIO; + } + + /* tmp_add_list contains a list of MAC addresses for which MAC + * filters need to be programmed. Add the VSI's unicast MAC to + * this list + */ + status = ice_add_mac_to_list(vsi, &tmp_add_list, + vsi->port_info->mac.perm_addr); + if (status) + goto error_exit; + + /* VSI needs to receive broadcast traffic, so add the broadcast + * MAC address to the list. 
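+	 * Both the unicast and the broadcast entry are then programmed
+	 * in a single ice_add_mac() call below.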
+	 */
+	eth_broadcast_addr(broadcast);
+	status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
+	if (status)
+		goto error_exit;
+
+	/* program MAC filters for entries in tmp_add_list */
+	status = ice_add_mac(&pf->hw, &tmp_add_list);
+	if (status) {
+		dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
+		status = -ENOMEM;
+		goto error_exit;
+	}
+
+	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+	return status;
+
+error_exit:
+	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+
+	if (vsi) {
+		ice_vsi_free_q_vectors(vsi);
+		if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
+			unregister_netdev(vsi->netdev);
+		if (vsi->netdev) {
+			free_netdev(vsi->netdev);
+			vsi->netdev = NULL;
+		}
+
+		ice_vsi_delete(vsi);
+		ice_vsi_put_qs(vsi);
+		pf->q_left_tx += vsi->alloc_txq;
+		pf->q_left_rx += vsi->alloc_rxq;
+		ice_vsi_clear(vsi);
+	}
+	return status;
+}
+
+/**
+ * ice_determine_q_usage - Calculate queue distribution
+ * @pf: board private structure
+ *
+ * Queue counts are clamped to what the device capabilities and the
+ * number of online CPUs allow.
+ */
+static void ice_determine_q_usage(struct ice_pf *pf)
+{
+	u16 q_left_tx, q_left_rx;
+
+	q_left_tx = pf->hw.func_caps.common_cap.num_txq;
+	q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
+
+	pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
+
+	/* only 1 rx queue unless RSS is enabled */
+	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+		pf->num_lan_rx = 1;
+	else
+		pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());
+
+	pf->q_left_tx = q_left_tx - pf->num_lan_tx;
+	pf->q_left_rx = q_left_rx - pf->num_lan_rx;
+}
+
+/**
+ * ice_deinit_pf - Unrolls initializations done by ice_init_pf
+ * @pf: board private structure to clean up
+ */
+static void ice_deinit_pf(struct ice_pf *pf)
+{
+	if (pf->serv_tmr.function)
+		del_timer_sync(&pf->serv_tmr);
+	if (pf->serv_task.func)
+		cancel_work_sync(&pf->serv_task);
+	mutex_destroy(&pf->sw_mutex);
+	mutex_destroy(&pf->avail_q_mutex);
+}
+
+/**
+ * ice_init_pf - Initialize general software structures (struct ice_pf)
+ * @pf: board private structure to initialize
+ */
+static void ice_init_pf(struct ice_pf *pf)
+{
+	bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
+	set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+
+	mutex_init(&pf->sw_mutex);
+	mutex_init(&pf->avail_q_mutex);
+
+	/* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
+	mutex_lock(&pf->avail_q_mutex);
+	bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
+	bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
+	mutex_unlock(&pf->avail_q_mutex);
+
+	if (pf->hw.func_caps.common_cap.rss_table_size)
+		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
+
+	/* setup service timer and periodic service task */
+	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
+	pf->serv_tmr_period = HZ;
+	INIT_WORK(&pf->serv_task, ice_service_task);
+	clear_bit(__ICE_SERVICE_SCHED, pf->state);
+}
+
+/**
+ * ice_ena_msix_range - Request a range of MSIX vectors from the OS
+ * @pf: board private structure
+ *
+ * Compute the number of MSIX vectors required (v_budget) and request from
+ * the OS.
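+ * The budget is one miscellaneous/OICR vector plus up to
+ * num_online_cpus() LAN vectors, and the code below falls back to a
+ * single LAN vector if the OS grants fewer than requested.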
Return the number of vectors reserved or negative on failure + */ +static int ice_ena_msix_range(struct ice_pf *pf) +{ + int v_left, v_actual, v_budget = 0; + int needed, err, i; + + v_left = pf->hw.func_caps.common_cap.num_msix_vectors; + + /* reserve one vector for miscellaneous handler */ + needed = 1; + v_budget += needed; + v_left -= needed; + + /* reserve vectors for LAN traffic */ + pf->num_lan_msix = min_t(int, num_online_cpus(), v_left); + v_budget += pf->num_lan_msix; + + pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, + sizeof(struct msix_entry), GFP_KERNEL); + + if (!pf->msix_entries) { + err = -ENOMEM; + goto exit_err; + } + + for (i = 0; i < v_budget; i++) + pf->msix_entries[i].entry = i; + + /* actually reserve the vectors */ + v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, + ICE_MIN_MSIX, v_budget); + + if (v_actual < 0) { + dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n"); + err = v_actual; + goto msix_err; + } + + if (v_actual < v_budget) { + dev_warn(&pf->pdev->dev, + "not enough vectors. requested = %d, obtained = %d\n", + v_budget, v_actual); + if (v_actual >= (pf->num_lan_msix + 1)) { + pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1); + } else if (v_actual >= 2) { + pf->num_lan_msix = 1; + pf->num_avail_msix = v_actual - 2; + } else { + pci_disable_msix(pf->pdev); + err = -ERANGE; + goto msix_err; + } + } + + return v_actual; + +msix_err: + devm_kfree(&pf->pdev->dev, pf->msix_entries); + goto exit_err; + +exit_err: + pf->num_lan_msix = 0; + clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); + return err; +} + +/** + * ice_dis_msix - Disable MSI-X interrupt setup in OS + * @pf: board private structure + */ +static void ice_dis_msix(struct ice_pf *pf) +{ + pci_disable_msix(pf->pdev); + devm_kfree(&pf->pdev->dev, pf->msix_entries); + pf->msix_entries = NULL; + clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); +} + +/** + * ice_init_interrupt_scheme - Determine proper interrupt scheme + * @pf: board private structure to initialize + */ +static int ice_init_interrupt_scheme(struct ice_pf *pf) +{ + int vectors = 0; + ssize_t size; + + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + vectors = ice_ena_msix_range(pf); + else + return -ENODEV; + + if (vectors < 0) + return vectors; + + /* set up vector assignment tracking */ + size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors); + + pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); + if (!pf->irq_tracker) { + ice_dis_msix(pf); + return -ENOMEM; + } + + pf->irq_tracker->num_entries = vectors; + + return 0; +} + +/** + * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme + * @pf: board private structure + */ +static void ice_clear_interrupt_scheme(struct ice_pf *pf) +{ + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + ice_dis_msix(pf); + + devm_kfree(&pf->pdev->dev, pf->irq_tracker); + pf->irq_tracker = NULL; +} + +/** + * ice_probe - Device initialization routine + * @pdev: PCI device information struct + * @ent: entry in ice_pci_tbl + * + * Returns 0 on success, negative on failure + */ +static int ice_probe(struct pci_dev *pdev, + const struct pci_device_id __always_unused *ent) +{ + struct ice_pf *pf; + struct ice_hw *hw; + int err; + + /* this driver uses devres, see Documentation/driver-model/devres.txt */ + err = pcim_enable_device(pdev); + if (err) + return err; + + err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); + if (err) { + dev_err(&pdev->dev, "I/O map error %d\n", err); + return err; + } + + pf = devm_kzalloc(&pdev->dev, 
sizeof(*pf), GFP_KERNEL); + if (!pf) + return -ENOMEM; + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); + return err; + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + + pf->pdev = pdev; + pci_set_drvdata(pdev, pf); + set_bit(__ICE_DOWN, pf->state); + + hw = &pf->hw; + hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; + hw->back = pf; + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + hw->bus.device = PCI_SLOT(pdev->devfn); + hw->bus.func = PCI_FUNC(pdev->devfn); + ice_set_ctrlq_len(hw); + + pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); + +#ifndef CONFIG_DYNAMIC_DEBUG + if (debug < -1) + hw->debug_mask = debug; +#endif + + err = ice_init_hw(hw); + if (err) { + dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err); + err = -EIO; + goto err_exit_unroll; + } + + dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n", + hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, + hw->api_maj_ver, hw->api_min_ver); + + ice_init_pf(pf); + + ice_determine_q_usage(pf); + + pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC, + hw->func_caps.guaranteed_num_vsi); + if (!pf->num_alloc_vsi) { + err = -EIO; + goto err_init_pf_unroll; + } + + pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi, + sizeof(struct ice_vsi *), GFP_KERNEL); + if (!pf->vsi) { + err = -ENOMEM; + goto err_init_pf_unroll; + } + + err = ice_init_interrupt_scheme(pf); + if (err) { + dev_err(&pdev->dev, + "ice_init_interrupt_scheme failed: %d\n", err); + err = -EIO; + goto err_init_interrupt_unroll; + } + + /* In case of MSIX we are going to setup the misc vector right here + * to handle admin queue events etc. In case of legacy and MSI + * the misc functionality and queue processing is combined in + * the same vector and that gets setup at open. 
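When pci_enable_msix_range() grants fewer vectors than budgeted, ice_ena_msix_range above degrades in steps: keep the full LAN allocation if the miscellaneous vector still fits, fall back to a single LAN vector if at least two vectors were granted, and give up otherwise. A standalone sketch of that decision ladder (the vector counts are hypothetical):

#include <stdio.h>

/* mirrors the fallback in ice_ena_msix_range(); not the driver code itself */
static int budget_fallback(int v_actual, int num_lan_msix, int *avail)
{
	if (v_actual >= num_lan_msix + 1) {  /* LAN vectors + misc both fit */
		*avail = v_actual - (num_lan_msix + 1);
		return num_lan_msix;
	} else if (v_actual >= 2) {          /* shrink LAN traffic to 1 vector */
		*avail = v_actual - 2;
		return 1;
	}
	return -1;                           /* not even misc + one LAN vector */
}

int main(void)
{
	int avail, lan = budget_fallback(5, 8, &avail);

	if (lan < 0)
		printf("disable MSI-X\n");
	else
		printf("lan vectors %d, spare %d\n", lan, avail);
	return 0;
}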
+ */ + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { + err = ice_req_irq_msix_misc(pf); + if (err) { + dev_err(&pdev->dev, + "setup of misc vector failed: %d\n", err); + goto err_init_interrupt_unroll; + } + } + + /* create switch struct for the switch element created by FW on boot */ + pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw), + GFP_KERNEL); + if (!pf->first_sw) { + err = -ENOMEM; + goto err_msix_misc_unroll; + } + + pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; + pf->first_sw->pf = pf; + + /* record the sw_id available for later use */ + pf->first_sw->sw_id = hw->port_info->sw_id; + + err = ice_setup_pf_sw(pf); + if (err) { + dev_err(&pdev->dev, + "probe failed due to setup pf switch:%d\n", err); + goto err_alloc_sw_unroll; + } + + /* Driver is mostly up */ + clear_bit(__ICE_DOWN, pf->state); + + /* since everything is good, start the service timer */ + mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); + + err = ice_init_link_events(pf->hw.port_info); + if (err) { + dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err); + goto err_alloc_sw_unroll; + } + + return 0; + +err_alloc_sw_unroll: + set_bit(__ICE_DOWN, pf->state); + devm_kfree(&pf->pdev->dev, pf->first_sw); +err_msix_misc_unroll: + ice_free_irq_msix_misc(pf); +err_init_interrupt_unroll: + ice_clear_interrupt_scheme(pf); + devm_kfree(&pdev->dev, pf->vsi); +err_init_pf_unroll: + ice_deinit_pf(pf); + ice_deinit_hw(hw); +err_exit_unroll: + pci_disable_pcie_error_reporting(pdev); + return err; +} + +/** + * ice_remove - Device removal routine + * @pdev: PCI device information struct + */ +static void ice_remove(struct pci_dev *pdev) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + int i = 0; + int err; + + if (!pf) + return; + + set_bit(__ICE_DOWN, pf->state); + + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (!pf->vsi[i]) + continue; + + err = ice_vsi_release(pf->vsi[i]); + if (err) + dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n", + i, err); + } + + ice_free_irq_msix_misc(pf); + ice_clear_interrupt_scheme(pf); + ice_deinit_pf(pf); + ice_deinit_hw(&pf->hw); + pci_disable_pcie_error_reporting(pdev); +} + +/* ice_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ice_pci_tbl[] = { + { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 }, + /* required last entry */ + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ice_pci_tbl); + +static struct pci_driver ice_driver = { + .name = KBUILD_MODNAME, + .id_table = ice_pci_tbl, + .probe = ice_probe, + .remove = ice_remove, +}; + +/** + * ice_module_init - Driver registration routine + * + * ice_module_init is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. 
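The probe error handling above is the usual kernel unwind ladder: each failure jumps to a label that undoes only the stages already completed, in reverse order. A toy model of the shape (the stage names are invented):

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return -1; } /* simulated failure */
static int init_c(void) { puts("init c"); return 0; }
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int probe(void)
{
	int err;

	err = init_a();
	if (err)
		goto out;
	err = init_b();
	if (err)
		goto err_a;  /* only stage a needs unwinding */
	err = init_c();
	if (err)
		goto err_b;  /* stages a and b need unwinding */
	return 0;

err_b:
	undo_b();        /* falls through to undo earlier stages */
err_a:
	undo_a();
out:
	return err;
}

int main(void) { return probe() ? 1 : 0; }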
+ */ +static int __init ice_module_init(void) +{ + int status; + + pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); + pr_info("%s\n", ice_copyright); + + ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME); + if (!ice_wq) { + pr_err("Failed to create workqueue\n"); + return -ENOMEM; + } + + status = pci_register_driver(&ice_driver); + if (status) { + pr_err("failed to register pci driver, err %d\n", status); + destroy_workqueue(ice_wq); + } + + return status; +} +module_init(ice_module_init); + +/** + * ice_module_exit - Driver exit cleanup routine + * + * ice_module_exit is called just before the driver is removed + * from memory. + */ +static void __exit ice_module_exit(void) +{ + pci_unregister_driver(&ice_driver); + destroy_workqueue(ice_wq); + pr_info("module unloaded\n"); +} +module_exit(ice_module_exit); + +/** + * ice_set_mac_address - NDO callback to set mac address + * @netdev: network interface device structure + * @pi: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int ice_set_mac_address(struct net_device *netdev, void *pi) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + struct sockaddr *addr = pi; + enum ice_status status; + LIST_HEAD(a_mac_list); + LIST_HEAD(r_mac_list); + u8 flags = 0; + int err; + u8 *mac; + + mac = (u8 *)addr->sa_data; + + if (!is_valid_ether_addr(mac)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, mac)) { + netdev_warn(netdev, "already using mac %pM\n", mac); + return 0; + } + + if (test_bit(__ICE_DOWN, pf->state) || + ice_is_reset_recovery_pending(pf->state)) { + netdev_err(netdev, "can't set mac %pM. device not ready\n", + mac); + return -EBUSY; + } + + /* When we change the mac address we also have to change the mac address + * based filter rules that were created previously for the old mac + * address. So first, we remove the old filter rule using ice_remove_mac + * and then create a new filter rule using ice_add_mac. Note that for + * both these operations, we first need to form a "list" of mac + * addresses (even though in this case, we have only 1 mac address to be + * added/removed) and this done using ice_add_mac_to_list. Depending on + * the ensuing operation this "list" of mac addresses is either to be + * added or removed from the filter. + */ + err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr); + if (err) { + err = -EADDRNOTAVAIL; + goto free_lists; + } + + status = ice_remove_mac(hw, &r_mac_list); + if (status) { + err = -EADDRNOTAVAIL; + goto free_lists; + } + + err = ice_add_mac_to_list(vsi, &a_mac_list, mac); + if (err) { + err = -EADDRNOTAVAIL; + goto free_lists; + } + + status = ice_add_mac(hw, &a_mac_list); + if (status) { + err = -EADDRNOTAVAIL; + goto free_lists; + } + +free_lists: + /* free list entries */ + ice_free_fltr_list(&pf->pdev->dev, &r_mac_list); + ice_free_fltr_list(&pf->pdev->dev, &a_mac_list); + + if (err) { + netdev_err(netdev, "can't set mac %pM. filter update failed\n", + mac); + return err; + } + + /* change the netdev's mac address */ + memcpy(netdev->dev_addr, mac, netdev->addr_len); + netdev_dbg(vsi->netdev, "updated mac address to %pM\n", + netdev->dev_addr); + + /* write new mac address to the firmware */ + flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; + status = ice_aq_manage_mac_write(hw, mac, flags, NULL); + if (status) { + netdev_err(netdev, "can't set mac %pM. 
write to firmware failed.\n", + mac); + } + return 0; +} + +/** + * ice_set_rx_mode - NDO callback to set the netdev filters + * @netdev: network interface device structure + */ +static void ice_set_rx_mode(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + + if (!vsi) + return; + + /* Set the flags to synchronize filters + * ndo_set_rx_mode may be triggered even without a change in netdev + * flags + */ + set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); + set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); + set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); + + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + ice_service_task_schedule(vsi->back); +} + +/** + * ice_fdb_add - add an entry to the hardware database + * @ndm: the input from the stack + * @tb: pointer to array of nladdr (unused) + * @dev: the net device pointer + * @addr: the MAC address entry being added + * @vid: VLAN id + * @flags: instructions from stack about fdb operation + */ +static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], + struct net_device *dev, const unsigned char *addr, + u16 vid, u16 flags) +{ + int err; + + if (vid) { + netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); + return -EINVAL; + } + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + netdev_err(dev, "FDB only supports static addresses\n"); + return -EINVAL; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + else + err = -EINVAL; + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +/** + * ice_fdb_del - delete an entry from the hardware database + * @ndm: the input from the stack + * @tb: pointer to array of nladdr (unused) + * @dev: the net device pointer + * @addr: the MAC address entry being added + * @vid: VLAN id + */ +static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + __always_unused u16 vid) +{ + int err; + + if (ndm->ndm_state & NUD_PERMANENT) { + netdev_err(dev, "FDB only supports static addresses\n"); + return -EINVAL; + } + + if (is_unicast_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + else + err = -EINVAL; + + return err; +} + +/** + * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx + * @vsi: the vsi being changed + */ +static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) +{ + struct device *dev = &vsi->back->pdev->dev; + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx ctxt = { 0 }; + enum ice_status status; + + /* Here we are configuring the VSI to let the driver add VLAN tags by + * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN + * tag insertion happens in the Tx hot path, in ice_tx_map. 
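ice_fdb_add above dispatches on the address class: unicast and link-local entries go to the unicast table, multicast entries to the multicast table, and -EEXIST is only reported when the caller set NLM_F_EXCL. The classification itself is bit tests on the leading octets; a small self-contained sketch:

#include <stdio.h>

/* I/G bit: the address is multicast when bit 0 of the first octet is set */
static int is_multicast(const unsigned char *a)
{
	return a[0] & 0x01;
}

/* 01:80:c2:00:00:0x is the IEEE 802.1 link-local block */
static int is_link_local(const unsigned char *a)
{
	return a[0] == 0x01 && a[1] == 0x80 && a[2] == 0xc2 &&
	       a[3] == 0x00 && a[4] == 0x00 && (a[5] & 0xf0) == 0x00;
}

static const char *classify(const unsigned char *a)
{
	if (!is_multicast(a) || is_link_local(a))
		return "unicast table";
	return "multicast table";
}

int main(void)
{
	unsigned char uc[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("%s\n", classify(uc)); /* unicast table */
	printf("%s\n", classify(mc)); /* multicast table */
	return 0;
}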
+ */
+ ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL;
+
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.vsi_num = vsi->vsi_num;
+
+ status = ice_aq_update_vsi(hw, &ctxt, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+ return 0;
+}
+
+/**
+ * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
+ * @vsi: the vsi being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ */
+static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ /* Here we are configuring what the VSI should do with the VLAN tag in
+ * the Rx packet. We can either leave the tag in the packet or put it in
+ * the Rx descriptor.
+ */
+ if (ena) {
+ /* Strip VLAN tag from Rx packet and put it in the desc */
+ ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ } else {
+ /* Disable stripping. Leave tag in packet */
+ ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING;
+ }
+
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.vsi_num = vsi->vsi_num;
+
+ status = ice_aq_update_vsi(hw, &ctxt, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
+ ena, status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+ return 0;
+}
+
+/**
+ * ice_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ */
+static int ice_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ int ret = 0;
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
+ ret = ice_vsi_manage_vlan_stripping(vsi, true);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
+ ret = ice_vsi_manage_vlan_stripping(vsi, false);
+ else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
+ !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
+ ret = ice_vsi_manage_vlan_insertion(vsi);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
+ ret = ice_vsi_manage_vlan_insertion(vsi);
+
+ return ret;
+}
+
+/**
+ * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI
+ * @vsi: VSI to setup vlan properties for
+ */
+static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
+{
+ int ret = 0;
+
+ if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ ret = ice_vsi_manage_vlan_stripping(vsi, true);
+ if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ ret = ice_vsi_manage_vlan_insertion(vsi);
+
+ return ret;
+}
+
+/**
+ * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
+ * @vsi: the VSI being brought back up
+ */
+static int ice_restore_vlan(struct ice_vsi *vsi)
+{
+ int err;
+ u16 vid;
+
+ if (!vsi->netdev)
+ return -EINVAL;
+
+ err = ice_vsi_vlan_setup(vsi);
+ if (err)
+ return err;
+
+ for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
+ err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ice_setup_tx_ctx - setup a 
struct ice_tlan_ctx instance + * @ring: The Tx ring to configure + * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized + * @pf_q: queue index in the PF space + * + * Configure the Tx descriptor ring in TLAN context. + */ +static void +ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) +{ + struct ice_vsi *vsi = ring->vsi; + struct ice_hw *hw = &vsi->back->hw; + + tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; + + tlan_ctx->port_num = vsi->port_info->lport; + + /* Transmit Queue Length */ + tlan_ctx->qlen = ring->count; + + /* PF number */ + tlan_ctx->pf_num = hw->pf_id; + + /* queue belongs to a specific VSI type + * VF / VM index should be programmed per vmvf_type setting: + * for vmvf_type = VF, it is VF number between 0-256 + * for vmvf_type = VM, it is VM number between 0-767 + * for PF or EMP this field should be set to zero + */ + switch (vsi->type) { + case ICE_VSI_PF: + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; + break; + default: + return; + } + + /* make sure the context is associated with the right VSI */ + tlan_ctx->src_vsi = vsi->vsi_num; + + tlan_ctx->tso_ena = ICE_TX_LEGACY; + tlan_ctx->tso_qnum = pf_q; + + /* Legacy or Advanced Host Interface: + * 0: Advanced Host Interface + * 1: Legacy Host Interface + */ + tlan_ctx->legacy_int = ICE_TX_LEGACY; +} + +/** + * ice_vsi_cfg_txqs - Configure the VSI for Tx + * @vsi: the VSI being configured + * + * Return 0 on success and a negative value on error + * Configure the Tx VSI for operation. + */ +static int ice_vsi_cfg_txqs(struct ice_vsi *vsi) +{ + struct ice_aqc_add_tx_qgrp *qg_buf; + struct ice_aqc_add_txqs_perq *txq; + struct ice_pf *pf = vsi->back; + enum ice_status status; + u16 buf_len, i, pf_q; + int err = 0, tc = 0; + u8 num_q_grps; + + buf_len = sizeof(struct ice_aqc_add_tx_qgrp); + qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); + if (!qg_buf) + return -ENOMEM; + + if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) { + err = -EINVAL; + goto err_cfg_txqs; + } + qg_buf->num_txqs = 1; + num_q_grps = 1; + + /* set up and configure the tx queues */ + ice_for_each_txq(vsi, i) { + struct ice_tlan_ctx tlan_ctx = { 0 }; + + pf_q = vsi->txq_map[i]; + ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q); + /* copy context contents into the qg_buf */ + qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); + ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, + ice_tlan_ctx_info); + + /* init queue specific tail reg. It is referred as transmit + * comm scheduler queue doorbell. + */ + vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); + status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc, + num_q_grps, qg_buf, buf_len, NULL); + if (status) { + dev_err(&vsi->back->pdev->dev, + "Failed to set LAN Tx queue context, error: %d\n", + status); + err = -ENODEV; + goto err_cfg_txqs; + } + + /* Add Tx Queue TEID into the VSI tx ring from the response + * This will complete configuring and enabling the queue. + */ + txq = &qg_buf->txqs[0]; + if (pf_q == le16_to_cpu(txq->txq_id)) + vsi->tx_rings[i]->txq_teid = + le32_to_cpu(txq->q_teid); + } +err_cfg_txqs: + devm_kfree(&pf->pdev->dev, qg_buf); + return err; +} + +/** + * ice_setup_rx_ctx - Configure a receive ring context + * @ring: The Rx ring to configure + * + * Configure the Rx descriptor ring in RLAN context. 
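Several RLAN context fields in ice_setup_rx_ctx below are written in hardware units rather than bytes: the ring base address and the data buffer size are both in 128-byte units (hence the >> 7 shifts), and rxmax is clamped to five chained buffers. The arithmetic as a sketch, with example values:

#include <stdio.h>
#include <stdint.h>

#define ICE_MAX_CHAINED_RX_BUFS 5 /* as used by the driver */

int main(void)
{
	uint64_t dma = 0x1fe000;     /* ring base, 128-byte aligned (example) */
	unsigned int buf_len = 2048; /* rx_buf_len */
	unsigned int max_frame = 9728;

	unsigned int dbuf = buf_len >> 7;  /* 2048 bytes -> 16 units of 128 B */
	uint64_t base = dma >> 7;          /* base address in 128-B units */
	unsigned int cap = ICE_MAX_CHAINED_RX_BUFS * buf_len;
	unsigned int rxmax = max_frame < cap ? max_frame : cap;

	printf("dbuf=%u base=0x%llx rxmax=%u\n",
	       dbuf, (unsigned long long)base, rxmax);
	return 0;
}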
+ */ +static int ice_setup_rx_ctx(struct ice_ring *ring) +{ + struct ice_vsi *vsi = ring->vsi; + struct ice_hw *hw = &vsi->back->hw; + u32 rxdid = ICE_RXDID_FLEX_NIC; + struct ice_rlan_ctx rlan_ctx; + u32 regval; + u16 pf_q; + int err; + + /* what is RX queue number in global space of 2K rx queues */ + pf_q = vsi->rxq_map[ring->q_index]; + + /* clear the context structure first */ + memset(&rlan_ctx, 0, sizeof(rlan_ctx)); + + rlan_ctx.base = ring->dma >> 7; + + rlan_ctx.qlen = ring->count; + + /* Receive Packet Data Buffer Size. + * The Packet Data Buffer Size is defined in 128 byte units. + */ + rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; + + /* use 32 byte descriptors */ + rlan_ctx.dsize = 1; + + /* Strip the Ethernet CRC bytes before the packet is posted to host + * memory. + */ + rlan_ctx.crcstrip = 1; + + /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ + rlan_ctx.l2tsel = 1; + + rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; + rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; + rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; + + /* This controls whether VLAN is stripped from inner headers + * The VLAN in the inner L2 header is stripped to the receive + * descriptor if enabled by this flag. + */ + rlan_ctx.showiv = 0; + + /* Max packet size for this queue - must not be set to a larger value + * than 5 x DBUF + */ + rlan_ctx.rxmax = min_t(u16, vsi->max_frame, + ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); + + /* Rx queue threshold in units of 64 */ + rlan_ctx.lrxqthresh = 1; + + /* Enable Flexible Descriptors in the queue context which + * allows this driver to select a specific receive descriptor format + */ + regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); + regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & + QRXFLXP_CNTXT_RXDID_IDX_M; + + /* increasing context priority to pick up profile id; + * default is 0x01; setting to 0x03 to ensure profile + * is programming if prev context is of same priority + */ + regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & + QRXFLXP_CNTXT_RXDID_PRIO_M; + + wr32(hw, QRXFLXP_CNTXT(pf_q), regval); + + /* Absolute queue number out of 2K needs to be passed */ + err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); + if (err) { + dev_err(&vsi->back->pdev->dev, + "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", + pf_q, err); + return -EIO; + } + + /* init queue specific tail register */ + ring->tail = hw->hw_addr + QRX_TAIL(pf_q); + writel(0, ring->tail); + ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); + + return 0; +} + +/** + * ice_vsi_cfg_rxqs - Configure the VSI for Rx + * @vsi: the VSI being configured + * + * Return 0 on success and a negative value on error + * Configure the Rx VSI for operation. 
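ice_vsi_cfg_rxqs below derives the hardware frame limit from the MTU by adding the Ethernet overhead back in (L2 header, FCS, and one VLAN tag), keeping the 2048-byte buffer default for standard MTUs. The same computation in a runnable sketch:

#include <stdio.h>

#define ETH_HLEN 14
#define ETH_FCS_LEN 4
#define VLAN_HLEN 4
#define ETH_DATA_LEN 1500
#define ICE_RXBUF_2048 2048

static int max_frame_for(int mtu)
{
	if (mtu > ETH_DATA_LEN)
		return mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	return ICE_RXBUF_2048;
}

int main(void)
{
	printf("mtu 1500 -> %d\n", max_frame_for(1500)); /* 2048 */
	printf("mtu 9000 -> %d\n", max_frame_for(9000)); /* 9022 */
	return 0;
}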
+ */ +static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) +{ + int err = 0; + u16 i; + + if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) + vsi->max_frame = vsi->netdev->mtu + + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + else + vsi->max_frame = ICE_RXBUF_2048; + + vsi->rx_buf_len = ICE_RXBUF_2048; + /* set up individual rings */ + for (i = 0; i < vsi->num_rxq && !err; i++) + err = ice_setup_rx_ctx(vsi->rx_rings[i]); + + if (err) { + dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n"); + return -EIO; + } + return err; +} + +/** + * ice_vsi_cfg - Setup the VSI + * @vsi: the VSI being configured + * + * Return 0 on success and negative value on error + */ +static int ice_vsi_cfg(struct ice_vsi *vsi) +{ + int err; + + ice_set_rx_mode(vsi->netdev); + + err = ice_restore_vlan(vsi); + if (err) + return err; + + err = ice_vsi_cfg_txqs(vsi); + if (!err) + err = ice_vsi_cfg_rxqs(vsi); + + return err; +} + +/** + * ice_vsi_stop_tx_rings - Disable Tx rings + * @vsi: the VSI being configured + */ +static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u32 *q_teids, val; + u16 *q_ids, i; + int err = 0; + + if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) + return -EINVAL; + + q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), + GFP_KERNEL); + if (!q_teids) + return -ENOMEM; + + q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), + GFP_KERNEL); + if (!q_ids) { + err = -ENOMEM; + goto err_alloc_q_ids; + } + + /* set up the tx queue list to be disabled */ + ice_for_each_txq(vsi, i) { + u16 v_idx; + + if (!vsi->tx_rings || !vsi->tx_rings[i]) { + err = -EINVAL; + goto err_out; + } + + q_ids[i] = vsi->txq_map[i]; + q_teids[i] = vsi->tx_rings[i]->txq_teid; + + /* clear cause_ena bit for disabled queues */ + val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); + val &= ~QINT_TQCTL_CAUSE_ENA_M; + wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); + + /* software is expected to wait for 100 ns */ + ndelay(100); + + /* trigger a software interrupt for the vector associated to + * the queue to schedule napi handler + */ + v_idx = vsi->tx_rings[i]->q_vector->v_idx; + wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx), + GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); + } + status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, + NULL); + if (status) { + dev_err(&pf->pdev->dev, + "Failed to disable LAN Tx queues, error: %d\n", + status); + err = -ENODEV; + } + +err_out: + devm_kfree(&pf->pdev->dev, q_ids); + +err_alloc_q_ids: + devm_kfree(&pf->pdev->dev, q_teids); + + return err; +} + +/** + * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled + * @pf: the PF being configured + * @pf_q: the PF queue + * @ena: enable or disable state of the queue + * + * This routine will wait for the given Rx queue of the PF to reach the + * enabled or disabled state. + * Returns -ETIMEDOUT in case of failing to reach the requested state after + * multiple retries; else will return 0 in case of success. 
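The wait helper documented above is a bounded poll: re-read the queue control register until the enable status bit matches the request, giving up after a fixed retry budget. A user-space analogue with a faked register:

#include <stdio.h>
#include <stdbool.h>

#define QENA_STAT_BIT 0x4
#define RETRY_LIMIT 5

static unsigned int fake_qrx_ctrl; /* stand-in for the QRX_CTRL register */

static unsigned int read_reg(void)
{
	/* pretend the hardware completes the transition on the second read */
	static int reads;

	if (++reads >= 2)
		fake_qrx_ctrl |= QENA_STAT_BIT;
	return fake_qrx_ctrl;
}

static int rxq_wait(bool ena)
{
	int i;

	for (i = 0; i < RETRY_LIMIT; i++) {
		unsigned int reg = read_reg();

		if (ena == !!(reg & QENA_STAT_BIT))
			return 0;
		/* the driver sleeps 10-20 us here via usleep_range() */
	}
	return -1; /* the driver returns -ETIMEDOUT */
}

int main(void)
{
	printf("wait for enable -> %d\n", rxq_wait(true));
	return 0;
}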
+ */ +static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) +{ + int i; + + for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { + u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q)); + + if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) + break; + + usleep_range(10, 20); + } + if (i >= ICE_Q_WAIT_RETRY_LIMIT) + return -ETIMEDOUT; + + return 0; +} + +/** + * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings + * @vsi: the VSI being configured + * @ena: start or stop the rx rings + */ +static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + int i, j, ret = 0; + + for (i = 0; i < vsi->num_rxq; i++) { + int pf_q = vsi->rxq_map[i]; + u32 rx_reg; + + for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) { + rx_reg = rd32(hw, QRX_CTRL(pf_q)); + if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) == + ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1)) + break; + usleep_range(1000, 2000); + } + + /* Skip if the queue is already in the requested state */ + if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) + continue; + + /* turn on/off the queue */ + if (ena) + rx_reg |= QRX_CTRL_QENA_REQ_M; + else + rx_reg &= ~QRX_CTRL_QENA_REQ_M; + wr32(hw, QRX_CTRL(pf_q), rx_reg); + + /* wait for the change to finish */ + ret = ice_pf_rxq_wait(pf, pf_q, ena); + if (ret) { + dev_err(&pf->pdev->dev, + "VSI idx %d Rx ring %d %sable timeout\n", + vsi->idx, pf_q, (ena ? "en" : "dis")); + break; + } + } + + return ret; +} + +/** + * ice_vsi_start_rx_rings - start VSI's rx rings + * @vsi: the VSI whose rings are to be started + * + * Returns 0 on success and a negative value on error + */ +static int ice_vsi_start_rx_rings(struct ice_vsi *vsi) +{ + return ice_vsi_ctrl_rx_rings(vsi, true); +} + +/** + * ice_vsi_stop_rx_rings - stop VSI's rx rings + * @vsi: the VSI + * + * Returns 0 on success and a negative value on error + */ +static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) +{ + return ice_vsi_ctrl_rx_rings(vsi, false); +} + +/** + * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings + * @vsi: the VSI + * Returns 0 on success and a negative value on error + */ +static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi) +{ + int err_tx, err_rx; + + err_tx = ice_vsi_stop_tx_rings(vsi); + if (err_tx) + dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n"); + + err_rx = ice_vsi_stop_rx_rings(vsi); + if (err_rx) + dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n"); + + if (err_tx || err_rx) + return -EIO; + + return 0; +} + +/** + * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI + * @vsi: the VSI being configured + */ +static void ice_napi_enable_all(struct ice_vsi *vsi) +{ + int q_idx; + + if (!vsi->netdev) + return; + + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) + napi_enable(&vsi->q_vectors[q_idx]->napi); +} + +/** + * ice_up_complete - Finish the last steps of bringing up a connection + * @vsi: The VSI being configured + * + * Return 0 on success and negative value on error + */ +static int ice_up_complete(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int err; + + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + ice_vsi_cfg_msix(vsi); + else + return -ENOTSUPP; + + /* Enable only Rx rings, Tx rings were enabled by the FW when the + * Tx queue group list was configured and the context bits were + * programmed using ice_vsi_cfg_txqs + */ + err = ice_vsi_start_rx_rings(vsi); + if (err) + return err; + + clear_bit(__ICE_DOWN, vsi->state); + ice_napi_enable_all(vsi); + ice_vsi_ena_irq(vsi); + + if (vsi->port_info && + 
(vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && + vsi->netdev) { + ice_print_link_msg(vsi, true); + netif_tx_start_all_queues(vsi->netdev); + netif_carrier_on(vsi->netdev); + } + + ice_service_task_schedule(pf); + + return err; +} + +/** + * ice_up - Bring the connection back up after being down + * @vsi: VSI being configured + */ +int ice_up(struct ice_vsi *vsi) +{ + int err; + + err = ice_vsi_cfg(vsi); + if (!err) + err = ice_up_complete(vsi); + + return err; +} + +/** + * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring + * @ring: Tx or Rx ring to read stats from + * @pkts: packets stats counter + * @bytes: bytes stats counter + * + * This function fetches stats from the ring considering the atomic operations + * that needs to be performed to read u64 values in 32 bit machine. + */ +static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, + u64 *bytes) +{ + unsigned int start; + *pkts = 0; + *bytes = 0; + + if (!ring) + return; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + *pkts = ring->stats.pkts; + *bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +} + +/** + * ice_stat_update40 - read 40 bit stat from the chip and update stat values + * @hw: ptr to the hardware info + * @hireg: high 32 bit HW register to read from + * @loreg: low 32 bit HW register to read from + * @prev_stat_loaded: bool to specify if previous stats are loaded + * @prev_stat: ptr to previous loaded stat value + * @cur_stat: ptr to current stat value + */ +static void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, + bool prev_stat_loaded, u64 *prev_stat, + u64 *cur_stat) +{ + u64 new_data; + + new_data = rd32(hw, loreg); + new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; + + /* device stats are not reset at PFR, they likely will not be zeroed + * when the driver starts. So save the first values read and use them as + * offsets to be subtracted from the raw values in order to report stats + * that count from zero. + */ + if (!prev_stat_loaded) + *prev_stat = new_data; + if (likely(new_data >= *prev_stat)) + *cur_stat = new_data - *prev_stat; + else + /* to manage the potential roll-over */ + *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat; + *cur_stat &= 0xFFFFFFFFFFULL; +} + +/** + * ice_stat_update32 - read 32 bit stat from the chip and update stat values + * @hw: ptr to the hardware info + * @reg: HW register to read from + * @prev_stat_loaded: bool to specify if previous stats are loaded + * @prev_stat: ptr to previous loaded stat value + * @cur_stat: ptr to current stat value + */ +static void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat) +{ + u32 new_data; + + new_data = rd32(hw, reg); + + /* device stats are not reset at PFR, they likely will not be zeroed + * when the driver starts. So save the first values read and use them as + * offsets to be subtracted from the raw values in order to report stats + * that count from zero. 
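The rollover handling in ice_stat_update40/ice_stat_update32 works because the delta is computed modulo the counter width: when the raw value wraps below the saved offset, adding back 2^40 (or 2^32) before subtracting restores the true difference. A worked example for the 40-bit case:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

static uint64_t delta40(uint64_t prev, uint64_t new_data)
{
	uint64_t cur;

	if (new_data >= prev)
		cur = new_data - prev;
	else
		cur = (new_data + BIT_ULL(40)) - prev; /* counter wrapped */
	return cur & 0xFFFFFFFFFFULL; /* keep the result to 40 bits */
}

int main(void)
{
	/* counter was near the 40-bit limit, then wrapped to a small value */
	uint64_t prev = 0xFFFFFFFF00ULL, now = 0x100ULL;

	/* 0x100 + 2^40 - 0xFFFFFFFF00 = 0x200, so this prints 512 */
	printf("delta = %llu\n", (unsigned long long)delta40(prev, now));
	return 0;
}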
+ */ + if (!prev_stat_loaded) + *prev_stat = new_data; + if (likely(new_data >= *prev_stat)) + *cur_stat = new_data - *prev_stat; + else + /* to manage the potential roll-over */ + *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; +} + +/** + * ice_update_eth_stats - Update VSI-specific ethernet statistics counters + * @vsi: the VSI to be updated + */ +static void ice_update_eth_stats(struct ice_vsi *vsi) +{ + struct ice_eth_stats *prev_es, *cur_es; + struct ice_hw *hw = &vsi->back->hw; + u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ + + prev_es = &vsi->eth_stats_prev; + cur_es = &vsi->eth_stats; + + ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->rx_bytes, + &cur_es->rx_bytes); + + ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->rx_unicast, + &cur_es->rx_unicast); + + ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->rx_multicast, + &cur_es->rx_multicast); + + ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->rx_broadcast, + &cur_es->rx_broadcast); + + ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_discards, &cur_es->rx_discards); + + ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->tx_bytes, + &cur_es->tx_bytes); + + ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->tx_unicast, + &cur_es->tx_unicast); + + ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->tx_multicast, + &cur_es->tx_multicast); + + ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->tx_broadcast, + &cur_es->tx_broadcast); + + ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_errors, &cur_es->tx_errors); + + vsi->stat_offsets_loaded = true; +} + +/** + * ice_update_vsi_ring_stats - Update VSI stats counters + * @vsi: the VSI to be updated + */ +static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) +{ + struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; + struct ice_ring *ring; + u64 pkts, bytes; + int i; + + /* reset netdev stats */ + vsi_stats->tx_packets = 0; + vsi_stats->tx_bytes = 0; + vsi_stats->rx_packets = 0; + vsi_stats->rx_bytes = 0; + + /* reset non-netdev (extended) stats */ + vsi->tx_restart = 0; + vsi->tx_busy = 0; + vsi->tx_linearize = 0; + vsi->rx_buf_failed = 0; + vsi->rx_page_failed = 0; + + rcu_read_lock(); + + /* update Tx rings counters */ + ice_for_each_txq(vsi, i) { + ring = READ_ONCE(vsi->tx_rings[i]); + ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); + vsi_stats->tx_packets += pkts; + vsi_stats->tx_bytes += bytes; + vsi->tx_restart += ring->tx_stats.restart_q; + vsi->tx_busy += ring->tx_stats.tx_busy; + vsi->tx_linearize += ring->tx_stats.tx_linearize; + } + + /* update Rx rings counters */ + ice_for_each_rxq(vsi, i) { + ring = READ_ONCE(vsi->rx_rings[i]); + ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); + vsi_stats->rx_packets += pkts; + vsi_stats->rx_bytes += bytes; + vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; + vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; + } + + rcu_read_unlock(); +} + +/** + * ice_update_vsi_stats - Update VSI stats counters + * @vsi: the VSI to be updated + */ +static void ice_update_vsi_stats(struct ice_vsi *vsi) +{ + struct rtnl_link_stats64 *cur_ns = 
&vsi->net_stats; + struct ice_eth_stats *cur_es = &vsi->eth_stats; + struct ice_pf *pf = vsi->back; + + if (test_bit(__ICE_DOWN, vsi->state) || + test_bit(__ICE_CFG_BUSY, pf->state)) + return; + + /* get stats as recorded by Tx/Rx rings */ + ice_update_vsi_ring_stats(vsi); + + /* get VSI stats as recorded by the hardware */ + ice_update_eth_stats(vsi); + + cur_ns->tx_errors = cur_es->tx_errors; + cur_ns->rx_dropped = cur_es->rx_discards; + cur_ns->tx_dropped = cur_es->tx_discards; + cur_ns->multicast = cur_es->rx_multicast; + + /* update some more netdev stats if this is main VSI */ + if (vsi->type == ICE_VSI_PF) { + cur_ns->rx_crc_errors = pf->stats.crc_errors; + cur_ns->rx_errors = pf->stats.crc_errors + + pf->stats.illegal_bytes; + cur_ns->rx_length_errors = pf->stats.rx_len_errors; + } +} + +/** + * ice_update_pf_stats - Update PF port stats counters + * @pf: PF whose stats needs to be updated + */ +static void ice_update_pf_stats(struct ice_pf *pf) +{ + struct ice_hw_port_stats *prev_ps, *cur_ps; + struct ice_hw *hw = &pf->hw; + u8 pf_id; + + prev_ps = &pf->stats_prev; + cur_ps = &pf->stats; + pf_id = hw->pf_id; + + ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id), + pf->stat_prev_loaded, &prev_ps->eth.rx_bytes, + &cur_ps->eth.rx_bytes); + + ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id), + pf->stat_prev_loaded, &prev_ps->eth.rx_unicast, + &cur_ps->eth.rx_unicast); + + ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id), + pf->stat_prev_loaded, &prev_ps->eth.rx_multicast, + &cur_ps->eth.rx_multicast); + + ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id), + pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast, + &cur_ps->eth.rx_broadcast); + + ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id), + pf->stat_prev_loaded, &prev_ps->eth.tx_bytes, + &cur_ps->eth.tx_bytes); + + ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id), + pf->stat_prev_loaded, &prev_ps->eth.tx_unicast, + &cur_ps->eth.tx_unicast); + + ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id), + pf->stat_prev_loaded, &prev_ps->eth.tx_multicast, + &cur_ps->eth.tx_multicast); + + ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id), + pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast, + &cur_ps->eth.tx_broadcast); + + ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded, + &prev_ps->tx_dropped_link_down, + &cur_ps->tx_dropped_link_down); + + ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id), + pf->stat_prev_loaded, &prev_ps->rx_size_64, + &cur_ps->rx_size_64); + + ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id), + pf->stat_prev_loaded, &prev_ps->rx_size_127, + &cur_ps->rx_size_127); + + ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id), + pf->stat_prev_loaded, &prev_ps->rx_size_255, + &cur_ps->rx_size_255); + + ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id), + pf->stat_prev_loaded, &prev_ps->rx_size_511, + &cur_ps->rx_size_511); + + ice_stat_update40(hw, GLPRT_PRC1023H(pf_id), + GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); + + ice_stat_update40(hw, GLPRT_PRC1522H(pf_id), + GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); + + ice_stat_update40(hw, GLPRT_PRC9522H(pf_id), + GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_size_big, &cur_ps->rx_size_big); + + ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id), + pf->stat_prev_loaded, &prev_ps->tx_size_64, + 
&cur_ps->tx_size_64); + + ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id), + pf->stat_prev_loaded, &prev_ps->tx_size_127, + &cur_ps->tx_size_127); + + ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id), + pf->stat_prev_loaded, &prev_ps->tx_size_255, + &cur_ps->tx_size_255); + + ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id), + pf->stat_prev_loaded, &prev_ps->tx_size_511, + &cur_ps->tx_size_511); + + ice_stat_update40(hw, GLPRT_PTC1023H(pf_id), + GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, + &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); + + ice_stat_update40(hw, GLPRT_PTC1522H(pf_id), + GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, + &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); + + ice_stat_update40(hw, GLPRT_PTC9522H(pf_id), + GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, + &prev_ps->tx_size_big, &cur_ps->tx_size_big); + + ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded, + &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); + + ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded, + &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); + + ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded, + &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); + + ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded, + &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); + + ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded, + &prev_ps->crc_errors, &cur_ps->crc_errors); + + ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded, + &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); + + ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded, + &prev_ps->mac_local_faults, + &cur_ps->mac_local_faults); + + ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded, + &prev_ps->mac_remote_faults, + &cur_ps->mac_remote_faults); + + ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); + + ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_undersize, &cur_ps->rx_undersize); + + ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_fragments, &cur_ps->rx_fragments); + + ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_oversize, &cur_ps->rx_oversize); + + ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded, + &prev_ps->rx_jabber, &cur_ps->rx_jabber); + + pf->stat_prev_loaded = true; +} + +/** + * ice_get_stats64 - get statistics for network device structure + * @netdev: network interface device structure + * @stats: main device statistics structure + */ +static +void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct rtnl_link_stats64 *vsi_stats; + struct ice_vsi *vsi = np->vsi; + + vsi_stats = &vsi->net_stats; + + if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq) + return; + /* netdev packet/byte stats come from ring counter. These are obtained + * by summing up ring counters (done by ice_update_vsi_ring_stats). + */ + ice_update_vsi_ring_stats(vsi); + stats->tx_packets = vsi_stats->tx_packets; + stats->tx_bytes = vsi_stats->tx_bytes; + stats->rx_packets = vsi_stats->rx_packets; + stats->rx_bytes = vsi_stats->rx_bytes; + + /* The rest of the stats can be read from the hardware but instead we + * just return values that the watchdog task has already obtained from + * the hardware. 
+ */ + stats->multicast = vsi_stats->multicast; + stats->tx_errors = vsi_stats->tx_errors; + stats->tx_dropped = vsi_stats->tx_dropped; + stats->rx_errors = vsi_stats->rx_errors; + stats->rx_dropped = vsi_stats->rx_dropped; + stats->rx_crc_errors = vsi_stats->rx_crc_errors; + stats->rx_length_errors = vsi_stats->rx_length_errors; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * ice_netpoll - polling "interrupt" handler + * @netdev: network interface device structure + * + * Used by netconsole to send skbs without having to re-enable interrupts. + * This is not called in the normal interrupt path. + */ +static void ice_netpoll(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + int i; + + if (test_bit(__ICE_DOWN, vsi->state) || + !test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + return; + + for (i = 0; i < vsi->num_q_vectors; i++) + ice_msix_clean_rings(0, vsi->q_vectors[i]); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +/** + * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI + * @vsi: VSI having NAPI disabled + */ +static void ice_napi_disable_all(struct ice_vsi *vsi) +{ + int q_idx; + + if (!vsi->netdev) + return; + + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) + napi_disable(&vsi->q_vectors[q_idx]->napi); +} + +/** + * ice_down - Shutdown the connection + * @vsi: The VSI being stopped + */ +int ice_down(struct ice_vsi *vsi) +{ + int i, err; + + /* Caller of this function is expected to set the + * vsi->state __ICE_DOWN bit + */ + if (vsi->netdev) { + netif_carrier_off(vsi->netdev); + netif_tx_disable(vsi->netdev); + } + + ice_vsi_dis_irq(vsi); + err = ice_vsi_stop_tx_rx_rings(vsi); + ice_napi_disable_all(vsi); + + ice_for_each_txq(vsi, i) + ice_clean_tx_ring(vsi->tx_rings[i]); + + ice_for_each_rxq(vsi, i) + ice_clean_rx_ring(vsi->rx_rings[i]); + + if (err) + netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", + vsi->vsi_num, vsi->vsw->sw_id); + return err; +} + +/** + * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources + * @vsi: VSI having resources allocated + * + * Return 0 on success, negative on failure + */ +static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) +{ + int i, err; + + if (!vsi->num_txq) { + dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", + vsi->vsi_num); + return -EINVAL; + } + + ice_for_each_txq(vsi, i) { + err = ice_setup_tx_ring(vsi->tx_rings[i]); + if (err) + break; + } + + return err; +} + +/** + * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources + * @vsi: VSI having resources allocated + * + * Return 0 on success, negative on failure + */ +static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) +{ + int i, err; + + if (!vsi->num_rxq) { + dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", + vsi->vsi_num); + return -EINVAL; + } + + ice_for_each_rxq(vsi, i) { + err = ice_setup_rx_ring(vsi->rx_rings[i]); + if (err) + break; + } + + return err; +} + +/** + * ice_vsi_req_irq - Request IRQ from the OS + * @vsi: The VSI IRQ is being requested for + * @basename: name for the vector + * + * Return 0 on success and a negative value on error + */ +static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename) +{ + struct ice_pf *pf = vsi->back; + int err = -EINVAL; + + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + err = ice_vsi_req_irq_msix(vsi, basename); + + return err; +} + +/** + * ice_vsi_free_tx_rings - Free Tx resources for VSI queues + * @vsi: the VSI having resources freed + */ +static void 
ice_vsi_free_tx_rings(struct ice_vsi *vsi) +{ + int i; + + if (!vsi->tx_rings) + return; + + ice_for_each_txq(vsi, i) + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) + ice_free_tx_ring(vsi->tx_rings[i]); +} + +/** + * ice_vsi_free_rx_rings - Free Rx resources for VSI queues + * @vsi: the VSI having resources freed + */ +static void ice_vsi_free_rx_rings(struct ice_vsi *vsi) +{ + int i; + + if (!vsi->rx_rings) + return; + + ice_for_each_rxq(vsi, i) + if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) + ice_free_rx_ring(vsi->rx_rings[i]); +} + +/** + * ice_vsi_open - Called when a network interface is made active + * @vsi: the VSI to open + * + * Initialization of the VSI + * + * Returns 0 on success, negative value on error + */ +static int ice_vsi_open(struct ice_vsi *vsi) +{ + char int_name[ICE_INT_NAME_STR_LEN]; + struct ice_pf *pf = vsi->back; + int err; + + /* allocate descriptors */ + err = ice_vsi_setup_tx_rings(vsi); + if (err) + goto err_setup_tx; + + err = ice_vsi_setup_rx_rings(vsi); + if (err) + goto err_setup_rx; + + err = ice_vsi_cfg(vsi); + if (err) + goto err_setup_rx; + + snprintf(int_name, sizeof(int_name) - 1, "%s-%s", + dev_driver_string(&pf->pdev->dev), vsi->netdev->name); + err = ice_vsi_req_irq(vsi, int_name); + if (err) + goto err_setup_rx; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); + if (err) + goto err_set_qs; + + err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); + if (err) + goto err_set_qs; + + err = ice_up_complete(vsi); + if (err) + goto err_up_complete; + + return 0; + +err_up_complete: + ice_down(vsi); +err_set_qs: + ice_vsi_free_irq(vsi); +err_setup_rx: + ice_vsi_free_rx_rings(vsi); +err_setup_tx: + ice_vsi_free_tx_rings(vsi); + + return err; +} + +/** + * ice_vsi_close - Shut down a VSI + * @vsi: the VSI being shut down + */ +static void ice_vsi_close(struct ice_vsi *vsi) +{ + if (!test_and_set_bit(__ICE_DOWN, vsi->state)) + ice_down(vsi); + + ice_vsi_free_irq(vsi); + ice_vsi_free_tx_rings(vsi); + ice_vsi_free_rx_rings(vsi); +} + +/** + * ice_rss_clean - Delete RSS related VSI structures that hold user inputs + * @vsi: the VSI being removed + */ +static void ice_rss_clean(struct ice_vsi *vsi) +{ + struct ice_pf *pf; + + pf = vsi->back; + + if (vsi->rss_hkey_user) + devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user); + if (vsi->rss_lut_user) + devm_kfree(&pf->pdev->dev, vsi->rss_lut_user); +} + +/** + * ice_vsi_release - Delete a VSI and free its resources + * @vsi: the VSI being removed + * + * Returns 0 on success or < 0 on error + */ +static int ice_vsi_release(struct ice_vsi *vsi) +{ + struct ice_pf *pf; + + if (!vsi->back) + return -ENODEV; + pf = vsi->back; + + if (vsi->netdev) { + unregister_netdev(vsi->netdev); + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } + + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + ice_rss_clean(vsi); + + /* Disable VSI and free resources */ + ice_vsi_dis_irq(vsi); + ice_vsi_close(vsi); + + /* reclaim interrupt vectors back to PF */ + ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx); + pf->num_avail_msix += vsi->num_q_vectors; + + ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num); + ice_vsi_delete(vsi); + ice_vsi_free_q_vectors(vsi); + ice_vsi_clear_rings(vsi); + + ice_vsi_put_qs(vsi); + pf->q_left_tx += vsi->alloc_txq; + pf->q_left_rx += vsi->alloc_rxq; + + ice_vsi_clear(vsi); + + return 0; +} + +/** + * ice_dis_vsi - pause a VSI + * @vsi: the VSI being paused + */ +static void ice_dis_vsi(struct ice_vsi *vsi) +{ + if 
(test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ set_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->netdev && netif_running(vsi->netdev) &&
+ vsi->type == ICE_VSI_PF)
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+
+ ice_vsi_close(vsi);
+}
+
+/**
+ * ice_ena_vsi - resume a VSI
+ * @vsi: the VSI being resumed
+ */
+static void ice_ena_vsi(struct ice_vsi *vsi)
+{
+ if (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))
+ return;
+
+ if (vsi->netdev && netif_running(vsi->netdev))
+ vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
+ else if (ice_vsi_open(vsi))
+ /* this clears the DOWN bit */
+ dev_dbg(&vsi->back->pdev->dev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
+ vsi->vsi_num, vsi->vsw->sw_id);
+}
+
+/**
+ * ice_pf_dis_all_vsi - Pause all VSIs on a PF
+ * @pf: the PF
+ */
+static void ice_pf_dis_all_vsi(struct ice_pf *pf)
+{
+ int v;
+
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v])
+ ice_dis_vsi(pf->vsi[v]);
+}
+
+/**
+ * ice_pf_ena_all_vsi - Resume all VSIs on a PF
+ * @pf: the PF
+ */
+static void ice_pf_ena_all_vsi(struct ice_pf *pf)
+{
+ int v;
+
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v])
+ ice_ena_vsi(pf->vsi[v]);
+}
+
+/**
+ * ice_rebuild - rebuild after reset
+ * @pf: pf to rebuild
+ */
+static void ice_rebuild(struct ice_pf *pf)
+{
+ struct device *dev = &pf->pdev->dev;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status ret;
+ int err;
+
+ if (test_bit(__ICE_DOWN, pf->state))
+ goto clear_recovery;
+
+ dev_dbg(dev, "rebuilding pf\n");
+
+ ret = ice_init_all_ctrlq(hw);
+ if (ret) {
+ dev_err(dev, "control queues init failed %d\n", ret);
+ goto fail_reset;
+ }
+
+ ret = ice_clear_pf_cfg(hw);
+ if (ret) {
+ dev_err(dev, "clear PF configuration failed %d\n", ret);
+ goto fail_reset;
+ }
+
+ ice_clear_pxe_mode(hw);
+
+ ret = ice_get_caps(hw);
+ if (ret) {
+ dev_err(dev, "ice_get_caps failed %d\n", ret);
+ goto fail_reset;
+ }
+
+ /* basic nic switch setup */
+ err = ice_setup_pf_sw(pf);
+ if (err) {
+ dev_err(dev, "ice_setup_pf_sw failed\n");
+ goto fail_reset;
+ }
+
+ /* start misc vector */
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ err = ice_req_irq_msix_misc(pf);
+ if (err) {
+ dev_err(dev, "misc vector setup failed: %d\n", err);
+ goto fail_reset;
+ }
+ }
+
+ /* restart the VSIs that were rebuilt and running before the reset */
+ ice_pf_ena_all_vsi(pf);
+
+ return;
+
+fail_reset:
+ ice_shutdown_all_ctrlq(hw);
+ set_bit(__ICE_RESET_FAILED, pf->state);
+clear_recovery:
+ set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+}
+
+/**
+ * ice_change_mtu - NDO callback to change the MTU
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u8 count = 0;
+
+ if (new_mtu == netdev->mtu) {
+ netdev_warn(netdev, "mtu is already %d\n", netdev->mtu);
+ return 0;
+ }
+
+ if (new_mtu < netdev->min_mtu) {
+ netdev_err(netdev, "new mtu invalid. min_mtu is %d\n",
+ netdev->min_mtu);
+ return -EINVAL;
+ } else if (new_mtu > netdev->max_mtu) {
+ netdev_err(netdev, "new mtu invalid. 
max_mtu is %d\n",
+ netdev->max_mtu);
+ return -EINVAL;
+ }
+ /* if a reset is in progress, wait for some time for it to complete */
+ do {
+ if (ice_is_reset_recovery_pending(pf->state)) {
+ count++;
+ usleep_range(1000, 2000);
+ } else {
+ break;
+ }
+
+ } while (count < 100);
+
+ if (count == 100) {
+ netdev_err(netdev, "can't change mtu. Device is busy\n");
+ return -EBUSY;
+ }
+
+ netdev->mtu = new_mtu;
+
+ /* if VSI is up, bring it down and then back up */
+ if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
+ int err;
+
+ err = ice_down(vsi);
+ if (err) {
+ netdev_err(netdev, "change mtu if_up err %d\n", err);
+ return err;
+ }
+
+ err = ice_up(vsi);
+ if (err) {
+ netdev_err(netdev, "change mtu if_up err %d\n", err);
+ return err;
+ }
+ }
+
+ netdev_dbg(netdev, "changed mtu to %d\n", new_mtu);
+ return 0;
+}
+
+/**
+ * ice_set_rss - Set RSS keys and lut
+ * @vsi: Pointer to VSI structure
+ * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+
+ if (seed) {
+ struct ice_aqc_get_set_rss_keys *buf =
+ (struct ice_aqc_get_set_rss_keys *)seed;
+
+ status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf);
+
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Cannot set RSS key, err %d aq_err %d\n",
+ status, hw->adminq.rq_last_status);
+ return -EIO;
+ }
+ }
+
+ if (lut) {
+ status = ice_aq_set_rss_lut(hw, vsi->vsi_num,
+ vsi->rss_lut_type, lut, lut_size);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Cannot set RSS lut, err %d aq_err %d\n",
+ status, hw->adminq.rq_last_status);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_rss - Get RSS keys and lut
+ * @vsi: Pointer to VSI structure
+ * @seed: Buffer to store the keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+
+ if (seed) {
+ struct ice_aqc_get_set_rss_keys *buf =
+ (struct ice_aqc_get_set_rss_keys *)seed;
+
+ status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Cannot get RSS key, err %d aq_err %d\n",
+ status, hw->adminq.rq_last_status);
+ return -EIO;
+ }
+ }
+
+ if (lut) {
+ status = ice_aq_get_rss_lut(hw, vsi->vsi_num,
+ vsi->rss_lut_type, lut, lut_size);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Cannot get RSS lut, err %d aq_err %d\n",
+ status, hw->adminq.rq_last_status);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_open - Called when a network interface becomes active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the netdev watchdog is enabled,
+ * and the stack is notified that the interface is ready. 
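ice_set_rss/ice_get_rss above treat the key and LUT as opaque buffers; what gives the LUT meaning is that every entry names an Rx queue. A common default layout (assumed here for illustration; this patch does not show how the default LUT is filled) spreads entries round-robin across the active queues:

#include <stdio.h>

int main(void)
{
	unsigned char lut[64]; /* lut_size here is hypothetical */
	int num_rxq = 6, i;

	for (i = 0; i < (int)sizeof(lut); i++)
		lut[i] = (unsigned char)(i % num_rxq); /* round-robin spread */

	for (i = 0; i < 16; i++)
		printf("%d ", lut[i]);
	printf("...\n");
	return 0;
}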
+ * + * Returns 0 on success, negative value on failure + */ +static int ice_open(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + int err; + + netif_carrier_off(netdev); + + err = ice_vsi_open(vsi); + + if (err) + netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", + vsi->vsi_num, vsi->vsw->sw_id); + return err; +} + +/** + * ice_stop - Disables a network interface + * @netdev: network interface device structure + * + * The stop entry point is called when an interface is de-activated by the OS, + * and the netdevice enters the DOWN state. The hardware is still under the + * driver's control, but the netdev interface is disabled. + * + * Returns success only - not allowed to fail + */ +static int ice_stop(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + + ice_vsi_close(vsi); + + return 0; +} + +/** + * ice_features_check - Validate encapsulated packet conforms to limits + * @skb: skb buffer + * @netdev: This port's netdev + * @features: Offload features that the stack believes apply + */ +static netdev_features_t +ice_features_check(struct sk_buff *skb, + struct net_device __always_unused *netdev, + netdev_features_t features) +{ + size_t len; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 64 bytes. If it is then we need to drop support for GSO. + */ + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + features &= ~NETIF_F_GSO_MASK; + + len = skb_network_header(skb) - skb->data; + if (len & ~(ICE_TXD_MACLEN_MAX)) + goto out_rm_features; + + len = skb_transport_header(skb) - skb_network_header(skb); + if (len & ~(ICE_TXD_IPLEN_MAX)) + goto out_rm_features; + + if (skb->encapsulation) { + len = skb_inner_network_header(skb) - skb_transport_header(skb); + if (len & ~(ICE_TXD_L4LEN_MAX)) + goto out_rm_features; + + len = skb_inner_transport_header(skb) - + skb_inner_network_header(skb); + if (len & ~(ICE_TXD_IPLEN_MAX)) + goto out_rm_features; + } + + return features; +out_rm_features: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +static const struct net_device_ops ice_netdev_ops = { + .ndo_open = ice_open, + .ndo_stop = ice_stop, + .ndo_start_xmit = ice_start_xmit, + .ndo_features_check = ice_features_check, + .ndo_set_rx_mode = ice_set_rx_mode, + .ndo_set_mac_address = ice_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = ice_change_mtu, + .ndo_get_stats64 = ice_get_stats64, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ice_netpoll, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, + .ndo_set_features = ice_set_features, + .ndo_fdb_add = ice_fdb_add, + .ndo_fdb_del = ice_fdb_del, +}; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c new file mode 100644 index 000000000000..fa7a69ac92b0 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. 
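One detail in ice_features_check() above deserves a note: for a limit of the form 2^k - 1, the test "len & ~limit" is non-zero exactly when len overflows the field. A minimal standalone check follows; the 127-byte limit is made up for illustration, not the real ICE_TXD_MACLEN_MAX.

#include <assert.h>
#include <stddef.h>

#define DEMO_MACLEN_MAX 0x7f /* hypothetical limit of form 2^k - 1 */

static int maclen_ok(size_t len)
{
	/* any bit set above the mask means the length overflows the field */
	return (len & ~(size_t)DEMO_MACLEN_MAX) == 0;
}

int main(void)
{
	assert(maclen_ok(14));   /* a plain Ethernet header fits */
	assert(maclen_ok(127));  /* exactly at the limit */
	assert(!maclen_ok(128)); /* one byte past the limit trips the mask */
	return 0;
}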
*/ + +#include "ice_common.h" + +/** + * ice_aq_read_nvm + * @hw: pointer to the hw struct + * @module_typeid: module pointer location in words from the NVM beginning + * @offset: byte offset from the module beginning + * @length: length of the section to be read (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @cd: pointer to command details structure or NULL + * + * Read the NVM using the admin queue commands (0x0701) + */ +static enum ice_status +ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length, + void *data, bool last_command, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + struct ice_aqc_nvm *cmd; + + cmd = &desc.params.nvm; + + /* In offset the highest byte must be zeroed. */ + if (offset & 0xFF000000) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read); + + /* If this is the last command in a series, set the proper flag. */ + if (last_command) + cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; + cmd->module_typeid = module_typeid; + cmd->offset = cpu_to_le32(offset); + cmd->length = cpu_to_le16(length); + + return ice_aq_send_cmd(hw, &desc, data, length, cd); +} + +/** + * ice_check_sr_access_params - verify params for Shadow RAM R/W operations. + * @hw: pointer to the HW structure + * @offset: offset in words from module start + * @words: number of words to access + */ +static enum ice_status +ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words) +{ + if ((offset + words) > hw->nvm.sr_words) { + ice_debug(hw, ICE_DBG_NVM, + "NVM error: offset beyond SR lmt.\n"); + return ICE_ERR_PARAM; + } + + if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) { + /* We can access only up to 4KB (one sector), in one AQ write */ + ice_debug(hw, ICE_DBG_NVM, + "NVM error: tried to access %d words, limit is %d.\n", + words, ICE_SR_SECTOR_SIZE_IN_WORDS); + return ICE_ERR_PARAM; + } + + if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) != + (offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) { + /* A single access cannot spread over two sectors */ + ice_debug(hw, ICE_DBG_NVM, + "NVM error: cannot spread over two sectors.\n"); + return ICE_ERR_PARAM; + } + + return 0; +} + +/** + * ice_read_sr_aq - Read Shadow RAM. + * @hw: pointer to the HW structure + * @offset: offset in words from module start + * @words: number of words to read + * @data: buffer for words reads from Shadow RAM + * @last_command: tells the AdminQ that this is the last command + * + * Reads 16-bit word buffers from the Shadow RAM using the admin command. + */ +static enum ice_status +ice_read_sr_aq(struct ice_hw *hw, u32 offset, u16 words, u16 *data, + bool last_command) +{ + enum ice_status status; + + status = ice_check_sr_access_params(hw, offset, words); + + /* values in "offset" and "words" parameters are sized as words + * (16 bits) but ice_aq_read_nvm expects these values in bytes. + * So do this conversion while calling ice_aq_read_nvm. + */ + if (!status) + status = ice_aq_read_nvm(hw, 0, 2 * offset, 2 * words, data, + last_command, NULL); + + return status; +} + +/** + * ice_read_sr_word_aq - Reads Shadow RAM via AQ + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_aq method. 
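The three Shadow RAM bounds checks in ice_check_sr_access_params() reduce to plain integer math. Below is a standalone sketch; SECTOR_WORDS assumes one 4 KB sector of 16-bit words (2048), so treat the constant as illustrative.

#include <stdint.h>

#define SECTOR_WORDS 2048u /* assumed: 4 KB / sizeof(u16) */

static int sr_access_ok(uint32_t sr_words, uint32_t offset, uint16_t words)
{
	if (offset + words > sr_words)
		return 0; /* would run past the end of the Shadow RAM */
	if (words > SECTOR_WORDS)
		return 0; /* more than one sector per AQ command */
	if ((offset + words - 1) / SECTOR_WORDS != offset / SECTOR_WORDS)
		return 0; /* access would straddle a sector boundary */
	return 1;
}

int main(void)
{
	/* 16 words starting at 2040 would straddle the first sector,
	 * so this access is correctly rejected
	 */
	return sr_access_ok(8192, 2040, 16) ? 1 : 0;
}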
+ */ +static enum ice_status +ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) +{ + enum ice_status status; + + status = ice_read_sr_aq(hw, offset, 1, data, true); + if (!status) + *data = le16_to_cpu(*(__le16 *)data); + + return status; +} + +/** + * ice_acquire_nvm - Generic request for acquiring the NVM ownership + * @hw: pointer to the HW structure + * @access: NVM access type (read or write) + * + * This function will request NVM ownership. + */ +static enum +ice_status ice_acquire_nvm(struct ice_hw *hw, + enum ice_aq_res_access_type access) +{ + if (hw->nvm.blank_nvm_mode) + return 0; + + return ice_acquire_res(hw, ICE_NVM_RES_ID, access); +} + +/** + * ice_release_nvm - Generic request for releasing the NVM ownership + * @hw: pointer to the HW structure + * + * This function will release NVM ownership. + */ +static void ice_release_nvm(struct ice_hw *hw) +{ + if (hw->nvm.blank_nvm_mode) + return; + + ice_release_res(hw, ICE_NVM_RES_ID); +} + +/** + * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. + */ +static enum ice_status +ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) +{ + enum ice_status status; + + status = ice_acquire_nvm(hw, ICE_RES_READ); + if (!status) { + status = ice_read_sr_word_aq(hw, offset, data); + ice_release_nvm(hw); + } + + return status; +} + +/** + * ice_init_nvm - initializes NVM setting + * @hw: pointer to the hw struct + * + * This function reads and populates NVM settings such as Shadow RAM size, + * max_timeout, and blank_nvm_mode + */ +enum ice_status ice_init_nvm(struct ice_hw *hw) +{ + struct ice_nvm_info *nvm = &hw->nvm; + u16 eetrack_lo, eetrack_hi; + enum ice_status status = 0; + u32 fla, gens_stat; + u8 sr_size; + + /* The SR size is stored regardless of the nvm programming mode + * as the blank mode may be used in the factory line. 
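The size decode in ice_init_nvm() can be sanity-checked on its own: the register field stores log2 of the Shadow RAM size in KB, and the EETRACK id is two 16-bit words glued together. A sketch with assumed values (WORDS_IN_1KB mirrors ICE_SR_WORDS_IN_1KB, and the EETRACK reads are made up):

#include <stdint.h>
#include <stdio.h>

#define WORDS_IN_1KB 512u /* 1024 bytes of 16-bit words, assumed */

static uint32_t sr_words_from_gens(uint32_t sr_size_field)
{
	return (1u << sr_size_field) * WORDS_IN_1KB; /* BIT(n) * 512 */
}

int main(void)
{
	uint16_t eetrack_lo = 0x5678, eetrack_hi = 0x1234; /* made-up reads */

	printf("sr_size=6 -> %u words (64 KB)\n",
	       (unsigned)sr_words_from_gens(6));
	printf("eetrack = 0x%08x\n",
	       (unsigned)(((uint32_t)eetrack_hi << 16) | eetrack_lo));
	return 0;
}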
+ */ + gens_stat = rd32(hw, GLNVM_GENS); + sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S; + + /* Switching to words (sr_size contains power of 2) */ + nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB; + + /* Check if we are in the normal or blank NVM programming mode */ + fla = rd32(hw, GLNVM_FLA); + if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */ + nvm->blank_nvm_mode = false; + } else { /* Blank programming mode */ + nvm->blank_nvm_mode = true; + status = ICE_ERR_NVM_BLANK_MODE; + ice_debug(hw, ICE_DBG_NVM, + "NVM init error: unsupported blank mode.\n"); + return status; + } + + status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &hw->nvm.ver); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read DEV starter version.\n"); + return status; + } + + status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n"); + return status; + } + status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n"); + return status; + } + + hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; + + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h new file mode 100644 index 000000000000..f57c414bc0a9 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_osdep.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_OSDEP_H_ +#define _ICE_OSDEP_H_ + +#include <linux/types.h> +#include <linux/io.h> +#ifndef CONFIG_64BIT +#include <linux/io-64-nonatomic-lo-hi.h> +#endif + +#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) +#define rd32(a, reg) readl((a)->hw_addr + (reg)) +#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) +#define rd64(a, reg) readq((a)->hw_addr + (reg)) + +#define ice_flush(a) rd32((a), GLGEN_STAT) +#define ICE_M(m, s) ((m) << (s)) + +struct ice_dma_mem { + void *va; + dma_addr_t pa; + size_t size; +}; + +#define ice_hw_to_dev(ptr) \ + (&(container_of((ptr), struct ice_pf, hw))->pdev->dev) + +#ifdef CONFIG_DYNAMIC_DEBUG +#define ice_debug(hw, type, fmt, args...) \ + dev_dbg(ice_hw_to_dev(hw), fmt, ##args) + +#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ + print_hex_dump_debug(KBUILD_MODNAME " ", \ + DUMP_PREFIX_OFFSET, rowsize, \ + groupsize, buf, len, false) +#else +#define ice_debug(hw, type, fmt, args...) 
\ +do { \ + if ((type) & (hw)->debug_mask) \ + dev_info(ice_hw_to_dev(hw), fmt, ##args); \ +} while (0) + +#ifdef DEBUG +#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ +do { \ + if ((type) & (hw)->debug_mask) \ + print_hex_dump_debug(KBUILD_MODNAME, \ + DUMP_PREFIX_OFFSET, \ + rowsize, groupsize, buf, \ + len, false); \ +} while (0) +#else +#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ +do { \ + struct ice_hw *hw_l = hw; \ + if ((type) & (hw_l)->debug_mask) { \ + u16 len_l = len; \ + u8 *buf_l = buf; \ + int i; \ + for (i = 0; i < (len_l - 16); i += 16) \ + ice_debug(hw_l, type, "0x%04X %16ph\n",\ + i, ((buf_l) + i)); \ + if (i < len_l) \ + ice_debug(hw_l, type, "0x%04X %*ph\n", \ + i, ((len_l) - i), ((buf_l) + i));\ + } \ +} while (0) +#endif /* DEBUG */ +#endif /* CONFIG_DYNAMIC_DEBUG */ + +#endif /* _ICE_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c new file mode 100644 index 000000000000..f16ff3e4a840 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -0,0 +1,1659 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +#include "ice_sched.h" + +/** + * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB + * @pi: port information structure + * @info: Scheduler element information from firmware + * + * This function inserts the root node of the scheduling tree topology + * to the SW DB. + */ +static enum ice_status +ice_sched_add_root_node(struct ice_port_info *pi, + struct ice_aqc_txsched_elem_data *info) +{ + struct ice_sched_node *root; + struct ice_hw *hw; + u16 max_children; + + if (!pi) + return ICE_ERR_PARAM; + + hw = pi->hw; + + root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL); + if (!root) + return ICE_ERR_NO_MEMORY; + + max_children = le16_to_cpu(hw->layer_info[0].max_children); + root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children, + sizeof(*root), GFP_KERNEL); + if (!root->children) { + devm_kfree(ice_hw_to_dev(hw), root); + return ICE_ERR_NO_MEMORY; + } + + memcpy(&root->info, info, sizeof(*info)); + pi->root = root; + return 0; +} + +/** + * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB + * @start_node: pointer to the starting ice_sched_node struct in a sub-tree + * @teid: node teid to search + * + * This function searches for a node matching the teid in the scheduling tree + * from the SW DB. The search is recursive and is restricted by the number of + * layers it has searched through; stopping at the max supported layer. 
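The TEID lookup described above is a depth-first search of an n-ary tree. The sketch below collapses the driver's two passes (direct children first, then each subtree) into one recursive walk; the types are illustrative, and as in the driver the recursion depth is bounded by the number of scheduler layers.

#include <stdint.h>
#include <stddef.h>

struct demo_node {
	uint32_t teid;
	uint16_t num_children;
	struct demo_node **children;
};

static struct demo_node *find_by_teid(struct demo_node *n, uint32_t teid)
{
	uint16_t i;

	if (!n)
		return NULL;
	if (n->teid == teid)
		return n;
	for (i = 0; i < n->num_children; i++) {
		struct demo_node *hit = find_by_teid(n->children[i], teid);

		if (hit)
			return hit; /* found in this child's subtree */
	}
	return NULL;
}

int main(void)
{
	struct demo_node leaf = { 42, 0, NULL };
	struct demo_node *kids[] = { &leaf };
	struct demo_node root = { 1, 1, kids };

	return find_by_teid(&root, 42) == &leaf ? 0 : 1;
}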
+ * + * This function needs to be called when holding the port_info->sched_lock + */ +struct ice_sched_node * +ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) +{ + u16 i; + + /* The TEID is same as that of the start_node */ + if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid) + return start_node; + + /* The node has no children or is at the max layer */ + if (!start_node->num_children || + start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM || + start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) + return NULL; + + /* Check if teid matches to any of the children nodes */ + for (i = 0; i < start_node->num_children; i++) + if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid) + return start_node->children[i]; + + /* Search within each child's sub-tree */ + for (i = 0; i < start_node->num_children; i++) { + struct ice_sched_node *tmp; + + tmp = ice_sched_find_node_by_teid(start_node->children[i], + teid); + if (tmp) + return tmp; + } + + return NULL; +} + +/** + * ice_sched_add_node - Insert the Tx scheduler node in SW DB + * @pi: port information structure + * @layer: Scheduler layer of the node + * @info: Scheduler element information from firmware + * + * This function inserts a scheduler node to the SW DB. + */ +enum ice_status +ice_sched_add_node(struct ice_port_info *pi, u8 layer, + struct ice_aqc_txsched_elem_data *info) +{ + struct ice_sched_node *parent; + struct ice_sched_node *node; + struct ice_hw *hw; + u16 max_children; + + if (!pi) + return ICE_ERR_PARAM; + + hw = pi->hw; + + /* A valid parent node should be there */ + parent = ice_sched_find_node_by_teid(pi->root, + le32_to_cpu(info->parent_teid)); + if (!parent) { + ice_debug(hw, ICE_DBG_SCHED, + "Parent Node not found for parent_teid=0x%x\n", + le32_to_cpu(info->parent_teid)); + return ICE_ERR_PARAM; + } + + node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL); + if (!node) + return ICE_ERR_NO_MEMORY; + max_children = le16_to_cpu(hw->layer_info[layer].max_children); + if (max_children) { + node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children, + sizeof(*node), GFP_KERNEL); + if (!node->children) { + devm_kfree(ice_hw_to_dev(hw), node); + return ICE_ERR_NO_MEMORY; + } + } + + node->in_use = true; + node->parent = parent; + node->tx_sched_layer = layer; + parent->children[parent->num_children++] = node; + memcpy(&node->info, info, sizeof(*info)); + return 0; +} + +/** + * ice_aq_delete_sched_elems - delete scheduler elements + * @hw: pointer to the hw struct + * @grps_req: number of groups to delete + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @grps_del: returns total number of elements deleted + * @cd: pointer to command details structure or NULL + * + * Delete scheduling elements (0x040F) + */ +static enum ice_status +ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, + struct ice_aqc_delete_elem *buf, u16 buf_size, + u16 *grps_del, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_move_delete_elem *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.add_move_delete_elem; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + cmd->num_grps_req = cpu_to_le16(grps_req); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status && grps_del) + *grps_del = le16_to_cpu(cmd->num_grps_updated); + + return status; +} + +/** + * ice_sched_remove_elems - remove nodes from hw + * @hw: pointer to the hw struct + * @parent: pointer to the 
parent node + * @num_nodes: number of nodes + * @node_teids: array of node teids to be deleted + * + * This function removes nodes from hw + */ +static enum ice_status +ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, + u16 num_nodes, u32 *node_teids) +{ + struct ice_aqc_delete_elem *buf; + u16 i, num_groups_removed = 0; + enum ice_status status; + u16 buf_size; + + buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1); + buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); + if (!buf) + return ICE_ERR_NO_MEMORY; + buf->hdr.parent_teid = parent->info.node_teid; + buf->hdr.num_elems = cpu_to_le16(num_nodes); + for (i = 0; i < num_nodes; i++) + buf->teid[i] = cpu_to_le32(node_teids[i]); + status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size, + &num_groups_removed, NULL); + if (status || num_groups_removed != 1) + ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n"); + devm_kfree(ice_hw_to_dev(hw), buf); + return status; +} + +/** + * ice_sched_get_first_node - get the first node of the given layer + * @hw: pointer to the hw struct + * @parent: pointer to the base node of the subtree + * @layer: layer number + * + * This function retrieves the first node of the given layer from the subtree + */ +static struct ice_sched_node * +ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent, + u8 layer) +{ + u8 i; + + if (layer < hw->sw_entry_point_layer) + return NULL; + for (i = 0; i < parent->num_children; i++) { + struct ice_sched_node *node = parent->children[i]; + + if (node) { + if (node->tx_sched_layer == layer) + return node; + /* this recursion is intentional, and wouldn't + * go more than 9 calls + */ + return ice_sched_get_first_node(hw, node, layer); + } + } + return NULL; +} + +/** + * ice_sched_get_tc_node - get pointer to TC node + * @pi: port information structure + * @tc: TC number + * + * This function returns the TC node pointer + */ +struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc) +{ + u8 i; + + if (!pi) + return NULL; + for (i = 0; i < pi->root->num_children; i++) + if (pi->root->children[i]->tc_num == tc) + return pi->root->children[i]; + return NULL; +} + +/** + * ice_free_sched_node - Free a Tx scheduler node from SW DB + * @pi: port information structure + * @node: pointer to the ice_sched_node struct + * + * This function frees up a node from SW DB as well as from HW + * + * This function needs to be called with the port_info->sched_lock held + */ +void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) +{ + struct ice_sched_node *parent; + struct ice_hw *hw = pi->hw; + u8 i, j; + + /* Free the children before freeing up the parent node + * The parent array is updated below and that shifts the nodes + * in the array. 
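ice_sched_remove_elems() sizes its AQ buffer with the classic one-element trailing array idiom. A minimal sketch with stand-in types follows; a C99 flexible array member (teid[]) would drop the "- 1" adjustment, but the driver keeps the older idiom.

#include <stdint.h>
#include <stdlib.h>

struct demo_del_elem {
	uint32_t parent_teid;
	uint16_t num_elems;
	uint32_t teid[1]; /* really num_elems entries follow the header */
};

static struct demo_del_elem *alloc_del_buf(uint16_t num_nodes)
{
	/* first teid slot is already inside sizeof(*buf); add num - 1 more */
	size_t sz = sizeof(struct demo_del_elem) +
		    sizeof(uint32_t) * (num_nodes - 1);

	return calloc(1, sz); /* caller must free() */
}

int main(void)
{
	struct demo_del_elem *buf = alloc_del_buf(4);

	if (!buf)
		return 1;
	buf->num_elems = 4;
	free(buf);
	return 0;
}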
So always pick the first child if num children > 0 + */ + while (node->num_children) + ice_free_sched_node(pi, node->children[0]); + + /* Leaf, TC and root nodes can't be deleted by SW */ + if (node->tx_sched_layer >= hw->sw_entry_point_layer && + node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && + node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT && + node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) { + u32 teid = le32_to_cpu(node->info.node_teid); + enum ice_status status; + + status = ice_sched_remove_elems(hw, node->parent, 1, &teid); + if (status) + ice_debug(hw, ICE_DBG_SCHED, + "remove element failed %d\n", status); + } + parent = node->parent; + /* root has no parent */ + if (parent) { + struct ice_sched_node *p, *tc_node; + + /* update the parent */ + for (i = 0; i < parent->num_children; i++) + if (parent->children[i] == node) { + for (j = i + 1; j < parent->num_children; j++) + parent->children[j - 1] = + parent->children[j]; + parent->num_children--; + break; + } + + /* search for previous sibling that points to this node and + * remove the reference + */ + tc_node = ice_sched_get_tc_node(pi, node->tc_num); + if (!tc_node) { + ice_debug(hw, ICE_DBG_SCHED, + "Invalid TC number %d\n", node->tc_num); + goto err_exit; + } + p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer); + while (p) { + if (p->sibling == node) { + p->sibling = node->sibling; + break; + } + p = p->sibling; + } + } +err_exit: + /* leaf nodes have no children */ + if (node->children) + devm_kfree(ice_hw_to_dev(hw), node->children); + devm_kfree(ice_hw_to_dev(hw), node); +} + +/** + * ice_aq_get_dflt_topo - gets default scheduler topology + * @hw: pointer to the hw struct + * @lport: logical port number + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @num_branches: returns total number of queue to port branches + * @cd: pointer to command details structure or NULL + * + * Get default scheduler topology (0x400) + */ +static enum ice_status +ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, + struct ice_aqc_get_topo_elem *buf, u16 buf_size, + u8 *num_branches, struct ice_sq_cd *cd) +{ + struct ice_aqc_get_topo *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.get_topo; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo); + cmd->port_num = lport; + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status && num_branches) + *num_branches = cmd->num_branches; + + return status; +} + +/** + * ice_aq_add_sched_elems - adds scheduling element + * @hw: pointer to the hw struct + * @grps_req: the number of groups that are requested to be added + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @grps_added: returns total number of groups added + * @cd: pointer to command details structure or NULL + * + * Add scheduling elements (0x0401) + */ +static enum ice_status +ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, + struct ice_aqc_add_elem *buf, u16 buf_size, + u16 *grps_added, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_move_delete_elem *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.add_move_delete_elem; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_sched_elems); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + cmd->num_grps_req = cpu_to_le16(grps_req); + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status && grps_added) + *grps_added = le16_to_cpu(cmd->num_grps_updated); + + return status; +} + +/** + * ice_suspend_resume_elems - 
suspend/resume scheduler elements + * @hw: pointer to the hw struct + * @elems_req: number of elements to suspend + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @elems_ret: returns total number of elements suspended + * @cd: pointer to command details structure or NULL + * @cmd_code: command code for suspend or resume + * + * suspend/resume scheduler elements + */ +static enum ice_status +ice_suspend_resume_elems(struct ice_hw *hw, u16 elems_req, + struct ice_aqc_suspend_resume_elem *buf, u16 buf_size, + u16 *elems_ret, struct ice_sq_cd *cd, + enum ice_adminq_opc cmd_code) +{ + struct ice_aqc_get_cfg_elem *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.get_update_elem; + ice_fill_dflt_direct_cmd_desc(&desc, cmd_code); + cmd->num_elem_req = cpu_to_le16(elems_req); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status && elems_ret) + *elems_ret = le16_to_cpu(cmd->num_elem_resp); + return status; +} + +/** + * ice_aq_suspend_sched_elems - suspend scheduler elements + * @hw: pointer to the hw struct + * @elems_req: number of elements to suspend + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @elems_ret: returns total number of elements suspended + * @cd: pointer to command details structure or NULL + * + * Suspend scheduling elements (0x0409) + */ +static enum ice_status +ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, + struct ice_aqc_suspend_resume_elem *buf, + u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) +{ + return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret, + cd, ice_aqc_opc_suspend_sched_elems); +} + +/** + * ice_aq_resume_sched_elems - resume scheduler elements + * @hw: pointer to the hw struct + * @elems_req: number of elements to resume + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @elems_ret: returns total number of elements resumed + * @cd: pointer to command details structure or NULL + * + * resume scheduling elements (0x040A) + */ +static enum ice_status +ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, + struct ice_aqc_suspend_resume_elem *buf, + u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) +{ + return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret, + cd, ice_aqc_opc_resume_sched_elems); +} + +/** + * ice_aq_query_sched_res - query scheduler resource + * @hw: pointer to the hw struct + * @buf_size: buffer size in bytes + * @buf: pointer to buffer + * @cd: pointer to command details structure or NULL + * + * Query scheduler resource allocation (0x0412) + */ +static enum ice_status +ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, + struct ice_aqc_query_txsched_res_resp *buf, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res); + return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); +} + +/** + * ice_sched_suspend_resume_elems - suspend or resume hw nodes + * @hw: pointer to the hw struct + * @num_nodes: number of nodes + * @node_teids: array of node teids to be suspended or resumed + * @suspend: true means suspend / false means resume + * + * This function suspends or resumes hw nodes + */ +static enum ice_status +ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, + bool suspend) +{ + struct ice_aqc_suspend_resume_elem *buf; + u16 i, buf_size, num_elem_ret = 0; + enum ice_status status; + + buf_size = sizeof(*buf) * num_nodes; + buf = 
devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); + if (!buf) + return ICE_ERR_NO_MEMORY; + + for (i = 0; i < num_nodes; i++) + buf->teid[i] = cpu_to_le32(node_teids[i]); + + if (suspend) + status = ice_aq_suspend_sched_elems(hw, num_nodes, buf, + buf_size, &num_elem_ret, + NULL); + else + status = ice_aq_resume_sched_elems(hw, num_nodes, buf, + buf_size, &num_elem_ret, + NULL); + if (status || num_elem_ret != num_nodes) + ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n"); + + devm_kfree(ice_hw_to_dev(hw), buf); + return status; +} + +/** + * ice_sched_clear_tx_topo - clears the scheduler tree nodes + * @pi: port information structure + * + * This function removes all the nodes from HW as well as from SW DB. + */ +static void ice_sched_clear_tx_topo(struct ice_port_info *pi) +{ + struct ice_sched_agg_info *agg_info; + struct ice_sched_vsi_info *vsi_elem; + struct ice_sched_agg_info *atmp; + struct ice_sched_vsi_info *tmp; + struct ice_hw *hw; + + if (!pi) + return; + + hw = pi->hw; + + list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) { + struct ice_sched_agg_vsi_info *agg_vsi_info; + struct ice_sched_agg_vsi_info *vtmp; + + list_for_each_entry_safe(agg_vsi_info, vtmp, + &agg_info->agg_vsi_list, list_entry) { + list_del(&agg_vsi_info->list_entry); + devm_kfree(ice_hw_to_dev(hw), agg_vsi_info); + } + } + + /* remove the vsi list */ + list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list, + list_entry) { + list_del(&vsi_elem->list_entry); + devm_kfree(ice_hw_to_dev(hw), vsi_elem); + } + + if (pi->root) { + ice_free_sched_node(pi, pi->root); + pi->root = NULL; + } +} + +/** + * ice_sched_clear_port - clear the scheduler elements from SW DB for a port + * @pi: port information structure + * + * Cleanup scheduling elements from SW DB + */ +static void ice_sched_clear_port(struct ice_port_info *pi) +{ + if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) + return; + + pi->port_state = ICE_SCHED_PORT_STATE_INIT; + mutex_lock(&pi->sched_lock); + ice_sched_clear_tx_topo(pi); + mutex_unlock(&pi->sched_lock); + mutex_destroy(&pi->sched_lock); +} + +/** + * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports + * @hw: pointer to the hw struct + * + * Cleanup scheduling elements from SW DB for all the ports + */ +void ice_sched_cleanup_all(struct ice_hw *hw) +{ + if (!hw || !hw->port_info) + return; + + if (hw->layer_info) + devm_kfree(ice_hw_to_dev(hw), hw->layer_info); + + ice_sched_clear_port(hw->port_info); + + hw->num_tx_sched_layers = 0; + hw->num_tx_sched_phys_layers = 0; + hw->flattened_layers = 0; + hw->max_cgds = 0; +} + +/** + * ice_sched_create_vsi_info_entry - create an empty new VSI entry + * @pi: port information structure + * @vsi_id: VSI Id + * + * This function creates a new VSI entry and adds it to the list + */ +static struct ice_sched_vsi_info * +ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id) +{ + struct ice_sched_vsi_info *vsi_elem; + + if (!pi) + return NULL; + + vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem), + GFP_KERNEL); + if (!vsi_elem) + return NULL; + + list_add(&vsi_elem->list_entry, &pi->vsi_info_list); + vsi_elem->vsi_id = vsi_id; + return vsi_elem; +} + +/** + * ice_sched_add_elems - add nodes to hw and SW DB + * @pi: port information structure + * @tc_node: pointer to the branch node + * @parent: pointer to the parent node + * @layer: layer number to add nodes + * @num_nodes: number of nodes + * @num_nodes_added: pointer to num nodes added + * @first_node_teid: if new 
nodes are added then return the teid of first node + * + * This function adds nodes to hw as well as to SW DB for a given layer + */ +static enum ice_status +ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, + struct ice_sched_node *parent, u8 layer, u16 num_nodes, + u16 *num_nodes_added, u32 *first_node_teid) +{ + struct ice_sched_node *prev, *new_node; + struct ice_aqc_add_elem *buf; + u16 i, num_groups_added = 0; + enum ice_status status = 0; + struct ice_hw *hw = pi->hw; + u16 buf_size; + u32 teid; + + buf_size = sizeof(*buf) + sizeof(*buf->generic) * (num_nodes - 1); + buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); + if (!buf) + return ICE_ERR_NO_MEMORY; + + buf->hdr.parent_teid = parent->info.node_teid; + buf->hdr.num_elems = cpu_to_le16(num_nodes); + for (i = 0; i < num_nodes; i++) { + buf->generic[i].parent_teid = parent->info.node_teid; + buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC; + buf->generic[i].data.valid_sections = + ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | + ICE_AQC_ELEM_VALID_EIR; + buf->generic[i].data.generic = 0; + buf->generic[i].data.cir_bw.bw_profile_idx = + ICE_SCHED_DFLT_RL_PROF_ID; + buf->generic[i].data.eir_bw.bw_profile_idx = + ICE_SCHED_DFLT_RL_PROF_ID; + } + + status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, + &num_groups_added, NULL); + if (status || num_groups_added != 1) { + ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n"); + devm_kfree(ice_hw_to_dev(hw), buf); + return ICE_ERR_CFG; + } + + *num_nodes_added = num_nodes; + /* add nodes to the SW DB */ + for (i = 0; i < num_nodes; i++) { + status = ice_sched_add_node(pi, layer, &buf->generic[i]); + if (status) { + ice_debug(hw, ICE_DBG_SCHED, + "add nodes in SW DB failed status =%d\n", + status); + break; + } + + teid = le32_to_cpu(buf->generic[i].node_teid); + new_node = ice_sched_find_node_by_teid(parent, teid); + + if (!new_node) { + ice_debug(hw, ICE_DBG_SCHED, + "Node is missing for teid =%d\n", teid); + break; + } + + new_node->sibling = NULL; + new_node->tc_num = tc_node->tc_num; + + /* add it to the previous node's sibling pointer */ + /* Note: siblings are not linked across branches */ + prev = ice_sched_get_first_node(hw, tc_node, layer); + + if (prev && prev != new_node) { + while (prev->sibling) + prev = prev->sibling; + prev->sibling = new_node; + } + + if (i == 0) + *first_node_teid = teid; + } + + devm_kfree(ice_hw_to_dev(hw), buf); + return status; +} + +/** + * ice_sched_add_nodes_to_layer - Add nodes to a given layer + * @pi: port information structure + * @tc_node: pointer to TC node + * @parent: pointer to parent node + * @layer: layer number to add nodes + * @num_nodes: number of nodes to be added + * @first_node_teid: pointer to the first node teid + * @num_nodes_added: pointer to number of nodes added + * + * This function adds nodes to a given layer. 
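The sibling linking in ice_sched_add_elems() is a plain tail append on a singly linked list. A reduced sketch follows; in the driver the list head comes from ice_sched_get_first_node(), and per-layer lists stay short, so the O(n) walk is acceptable.

#include <stddef.h>

struct demo_sched_node {
	struct demo_sched_node *sibling;
};

static void append_sibling(struct demo_sched_node *first,
			   struct demo_sched_node *new_node)
{
	struct demo_sched_node *prev = first;

	new_node->sibling = NULL;
	if (!prev || prev == new_node)
		return; /* new node is the head, nothing to link */
	while (prev->sibling)
		prev = prev->sibling;
	prev->sibling = new_node;
}

int main(void)
{
	struct demo_sched_node a = { NULL }, b = { NULL };

	append_sibling(&a, &b);
	return a.sibling == &b ? 0 : 1;
}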
+ */ +static enum ice_status +ice_sched_add_nodes_to_layer(struct ice_port_info *pi, + struct ice_sched_node *tc_node, + struct ice_sched_node *parent, u8 layer, + u16 num_nodes, u32 *first_node_teid, + u16 *num_nodes_added) +{ + u32 *first_teid_ptr = first_node_teid; + u16 new_num_nodes, max_child_nodes; + enum ice_status status = 0; + struct ice_hw *hw = pi->hw; + u16 num_added = 0; + u32 temp; + + if (!num_nodes) + return status; + + if (!parent || layer < hw->sw_entry_point_layer) + return ICE_ERR_PARAM; + + *num_nodes_added = 0; + + /* max children per node per layer */ + max_child_nodes = + le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); + + /* current number of children + required nodes exceed max children ? */ + if ((parent->num_children + num_nodes) > max_child_nodes) { + /* Fail if the parent is a TC node */ + if (parent == tc_node) + return ICE_ERR_CFG; + + /* utilize all the spaces if the parent is not full */ + if (parent->num_children < max_child_nodes) { + new_num_nodes = max_child_nodes - parent->num_children; + /* this recursion is intentional, and wouldn't + * go more than 2 calls + */ + status = ice_sched_add_nodes_to_layer(pi, tc_node, + parent, layer, + new_num_nodes, + first_node_teid, + &num_added); + if (status) + return status; + + *num_nodes_added += num_added; + } + /* Don't modify the first node teid memory if the first node was + * added already in the above call. Instead send some temp + * memory for all other recursive calls. + */ + if (num_added) + first_teid_ptr = &temp; + + new_num_nodes = num_nodes - num_added; + + /* This parent is full, try the next sibling */ + parent = parent->sibling; + + /* this recursion is intentional, for 1024 queues + * per VSI, it goes max of 16 iterations. + * 1024 / 8 = 128 layer 8 nodes + * 128 /8 = 16 (add 8 nodes per iteration) + */ + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, + layer, new_num_nodes, + first_teid_ptr, + &num_added); + *num_nodes_added += num_added; + return status; + } + + status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, + num_nodes_added, first_node_teid); + return status; +} + +/** + * ice_sched_get_qgrp_layer - get the current queue group layer number + * @hw: pointer to the hw struct + * + * This function returns the current queue group layer number + */ +static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw) +{ + /* It's always total layers - 1, the array is 0 relative so -2 */ + return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; +} + +/** + * ice_sched_get_vsi_layer - get the current VSI layer number + * @hw: pointer to the hw struct + * + * This function returns the current VSI layer number + */ +static u8 ice_sched_get_vsi_layer(struct ice_hw *hw) +{ + /* Num Layers VSI layer + * 9 6 + * 7 4 + * 5 or less sw_entry_point_layer + */ + /* calculate the vsi layer based on number of layers. 
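The two layer helpers above are pure offset arithmetic. A standalone restatement with the ICE_QGRP_LAYER_OFFSET / ICE_VSI_LAYER_OFFSET values of 2 and 4 hard-coded, plain ints in place of the hw struct, and 0-relative layer indices assumed:

static int demo_qgrp_layer(int num_layers)
{
	/* leaves sit at num_layers - 1 (0-relative); queue groups one above */
	return num_layers - 2;
}

static int demo_vsi_layer(int num_layers, int sw_entry_point)
{
	if (num_layers > 4 + 1) {
		int layer = num_layers - 4;

		if (layer > sw_entry_point)
			return layer;
	}
	return sw_entry_point; /* shallow trees fall back to the entry point */
}

int main(void)
{
	/* a 9-layer tree with SW entry point 0: qgrp at 7, VSI at 5 */
	return (demo_qgrp_layer(9) == 7 && demo_vsi_layer(9, 0) == 5) ? 0 : 1;
}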
*/ + if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) { + u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; + + if (layer > hw->sw_entry_point_layer) + return layer; + } + return hw->sw_entry_point_layer; +} + +/** + * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer + * @pi: pointer to the port info struct + * @layer: layer number + * + * This function calculates the number of nodes present in the scheduler tree + * including all the branches for a given layer + */ +static u16 +ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer) +{ + struct ice_hw *hw; + u16 num_nodes = 0; + u8 i; + + if (!pi) + return num_nodes; + + hw = pi->hw; + + /* Calculate the number of nodes for all TCs */ + for (i = 0; i < pi->root->num_children; i++) { + struct ice_sched_node *tc_node, *node; + + tc_node = pi->root->children[i]; + + /* Get the first node */ + node = ice_sched_get_first_node(hw, tc_node, layer); + if (!node) + continue; + + /* count the siblings */ + while (node) { + num_nodes++; + node = node->sibling; + } + } + + return num_nodes; +} + +/** + * ice_sched_validate_for_max_nodes - check whether max number of nodes reached + * @pi: port information structure + * @new_num_nodes_per_layer: pointer to the new number of nodes array + * + * This function checks whether the scheduler tree layers have enough space to + * add new nodes + */ +static enum ice_status +ice_sched_validate_for_max_nodes(struct ice_port_info *pi, + u16 *new_num_nodes_per_layer) +{ + struct ice_hw *hw = pi->hw; + u8 i, qg_layer; + u16 num_nodes; + + qg_layer = ice_sched_get_qgrp_layer(hw); + + /* walk through all the layers from SW entry point to qgroup layer */ + for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) { + num_nodes = ice_sched_get_num_nodes_per_layer(pi, i); + if (num_nodes + new_num_nodes_per_layer[i] > + le16_to_cpu(hw->layer_info[i].max_pf_nodes)) { + ice_debug(hw, ICE_DBG_SCHED, + "max nodes reached for layer = %d\n", i); + return ICE_ERR_CFG; + } + } + return 0; +} + +/** + * ice_rm_dflt_leaf_node - remove the default leaf node in the tree + * @pi: port information structure + * + * This function removes the leaf node that was created by the FW + * during initialization + */ +static void +ice_rm_dflt_leaf_node(struct ice_port_info *pi) +{ + struct ice_sched_node *node; + + node = pi->root; + while (node) { + if (!node->num_children) + break; + node = node->children[0]; + } + if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { + u32 teid = le32_to_cpu(node->info.node_teid); + enum ice_status status; + + /* remove the default leaf node */ + status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid); + if (!status) + ice_free_sched_node(pi, node); + } +} + +/** + * ice_sched_rm_dflt_nodes - free the default nodes in the tree + * @pi: port information structure + * + * This function frees all the nodes except root and TC that were created by + * the FW during initialization + */ +static void +ice_sched_rm_dflt_nodes(struct ice_port_info *pi) +{ + struct ice_sched_node *node; + + ice_rm_dflt_leaf_node(pi); + + /* remove the default nodes except TC and root nodes */ + node = pi->root; + while (node) { + if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer && + node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && + node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) { + ice_free_sched_node(pi, node); + break; + } + + if (!node->num_children) + break; + node = node->children[0]; + } +} + +/** + * ice_sched_init_port - Initialize 
scheduler by querying information from FW + * @pi: port info structure for the tree to cleanup + * + * This function is the initial call to find the total number of Tx scheduler + * resources, default topology created by firmware and storing the information + * in SW DB. + */ +enum ice_status ice_sched_init_port(struct ice_port_info *pi) +{ + struct ice_aqc_get_topo_elem *buf; + enum ice_status status; + struct ice_hw *hw; + u8 num_branches; + u16 num_elems; + u8 i, j; + + if (!pi) + return ICE_ERR_PARAM; + hw = pi->hw; + + /* Query the Default Topology from FW */ + buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES, + sizeof(*buf), GFP_KERNEL); + if (!buf) + return ICE_ERR_NO_MEMORY; + + /* Query default scheduling tree topology */ + status = ice_aq_get_dflt_topo(hw, pi->lport, buf, + sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES, + &num_branches, NULL); + if (status) + goto err_init_port; + + /* num_branches should be between 1-8 */ + if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) { + ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n", + num_branches); + status = ICE_ERR_PARAM; + goto err_init_port; + } + + /* get the number of elements on the default/first branch */ + num_elems = le16_to_cpu(buf[0].hdr.num_elems); + + /* num_elems should always be between 1-9 */ + if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) { + ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n", + num_elems); + status = ICE_ERR_PARAM; + goto err_init_port; + } + + /* If the last node is a leaf node then the index of the Q group + * layer is two less than the number of elements. + */ + if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type == + ICE_AQC_ELEM_TYPE_LEAF) + pi->last_node_teid = + le32_to_cpu(buf[0].generic[num_elems - 2].node_teid); + else + pi->last_node_teid = + le32_to_cpu(buf[0].generic[num_elems - 1].node_teid); + + /* Insert the Tx Sched root node */ + status = ice_sched_add_root_node(pi, &buf[0].generic[0]); + if (status) + goto err_init_port; + + /* Parse the default tree and cache the information */ + for (i = 0; i < num_branches; i++) { + num_elems = le16_to_cpu(buf[i].hdr.num_elems); + + /* Skip root element as already inserted */ + for (j = 1; j < num_elems; j++) { + /* update the sw entry point */ + if (buf[0].generic[j].data.elem_type == + ICE_AQC_ELEM_TYPE_ENTRY_POINT) + hw->sw_entry_point_layer = j; + + status = ice_sched_add_node(pi, j, &buf[i].generic[j]); + if (status) + goto err_init_port; + } + } + + /* Remove the default nodes. 
*/ + if (pi->root) + ice_sched_rm_dflt_nodes(pi); + + /* initialize the port for handling the scheduler tree */ + pi->port_state = ICE_SCHED_PORT_STATE_READY; + mutex_init(&pi->sched_lock); + INIT_LIST_HEAD(&pi->agg_list); + INIT_LIST_HEAD(&pi->vsi_info_list); + +err_init_port: + if (status && pi->root) { + ice_free_sched_node(pi, pi->root); + pi->root = NULL; + } + + devm_kfree(ice_hw_to_dev(hw), buf); + return status; +} + +/** + * ice_sched_query_res_alloc - query the FW for num of logical sched layers + * @hw: pointer to the HW struct + * + * query FW for allocated scheduler resources and store in HW struct + */ +enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) +{ + struct ice_aqc_query_txsched_res_resp *buf; + enum ice_status status = 0; + + if (hw->layer_info) + return status; + + buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL); + if (!buf) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL); + if (status) + goto sched_query_out; + + hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels); + hw->num_tx_sched_phys_layers = + le16_to_cpu(buf->sched_props.phys_levels); + hw->flattened_layers = buf->sched_props.flattening_bitmap; + hw->max_cgds = buf->sched_props.max_pf_cgds; + + hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, + (hw->num_tx_sched_layers * + sizeof(*hw->layer_info)), + GFP_KERNEL); + if (!hw->layer_info) { + status = ICE_ERR_NO_MEMORY; + goto sched_query_out; + } + +sched_query_out: + devm_kfree(ice_hw_to_dev(hw), buf); + return status; +} + +/** + * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id + * @pi: port information structure + * @vsi_id: vsi id + * + * This function retrieves the vsi list for the given vsi id + */ +static struct ice_sched_vsi_info * +ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id) +{ + struct ice_sched_vsi_info *list_elem; + + if (!pi) + return NULL; + + list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry) + if (list_elem->vsi_id == vsi_id) + return list_elem; + return NULL; +} + +/** + * ice_sched_find_node_in_subtree - Find node in part of base node subtree + * @hw: pointer to the hw struct + * @base: pointer to the base node + * @node: pointer to the node to search + * + * This function checks whether a given node is part of the base node + * subtree or not + */ +static bool +ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base, + struct ice_sched_node *node) +{ + u8 i; + + for (i = 0; i < base->num_children; i++) { + struct ice_sched_node *child = base->children[i]; + + if (node == child) + return true; + + if (child->tx_sched_layer > node->tx_sched_layer) + return false; + + /* this recursion is intentional, and wouldn't + * go more than 8 calls + */ + if (ice_sched_find_node_in_subtree(hw, child, node)) + return true; + } + return false; +} + +/** + * ice_sched_get_free_qparent - Get a free lan or rdma q group node + * @pi: port information structure + * @vsi_id: vsi id + * @tc: branch number + * @owner: lan or rdma + * + * This function retrieves a free lan or rdma q group node + */ +struct ice_sched_node * +ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc, + u8 owner) +{ + struct ice_sched_node *vsi_node, *qgrp_node = NULL; + struct ice_sched_vsi_info *list_elem; + u16 max_children; + u8 qgrp_layer; + + qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); + max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children); + + list_elem = 
ice_sched_get_vsi_info_entry(pi, vsi_id); + if (!list_elem) + goto lan_q_exit; + + vsi_node = list_elem->vsi_node[tc]; + + /* validate invalid VSI id */ + if (!vsi_node) + goto lan_q_exit; + + /* get the first q group node from VSI sub-tree */ + qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer); + while (qgrp_node) { + /* make sure the qgroup node is part of the VSI subtree */ + if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) + if (qgrp_node->num_children < max_children && + qgrp_node->owner == owner) + break; + qgrp_node = qgrp_node->sibling; + } + +lan_q_exit: + return qgrp_node; +} + +/** + * ice_sched_get_vsi_node - Get a VSI node based on VSI id + * @hw: pointer to the hw struct + * @tc_node: pointer to the TC node + * @vsi_id: VSI id + * + * This function retrieves a VSI node for a given VSI id from a given + * TC branch + */ +static struct ice_sched_node * +ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node, + u16 vsi_id) +{ + struct ice_sched_node *node; + u8 vsi_layer; + + vsi_layer = ice_sched_get_vsi_layer(hw); + node = ice_sched_get_first_node(hw, tc_node, vsi_layer); + + /* Check whether it already exists */ + while (node) { + if (node->vsi_id == vsi_id) + return node; + node = node->sibling; + } + + return node; +} + +/** + * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes + * @hw: pointer to the hw struct + * @num_qs: number of queues + * @num_nodes: num nodes array + * + * This function calculates the number of VSI child nodes based on the + * number of queues. + */ +static void +ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) +{ + u16 num = num_qs; + u8 i, qgl, vsil; + + qgl = ice_sched_get_qgrp_layer(hw); + vsil = ice_sched_get_vsi_layer(hw); + + /* calculate num nodes from q group to VSI layer */ + for (i = qgl; i > vsil; i--) { + u16 max_children = le16_to_cpu(hw->layer_info[i].max_children); + + /* round to the next integer if there is a remainder */ + num = DIV_ROUND_UP(num, max_children); + + /* need at least one node */ + num_nodes[i] = num ? num : 1; + } +} + +/** + * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree + * @pi: port information structure + * @vsi_id: VSI id + * @tc_node: pointer to the TC node + * @num_nodes: pointer to the num nodes that needs to be added per layer + * @owner: node owner (lan or rdma) + * + * This function adds the VSI child nodes to tree. It gets called for + * lan and rdma separately. 
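The fan-in computation in ice_sched_calc_vsi_child_nodes() is clearer with numbers: each layer needs the previous layer's count divided by its max children, rounded up, with a floor of one node. The sketch assumes a uniform fan-out of 8 and fixed layer numbers; the real per-layer maxima come from hw->layer_info.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int num = 1024; /* queues on the VSI */
	int layer;

	/* walk from the queue-group layer (8) down to the VSI layer (6) */
	for (layer = 8; layer > 6; layer--) {
		num = DIV_ROUND_UP(num, 8);
		printf("layer %d needs %u node(s)\n", layer, num ? num : 1);
	}
	return 0; /* prints 128 for layer 8, then 16 for layer 7 */
}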
+ */ +static enum ice_status +ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, + struct ice_sched_node *tc_node, u16 *num_nodes, + u8 owner) +{ + struct ice_sched_node *parent, *node; + struct ice_hw *hw = pi->hw; + enum ice_status status; + u32 first_node_teid; + u16 num_added = 0; + u8 i, qgl, vsil; + + status = ice_sched_validate_for_max_nodes(pi, num_nodes); + if (status) + return status; + + qgl = ice_sched_get_qgrp_layer(hw); + vsil = ice_sched_get_vsi_layer(hw); + parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id); + for (i = vsil + 1; i <= qgl; i++) { + if (!parent) + return ICE_ERR_CFG; + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, + num_nodes[i], + &first_node_teid, + &num_added); + if (status || num_nodes[i] != num_added) + return ICE_ERR_CFG; + + /* The newly added node can be a new parent for the next + * layer nodes + */ + if (num_added) { + parent = ice_sched_find_node_by_teid(tc_node, + first_node_teid); + node = parent; + while (node) { + node->owner = owner; + node = node->sibling; + } + } else { + parent = parent->children[0]; + } + } + + return 0; +} + +/** + * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree + * @pi: port information structure + * @vsi_node: pointer to the VSI node + * @num_nodes: pointer to the num nodes that needs to be removed per layer + * @owner: node owner (lan or rdma) + * + * This function removes the VSI child nodes from the tree. It gets called for + * lan and rdma separately. + */ +static void +ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi, + struct ice_sched_node *vsi_node, u16 *num_nodes, + u8 owner) +{ + struct ice_sched_node *node, *next; + u8 i, qgl, vsil; + u16 num; + + qgl = ice_sched_get_qgrp_layer(pi->hw); + vsil = ice_sched_get_vsi_layer(pi->hw); + + for (i = qgl; i > vsil; i--) { + num = num_nodes[i]; + node = ice_sched_get_first_node(pi->hw, vsi_node, i); + while (node && num) { + next = node->sibling; + if (node->owner == owner && !node->num_children) { + ice_free_sched_node(pi, node); + num--; + } + node = next; + } + } +} + +/** + * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes + * @hw: pointer to the hw struct + * @tc_node: pointer to TC node + * @num_nodes: pointer to num nodes array + * + * This function calculates the number of supported nodes needed to add this + * VSI into tx tree including the VSI, parent and intermediate nodes in below + * layers + */ +static void +ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, + struct ice_sched_node *tc_node, u16 *num_nodes) +{ + struct ice_sched_node *node; + u16 max_child; + u8 i, vsil; + + vsil = ice_sched_get_vsi_layer(hw); + for (i = vsil; i >= hw->sw_entry_point_layer; i--) + /* Add intermediate nodes if TC has no children and + * need at least one node for VSI + */ + if (!tc_node->num_children || i == vsil) { + num_nodes[i]++; + } else { + /* If intermediate nodes are reached max children + * then add a new one. 
+ */ + node = ice_sched_get_first_node(hw, tc_node, i); + max_child = le16_to_cpu(hw->layer_info[i].max_children); + + /* scan all the siblings */ + while (node) { + if (node->num_children < max_child) + break; + node = node->sibling; + } + + /* all the nodes are full, allocate a new one */ + if (!node) + num_nodes[i]++; + } +} + +/** + * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree + * @pi: port information structure + * @vsi_id: VSI Id + * @tc_node: pointer to TC node + * @num_nodes: pointer to num nodes array + * + * This function adds the VSI supported nodes into tx tree including the + * VSI, its parent and intermediate nodes in below layers + */ +static enum ice_status +ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id, + struct ice_sched_node *tc_node, u16 *num_nodes) +{ + struct ice_sched_node *parent = tc_node; + enum ice_status status; + u32 first_node_teid; + u16 num_added = 0; + u8 i, vsil; + + if (!pi) + return ICE_ERR_PARAM; + + status = ice_sched_validate_for_max_nodes(pi, num_nodes); + if (status) + return status; + + vsil = ice_sched_get_vsi_layer(pi->hw); + for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, + i, num_nodes[i], + &first_node_teid, + &num_added); + if (status || num_nodes[i] != num_added) + return ICE_ERR_CFG; + + /* The newly added node can be a new parent for the next + * layer nodes + */ + if (num_added) + parent = ice_sched_find_node_by_teid(tc_node, + first_node_teid); + else + parent = parent->children[0]; + + if (!parent) + return ICE_ERR_CFG; + + if (i == vsil) + parent->vsi_id = vsi_id; + } + return 0; +} + +/** + * ice_sched_add_vsi_to_topo - add a new VSI into tree + * @pi: port information structure + * @vsi_id: VSI Id + * @tc: TC number + * + * This function adds a new VSI into scheduler tree + */ +static enum ice_status +ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc) +{ + u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; + struct ice_sched_node *tc_node; + struct ice_hw *hw = pi->hw; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + return ICE_ERR_PARAM; + + /* calculate number of supported nodes needed for this VSI */ + ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes); + + /* add vsi supported nodes to tc subtree */ + return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes); +} + +/** + * ice_sched_update_vsi_child_nodes - update VSI child nodes + * @pi: port information structure + * @vsi_id: VSI Id + * @tc: TC number + * @new_numqs: new number of max queues + * @owner: owner of this subtree + * + * This function updates the VSI child nodes based on the number of queues + */ +static enum ice_status +ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, + u16 new_numqs, u8 owner) +{ + u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; + u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; + struct ice_sched_node *vsi_node; + struct ice_sched_node *tc_node; + struct ice_sched_vsi_info *vsi; + enum ice_status status = 0; + struct ice_hw *hw = pi->hw; + u16 prev_numqs; + u8 i; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + return ICE_ERR_CFG; + + vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); + if (!vsi_node) + return ICE_ERR_CFG; + + vsi = ice_sched_get_vsi_info_entry(pi, vsi_id); + if (!vsi) + return ICE_ERR_CFG; + + if (owner == ICE_SCHED_NODE_OWNER_LAN) + prev_numqs = vsi->max_lanq[tc]; + else + return ICE_ERR_PARAM; + 
+ /* num queues are not changed */ + if (prev_numqs == new_numqs) + return status; + + /* calculate number of nodes based on prev/new number of qs */ + if (prev_numqs) + ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes); + + if (new_numqs) + ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); + + if (prev_numqs > new_numqs) { + for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) + new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i]; + + ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes, + owner); + } else { + for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) + new_num_nodes[i] -= prev_num_nodes[i]; + + status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node, + new_num_nodes, owner); + if (status) + return status; + } + + if (owner == ICE_SCHED_NODE_OWNER_LAN) + vsi->max_lanq[tc] = new_numqs; + + return status; +} + +/** + * ice_sched_cfg_vsi - configure the new/existing VSI + * @pi: port information structure + * @vsi_id: VSI Id + * @tc: TC number + * @maxqs: max number of queues + * @owner: lan or rdma + * @enable: TC enabled or disabled + * + * This function adds/updates VSI nodes based on the number of queues. If TC is + * enabled and the VSI is in a suspended state then resume the VSI. If TC is + * disabled then suspend the VSI if it is not already suspended. + */ +enum ice_status +ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs, + u8 owner, bool enable) +{ + struct ice_sched_node *vsi_node, *tc_node; + struct ice_sched_vsi_info *vsi; + enum ice_status status = 0; + struct ice_hw *hw = pi->hw; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + return ICE_ERR_PARAM; + + vsi = ice_sched_get_vsi_info_entry(pi, vsi_id); + if (!vsi) + vsi = ice_sched_create_vsi_info_entry(pi, vsi_id); + if (!vsi) + return ICE_ERR_NO_MEMORY; + + vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); + + /* suspend the VSI if tc is not enabled */ + if (!enable) { + if (vsi_node && vsi_node->in_use) { + u32 teid = le32_to_cpu(vsi_node->info.node_teid); + + status = ice_sched_suspend_resume_elems(hw, 1, &teid, + true); + if (!status) + vsi_node->in_use = false; + } + return status; + } + + /* TC is enabled, if it is a new VSI then add it to the tree */ + if (!vsi_node) { + status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc); + if (status) + return status; + vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); + if (!vsi_node) + return ICE_ERR_CFG; + vsi->vsi_node[tc] = vsi_node; + vsi_node->in_use = true; + } + + /* update the VSI child nodes */ + status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner); + if (status) + return status; + + /* TC is enabled, resume the VSI if it is in the suspend state */ + if (!vsi_node->in_use) { + u32 teid = le32_to_cpu(vsi_node->info.node_teid); + + status = ice_sched_suspend_resume_elems(hw, 1, &teid, false); + if (!status) + vsi_node->in_use = true; + } + + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h new file mode 100644 index 000000000000..badadcc120d3 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. 
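Resizing in ice_sched_update_vsi_child_nodes() above comes down to per-layer count deltas: shrink by (prev - new) when queues decrease, grow by (new - prev) when they increase. A standalone restatement of that arithmetic; the array length and types are illustrative.

#include <stdint.h>

#define MAX_LEVELS 9 /* mirrors ICE_AQC_TOPO_MAX_LEVEL_NUM */

/* shrink=1: delta = prev - next (nodes to remove);
 * shrink=0: delta = next - prev (nodes to add)
 */
static void child_node_delta(const uint16_t *prev, const uint16_t *next,
			     uint16_t *delta, int shrink)
{
	int i;

	for (i = 0; i < MAX_LEVELS; i++)
		delta[i] = shrink ? (uint16_t)(prev[i] - next[i])
				  : (uint16_t)(next[i] - prev[i]);
}

int main(void)
{
	uint16_t prev[MAX_LEVELS] = { 0 }, next[MAX_LEVELS] = { 0 };
	uint16_t delta[MAX_LEVELS];

	prev[8] = 16; prev[7] = 2; /* e.g. 128 queues at fan-out 8 */
	next[8] = 8;  next[7] = 1; /* shrunk to 64 queues */
	child_node_delta(prev, next, delta, 1);
	return (delta[8] == 8 && delta[7] == 1) ? 0 : 1;
}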
*/ + +#ifndef _ICE_SCHED_H_ +#define _ICE_SCHED_H_ + +#include "ice_common.h" + +#define ICE_QGRP_LAYER_OFFSET 2 +#define ICE_VSI_LAYER_OFFSET 4 + +struct ice_sched_agg_vsi_info { + struct list_head list_entry; + DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS); + u16 vsi_id; +}; + +struct ice_sched_agg_info { + struct list_head agg_vsi_list; + struct list_head list_entry; + DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS); + u32 agg_id; + enum ice_agg_type agg_type; +}; + +/* FW AQ command calls */ +enum ice_status ice_sched_init_port(struct ice_port_info *pi); +enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw); +void ice_sched_cleanup_all(struct ice_hw *hw); +struct ice_sched_node * +ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid); +enum ice_status +ice_sched_add_node(struct ice_port_info *pi, u8 layer, + struct ice_aqc_txsched_elem_data *info); +void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node); +struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc); +struct ice_sched_node * +ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc, + u8 owner); +enum ice_status +ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs, + u8 owner, bool enable); +#endif /* _ICE_SCHED_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h new file mode 100644 index 000000000000..9a95c4ffd7d7 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_status.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_STATUS_H_ +#define _ICE_STATUS_H_ + +/* Error Codes */ +enum ice_status { + ICE_ERR_PARAM = -1, + ICE_ERR_NOT_IMPL = -2, + ICE_ERR_NOT_READY = -3, + ICE_ERR_BAD_PTR = -5, + ICE_ERR_INVAL_SIZE = -6, + ICE_ERR_DEVICE_NOT_SUPPORTED = -8, + ICE_ERR_RESET_FAILED = -9, + ICE_ERR_FW_API_VER = -10, + ICE_ERR_NO_MEMORY = -11, + ICE_ERR_CFG = -12, + ICE_ERR_OUT_OF_RANGE = -13, + ICE_ERR_ALREADY_EXISTS = -14, + ICE_ERR_DOES_NOT_EXIST = -15, + ICE_ERR_MAX_LIMIT = -17, + ICE_ERR_BUF_TOO_SHORT = -52, + ICE_ERR_NVM_BLANK_MODE = -53, + ICE_ERR_AQ_ERROR = -100, + ICE_ERR_AQ_TIMEOUT = -101, + ICE_ERR_AQ_FULL = -102, + ICE_ERR_AQ_NO_WORK = -103, + ICE_ERR_AQ_EMPTY = -104, +}; + +#endif /* _ICE_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c new file mode 100644 index 000000000000..723d15f1e90b --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -0,0 +1,1883 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +#include "ice_switch.h" + +#define ICE_ETH_DA_OFFSET 0 +#define ICE_ETH_ETHTYPE_OFFSET 12 +#define ICE_ETH_VLAN_TCI_OFFSET 14 +#define ICE_MAX_VLAN_ID 0xFFF + +/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem + * struct to configure any switch filter rules. 
+ * {DA (6 bytes), SA(6 bytes), + * Ether type (2 bytes for header without VLAN tag) OR + * VLAN tag (4 bytes for header with VLAN tag) } + * + * A word on the hardcoded values: + * byte 0 = 0x2: to identify it as locally administered DA MAC + * byte 6 = 0x2: to identify it as locally administered SA MAC + * byte 12 = 0x81 & byte 13 = 0x00: + * In case of a VLAN filter, the first two bytes define the ether type + * (0x8100) and the remaining two bytes are a placeholder for programming + * a given VLAN id + * In case of an Ether type filter, it is treated as a header without VLAN + * tag and bytes 12 and 13 are used to program a given Ether type instead + */ +#define DUMMY_ETH_HDR_LEN 16 +static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, + 0x2, 0, 0, 0, 0, 0, + 0x81, 0, 0, 0}; + +#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \ + (sizeof(struct ice_aqc_sw_rules_elem) - \ + sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \ + sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1) +#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \ + (sizeof(struct ice_aqc_sw_rules_elem) - \ + sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \ + sizeof(struct ice_sw_rule_lkup_rx_tx) - 1) +#define ICE_SW_RULE_LG_ACT_SIZE(n) \ + (sizeof(struct ice_aqc_sw_rules_elem) - \ + sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \ + sizeof(struct ice_sw_rule_lg_act) - \ + sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \ + ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act))) +#define ICE_SW_RULE_VSI_LIST_SIZE(n) \ + (sizeof(struct ice_aqc_sw_rules_elem) - \ + sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \ + sizeof(struct ice_sw_rule_vsi_list) - \ + sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \ + ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi))) + +/** + * ice_aq_alloc_free_res - command to allocate/free resources + * @hw: pointer to the hw struct + * @num_entries: number of resource entries in buffer + * @buf: Indirect buffer to hold data parameters and response + * @buf_size: size of buffer for indirect commands + * @opc: pass in the command opcode + * @cd: pointer to command details structure or NULL + * + * Helper function to allocate/free resources using the admin queue commands + */ +static enum ice_status +ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, + struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, + enum ice_adminq_opc opc, struct ice_sq_cd *cd) +{ + struct ice_aqc_alloc_free_res_cmd *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.sw_res_ctrl; + + if (!buf) + return ICE_ERR_PARAM; + + if (buf_size < (num_entries * sizeof(buf->elem[0]))) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, opc); + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + cmd->num_entries = cpu_to_le16(num_entries); + + return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); +} + +/** + * ice_aq_get_sw_cfg - get switch configuration + * @hw: pointer to the hardware structure + * @buf: pointer to the result buffer + * @buf_size: length of the buffer available for response + * @req_desc: pointer to requested descriptor + * @num_elems: pointer to number of elements + * @cd: pointer to command details structure or NULL + * + * Get switch configuration (0x0200) to be placed in 'buf'. + * This admin command returns information such as initial VSI/port number + * and switch ID it belongs to. + * + * NOTE: *req_desc is both an input/output parameter. + * The caller of this function first calls this function with *req_desc set + * to 0. 
If the response from f/w has *req_desc set to 0, all the switch + * configuration information has been returned; if non-zero (meaning not all + * the information was returned), the caller should call this function again + * with *req_desc set to the previous value returned by f/w to get the + * next block of switch configuration information. + * + * *num_elems is an output-only parameter that reflects the number of elements + * in the response buffer. The caller of this function should use *num_elems + * while parsing the response buffer. + */ +static enum ice_status +ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf, + u16 buf_size, u16 *req_desc, u16 *num_elems, + struct ice_sq_cd *cd) +{ + struct ice_aqc_get_sw_cfg *cmd; + enum ice_status status; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg); + cmd = &desc.params.get_sw_conf; + cmd->element = cpu_to_le16(*req_desc); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status) { + *req_desc = le16_to_cpu(cmd->element); + *num_elems = le16_to_cpu(cmd->num_elems); + } + + return status; +} + +/** + * ice_aq_add_vsi + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a VSI context struct + * @cd: pointer to command details structure or NULL + * + * Add a VSI context to the hardware (0x0210) + */ +enum ice_status +ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd) +{ + struct ice_aqc_add_update_free_vsi_resp *res; + struct ice_aqc_add_get_update_free_vsi *cmd; + enum ice_status status; + struct ice_aq_desc desc; + + cmd = &desc.params.vsi_cmd; + res = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi); + + if (!vsi_ctx->alloc_from_pool) + cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | + ICE_AQ_VSI_IS_VALID); + + cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), cd); + + if (!status) { + vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M; + vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used); + vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free); + } + + return status; +} + +/** + * ice_aq_update_vsi + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a VSI context struct + * @cd: pointer to command details structure or NULL + * + * Update VSI context in the hardware (0x0211) + */ +enum ice_status +ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd) +{ + struct ice_aqc_add_update_free_vsi_resp *resp; + struct ice_aqc_add_get_update_free_vsi *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.vsi_cmd; + resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi); + + cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), cd); + + if (!status) { + vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used); + vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + } + + return status; +} + +/** + * ice_aq_free_vsi + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a VSI context struct + * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources + * @cd: pointer to command details structure or NULL + * + * Free VSI context info 
from hardware (0x0213) + */ +enum ice_status +ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_update_free_vsi_resp *resp; + struct ice_aqc_add_get_update_free_vsi *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.vsi_cmd; + resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi); + + cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); + if (keep_vsi_alloc) + cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (!status) { + vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used); + vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + } + + return status; +} + +/** + * ice_aq_alloc_free_vsi_list + * @hw: pointer to the hw struct + * @vsi_list_id: VSI list id returned or used for lookup + * @lkup_type: switch rule filter lookup type + * @opc: switch rules population command type - pass in the command opcode + * + * allocates or free a VSI list resource + */ +static enum ice_status +ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, + enum ice_sw_lkup_type lkup_type, + enum ice_adminq_opc opc) +{ + struct ice_aqc_alloc_free_res_elem *sw_buf; + struct ice_aqc_res_elem *vsi_ele; + enum ice_status status; + u16 buf_len; + + buf_len = sizeof(*sw_buf); + sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); + if (!sw_buf) + return ICE_ERR_NO_MEMORY; + sw_buf->num_elems = cpu_to_le16(1); + + if (lkup_type == ICE_SW_LKUP_MAC || + lkup_type == ICE_SW_LKUP_MAC_VLAN || + lkup_type == ICE_SW_LKUP_ETHERTYPE || + lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || + lkup_type == ICE_SW_LKUP_PROMISC || + lkup_type == ICE_SW_LKUP_PROMISC_VLAN) { + sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP); + } else if (lkup_type == ICE_SW_LKUP_VLAN) { + sw_buf->res_type = + cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); + } else { + status = ICE_ERR_PARAM; + goto ice_aq_alloc_free_vsi_list_exit; + } + + if (opc == ice_aqc_opc_free_res) + sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id); + + status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL); + if (status) + goto ice_aq_alloc_free_vsi_list_exit; + + if (opc == ice_aqc_opc_alloc_res) { + vsi_ele = &sw_buf->elem[0]; + *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp); + } + +ice_aq_alloc_free_vsi_list_exit: + devm_kfree(ice_hw_to_dev(hw), sw_buf); + return status; +} + +/** + * ice_aq_sw_rules - add/update/remove switch rules + * @hw: pointer to the hw struct + * @rule_list: pointer to switch rule population list + * @rule_list_sz: total size of the rule list in bytes + * @num_rules: number of switch rules in the rule_list + * @opc: switch rules population command type - pass in the command opcode + * @cd: pointer to command details structure or NULL + * + * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware + */ +static enum ice_status +ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, + u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + if (opc != ice_aqc_opc_add_sw_rules && + opc != ice_aqc_opc_update_sw_rules && + opc != ice_aqc_opc_remove_sw_rules) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, opc); + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + desc.params.sw_rules.num_rules_fltr_entry_index = + cpu_to_le16(num_rules); + return ice_aq_send_cmd(hw, 
&desc, rule_list, rule_list_sz, cd); +} + +/** + * ice_init_port_info - Initialize port_info with switch configuration data + * @pi: pointer to port_info + * @vsi_port_num: VSI number or port number + * @type: Type of switch element (port or VSI) + * @swid: switch ID of the switch the element is attached to + * @pf_vf_num: PF or VF number + * @is_vf: true if the element is a VF, false otherwise + */ +static void +ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type, + u16 swid, u16 pf_vf_num, bool is_vf) +{ + switch (type) { + case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT: + pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK); + pi->sw_id = swid; + pi->pf_vf_num = pf_vf_num; + pi->is_vf = is_vf; + pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; + pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; + break; + default: + ice_debug(pi->hw, ICE_DBG_SW, + "incorrect VSI/port type received\n"); + break; + } +} + +/** + * ice_get_initial_sw_cfg - Get initial port and default VSI data + * @hw: pointer to the hardware structure + */ +enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) +{ + struct ice_aqc_get_sw_cfg_resp *rbuf; + enum ice_status status; + u16 req_desc = 0; + u16 num_elems; + u16 i; + + rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN, + GFP_KERNEL); + + if (!rbuf) + return ICE_ERR_NO_MEMORY; + + /* Multiple calls to ice_aq_get_sw_cfg may be required + * to get all the switch configuration information. The need + * for additional calls is indicated by ice_aq_get_sw_cfg + * writing a non-zero value in req_desc + */ + do { + status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN, + &req_desc, &num_elems, NULL); + + if (status) + break; + + for (i = 0; i < num_elems; i++) { + struct ice_aqc_get_sw_cfg_resp_elem *ele; + u16 pf_vf_num, swid, vsi_port_num; + bool is_vf = false; + u8 type; + + ele = rbuf[i].elements; + vsi_port_num = le16_to_cpu(ele->vsi_port_num) & + ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M; + + pf_vf_num = le16_to_cpu(ele->pf_vf_num) & + ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M; + + swid = le16_to_cpu(ele->swid); + + if (le16_to_cpu(ele->pf_vf_num) & + ICE_AQC_GET_SW_CONF_RESP_IS_VF) + is_vf = true; + + type = le16_to_cpu(ele->vsi_port_num) >> + ICE_AQC_GET_SW_CONF_RESP_TYPE_S; + + if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) { + /* FW VSI is not needed. Just continue. */ + continue; + } + + ice_init_port_info(hw->port_info, vsi_port_num, + type, swid, pf_vf_num, is_vf); + } + } while (req_desc && !status); + + devm_kfree(ice_hw_to_dev(hw), (void *)rbuf); + return status; +} + +/** + * ice_fill_sw_info - Helper function to populate lb_en and lan_en + * @hw: pointer to the hardware structure + * @f_info: filter info structure to fill/update + * + * This helper function populates the lb_en and lan_en elements of the provided + * ice_fltr_info struct using the switch's type and characteristics of the + * switch rule being configured. 
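To make the lb_en/lan_en policy concrete, here is a small stand-alone model (plain C, not driver code) of the decision ice_fill_sw_info() makes; the three bools are simplifications of the real ice_fltr_info and hw fields.

#include <stdbool.h>
#include <stdio.h>

/* Model: both flags stay false for Rx filters; for Tx filters with a
 * forwarding action lb_en is set, and lan_en is set too unless the
 * switch is a VEB and the rule matches a unicast MAC address.
 */
struct model { bool is_tx; bool is_fwd_act; bool veb_unicast_mac; };

static void fill(const struct model *m, bool *lb_en, bool *lan_en)
{
    *lb_en = false;
    *lan_en = false;
    if (m->is_tx && m->is_fwd_act) {
        *lb_en = true;
        if (!m->veb_unicast_mac)
            *lan_en = true;
    }
}

int main(void)
{
    struct model cases[3] = {
        { false, true,  false },    /* Rx filter: both false */
        { true,  true,  false },    /* Tx, e.g. multicast MAC or VLAN */
        { true,  true,  true },     /* Tx, VEB + unicast MAC: no lan_en */
    };
    int i;

    for (i = 0; i < 3; i++) {
        bool lb, lan;

        fill(&cases[i], &lb, &lan);
        printf("case %d: lb_en=%d lan_en=%d\n", i, lb, lan);
    }
    return 0;
}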
+ */ +static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info) +{ + f_info->lb_en = false; + f_info->lan_en = false; + if ((f_info->flag & ICE_FLTR_TX) && + (f_info->fltr_act == ICE_FWD_TO_VSI || + f_info->fltr_act == ICE_FWD_TO_VSI_LIST || + f_info->fltr_act == ICE_FWD_TO_Q || + f_info->fltr_act == ICE_FWD_TO_QGRP)) { + f_info->lb_en = true; + if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC && + is_unicast_ether_addr(f_info->l_data.mac.mac_addr))) + f_info->lan_en = true; + } +} + +/** + * ice_fill_sw_rule - Helper function to fill switch rule structure + * @hw: pointer to the hardware structure + * @f_info: entry containing packet forwarding information + * @s_rule: switch rule structure to be filled in based on mac_entry + * @opc: switch rules population command type - pass in the command opcode + */ +static void +ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, + struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc) +{ + u16 vlan_id = ICE_MAX_VLAN_ID + 1; + u8 eth_hdr[DUMMY_ETH_HDR_LEN]; + void *daddr = NULL; + u32 act = 0; + __be16 *off; + + if (opc == ice_aqc_opc_remove_sw_rules) { + s_rule->pdata.lkup_tx_rx.act = 0; + s_rule->pdata.lkup_tx_rx.index = + cpu_to_le16(f_info->fltr_rule_id); + s_rule->pdata.lkup_tx_rx.hdr_len = 0; + return; + } + + /* initialize the ether header with a dummy header */ + memcpy(eth_hdr, dummy_eth_header, sizeof(dummy_eth_header)); + ice_fill_sw_info(hw, f_info); + + switch (f_info->fltr_act) { + case ICE_FWD_TO_VSI: + act |= (f_info->fwd_id.vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & + ICE_SINGLE_ACT_VSI_ID_M; + if (f_info->lkup_type != ICE_SW_LKUP_VLAN) + act |= ICE_SINGLE_ACT_VSI_FORWARDING | + ICE_SINGLE_ACT_VALID_BIT; + break; + case ICE_FWD_TO_VSI_LIST: + act |= ICE_SINGLE_ACT_VSI_LIST; + act |= (f_info->fwd_id.vsi_list_id << + ICE_SINGLE_ACT_VSI_LIST_ID_S) & + ICE_SINGLE_ACT_VSI_LIST_ID_M; + if (f_info->lkup_type != ICE_SW_LKUP_VLAN) + act |= ICE_SINGLE_ACT_VSI_FORWARDING | + ICE_SINGLE_ACT_VALID_BIT; + break; + case ICE_FWD_TO_Q: + act |= ICE_SINGLE_ACT_TO_Q; + act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & + ICE_SINGLE_ACT_Q_INDEX_M; + break; + case ICE_FWD_TO_QGRP: + act |= ICE_SINGLE_ACT_TO_Q; + act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) & + ICE_SINGLE_ACT_Q_REGION_M; + break; + case ICE_DROP_PACKET: + act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP; + break; + default: + return; + } + + if (f_info->lb_en) + act |= ICE_SINGLE_ACT_LB_ENABLE; + if (f_info->lan_en) + act |= ICE_SINGLE_ACT_LAN_ENABLE; + + switch (f_info->lkup_type) { + case ICE_SW_LKUP_MAC: + daddr = f_info->l_data.mac.mac_addr; + break; + case ICE_SW_LKUP_VLAN: + vlan_id = f_info->l_data.vlan.vlan_id; + if (f_info->fltr_act == ICE_FWD_TO_VSI || + f_info->fltr_act == ICE_FWD_TO_VSI_LIST) { + act |= ICE_SINGLE_ACT_PRUNE; + act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS; + } + break; + case ICE_SW_LKUP_ETHERTYPE_MAC: + daddr = f_info->l_data.ethertype_mac.mac_addr; + /* fall-through */ + case ICE_SW_LKUP_ETHERTYPE: + off = (__be16 *)&eth_hdr[ICE_ETH_ETHTYPE_OFFSET]; + *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype); + break; + case ICE_SW_LKUP_MAC_VLAN: + daddr = f_info->l_data.mac_vlan.mac_addr; + vlan_id = f_info->l_data.mac_vlan.vlan_id; + break; + case ICE_SW_LKUP_PROMISC_VLAN: + vlan_id = f_info->l_data.mac_vlan.vlan_id; + /* fall-through */ + case ICE_SW_LKUP_PROMISC: + daddr = f_info->l_data.mac_vlan.mac_addr; + break; + default: + break; + } + + s_rule->type = (f_info->flag & 
ICE_FLTR_RX) ? + cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) : + cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); + + /* Recipe set depending on lookup type */ + s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type); + s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src); + s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); + + if (daddr) + ether_addr_copy(&eth_hdr[ICE_ETH_DA_OFFSET], daddr); + + if (!(vlan_id > ICE_MAX_VLAN_ID)) { + off = (__be16 *)&eth_hdr[ICE_ETH_VLAN_TCI_OFFSET]; + *off = cpu_to_be16(vlan_id); + } + + /* Create the switch rule with the final dummy Ethernet header */ + if (opc != ice_aqc_opc_update_sw_rules) + s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(sizeof(eth_hdr)); + + memcpy(s_rule->pdata.lkup_tx_rx.hdr, eth_hdr, sizeof(eth_hdr)); +} + +/** + * ice_add_marker_act + * @hw: pointer to the hardware structure + * @m_ent: the management entry for which sw marker needs to be added + * @sw_marker: sw marker to tag the Rx descriptor with + * @l_id: large action resource id + * + * Create a large action to hold software marker and update the switch rule + * entry pointed by m_ent with newly created large action + */ +static enum ice_status +ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, + u16 sw_marker, u16 l_id) +{ + struct ice_aqc_sw_rules_elem *lg_act, *rx_tx; + /* For software marker we need 3 large actions + * 1. FWD action: FWD TO VSI or VSI LIST + * 2. GENERIC VALUE action to hold the profile id + * 3. GENERIC VALUE action to hold the software marker id + */ + const u16 num_lg_acts = 3; + enum ice_status status; + u16 lg_act_size; + u16 rules_size; + u16 vsi_info; + u32 act; + + if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) + return ICE_ERR_PARAM; + + /* Create two back-to-back switch rules and submit them to the HW using + * one memory buffer: + * 1. Large Action + * 2. Look up tx rx + */ + lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts); + rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; + lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL); + if (!lg_act) + return ICE_ERR_NO_MEMORY; + + rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size); + + /* Fill in the first switch rule i.e. large action */ + lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT); + lg_act->pdata.lg_act.index = cpu_to_le16(l_id); + lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts); + + /* First action VSI forwarding or VSI list forwarding depending on how + * many VSIs + */ + vsi_info = (m_ent->vsi_count > 1) ? 
+ m_ent->fltr_info.fwd_id.vsi_list_id : + m_ent->fltr_info.fwd_id.vsi_id; + + act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT; + act |= (vsi_info << ICE_LG_ACT_VSI_LIST_ID_S) & + ICE_LG_ACT_VSI_LIST_ID_M; + if (m_ent->vsi_count > 1) + act |= ICE_LG_ACT_VSI_LIST; + lg_act->pdata.lg_act.act[0] = cpu_to_le32(act); + + /* Second action descriptor type */ + act = ICE_LG_ACT_GENERIC; + + act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; + lg_act->pdata.lg_act.act[1] = cpu_to_le32(act); + + act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M; + + /* Third action Marker value */ + act |= ICE_LG_ACT_GENERIC; + act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & + ICE_LG_ACT_GENERIC_VALUE_M; + + act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M; + lg_act->pdata.lg_act.act[2] = cpu_to_le32(act); + + /* call the fill switch rule to fill the lookup tx rx structure */ + ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx, + ice_aqc_opc_update_sw_rules); + + /* Update the action to point to the large action id */ + rx_tx->pdata.lkup_tx_rx.act = + cpu_to_le32(ICE_SINGLE_ACT_PTR | + ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) & + ICE_SINGLE_ACT_PTR_VAL_M)); + + /* Use the filter rule id of the previously created rule with single + * act. Once the update happens, hardware will treat this as large + * action + */ + rx_tx->pdata.lkup_tx_rx.index = + cpu_to_le16(m_ent->fltr_info.fltr_rule_id); + + status = ice_aq_sw_rules(hw, lg_act, rules_size, 2, + ice_aqc_opc_update_sw_rules, NULL); + if (!status) { + m_ent->lg_act_idx = l_id; + m_ent->sw_marker_id = sw_marker; + } + + devm_kfree(ice_hw_to_dev(hw), lg_act); + return status; +} + +/** + * ice_create_vsi_list_map + * @hw: pointer to the hardware structure + * @vsi_array: array of VSIs to form a VSI list + * @num_vsi: num VSI in the array + * @vsi_list_id: VSI list id generated as part of allocate resource + * + * Helper function to create a new entry of VSI list id to VSI mapping + * using the given VSI list id + */ +static struct ice_vsi_list_map_info * +ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, + u16 vsi_list_id) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_vsi_list_map_info *v_map; + int i; + + v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL); + if (!v_map) + return NULL; + + v_map->vsi_list_id = vsi_list_id; + + for (i = 0; i < num_vsi; i++) + set_bit(vsi_array[i], v_map->vsi_map); + + list_add(&v_map->list_entry, &sw->vsi_list_map_head); + return v_map; +} + +/** + * ice_update_vsi_list_rule + * @hw: pointer to the hardware structure + * @vsi_array: array of VSIs to form a VSI list + * @num_vsi: num VSI in the array + * @vsi_list_id: VSI list id generated as part of allocate resource + * @remove: Boolean value to indicate if this is a remove action + * @opc: switch rules population command type - pass in the command opcode + * @lkup_type: lookup type of the filter + * + * Call AQ command to add a new switch rule or update existing switch rule + * using the given VSI list id + */ +static enum ice_status +ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, + u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, + enum ice_sw_lkup_type lkup_type) +{ + struct ice_aqc_sw_rules_elem *s_rule; + enum ice_status status; + u16 s_rule_size; + u16 type; + int i; + + if (!num_vsi) + return ICE_ERR_PARAM; + + if (lkup_type == ICE_SW_LKUP_MAC || + lkup_type == ICE_SW_LKUP_MAC_VLAN || + lkup_type == ICE_SW_LKUP_ETHERTYPE || + 
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || + lkup_type == ICE_SW_LKUP_PROMISC || + lkup_type == ICE_SW_LKUP_PROMISC_VLAN) + type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : + ICE_AQC_SW_RULES_T_VSI_LIST_SET; + else if (lkup_type == ICE_SW_LKUP_VLAN) + type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : + ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; + else + return ICE_ERR_PARAM; + + s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); + s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + for (i = 0; i < num_vsi; i++) + s_rule->pdata.vsi_list.vsi[i] = cpu_to_le16(vsi_array[i]); + + s_rule->type = cpu_to_le16(type); + s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi); + s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); + + status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL); + + devm_kfree(ice_hw_to_dev(hw), s_rule); + return status; +} + +/** + * ice_create_vsi_list_rule - Creates and populates a VSI list rule + * @hw: pointer to the hw struct + * @vsi_array: array of VSIs to form a VSI list + * @num_vsi: number of VSIs in the array + * @vsi_list_id: stores the ID of the VSI list to be created + * @lkup_type: switch rule filter's lookup type + */ +static enum ice_status +ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, + u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type) +{ + enum ice_status status; + int i; + + for (i = 0; i < num_vsi; i++) + if (vsi_array[i] >= ICE_MAX_VSI) + return ICE_ERR_OUT_OF_RANGE; + + status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type, + ice_aqc_opc_alloc_res); + if (status) + return status; + + /* Update the newly created VSI list to include the specified VSIs */ + return ice_update_vsi_list_rule(hw, vsi_array, num_vsi, *vsi_list_id, + false, ice_aqc_opc_add_sw_rules, + lkup_type); +} + +/** + * ice_create_pkt_fwd_rule + * @hw: pointer to the hardware structure + * @f_entry: entry containing packet forwarding information + * + * Create switch rule with given filter information and add an entry + * to the corresponding filter management list to track this switch rule + * and VSI mapping + */ +static enum ice_status +ice_create_pkt_fwd_rule(struct ice_hw *hw, + struct ice_fltr_list_entry *f_entry) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *fm_entry; + struct ice_aqc_sw_rules_elem *s_rule; + enum ice_sw_lkup_type l_type; + enum ice_status status; + + s_rule = devm_kzalloc(ice_hw_to_dev(hw), + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), + GFP_KERNEL); + if (!fm_entry) { + status = ICE_ERR_NO_MEMORY; + goto ice_create_pkt_fwd_rule_exit; + } + + fm_entry->fltr_info = f_entry->fltr_info; + + /* Initialize all the fields for the management entry */ + fm_entry->vsi_count = 1; + fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX; + fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID; + fm_entry->counter_index = ICE_INVAL_COUNTER_ID; + + ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule, + ice_aqc_opc_add_sw_rules); + + status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, + ice_aqc_opc_add_sw_rules, NULL); + if (status) { + devm_kfree(ice_hw_to_dev(hw), fm_entry); + goto ice_create_pkt_fwd_rule_exit; + } + + f_entry->fltr_info.fltr_rule_id = + le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); + fm_entry->fltr_info.fltr_rule_id = + le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); + + /* The book keeping 
entries will get removed when base driver + * calls remove filter AQ command + */ + l_type = fm_entry->fltr_info.lkup_type; + if (l_type == ICE_SW_LKUP_MAC) { + mutex_lock(&sw->mac_list_lock); + list_add(&fm_entry->list_entry, &sw->mac_list_head); + mutex_unlock(&sw->mac_list_lock); + } else if (l_type == ICE_SW_LKUP_VLAN) { + mutex_lock(&sw->vlan_list_lock); + list_add(&fm_entry->list_entry, &sw->vlan_list_head); + mutex_unlock(&sw->vlan_list_lock); + } else if (l_type == ICE_SW_LKUP_ETHERTYPE || + l_type == ICE_SW_LKUP_ETHERTYPE_MAC) { + mutex_lock(&sw->eth_m_list_lock); + list_add(&fm_entry->list_entry, &sw->eth_m_list_head); + mutex_unlock(&sw->eth_m_list_lock); + } else if (l_type == ICE_SW_LKUP_PROMISC || + l_type == ICE_SW_LKUP_PROMISC_VLAN) { + mutex_lock(&sw->promisc_list_lock); + list_add(&fm_entry->list_entry, &sw->promisc_list_head); + mutex_unlock(&sw->promisc_list_lock); + } else if (fm_entry->fltr_info.lkup_type == ICE_SW_LKUP_MAC_VLAN) { + mutex_lock(&sw->mac_vlan_list_lock); + list_add(&fm_entry->list_entry, &sw->mac_vlan_list_head); + mutex_unlock(&sw->mac_vlan_list_lock); + } else { + status = ICE_ERR_NOT_IMPL; + } +ice_create_pkt_fwd_rule_exit: + devm_kfree(ice_hw_to_dev(hw), s_rule); + return status; +} + +/** + * ice_update_pkt_fwd_rule + * @hw: pointer to the hardware structure + * @rule_id: rule of previously created switch rule to update + * @vsi_list_id: VSI list id to be updated with + * @f_info: ice_fltr_info to pull other information for switch rule + * + * Call AQ command to update a previously created switch rule with a + * VSI list id + */ +static enum ice_status +ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id, + struct ice_fltr_info f_info) +{ + struct ice_aqc_sw_rules_elem *s_rule; + struct ice_fltr_info tmp_fltr; + enum ice_status status; + + s_rule = devm_kzalloc(ice_hw_to_dev(hw), + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + tmp_fltr = f_info; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; + tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; + + ice_fill_sw_rule(hw, &tmp_fltr, s_rule, + ice_aqc_opc_update_sw_rules); + + s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id); + + /* Update switch rule with new rule set to forward VSI list */ + status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, + ice_aqc_opc_update_sw_rules, NULL); + + devm_kfree(ice_hw_to_dev(hw), s_rule); + return status; +} + +/** + * ice_handle_vsi_list_mgmt + * @hw: pointer to the hardware structure + * @m_entry: pointer to current filter management list entry + * @cur_fltr: filter information from the book keeping entry + * @new_fltr: filter information with the new VSI to be added + * + * Call AQ command to add or update previously created VSI list with new VSI. + * + * Helper function to do book keeping associated with adding filter information + * The algorithm to do the book keeping is described below: + * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.) 
+ * if only one VSI has been added till now + * Allocate a new VSI list and add two VSIs + * to this list using switch rule command + * Update the previously created switch rule with the + * newly created VSI list id + * if a VSI list was previously created + * Add the new VSI to the previously created VSI list set + * using the update switch rule command + */ +static enum ice_status +ice_handle_vsi_list_mgmt(struct ice_hw *hw, + struct ice_fltr_mgmt_list_entry *m_entry, + struct ice_fltr_info *cur_fltr, + struct ice_fltr_info *new_fltr) +{ + enum ice_status status = 0; + u16 vsi_list_id = 0; + + if ((cur_fltr->fltr_act == ICE_FWD_TO_Q || + cur_fltr->fltr_act == ICE_FWD_TO_QGRP)) + return ICE_ERR_NOT_IMPL; + + if ((new_fltr->fltr_act == ICE_FWD_TO_Q || + new_fltr->fltr_act == ICE_FWD_TO_QGRP) && + (cur_fltr->fltr_act == ICE_FWD_TO_VSI || + cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST)) + return ICE_ERR_NOT_IMPL; + + if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { + /* Only one entry existed in the mapping and it was not already + * a part of a VSI list. So, create a VSI list with the old and + * new VSIs. + */ + u16 vsi_id_arr[2]; + u16 fltr_rule; + + /* A rule already exists with the new VSI being added */ + if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id) + return ICE_ERR_ALREADY_EXISTS; + + vsi_id_arr[0] = cur_fltr->fwd_id.vsi_id; + vsi_id_arr[1] = new_fltr->fwd_id.vsi_id; + status = ice_create_vsi_list_rule(hw, &vsi_id_arr[0], 2, + &vsi_list_id, + new_fltr->lkup_type); + if (status) + return status; + + fltr_rule = cur_fltr->fltr_rule_id; + /* Update the previous switch rule of "MAC forward to VSI" to + * "MAC fwd to VSI list" + */ + status = ice_update_pkt_fwd_rule(hw, fltr_rule, vsi_list_id, + *new_fltr); + if (status) + return status; + + cur_fltr->fwd_id.vsi_list_id = vsi_list_id; + cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; + m_entry->vsi_list_info = + ice_create_vsi_list_map(hw, &vsi_id_arr[0], 2, + vsi_list_id); + + /* If this entry was large action then the large action needs + * to be updated to point to FWD to VSI list + */ + if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) + status = + ice_add_marker_act(hw, m_entry, + m_entry->sw_marker_id, + m_entry->lg_act_idx); + } else { + u16 vsi_id = new_fltr->fwd_id.vsi_id; + enum ice_adminq_opc opcode; + + /* A rule already exists with the new VSI being added */ + if (test_bit(vsi_id, m_entry->vsi_list_info->vsi_map)) + return 0; + + /* Update the previously created VSI list set with + * the new VSI id passed in + */ + vsi_list_id = cur_fltr->fwd_id.vsi_list_id; + opcode = ice_aqc_opc_update_sw_rules; + + status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id, + false, opcode, + new_fltr->lkup_type); + /* update VSI list mapping info with new VSI id */ + if (!status) + set_bit(vsi_id, m_entry->vsi_list_info->vsi_map); + } + if (!status) + m_entry->vsi_count++; + return status; +} + +/** + * ice_find_mac_entry + * @hw: pointer to the hardware structure + * @mac_addr: MAC address to search for + * + * Helper function to search for a MAC entry using a given MAC address + * Returns pointer to the entry if found. 
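A stand-alone toy model (plain C, not driver code) of the book keeping walk-through above: a 64-bit word stands in for DECLARE_BITMAP(vsi_map, ICE_MAX_VSI), and the transition from a single-VSI rule to a VSI list happens on the second subscriber, mirroring ice_handle_vsi_list_mgmt().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_fltr {
    bool uses_list;         /* models FWD_TO_VSI vs FWD_TO_VSI_LIST */
    uint64_t vsi_map;       /* models vsi_list_info->vsi_map */
    unsigned int vsi_count;
    unsigned int first_vsi;
};

static int subscribe(struct toy_fltr *f, unsigned int vsi)
{
    if (f->vsi_count == 0) {
        f->first_vsi = vsi;              /* plain FWD_TO_VSI rule */
    } else if (!f->uses_list) {
        if (vsi == f->first_vsi)
            return -1;                   /* models ICE_ERR_ALREADY_EXISTS */
        f->uses_list = true;             /* second VSI: create the list */
        f->vsi_map |= 1ULL << f->first_vsi;
        f->vsi_map |= 1ULL << vsi;
    } else {
        if (f->vsi_map & (1ULL << vsi))
            return 0;                    /* already a member of the list */
        f->vsi_map |= 1ULL << vsi;       /* later VSIs: update the list */
    }
    f->vsi_count++;
    return 0;
}

int main(void)
{
    struct toy_fltr f = { 0 };

    subscribe(&f, 3);
    subscribe(&f, 7);
    subscribe(&f, 9);
    printf("uses_list=%d count=%u map=%#llx\n",
           f.uses_list, f.vsi_count, (unsigned long long)f.vsi_map);
    return 0;   /* prints uses_list=1 count=3 map=0x288 */
}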
+ */ +static struct ice_fltr_mgmt_list_entry * +ice_find_mac_entry(struct ice_hw *hw, u8 *mac_addr) +{ + struct ice_fltr_mgmt_list_entry *m_list_itr, *mac_ret = NULL; + struct ice_switch_info *sw = hw->switch_info; + + mutex_lock(&sw->mac_list_lock); + list_for_each_entry(m_list_itr, &sw->mac_list_head, list_entry) { + u8 *buf = &m_list_itr->fltr_info.l_data.mac.mac_addr[0]; + + if (ether_addr_equal(buf, mac_addr)) { + mac_ret = m_list_itr; + break; + } + } + mutex_unlock(&sw->mac_list_lock); + return mac_ret; +} + +/** + * ice_add_shared_mac - Add one MAC shared filter rule + * @hw: pointer to the hardware structure + * @f_entry: structure containing MAC forwarding information + * + * Adds or updates the book keeping list for the MAC addresses + */ +static enum ice_status +ice_add_shared_mac(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) +{ + struct ice_fltr_info *new_fltr, *cur_fltr; + struct ice_fltr_mgmt_list_entry *m_entry; + + new_fltr = &f_entry->fltr_info; + + m_entry = ice_find_mac_entry(hw, &new_fltr->l_data.mac.mac_addr[0]); + if (!m_entry) + return ice_create_pkt_fwd_rule(hw, f_entry); + + cur_fltr = &m_entry->fltr_info; + + return ice_handle_vsi_list_mgmt(hw, m_entry, cur_fltr, new_fltr); +} + +/** + * ice_add_mac - Add a MAC address based filter rule + * @hw: pointer to the hardware structure + * @m_list: list of MAC addresses and forwarding information + * + * IMPORTANT: When the ucast_shared flag is set to false and m_list has + * multiple unicast addresses, the function assumes that all the + * addresses are unique in a given add_mac call. It doesn't + * check for duplicates in this case, removing duplicates from a given + * list should be taken care of in the caller of this function. + */ +enum ice_status +ice_add_mac(struct ice_hw *hw, struct list_head *m_list) +{ + struct ice_aqc_sw_rules_elem *s_rule, *r_iter; + struct ice_fltr_list_entry *m_list_itr; + u16 elem_sent, total_elem_left; + enum ice_status status = 0; + u16 num_unicast = 0; + u16 s_rule_size; + + if (!m_list || !hw) + return ICE_ERR_PARAM; + + list_for_each_entry(m_list_itr, m_list, list_entry) { + u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0]; + + if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC) + return ICE_ERR_PARAM; + if (is_zero_ether_addr(add)) + return ICE_ERR_PARAM; + if (is_unicast_ether_addr(add) && !hw->ucast_shared) { + /* Don't overwrite the unicast address */ + if (ice_find_mac_entry(hw, add)) + return ICE_ERR_ALREADY_EXISTS; + num_unicast++; + } else if (is_multicast_ether_addr(add) || + (is_unicast_ether_addr(add) && hw->ucast_shared)) { + status = ice_add_shared_mac(hw, m_list_itr); + if (status) { + m_list_itr->status = ICE_FLTR_STATUS_FW_FAIL; + return status; + } + m_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS; + } + } + + /* Exit if no suitable entries were found for adding bulk switch rule */ + if (!num_unicast) + return 0; + + /* Allocate switch rule buffer for the bulk update for unicast */ + s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; + s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, + GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + r_iter = s_rule; + list_for_each_entry(m_list_itr, m_list, list_entry) { + struct ice_fltr_info *f_info = &m_list_itr->fltr_info; + u8 *addr = &f_info->l_data.mac.mac_addr[0]; + + if (is_unicast_ether_addr(addr)) { + ice_fill_sw_rule(hw, &m_list_itr->fltr_info, + r_iter, ice_aqc_opc_add_sw_rules); + r_iter = (struct ice_aqc_sw_rules_elem *) + ((u8 *)r_iter + s_rule_size); + } + } + + /* Call 
AQ bulk switch rule update for all unicast addresses */ + r_iter = s_rule; + /* Call AQ switch rule in AQ_MAX chunk */ + for (total_elem_left = num_unicast; total_elem_left > 0; + total_elem_left -= elem_sent) { + struct ice_aqc_sw_rules_elem *entry = r_iter; + + elem_sent = min(total_elem_left, + (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size)); + status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size, + elem_sent, ice_aqc_opc_add_sw_rules, + NULL); + if (status) + goto ice_add_mac_exit; + r_iter = (struct ice_aqc_sw_rules_elem *) + ((u8 *)r_iter + (elem_sent * s_rule_size)); + } + + /* Fill up rule id based on the value returned from FW */ + r_iter = s_rule; + list_for_each_entry(m_list_itr, m_list, list_entry) { + struct ice_fltr_info *f_info = &m_list_itr->fltr_info; + u8 *addr = &f_info->l_data.mac.mac_addr[0]; + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *fm_entry; + + if (is_unicast_ether_addr(addr)) { + f_info->fltr_rule_id = + le16_to_cpu(r_iter->pdata.lkup_tx_rx.index); + f_info->fltr_act = ICE_FWD_TO_VSI; + /* Create an entry to track this MAC address */ + fm_entry = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*fm_entry), GFP_KERNEL); + if (!fm_entry) { + status = ICE_ERR_NO_MEMORY; + goto ice_add_mac_exit; + } + fm_entry->fltr_info = *f_info; + fm_entry->vsi_count = 1; + /* The book keeping entries will get removed when + * base driver calls remove filter AQ command + */ + mutex_lock(&sw->mac_list_lock); + list_add(&fm_entry->list_entry, &sw->mac_list_head); + mutex_unlock(&sw->mac_list_lock); + + r_iter = (struct ice_aqc_sw_rules_elem *) + ((u8 *)r_iter + s_rule_size); + } + } + +ice_add_mac_exit: + devm_kfree(ice_hw_to_dev(hw), s_rule); + return status; +} + +/** + * ice_find_vlan_entry + * @hw: pointer to the hardware structure + * @vlan_id: VLAN id to search for + * + * Helper function to search for a VLAN entry using a given VLAN id + * Returns pointer to the entry if found. + */ +static struct ice_fltr_mgmt_list_entry * +ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id) +{ + struct ice_fltr_mgmt_list_entry *vlan_list_itr, *vlan_ret = NULL; + struct ice_switch_info *sw = hw->switch_info; + + mutex_lock(&sw->vlan_list_lock); + list_for_each_entry(vlan_list_itr, &sw->vlan_list_head, list_entry) + if (vlan_list_itr->fltr_info.l_data.vlan.vlan_id == vlan_id) { + vlan_ret = vlan_list_itr; + break; + } + + mutex_unlock(&sw->vlan_list_lock); + return vlan_ret; +} + +/** + * ice_add_vlan_internal - Add one VLAN based filter rule + * @hw: pointer to the hardware structure + * @f_entry: filter entry containing one VLAN information + */ +static enum ice_status +ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) +{ + struct ice_fltr_info *new_fltr, *cur_fltr; + struct ice_fltr_mgmt_list_entry *v_list_itr; + u16 vlan_id; + + new_fltr = &f_entry->fltr_info; + /* VLAN id should only be 12 bits */ + if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID) + return ICE_ERR_PARAM; + + vlan_id = new_fltr->l_data.vlan.vlan_id; + v_list_itr = ice_find_vlan_entry(hw, vlan_id); + if (!v_list_itr) { + u16 vsi_id = ICE_VSI_INVAL_ID; + enum ice_status status; + u16 vsi_list_id = 0; + + if (new_fltr->fltr_act == ICE_FWD_TO_VSI) { + enum ice_sw_lkup_type lkup_type = new_fltr->lkup_type; + + /* All VLAN pruning rules use a VSI list. + * Convert the action to forwarding to a VSI list. 
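Stepping back to the bulk-submission loop in ice_add_mac() above: the chunk arithmetic can be modelled stand-alone as below; the 4096-byte limit and 60-byte rule size are illustrative stand-ins for ICE_AQ_MAX_BUF_LEN and the real rule size.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_AQ_MAX_BUF_LEN 4096   /* assumed AQ buffer limit */

int main(void)
{
    uint16_t s_rule_size = 60;   /* stand-in for the ETH_HDR rule size */
    uint16_t per_call = EXAMPLE_AQ_MAX_BUF_LEN / s_rule_size;   /* 68 */
    uint16_t total_elem_left, elem_sent;

    /* same shape as the driver loop: send min(left, per_call) rules
     * per AQ call until all 150 rules have been submitted
     */
    for (total_elem_left = 150; total_elem_left > 0;
         total_elem_left -= elem_sent) {
        elem_sent = total_elem_left < per_call ?
                    total_elem_left : per_call;
        printf("send %u rules (%u bytes)\n", elem_sent,
               (unsigned int)(elem_sent * s_rule_size));
    }
    return 0;   /* prints batches of 68, 68 and 14 rules */
}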
+ */ + vsi_id = new_fltr->fwd_id.vsi_id; + status = ice_create_vsi_list_rule(hw, &vsi_id, 1, + &vsi_list_id, + lkup_type); + if (status) + return status; + new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; + new_fltr->fwd_id.vsi_list_id = vsi_list_id; + } + + status = ice_create_pkt_fwd_rule(hw, f_entry); + if (!status && vsi_id != ICE_VSI_INVAL_ID) { + v_list_itr = ice_find_vlan_entry(hw, vlan_id); + if (!v_list_itr) + return ICE_ERR_DOES_NOT_EXIST; + v_list_itr->vsi_list_info = + ice_create_vsi_list_map(hw, &vsi_id, 1, + vsi_list_id); + } + + return status; + } + + cur_fltr = &v_list_itr->fltr_info; + return ice_handle_vsi_list_mgmt(hw, v_list_itr, cur_fltr, new_fltr); +} + +/** + * ice_add_vlan - Add VLAN based filter rule + * @hw: pointer to the hardware structure + * @v_list: list of VLAN entries and forwarding information + */ +enum ice_status +ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) +{ + struct ice_fltr_list_entry *v_list_itr; + + if (!v_list || !hw) + return ICE_ERR_PARAM; + + list_for_each_entry(v_list_itr, v_list, list_entry) { + enum ice_status status; + + if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) + return ICE_ERR_PARAM; + + status = ice_add_vlan_internal(hw, v_list_itr); + if (status) { + v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL; + return status; + } + v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS; + } + return 0; +} + +/** + * ice_remove_vsi_list_rule + * @hw: pointer to the hardware structure + * @vsi_list_id: VSI list id generated as part of allocate resource + * @lkup_type: switch rule filter lookup type + */ +static enum ice_status +ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, + enum ice_sw_lkup_type lkup_type) +{ + struct ice_aqc_sw_rules_elem *s_rule; + enum ice_status status; + u16 s_rule_size; + + s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); + s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); + s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); + /* FW expects number of VSIs in vsi_list resource to be 0 for clear + * command. Since memory is zero'ed out during initialization, it's not + * necessary to explicitly initialize the variable to 0. 
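A hedged caller sketch for ice_add_vlan() above, not part of the patch: the VSI id and VLAN id are invented, and allocation/cleanup of the entry is elided.

/* Hypothetical caller: build a one-entry list and ask ice_add_vlan()
 * to forward VLAN 100 traffic to VSI 5. The entry is assumed to be
 * pre-allocated; internally the action is converted to FWD_TO_VSI_LIST
 * because VLAN pruning rules always use a VSI list.
 */
static enum ice_status example_add_vlan_fltr(struct ice_hw *hw,
                                             struct ice_fltr_list_entry *entry)
{
    struct list_head vlan_list;

    INIT_LIST_HEAD(&vlan_list);

    entry->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
    entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
    entry->fltr_info.flag = ICE_FLTR_TX;
    entry->fltr_info.src = 5;            /* source VSI for a Tx filter */
    entry->fltr_info.l_data.vlan.vlan_id = 100;
    entry->fltr_info.fwd_id.vsi_id = 5;

    list_add(&entry->list_entry, &vlan_list);

    return ice_add_vlan(hw, &vlan_list);
}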
+ */ + + status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, + ice_aqc_opc_remove_sw_rules, NULL); + if (!status) + /* Free the vsi_list resource that we allocated */ + status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type, + ice_aqc_opc_free_res); + + devm_kfree(ice_hw_to_dev(hw), s_rule); + return status; +} + +/** + * ice_handle_rem_vsi_list_mgmt + * @hw: pointer to the hardware structure + * @vsi_id: ID of the VSI to remove + * @fm_list_itr: filter management entry for which the VSI list management + * needs to be done + */ +static enum ice_status +ice_handle_rem_vsi_list_mgmt(struct ice_hw *hw, u16 vsi_id, + struct ice_fltr_mgmt_list_entry *fm_list_itr) +{ + struct ice_switch_info *sw = hw->switch_info; + enum ice_status status = 0; + enum ice_sw_lkup_type lkup_type; + bool is_last_elem = true; + bool conv_list = false; + bool del_list = false; + u16 vsi_list_id; + + lkup_type = fm_list_itr->fltr_info.lkup_type; + vsi_list_id = fm_list_itr->fltr_info.fwd_id.vsi_list_id; + + if (fm_list_itr->vsi_count > 1) { + status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id, + true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + fm_list_itr->vsi_count--; + is_last_elem = false; + clear_bit(vsi_id, fm_list_itr->vsi_list_info->vsi_map); + } + + /* For non-VLAN rules that forward packets to a VSI list, convert them + * to forwarding packets to a VSI if there is only one VSI left in the + * list. Unused lists are then removed. + * VLAN rules need to use VSI lists even with only one VSI. + */ + if (fm_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST) { + if (lkup_type == ICE_SW_LKUP_VLAN) { + del_list = is_last_elem; + } else if (fm_list_itr->vsi_count == 1) { + conv_list = true; + del_list = true; + } + } + + if (del_list) { + /* Remove the VSI list since it is no longer used */ + struct ice_vsi_list_map_info *vsi_list_info = + fm_list_itr->vsi_list_info; + + status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); + if (status) + return status; + + if (conv_list) { + u16 rem_vsi_id; + + rem_vsi_id = find_first_bit(vsi_list_info->vsi_map, + ICE_MAX_VSI); + + /* Error out when the expected last element is not in + * the VSI list map + */ + if (rem_vsi_id == ICE_MAX_VSI) + return ICE_ERR_OUT_OF_RANGE; + + /* Change the list entry action from VSI_LIST to VSI */ + fm_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; + fm_list_itr->fltr_info.fwd_id.vsi_id = rem_vsi_id; + } + + list_del(&vsi_list_info->list_entry); + devm_kfree(ice_hw_to_dev(hw), vsi_list_info); + fm_list_itr->vsi_list_info = NULL; + } + + if (conv_list) { + /* Convert the rule's forward action to forwarding packets to + * a VSI + */ + struct ice_aqc_sw_rules_elem *s_rule; + + s_rule = devm_kzalloc(ice_hw_to_dev(hw), + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, + GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule, + ice_aqc_opc_update_sw_rules); + + s_rule->pdata.lkup_tx_rx.index = + cpu_to_le16(fm_list_itr->fltr_info.fltr_rule_id); + + status = ice_aq_sw_rules(hw, s_rule, + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, + ice_aqc_opc_update_sw_rules, NULL); + devm_kfree(ice_hw_to_dev(hw), s_rule); + if (status) + return status; + } + + if (is_last_elem) { + /* Remove the lookup rule */ + struct ice_aqc_sw_rules_elem *s_rule; + + s_rule = devm_kzalloc(ice_hw_to_dev(hw), + ICE_SW_RULE_RX_TX_NO_HDR_SIZE, + GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule, + 
ice_aqc_opc_remove_sw_rules); + + status = ice_aq_sw_rules(hw, s_rule, + ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, + ice_aqc_opc_remove_sw_rules, NULL); + if (status) + return status; + + /* Remove a book keeping entry from the MAC address list */ + mutex_lock(&sw->mac_list_lock); + list_del(&fm_list_itr->list_entry); + mutex_unlock(&sw->mac_list_lock); + devm_kfree(ice_hw_to_dev(hw), fm_list_itr); + devm_kfree(ice_hw_to_dev(hw), s_rule); + } + return status; +} + +/** + * ice_remove_mac_entry + * @hw: pointer to the hardware structure + * @f_entry: structure containing MAC forwarding information + */ +static enum ice_status +ice_remove_mac_entry(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) +{ + struct ice_fltr_mgmt_list_entry *m_entry; + u16 vsi_id; + u8 *add; + + add = &f_entry->fltr_info.l_data.mac.mac_addr[0]; + + m_entry = ice_find_mac_entry(hw, add); + if (!m_entry) + return ICE_ERR_PARAM; + + vsi_id = f_entry->fltr_info.fwd_id.vsi_id; + return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, m_entry); +} + +/** + * ice_remove_mac - remove a MAC address based filter rule + * @hw: pointer to the hardware structure + * @m_list: list of MAC addresses and forwarding information + * + * This function removes either a MAC filter rule or a specific VSI from a + * VSI list for a multicast MAC address. + * + * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by + * ice_add_mac. Caller should be aware that this call will only work if all + * the entries passed into m_list were added previously. It will not attempt to + * do a partial remove of entries that were found. + */ +enum ice_status +ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) +{ + struct ice_aqc_sw_rules_elem *s_rule, *r_iter; + u8 s_rule_size = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *m_entry; + struct ice_fltr_list_entry *m_list_itr; + u16 elem_sent, total_elem_left; + enum ice_status status = 0; + u16 num_unicast = 0; + + if (!m_list) + return ICE_ERR_PARAM; + + list_for_each_entry(m_list_itr, m_list, list_entry) { + u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr; + + if (is_unicast_ether_addr(addr) && !hw->ucast_shared) + num_unicast++; + else if (is_multicast_ether_addr(addr) || + (is_unicast_ether_addr(addr) && hw->ucast_shared)) + ice_remove_mac_entry(hw, m_list_itr); + } + + /* Exit if no unicast addresses found. 
Multicast switch rules + * were added individually + */ + if (!num_unicast) + return 0; + + /* Allocate switch rule buffer for the bulk update for unicast */ + s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, + GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + r_iter = s_rule; + list_for_each_entry(m_list_itr, m_list, list_entry) { + u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr; + + if (is_unicast_ether_addr(addr)) { + m_entry = ice_find_mac_entry(hw, addr); + if (!m_entry) { + status = ICE_ERR_DOES_NOT_EXIST; + goto ice_remove_mac_exit; + } + + ice_fill_sw_rule(hw, &m_entry->fltr_info, + r_iter, ice_aqc_opc_remove_sw_rules); + r_iter = (struct ice_aqc_sw_rules_elem *) + ((u8 *)r_iter + s_rule_size); + } + } + + /* Call AQ bulk switch rule update for all unicast addresses */ + r_iter = s_rule; + /* Call AQ switch rule in AQ_MAX chunk */ + for (total_elem_left = num_unicast; total_elem_left > 0; + total_elem_left -= elem_sent) { + struct ice_aqc_sw_rules_elem *entry = r_iter; + + elem_sent = min(total_elem_left, + (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size)); + status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size, + elem_sent, ice_aqc_opc_remove_sw_rules, + NULL); + if (status) + break; + r_iter = (struct ice_aqc_sw_rules_elem *) + ((u8 *)r_iter + s_rule_size); + } + + list_for_each_entry(m_list_itr, m_list, list_entry) { + u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr; + + if (is_unicast_ether_addr(addr)) { + m_entry = ice_find_mac_entry(hw, addr); + if (!m_entry) + return ICE_ERR_OUT_OF_RANGE; + mutex_lock(&sw->mac_list_lock); + list_del(&m_entry->list_entry); + mutex_unlock(&sw->mac_list_lock); + devm_kfree(ice_hw_to_dev(hw), m_entry); + } + } + +ice_remove_mac_exit: + devm_kfree(ice_hw_to_dev(hw), s_rule); + return status; +} + +/** + * ice_cfg_dflt_vsi - add filter rule to set/unset given VSI as default + * VSI for the switch (represented by swid) + * @hw: pointer to the hardware structure + * @vsi_id: number of VSI to set as default + * @set: true to add the above mentioned switch rule, false to remove it + * @direction: ICE_FLTR_RX or ICE_FLTR_TX + */ +enum ice_status +ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction) +{ + struct ice_aqc_sw_rules_elem *s_rule; + struct ice_fltr_info f_info; + enum ice_adminq_opc opcode; + enum ice_status status; + u16 s_rule_size; + + s_rule_size = set ? 
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : + ICE_SW_RULE_RX_TX_NO_HDR_SIZE; + s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + memset(&f_info, 0, sizeof(f_info)); + + f_info.lkup_type = ICE_SW_LKUP_DFLT; + f_info.flag = direction; + f_info.fltr_act = ICE_FWD_TO_VSI; + f_info.fwd_id.vsi_id = vsi_id; + + if (f_info.flag & ICE_FLTR_RX) { + f_info.src = hw->port_info->lport; + if (!set) + f_info.fltr_rule_id = + hw->port_info->dflt_rx_vsi_rule_id; + } else if (f_info.flag & ICE_FLTR_TX) { + f_info.src = vsi_id; + if (!set) + f_info.fltr_rule_id = + hw->port_info->dflt_tx_vsi_rule_id; + } + + if (set) + opcode = ice_aqc_opc_add_sw_rules; + else + opcode = ice_aqc_opc_remove_sw_rules; + + ice_fill_sw_rule(hw, &f_info, s_rule, opcode); + + status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL); + if (status || !(f_info.flag & ICE_FLTR_TX_RX)) + goto out; + if (set) { + u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); + + if (f_info.flag & ICE_FLTR_TX) { + hw->port_info->dflt_tx_vsi_num = vsi_id; + hw->port_info->dflt_tx_vsi_rule_id = index; + } else if (f_info.flag & ICE_FLTR_RX) { + hw->port_info->dflt_rx_vsi_num = vsi_id; + hw->port_info->dflt_rx_vsi_rule_id = index; + } + } else { + if (f_info.flag & ICE_FLTR_TX) { + hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; + hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT; + } else if (f_info.flag & ICE_FLTR_RX) { + hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; + hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT; + } + } + +out: + devm_kfree(ice_hw_to_dev(hw), s_rule); + return status; +} + +/** + * ice_remove_vlan_internal - Remove one VLAN based filter rule + * @hw: pointer to the hardware structure + * @f_entry: filter entry containing one VLAN information + */ +static enum ice_status +ice_remove_vlan_internal(struct ice_hw *hw, + struct ice_fltr_list_entry *f_entry) +{ + struct ice_fltr_info *new_fltr; + struct ice_fltr_mgmt_list_entry *v_list_elem; + u16 vsi_id; + + new_fltr = &f_entry->fltr_info; + + v_list_elem = ice_find_vlan_entry(hw, new_fltr->l_data.vlan.vlan_id); + if (!v_list_elem) + return ICE_ERR_PARAM; + + vsi_id = f_entry->fltr_info.fwd_id.vsi_id; + return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, v_list_elem); +} + +/** + * ice_remove_vlan - Remove VLAN based filter rule + * @hw: pointer to the hardware structure + * @v_list: list of VLAN entries and forwarding information + */ +enum ice_status +ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) +{ + struct ice_fltr_list_entry *v_list_itr; + enum ice_status status = 0; + + if (!v_list || !hw) + return ICE_ERR_PARAM; + + list_for_each_entry(v_list_itr, v_list, list_entry) { + status = ice_remove_vlan_internal(hw, v_list_itr); + if (status) { + v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL; + return status; + } + v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS; + } + return status; +} + +/** + * ice_add_to_vsi_fltr_list - Add VSI filters to the list + * @hw: pointer to the hardware structure + * @vsi_id: ID of VSI to remove filters from + * @lkup_list_head: pointer to the list that has certain lookup type filters + * @vsi_list_head: pointer to the list pertaining to VSI with vsi_id + */ +static enum ice_status +ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id, + struct list_head *lkup_list_head, + struct list_head *vsi_list_head) +{ + struct ice_fltr_mgmt_list_entry *fm_entry; + + /* check to make sure VSI id is valid and within boundary */ + if (vsi_id >= + 
(sizeof(fm_entry->vsi_list_info->vsi_map) * BITS_PER_BYTE - 1)) + return ICE_ERR_PARAM; + + list_for_each_entry(fm_entry, lkup_list_head, list_entry) { + struct ice_fltr_info *fi; + + fi = &fm_entry->fltr_info; + if ((fi->fltr_act == ICE_FWD_TO_VSI && + fi->fwd_id.vsi_id == vsi_id) || + (fi->fltr_act == ICE_FWD_TO_VSI_LIST && + (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map)))) { + struct ice_fltr_list_entry *tmp; + + /* this memory is freed up in the caller function + * ice_remove_vsi_lkup_fltr() once filters for + * this VSI are removed + */ + tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), + GFP_KERNEL); + if (!tmp) + return ICE_ERR_NO_MEMORY; + + memcpy(&tmp->fltr_info, fi, sizeof(*fi)); + + /* Expected below fields to be set to ICE_FWD_TO_VSI and + * the particular VSI id since we are only removing this + * one VSI + */ + if (fi->fltr_act == ICE_FWD_TO_VSI_LIST) { + tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp->fltr_info.fwd_id.vsi_id = vsi_id; + } + + list_add(&tmp->list_entry, vsi_list_head); + } + } + return 0; +} + +/** + * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI + * @hw: pointer to the hardware structure + * @vsi_id: ID of VSI to remove filters from + * @lkup: switch rule filter lookup type + */ +static void +ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id, + enum ice_sw_lkup_type lkup) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_list_entry *fm_entry; + struct list_head remove_list_head; + struct ice_fltr_list_entry *tmp; + enum ice_status status; + + INIT_LIST_HEAD(&remove_list_head); + switch (lkup) { + case ICE_SW_LKUP_MAC: + mutex_lock(&sw->mac_list_lock); + status = ice_add_to_vsi_fltr_list(hw, vsi_id, + &sw->mac_list_head, + &remove_list_head); + mutex_unlock(&sw->mac_list_lock); + if (!status) { + ice_remove_mac(hw, &remove_list_head); + goto free_fltr_list; + } + break; + case ICE_SW_LKUP_VLAN: + mutex_lock(&sw->vlan_list_lock); + status = ice_add_to_vsi_fltr_list(hw, vsi_id, + &sw->vlan_list_head, + &remove_list_head); + mutex_unlock(&sw->vlan_list_lock); + if (!status) { + ice_remove_vlan(hw, &remove_list_head); + goto free_fltr_list; + } + break; + case ICE_SW_LKUP_MAC_VLAN: + case ICE_SW_LKUP_ETHERTYPE: + case ICE_SW_LKUP_ETHERTYPE_MAC: + case ICE_SW_LKUP_PROMISC: + case ICE_SW_LKUP_PROMISC_VLAN: + case ICE_SW_LKUP_DFLT: + ice_debug(hw, ICE_DBG_SW, + "Remove filters for this lookup type hasn't been implemented yet\n"); + break; + } + + return; +free_fltr_list: + list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) { + list_del(&fm_entry->list_entry); + devm_kfree(ice_hw_to_dev(hw), fm_entry); + } +} + +/** + * ice_remove_vsi_fltr - Remove all filters for a VSI + * @hw: pointer to the hardware structure + * @vsi_id: ID of VSI to remove filters from + */ +void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id) +{ + ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC); + ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC_VLAN); + ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC); + ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_VLAN); + ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_DFLT); + ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE); + ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC); + ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN); +} diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h new file mode 100644 index 000000000000..6f4a0d159dbf --- /dev/null +++ 
b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_SWITCH_H_ +#define _ICE_SWITCH_H_ + +#include "ice_common.h" + +#define ICE_SW_CFG_MAX_BUF_LEN 2048 +#define ICE_DFLT_VSI_INVAL 0xff +#define ICE_VSI_INVAL_ID 0xffff + +/* VSI context structure for add/get/update/free operations */ +struct ice_vsi_ctx { + u16 vsi_num; + u16 vsis_allocd; + u16 vsis_unallocated; + u16 flags; + struct ice_aqc_vsi_props info; + bool alloc_from_pool; +}; + +enum ice_sw_fwd_act_type { + ICE_FWD_TO_VSI = 0, + ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */ + ICE_FWD_TO_Q, + ICE_FWD_TO_QGRP, + ICE_DROP_PACKET, + ICE_INVAL_ACT +}; + +/* Switch recipe ID enum values are specific to hardware */ +enum ice_sw_lkup_type { + ICE_SW_LKUP_ETHERTYPE = 0, + ICE_SW_LKUP_MAC = 1, + ICE_SW_LKUP_MAC_VLAN = 2, + ICE_SW_LKUP_PROMISC = 3, + ICE_SW_LKUP_VLAN = 4, + ICE_SW_LKUP_DFLT = 5, + ICE_SW_LKUP_ETHERTYPE_MAC = 8, + ICE_SW_LKUP_PROMISC_VLAN = 9, +}; + +struct ice_fltr_info { + /* Look up information: how to look up packet */ + enum ice_sw_lkup_type lkup_type; + /* Forward action: filter action to do after lookup */ + enum ice_sw_fwd_act_type fltr_act; + /* rule ID returned by firmware once filter rule is created */ + u16 fltr_rule_id; + u16 flag; +#define ICE_FLTR_RX BIT(0) +#define ICE_FLTR_TX BIT(1) +#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX) + + /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */ + u16 src; + + union { + struct { + u8 mac_addr[ETH_ALEN]; + } mac; + struct { + u8 mac_addr[ETH_ALEN]; + u16 vlan_id; + } mac_vlan; + struct { + u16 vlan_id; + } vlan; + /* Set lkup_type as ICE_SW_LKUP_ETHERTYPE + * if just using ethertype as filter. Set lkup_type as + * ICE_SW_LKUP_ETHERTYPE_MAC if MAC also needs to be + * passed in as filter. + */ + struct { + u16 ethertype; + u8 mac_addr[ETH_ALEN]; /* optional */ + } ethertype_mac; + } l_data; + + /* Depending on filter action */ + union { + /* queue id in case of ICE_FWD_TO_Q and starting + * queue id in case of ICE_FWD_TO_QGRP. + */ + u16 q_id:11; + u16 vsi_id:10; + u16 vsi_list_id:10; + } fwd_id; + + /* Set to num_queues if action is ICE_FWD_TO_QGRP. This field + * determines the range of queues the packet needs to be forwarded to + */ + u8 qgrp_size; + + /* Rule creations populate these indicators basing on the switch type */ + bool lb_en; /* Indicate if packet can be looped back */ + bool lan_en; /* Indicate if packet can be forwarded to the uplink */ +}; + +/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ +struct ice_vsi_list_map_info { + struct list_head list_entry; + DECLARE_BITMAP(vsi_map, ICE_MAX_VSI); + u16 vsi_list_id; +}; + +enum ice_sw_fltr_status { + ICE_FLTR_STATUS_NEW = 0, + ICE_FLTR_STATUS_FW_SUCCESS, + ICE_FLTR_STATUS_FW_FAIL, +}; + +struct ice_fltr_list_entry { + struct list_head list_entry; + enum ice_sw_fltr_status status; + struct ice_fltr_info fltr_info; +}; + +/* This defines an entry in the list that maintains MAC or VLAN membership + * to HW list mapping, since multiple VSIs can subscribe to the same MAC or + * VLAN. As an optimization the VSI list should be created only when a + * second VSI becomes a subscriber to the VLAN address. 
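+ *
+ * For example (hypothetical VSI numbers): when VSI 2 first subscribes
+ * to VLAN 10, the rule is programmed as ICE_FWD_TO_VSI with
+ * fwd_id.vsi_id = 2. Only when VSI 5 also subscribes is a VSI list
+ * allocated, both VSIs set in vsi_list_info->vsi_map, and the rule
+ * converted to ICE_FWD_TO_VSI_LIST referencing fwd_id.vsi_list_id.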
+ */ +struct ice_fltr_mgmt_list_entry { + /* back pointer to VSI list id to VSI list mapping */ + struct ice_vsi_list_map_info *vsi_list_info; + u16 vsi_count; +#define ICE_INVAL_LG_ACT_INDEX 0xffff + u16 lg_act_idx; +#define ICE_INVAL_SW_MARKER_ID 0xffff + u16 sw_marker_id; + struct list_head list_entry; + struct ice_fltr_info fltr_info; +#define ICE_INVAL_COUNTER_ID 0xff + u8 counter_index; +}; + +/* VSI related commands */ +enum ice_status +ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd); + +enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); + +/* Switch/bridge related commands */ +enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); +enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); +void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id); +enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); +enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list); +enum ice_status +ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction); + +#endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c new file mode 100644 index 000000000000..6481e3d86374 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -0,0 +1,1782 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +/* The driver transmit and receive code */ + +#include <linux/prefetch.h> +#include <linux/mm.h> +#include "ice.h" + +#define ICE_RX_HDR_SIZE 256 + +/** + * ice_unmap_and_free_tx_buf - Release a Tx buffer + * @ring: the ring that owns the buffer + * @tx_buf: the buffer to free + */ +static void +ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf) +{ + if (tx_buf->skb) { + dev_kfree_skb_any(tx_buf->skb); + if (dma_unmap_len(tx_buf, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + } + + tx_buf->next_to_watch = NULL; + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); + /* tx_buf must be completely set up in the transmit path */ +} + +static struct netdev_queue *txring_txq(const struct ice_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->q_index); +} + +/** + * ice_clean_tx_ring - Free any empty Tx buffers + * @tx_ring: ring to be cleaned + */ +void ice_clean_tx_ring(struct ice_ring *tx_ring) +{ + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buf) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) + ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); + + size = sizeof(struct ice_tx_buf) * tx_ring->count; + memset(tx_ring->tx_buf, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + if (!tx_ring->netdev) + return; + + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(txring_txq(tx_ring)); +} + +/** + * ice_free_tx_ring - Free Tx resources per queue + * @tx_ring: Tx descriptor ring for a 
specific queue + * + * Free all transmit software resources + */ +void ice_free_tx_ring(struct ice_ring *tx_ring) +{ + ice_clean_tx_ring(tx_ring); + devm_kfree(tx_ring->dev, tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + + if (tx_ring->desc) { + dmam_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + } +} + +/** + * ice_clean_tx_irq - Reclaim resources after transmit completes + * @vsi: the VSI we care about + * @tx_ring: Tx ring to clean + * @napi_budget: Used to determine if we are in netpoll + * + * Returns true if there's any budget left (e.g. the clean is finished) + */ +static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, + int napi_budget) +{ + unsigned int total_bytes = 0, total_pkts = 0; + unsigned int budget = vsi->work_lmt; + s16 i = tx_ring->next_to_clean; + struct ice_tx_desc *tx_desc; + struct ice_tx_buf *tx_buf; + + tx_buf = &tx_ring->tx_buf[i]; + tx_desc = ICE_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + smp_rmb(); /* prevent any other reads prior to eop_desc */ + + /* if the descriptor isn't done, no work yet to do */ + if (!(eop_desc->cmd_type_offset_bsz & + cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buf->bytecount; + total_pkts += tx_buf->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buf->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + + /* clear tx_buf data */ + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_buf; + tx_desc = ICE_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_buf; + tx_desc = ICE_TX_DESC(tx_ring, 0); + } + + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.pkts += total_pkts; + u64_stats_update_end(&tx_ring->syncp); + tx_ring->q_vector->tx.total_bytes += total_bytes; + tx_ring->q_vector->tx.total_pkts += total_pkts; + + netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, + total_bytes); + +#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) + if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && + (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
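+ *
+ * A sketch of the assumed pairing with the smp_mb() in
+ * __ice_maybe_stop_tx():
+ *
+ *   CPU A (xmit)                   CPU B (clean)
+ *   netif_stop_subqueue()          tx_ring->next_to_clean = i;
+ *   smp_mb()                       smp_mb()
+ *   re-check ICE_DESC_UNUSED()     re-check __netif_subqueue_stopped()
+ *
+ * Whichever side runs second sees the other's update, so the queue
+ * cannot remain stopped while descriptors are actually free.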
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->q_index) && + !test_bit(__ICE_DOWN, vsi->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->q_index); + ++tx_ring->tx_stats.restart_q; + } + } + + return !!budget; +} + +/** + * ice_setup_tx_ring - Allocate the Tx descriptors + * @tx_ring: the tx ring to set up + * + * Return 0 on success, negative on error + */ +int ice_setup_tx_ring(struct ice_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int bi_size; + + if (!dev) + return -ENOMEM; + + /* warn if we are about to overwrite the pointer */ + WARN_ON(tx_ring->tx_buf); + bi_size = sizeof(struct ice_tx_buf) * tx_ring->count; + tx_ring->tx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL); + if (!tx_ring->tx_buf) + return -ENOMEM; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct ice_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, + GFP_KERNEL); + if (!tx_ring->desc) { + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tx_ring->size); + goto err; + } + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + return 0; + +err: + devm_kfree(dev, tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + return -ENOMEM; +} + +/** + * ice_clean_rx_ring - Free Rx buffers + * @rx_ring: ring to be cleaned + */ +void ice_clean_rx_ring(struct ice_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buf) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; + + if (rx_buf->skb) { + dev_kfree_skb(rx_buf->skb); + rx_buf->skb = NULL; + } + if (!rx_buf->page) + continue; + + dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE); + __free_pages(rx_buf->page, 0); + + rx_buf->page = NULL; + rx_buf->page_offset = 0; + } + + size = sizeof(struct ice_rx_buf) * rx_ring->count; + memset(rx_ring->rx_buf, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * ice_free_rx_ring - Free Rx resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + */ +void ice_free_rx_ring(struct ice_ring *rx_ring) +{ + ice_clean_rx_ring(rx_ring); + devm_kfree(rx_ring->dev, rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + + if (rx_ring->desc) { + dmam_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + } +} + +/** + * ice_setup_rx_ring - Allocate the Rx descriptors + * @rx_ring: the rx ring to set up + * + * Return 0 on success, negative on error + */ +int ice_setup_rx_ring(struct ice_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int bi_size; + + if (!dev) + return -ENOMEM; + + /* warn if we are about to overwrite the pointer */ + WARN_ON(rx_ring->rx_buf); + bi_size = sizeof(struct ice_rx_buf) * rx_ring->count; + rx_ring->rx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL); + if (!rx_ring->rx_buf) + return -ENOMEM; + + /* round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, + GFP_KERNEL); + if (!rx_ring->desc) { + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, 
size=%d\n", + rx_ring->size); + goto err; + } + + rx_ring->next_to_use = 0; + rx_ring->next_to_clean = 0; + return 0; + +err: + devm_kfree(dev, rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + return -ENOMEM; +} + +/** + * ice_release_rx_desc - Store the new tail and head values + * @rx_ring: ring to bump + * @val: new head index + */ +static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(val, rx_ring->tail); +} + +/** + * ice_alloc_mapped_page - recycle or make a new page + * @rx_ring: ring to use + * @bi: rx_buf struct to modify + * + * Returns true if the page was successfully allocated or + * reused. + */ +static bool ice_alloc_mapped_page(struct ice_ring *rx_ring, + struct ice_rx_buf *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) { + rx_ring->rx_stats.page_reuse_count++; + return true; + } + + /* alloc new page for storage */ + page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, 0); + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + +/** + * ice_alloc_rx_bufs - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + * + * Returns false if all allocations were successful, true if any fail + */ +bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) +{ + union ice_32b_rx_flex_desc *rx_desc; + u16 ntu = rx_ring->next_to_use; + struct ice_rx_buf *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return false; + + /* get the RX descriptor and buffer based on next_to_use */ + rx_desc = ICE_RX_DESC(rx_ring, ntu); + bi = &rx_ring->rx_buf[ntu]; + + do { + if (!ice_alloc_mapped_page(rx_ring, bi)) + goto no_bufs; + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + ntu++; + if (unlikely(ntu == rx_ring->count)) { + rx_desc = ICE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buf; + ntu = 0; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.status_error0 = 0; + + cleaned_count--; + } while (cleaned_count); + + if (rx_ring->next_to_use != ntu) + ice_release_rx_desc(rx_ring, ntu); + + return false; + +no_bufs: + if (rx_ring->next_to_use != ntu) + ice_release_rx_desc(rx_ring, ntu); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; +} + +/** + * ice_page_is_reserved - check if reuse is possible + * @page: page struct to check + */ +static bool ice_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +/** + * ice_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_buf: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buf to place the data into + * + * This function will add the data contained in rx_buf->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + */ +static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf, + union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = ICE_RXBUF_2048; +#else + unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048; + unsigned int truesize; +#endif /* PAGE_SIZE < 8192) */ + + struct page *page; + unsigned int size; + + size = le16_to_cpu(rx_desc->wb.pkt_len) & + ICE_RX_FLX_DESC_PKT_LEN_M; + + page = rx_buf->page; + +#if (PAGE_SIZE >= 8192) + truesize = ALIGN(size, L1_CACHE_BYTES); +#endif /* PAGE_SIZE >= 8192) */ + + /* will the data fit in the skb we allocated? if so, just + * copy it as it is pretty small anyway + */ + if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buf->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!ice_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + __free_pages(page, 0); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buf->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(ice_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buf->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buf->page_offset += truesize; + + if (rx_buf->page_offset > last_offset) + return false; +#endif /* PAGE_SIZE < 8192) */ + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. 
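+ *
+ * With the half-page flip scheme this keeps the refcount at 2 while
+ * both halves are live: one reference for the fragment handed to the
+ * stack and one for the half retained on the ring. Once the stack
+ * frees its half, the count drops back to 1 and the
+ * page_count(page) != 1 reuse test above passes again.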
+ */ + get_page(rx_buf->page); + + return true; +} + +/** + * ice_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buf: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + */ +static void ice_reuse_rx_page(struct ice_ring *rx_ring, + struct ice_rx_buf *old_buf) +{ + u16 nta = rx_ring->next_to_alloc; + struct ice_rx_buf *new_buf; + + new_buf = &rx_ring->rx_buf[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + *new_buf = *old_buf; +} + +/** + * ice_fetch_rx_buf - Allocate skb and populate it + * @rx_ring: rx descriptor ring to transact packets on + * @rx_desc: descriptor containing info written by hardware + * + * This function allocates an skb on the fly, and populates it with the page + * data from the current receive descriptor, taking care to set up the skb + * correctly, as well as handling calling the page recycle function if + * necessary. + */ +static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) +{ + struct ice_rx_buf *rx_buf; + struct sk_buff *skb; + struct page *page; + + rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; + page = rx_buf->page; + prefetchw(page); + + skb = rx_buf->skb; + + if (likely(!skb)) { + u8 *page_addr = page_address(page) + rx_buf->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch((void *)(page_addr + L1_CACHE_BYTES)); +#endif /* L1_CACHE_BYTES */ + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + ICE_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_buf_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + skb_record_rx_queue(skb, rx_ring->q_index); + } else { + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, + rx_buf->page_offset, + ICE_RXBUF_2048, + DMA_FROM_DEVICE); + + rx_buf->skb = NULL; + } + + /* pull page into skb */ + if (ice_add_rx_frag(rx_buf, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ice_reuse_rx_page(rx_ring, rx_buf); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buf->page = NULL; + + return skb; +} + +/** + * ice_pull_tail - ice specific version of skb_pull_tail + * @skb: pointer to current skb being adjusted + * + * This function is an ice specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. 
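+ *
+ * E.g. (hypothetical sizes): for a 1200 byte TCP segment received
+ * entirely in frags[0], eth_get_headlen() might report a 66 byte
+ * header; those bytes are copied into the linear area, frags[0] is
+ * shrunk by 66 bytes and its offset advanced, leaving only payload
+ * in the fragment while truesize stays accurate.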
+ */ +static void ice_pull_tail(struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned int pull_len; + unsigned char *va; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * ice_cleanup_headers - Correct empty headers + * @skb: pointer to current skb being fixed + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + */ +static bool ice_cleanup_headers(struct sk_buff *skb) +{ + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + ice_pull_tail(skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * ice_test_staterr - tests bits in Rx descriptor status and error fields + * @rx_desc: pointer to receive descriptor (in le64 format) + * @stat_err_bits: value to mask + * + * This function does some fast chicanery in order to return the + * value of the mask which is really only used for boolean tests. + * The status_error_len doesn't need to be shifted because it begins + * at offset zero. + */ +static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, + const u16 stat_err_bits) +{ + return !!(rx_desc->wb.status_error0 & + cpu_to_le16(stat_err_bits)); +} + +/** + * ice_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + */ +static bool ice_is_non_eop(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(ICE_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ +#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) + if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF))) + return false; + + /* place skb in next buffer to be received */ + rx_ring->rx_buf[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +/** + * ice_ptype_to_htype - get a hash type + * @ptype: the ptype value from the descriptor + * + * Returns a hash type to be used by skb_set_hash + */ +static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype) +{ + return PKT_HASH_TYPE_NONE; +} + +/** + * ice_rx_hash - set the hash value in the skb + * @rx_ring: descriptor ring + * @rx_desc: specific descriptor + * @skb: pointer to current skb + * @rx_ptype: the ptype value from the descriptor + */ +static void +ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb, u8 rx_ptype) +{ + struct ice_32b_rx_flex_desc_nic *nic_mdid; + u32 hash; + + if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) + return; + + if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC) + return; + + nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc; + hash = le32_to_cpu(nic_mdid->rss_hash); + skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); +} + +/** + * ice_rx_csum - Indicate in skb if checksum is good + * @vsi: the VSI we care about + * @skb: skb currently being received and modified + * @rx_desc: the receive descriptor + * @ptype: the packet type decoded by hardware + * + * skb->protocol must be set before this function is called + */ +static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, + union ice_32b_rx_flex_desc *rx_desc, u8 ptype) +{ + struct ice_rx_ptype_decoded decoded; + u32 rx_error, rx_status; + bool ipv4, ipv6; + + rx_status = le16_to_cpu(rx_desc->wb.status_error0); + rx_error = rx_status; + + decoded = ice_decode_rx_desc_ptype(ptype); + + /* Start with CHECKSUM_NONE and by default csum_level = 0 */ + skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); + + /* check if Rx checksum is enabled */ + if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + return; + + /* check if HW has decoded the packet and checksum */ + if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) + return; + + if (!(decoded.known && decoded.outer_ip)) + return; + + ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4); + ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6); + + if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | + BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) + goto checksum_fail; + else if (ipv6 && (rx_status & + (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S)))) + goto checksum_fail; + + /* check for L4 errors and handle packets that were not able to be + * checksummed due to arrival speed + */ + if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) + goto checksum_fail; + + /* Only report checksum unnecessary for TCP, UDP, or SCTP */ + switch (decoded.inner_prot) { + case ICE_RX_PTYPE_INNER_PROT_TCP: + case ICE_RX_PTYPE_INNER_PROT_UDP: + case ICE_RX_PTYPE_INNER_PROT_SCTP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + default: + break; + } + return; + +checksum_fail: + vsi->back->hw_csum_rx_error++; +} + +/** + * ice_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * 
@rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * @ptype: the packet type decoded by hardware + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, protocol, and + * other fields within the skb. + */ +static void ice_process_skb_fields(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb, u8 ptype) +{ + ice_rx_hash(rx_ring, rx_desc, skb, ptype); + + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype); +} + +/** + * ice_receive_skb - Send a completed packet up the stack + * @rx_ring: rx ring in play + * @skb: packet to send up + * @vlan_tag: vlan tag for packet + * + * This function sends the completed packet (via skb) up the stack using + * GRO receive functions (with/without vlan tag) + */ +static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, + u16 vlan_tag) +{ + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (vlan_tag & VLAN_VID_MASK)) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + } + napi_gro_receive(&rx_ring->q_vector->napi, skb); +} + +/** + * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed + */ +static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_pkts = 0; + u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); + bool failure = false; + + /* start the loop to process RX packets bounded by 'budget' */ + while (likely(total_rx_pkts < (unsigned int)budget)) { + union ice_32b_rx_flex_desc *rx_desc; + struct sk_buff *skb; + u16 stat_err_bits; + u16 vlan_tag = 0; + u8 rx_ptype; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= ICE_RX_BUF_WRITE) { + failure = failure || + ice_alloc_rx_bufs(rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* get the RX desc from RX ring based on 'next_to_clean' */ + rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); + + /* status_error_len will always be zero for unused descriptors + * because it's cleared in cleanup, and overlaps with hdr_addr + * which is always zero because packet split isn't used; if the + * hardware wrote DD then it will be non-zero + */ + stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); + if (!ice_test_staterr(rx_desc, stat_err_bits)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. 
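+ *
+ * Without this barrier the CPU could read pkt_len, ptype_flex_flags0
+ * or l2tag1 ahead of the DD test above and observe stale values for
+ * a descriptor it then sees as done; dma_rmb() orders the DD check
+ * before every later field read.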
+ */ + dma_rmb(); + + /* allocate (if needed) and populate skb */ + skb = ice_fetch_rx_buf(rx_ring, rx_desc); + if (!skb) + break; + + cleaned_count++; + + /* skip if it is NOP desc */ + if (ice_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); + if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) { + dev_kfree_skb_any(skb); + continue; + } + + rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & + ICE_RX_FLEX_DESC_PTYPE_M; + + stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); + if (ice_test_staterr(rx_desc, stat_err_bits)) + vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); + + /* correct empty headers and pad skb if needed (to make valid + * ethernet frame + */ + if (ice_cleanup_headers(skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, VLAN, and protocol */ + ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); + + /* send completed skb up the stack */ + ice_receive_skb(rx_ring, skb, vlan_tag); + + /* update budget accounting */ + total_rx_pkts++; + } + + /* update queue and vector specific stats */ + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.pkts += total_rx_pkts; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_pkts += total_rx_pkts; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + /* guarantee a trip back through this routine if there was a failure */ + return failure ? budget : (int)total_rx_pkts; +} + +/** + * ice_napi_poll - NAPI polling Rx/Tx cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. + * + * Returns the amount of work done + */ +int ice_napi_poll(struct napi_struct *napi, int budget) +{ + struct ice_q_vector *q_vector = + container_of(napi, struct ice_q_vector, napi); + struct ice_vsi *vsi = q_vector->vsi; + struct ice_pf *pf = vsi->back; + bool clean_complete = true; + int budget_per_ring = 0; + struct ice_ring *ring; + int work_done = 0; + + /* Since the actual Tx work is minimal, we can give the Tx a larger + * budget and be more aggressive about cleaning up the Tx descriptors. + */ + ice_for_each_ring(ring, q_vector->tx) + if (!ice_clean_tx_irq(vsi, ring, budget)) + clean_complete = false; + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + return budget; + + /* We attempt to distribute budget to each Rx queue fairly, but don't + * allow the budget to go below 1 because that would exit polling early. 
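+ *
+ * E.g. with the default NAPI budget of 64 and a q_vector driving four
+ * Rx rings, each ring is polled with budget_per_ring = 16; with more
+ * than 64 rings the max() below still hands each ring a budget of 1
+ * rather than 0.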
+ */ + if (q_vector->num_ring_rx) + budget_per_ring = max(budget / q_vector->num_ring_rx, 1); + + ice_for_each_ring(ring, q_vector->rx) { + int cleaned; + + cleaned = ice_clean_rx_irq(ring, budget_per_ring); + work_done += cleaned; + /* if we clean as many as budgeted, we must not be done */ + if (cleaned >= budget_per_ring) + clean_complete = false; + } + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) + return budget; + + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete_done(napi, work_done); + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector); + return 0; +} + +/* helper function for building cmd/type/offset */ +static __le64 +build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) +{ + return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA | + (td_cmd << ICE_TXD_QW1_CMD_S) | + (td_offset << ICE_TXD_QW1_OFFSET_S) | + ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) | + (td_tag << ICE_TXD_QW1_L2TAG1_S)); +} + +/** + * __ice_maybe_stop_tx - 2nd level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns -EBUSY if a stop is needed, else 0 + */ +static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(ICE_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_subqueue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); + ++tx_ring->tx_stats.restart_q; + return 0; +} + +/** + * ice_maybe_stop_tx - 1st level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns 0 if stop is not needed + */ +static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) +{ + if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __ice_maybe_stop_tx(tx_ring, size); +} + +/** + * ice_tx_map - Build the Tx descriptor + * @tx_ring: ring to send buffer on + * @first: first buffer info buffer to use + * @off: pointer to struct that holds offload parameters + * + * This function loops over the skb data pointed to by *first + * and gets a physical address for each memory location and programs + * it and the length into the transmit descriptor. 
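+ *
+ * Each data descriptor is composed with build_ctob() above: DTYPE,
+ * command bits, offload offsets, buffer size and L2 tag packed into
+ * cmd_type_offset_bsz, with buf_addr carrying the DMA address. A
+ * frame with a linear head and N page fragments thus consumes at
+ * least N + 1 descriptors, more when a chunk exceeds
+ * ICE_MAX_DATA_PER_TXD.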
+ */ +static void +ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, + struct ice_tx_offload_params *off) +{ + u64 td_offset, td_tag, td_cmd; + u16 i = tx_ring->next_to_use; + struct skb_frag_struct *frag; + unsigned int data_len, size; + struct ice_tx_desc *tx_desc; + struct ice_tx_buf *tx_buf; + struct sk_buff *skb; + dma_addr_t dma; + + td_tag = off->td_l2tag1; + td_cmd = off->td_cmd; + td_offset = off->td_offset; + skb = first->skb; + + data_len = skb->data_len; + size = skb_headlen(skb); + + tx_desc = ICE_TX_DESC(tx_ring, i); + + if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { + td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; + td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> + ICE_TX_FLAGS_VLAN_S; + } + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buf = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; + + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buf, len, size); + dma_unmap_addr_set(tx_buf, dma, dma); + + /* align size to end of page */ + max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); + tx_desc->buf_addr = cpu_to_le64(dma); + + /* account for data chunks larger than the hardware + * can handle + */ + while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, max_data, td_tag); + + tx_desc++; + i++; + + if (i == tx_ring->count) { + tx_desc = ICE_TX_DESC(tx_ring, 0); + i = 0; + } + + dma += max_data; + size -= max_data; + + max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; + tx_desc->buf_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, + size, td_tag); + + tx_desc++; + i++; + + if (i == tx_ring->count) { + tx_desc = ICE_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buf = &tx_ring->tx_buf[i]; + } + + /* record bytecount for BQL */ + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* record SW timestamp if HW timestamp is not available */ + skb_tx_timestamp(first->skb); + + i++; + if (i == tx_ring->count) + i = 0; + + /* write last descriptor with RS and EOP bits */ + td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS); + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + * + * We also use this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
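+ *
+ * This is assumed to pair with the smp_rmb() after the next_to_watch
+ * read in ice_clean_tx_irq(): the RS/EOP descriptor write above must
+ * be visible before the cleanup path can observe a non-NULL
+ * next_to_watch and start walking the descriptor chain.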
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + tx_ring->next_to_use = i; + + ice_maybe_stop_tx(tx_ring, DESC_NEEDED); + + /* notify HW of packet */ + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); + } + + return; + +dma_error: + /* clear dma mappings for failed tx_buf map */ + for (;;) { + tx_buf = &tx_ring->tx_buf[i]; + ice_unmap_and_free_tx_buf(tx_ring, tx_buf); + if (tx_buf == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +/** + * ice_tx_csum - Enable Tx checksum offloads + * @first: pointer to the first descriptor + * @off: pointer to struct that holds offload parameters + * + * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. + */ +static +int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) +{ + u32 l4_len = 0, l3_len = 0, l2_len = 0; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + __be16 frag_off, protocol; + unsigned char *exthdr; + u32 offset, cmd = 0; + u8 l4_proto = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute outer L2 header size */ + l2_len = ip.hdr - skb->data; + offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; + + if (skb->encapsulation) + return -1; + + /* Enable IP checksum offloads */ + protocol = vlan_get_protocol(skb); + if (protocol == htons(ETH_P_IP)) { + l4_proto = ip.v4->protocol; + /* the stack computes the IP header already, the only time we + * need the hardware to recompute it is in the case of TSO. + */ + if (first->tx_flags & ICE_TX_FLAGS_TSO) + cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; + else + cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; + + } else if (protocol == htons(ETH_P_IPV6)) { + cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, + &frag_off); + } else { + return -1; + } + + /* compute inner L3 header size */ + l3_len = l4.hdr - ip.hdr; + offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; + + /* Enable L4 checksum offloads */ + switch (l4_proto) { + case IPPROTO_TCP: + /* enable checksum offloads */ + cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; + l4_len = l4.tcp->doff; + offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; + break; + case IPPROTO_UDP: + /* enable UDP checksum offload */ + cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; + l4_len = (sizeof(struct udphdr) >> 2); + offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; + break; + case IPPROTO_SCTP: + default: + if (first->tx_flags & ICE_TX_FLAGS_TSO) + return -1; + skb_checksum_help(skb); + return 0; + } + + off->td_cmd |= cmd; + off->td_offset |= offset; + return 1; +} + +/** + * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW + * @tx_ring: ring to send buffer on + * @first: pointer to struct ice_tx_buf + * + * Checks the skb and set up correspondingly several generic transmit flags + * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 
+ * + * Returns an error code to indicate the frame should be dropped upon + * error, otherwise returns 0 to indicate the flags have been set + * properly. + */ +static int +ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first) +{ + struct sk_buff *skb = first->skb; + __be16 protocol = skb->protocol; + + if (protocol == htons(ETH_P_8021Q) && + !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { + /* when HW VLAN acceleration is turned off by the user the + * stack sets the protocol to 8021q so that the driver + * can take any steps required to support the SW only + * VLAN handling. In our case the driver doesn't need + * to take any further steps so just set the protocol + * to the encapsulated ethertype. + */ + skb->protocol = vlan_get_protocol(skb); + goto out; + } + + /* if we have a HW VLAN tag being added, default to the HW one */ + if (skb_vlan_tag_present(skb)) { + first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; + first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + + /* for SW VLAN, check the next protocol and store the tag */ + vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN, + sizeof(_vhdr), + &_vhdr); + if (!vhdr) + return -EINVAL; + + first->tx_flags |= ntohs(vhdr->h_vlan_TCI) << + ICE_TX_FLAGS_VLAN_S; + first->tx_flags |= ICE_TX_FLAGS_SW_VLAN; + } + +out: + return 0; +} + +/** + * ice_tso - computes mss and TSO length to prepare for TSO + * @first: pointer to struct ice_tx_buf + * @off: pointer to struct that holds offload parameters + * + * Returns 0 or error (negative) if TSO can't happen, 1 otherwise. + */ +static +int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) +{ + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u64 cd_mss, cd_tso_len; + u32 paylen, l4_start; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + + /* determine offset of transport header */ + l4_start = l4.hdr - skb->data; + + /* remove payload length from checksum */ + paylen = skb->len - l4_start; + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); + + /* compute length of segmentation header */ + off->header_len = (l4.tcp->doff * 4) + l4_start; + + /* update gso_segs and bytecount */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * off->header_len; + + cd_tso_len = skb->len - off->header_len; + cd_mss = skb_shinfo(skb)->gso_size; + + /* record cdesc_qw1 with TSO parameters */ + off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX | + (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) | + (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | + (cd_mss << ICE_TXD_CTX_QW1_MSS_S); + first->tx_flags |= ICE_TX_FLAGS_TSO; + return 1; +} + +/** + * ice_txd_use_count - estimate the number of descriptors needed for Tx + * @size: transmit request size in bytes + * + * Due to hardware alignment restrictions (4K alignment), we need to + * assume that we can have no more than 12K of data per descriptor, even + * though each descriptor can take up to 16K - 1 bytes of 
aligned memory. + * Thus, we need to divide by 12K. But division is slow! Instead, + * we decompose the operation into shifts and one relatively cheap + * multiply operation. + * + * To divide by 12K, we first divide by 4K, then divide by 3: + * To divide by 4K, shift right by 12 bits + * To divide by 3, multiply by 85, then divide by 256 + * (Divide by 256 is done by shifting right by 8 bits) + * Finally, we add one to round up. Because 256 isn't an exact multiple of + * 3, we'll underestimate near each multiple of 12K. This is actually more + * accurate as we have 4K - 1 of wiggle room that we can fit into the last + * segment. For our purposes this is accurate out to 1M which is orders of + * magnitude greater than our largest possible GSO size. + * + * This would then be implemented as: + * return (((size >> 12) * 85) >> 8) + 1; + * + * Since multiplication and division are commutative, we can reorder + * operations into: + * return ((size * 85) >> 20) + 1; + */ +static unsigned int ice_txd_use_count(unsigned int size) +{ + return ((size * 85) >> 20) + 1; +} + +/** + * ice_xmit_desc_count - calculate number of tx descriptors needed + * @skb: send buffer + * + * Returns number of data descriptors needed for this skb. + */ +static unsigned int ice_xmit_desc_count(struct sk_buff *skb) +{ + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int count = 0, size = skb_headlen(skb); + + for (;;) { + count += ice_txd_use_count(size); + + if (!nr_frags--) + break; + + size = skb_frag_size(frag++); + } + + return count; +} + +/** + * __ice_chk_linearize - Check if there are more than 8 buffers per packet + * @skb: send buffer + * + * Note: This HW can't DMA more than 8 buffers to build a packet on the wire + * and so we need to figure out the cases where we need to linearize the skb. + * + * For TSO we need to count the TSO header and segment payload separately. + * As such we need to check cases where we have 7 fragments or more as we + * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for + * the segment payload in the first descriptor, and another 7 for the + * fragments. + */ +static bool __ice_chk_linearize(struct sk_buff *skb) +{ + const struct skb_frag_struct *frag, *stale; + int nr_frags, sum; + + /* no need to check if number of frags is less than 7 */ + nr_frags = skb_shinfo(skb)->nr_frags; + if (nr_frags < (ICE_MAX_BUF_TXD - 1)) + return false; + + /* We need to walk through the list and validate that each group + * of 6 fragments totals at least gso_size. + */ + nr_frags -= ICE_MAX_BUF_TXD - 2; + frag = &skb_shinfo(skb)->frags[0]; + + /* Initialize size to the negative value of gso_size minus 1. We + * use this as the worst case scenario in which the frag ahead + * of us only provides one byte, which is why we are limited to 6 + * descriptors for a single transmit, as the header and previous + * fragment are already consuming 2 descriptors. + */ + sum = 1 - skb_shinfo(skb)->gso_size; + + /* Add size of frags 0 through 4 to create our initial sum */ + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + + /* Walk through fragments adding latest fragment, testing it, and + * then removing stale fragments from the sum. 
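+ *
+ * Worked example (hypothetical sizes): gso_size = 9000 with eight
+ * 1500 byte frags. sum starts at 1 - 9000 = -8999; frags 0 through 4
+ * bring it to -1499, and each iteration below adds one frag and then
+ * drops the stale one, so every six-frag window totals exactly 9000
+ * and sum returns to 1. Only if some six-frag window summed to less
+ * than gso_size - 1 would sum go negative and force linearization.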
+ */ + stale = &skb_shinfo(skb)->frags[0]; + for (;;) { + sum += skb_frag_size(frag++); + + /* if sum is negative we failed to make sufficient progress */ + if (sum < 0) + return true; + + if (!nr_frags--) + break; + + sum -= skb_frag_size(stale++); + } + + return false; +} + +/** + * ice_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @count: number of buffers used + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. + */ +static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) +{ + /* Both TSO and single send will work if count is less than 8 */ + if (likely(count < ICE_MAX_BUF_TXD)) + return false; + + if (skb_is_gso(skb)) + return __ice_chk_linearize(skb); + + /* we can support up to 8 data buffers for a single send */ + return count != ICE_MAX_BUF_TXD; +} + +/** + * ice_xmit_frame_ring - Sends buffer on Tx ring + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns NETDEV_TX_OK if sent, else an error code + */ +static netdev_tx_t +ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) +{ + struct ice_tx_offload_params offload = { 0 }; + struct ice_tx_buf *first; + unsigned int count; + int tso, csum; + + count = ice_xmit_desc_count(skb); + if (ice_chk_linearize(skb, count)) { + if (__skb_linearize(skb)) + goto out_drop; + count = ice_txd_use_count(skb->len); + tx_ring->tx_stats.tx_linearize++; + } + + /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD, + * + 4 desc gap to avoid the cache line where head is, + * + 1 desc for context descriptor, + * otherwise try next time + */ + if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + offload.tx_ring = tx_ring; + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buf[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); + first->gso_segs = 1; + first->tx_flags = 0; + + /* prepare the VLAN tagging flags for Tx */ + if (ice_tx_prepare_vlan_flags(tx_ring, first)) + goto out_drop; + + /* set up TSO offload */ + tso = ice_tso(first, &offload); + if (tso < 0) + goto out_drop; + + /* always set up Tx checksum offload */ + csum = ice_tx_csum(first, &offload); + if (csum < 0) + goto out_drop; + + if (tso || offload.cd_tunnel_params) { + struct ice_tx_ctx_desc *cdesc; + int i = tx_ring->next_to_use; + + /* grab the next descriptor */ + cdesc = ICE_TX_CTX_DESC(tx_ring, i); + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* setup context descriptor */ + cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); + cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); + cdesc->rsvd = cpu_to_le16(0); + cdesc->qw1 = cpu_to_le64(offload.cd_qw1); + } + + ice_tx_map(tx_ring, first, &offload); + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/** + * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer + * @skb: send buffer + * @netdev: network interface device structure + * + * Returns NETDEV_TX_OK if sent, else an error code + */ +netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_ring *tx_ring; + + tx_ring = vsi->tx_rings[skb->queue_mapping]; + + /* hardware can't handle really short frames, hardware padding works + * beyond this point + */ + if (skb_put_padto(skb, ICE_MIN_TX_LEN)) + return NETDEV_TX_OK; + + return ice_xmit_frame_ring(skb, tx_ring); +} diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h new file mode 100644 index 000000000000..567067b650c4 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_TXRX_H_ +#define _ICE_TXRX_H_ + +#define ICE_DFLT_IRQ_WORK 256 +#define ICE_RXBUF_2048 2048 +#define ICE_MAX_CHAINED_RX_BUFS 5 +#define ICE_MAX_BUF_TXD 8 +#define ICE_MIN_TX_LEN 17 + +/* The size limit for a transmit buffer in a descriptor is (16K - 1). + * In order to align with the read requests we will align the value to + * the nearest 4K which represents our maximum read request size. + */ +#define ICE_MAX_READ_REQ_SIZE 4096 +#define ICE_MAX_DATA_PER_TXD (16 * 1024 - 1) +#define ICE_MAX_DATA_PER_TXD_ALIGNED \ + (~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD) + +#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */ +#define ICE_MAX_TXQ_PER_TXQG 128 + +/* Tx Descriptors needed, worst case */ +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define ICE_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +#define ICE_TX_FLAGS_TSO BIT(0) +#define ICE_TX_FLAGS_HW_VLAN BIT(1) +#define ICE_TX_FLAGS_SW_VLAN BIT(2) +#define ICE_TX_FLAGS_VLAN_M 0xffff0000 +#define ICE_TX_FLAGS_VLAN_S 16 + +struct ice_tx_buf { + struct ice_tx_desc *next_to_watch; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + u32 tx_flags; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); +}; + +struct ice_tx_offload_params { + u8 header_len; + u32 td_cmd; + u32 td_offset; + u32 td_l2tag1; + u16 cd_l2tag2; + u32 cd_tunnel_params; + u64 cd_qw1; + struct ice_ring *tx_ring; +}; + +struct ice_rx_buf { + struct sk_buff *skb; + dma_addr_t dma; + struct page *page; + unsigned int page_offset; +}; + +struct ice_q_stats { + u64 pkts; + u64 bytes; +}; + +struct ice_txq_stats { + u64 restart_q; + u64 tx_busy; + u64 tx_linearize; +}; + +struct ice_rxq_stats { + u64 non_eop_descs; + u64 alloc_page_failed; + u64 alloc_buf_failed; + u64 page_reuse_count; +}; + +/* this enum matches hardware bits and is meant to be used by DYN_CTLN + * registers and QINT registers or more generally anywhere in the manual + * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any + * register but instead is a special value meaning "don't update" ITR0/1/2. 
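The sliding-window test in __ice_chk_linearize() above is easier to follow outside the skb plumbing. Here is a minimal userspace sketch under the assumption that fragment sizes arrive as a plain int array; needs_linearize() and MAX_BUF_TXD are illustrative stand-ins, not driver API.

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUF_TXD 8   /* mirrors ICE_MAX_BUF_TXD */

static bool needs_linearize(const int *frag_size, int nr_frags, int gso_size)
{
        int sum, latest, stale = 0;

        /* fewer than 7 fragments always fits within 8 descriptors */
        if (nr_frags < MAX_BUF_TXD - 1)
                return false;

        /* worst case: the fragment ahead contributes a single byte */
        sum = 1 - gso_size;

        /* prime the window with the first five fragments */
        for (latest = 0; latest < 5; latest++)
                sum += frag_size[latest];

        /* slide: add the newest fragment, test, then drop the oldest */
        for (; latest < nr_frags; latest++) {
                sum += frag_size[latest];
                if (sum < 0)    /* a 6-fragment window fell short of gso_size */
                        return true;
                sum -= frag_size[stale++];
        }
        return false;
}

int main(void)
{
        int ok[7]   = { 4096, 4096, 4096, 4096, 4096, 4096, 4096 };
        int tiny[7] = { 1, 1, 1, 1, 1, 1, 1 };

        printf("%d %d\n", needs_linearize(ok, 7, 9000),
               needs_linearize(tiny, 7, 9000));  /* prints: 0 1 */
        return 0;
}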
+ */ +enum ice_dyn_idx_t { + ICE_IDX_ITR0 = 0, + ICE_IDX_ITR1 = 1, + ICE_IDX_ITR2 = 2, + ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */ +}; + +/* Header split modes defined by DTYPE field of Rx RLAN context */ +enum ice_rx_dtype { + ICE_RX_DTYPE_NO_SPLIT = 0, + ICE_RX_DTYPE_HEADER_SPLIT = 1, + ICE_RX_DTYPE_SPLIT_ALWAYS = 2, +}; + +/* indices into GLINT_ITR registers */ +#define ICE_RX_ITR ICE_IDX_ITR0 +#define ICE_TX_ITR ICE_IDX_ITR1 +#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ +#define ICE_ITR_8K 0x003E + +/* apply ITR HW granularity translation to program the HW registers */ +#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran)) + +/* Legacy or Advanced Mode Queue */ +#define ICE_TX_ADVANCED 0 +#define ICE_TX_LEGACY 1 + +/* descriptor ring, associated with a VSI */ +struct ice_ring { + struct ice_ring *next; /* pointer to next ring in q_vector */ + void *desc; /* Descriptor ring memory */ + struct device *dev; /* Used for DMA mapping */ + struct net_device *netdev; /* netdev ring maps to */ + struct ice_vsi *vsi; /* Backreference to associated VSI */ + struct ice_q_vector *q_vector; /* Backreference to associated vector */ + u8 __iomem *tail; + union { + struct ice_tx_buf *tx_buf; + struct ice_rx_buf *rx_buf; + }; + u16 q_index; /* Queue number of ring */ + u32 txq_teid; /* Added Tx queue TEID */ + + /* high bit set means dynamic, use accessor routines to read/write. + * hardware supports 2us/1us resolution for the ITR registers. + * these values always store the USER setting, and must be converted + * before programming to a register. + */ + u16 rx_itr_setting; + u16 tx_itr_setting; + + u16 count; /* Number of descriptors */ + u16 reg_idx; /* HW register index of the ring */ + + /* used in interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + bool ring_active; /* is ring online or not */ + + /* stats structs */ + struct ice_q_stats stats; + struct u64_stats_sync syncp; + union { + struct ice_txq_stats tx_stats; + struct ice_rxq_stats rx_stats; + }; + + unsigned int size; /* length of descriptor ring in bytes */ + dma_addr_t dma; /* physical address of ring */ + struct rcu_head rcu; /* to avoid race on free */ + u16 next_to_alloc; +} ____cacheline_internodealigned_in_smp; + +enum ice_latency_range { + ICE_LOWEST_LATENCY = 0, + ICE_LOW_LATENCY = 1, + ICE_BULK_LATENCY = 2, + ICE_ULTRA_LATENCY = 3, +}; + +struct ice_ring_container { + /* array of pointers to rings */ + struct ice_ring *ring; + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_pkts; /* total packets processed this int */ + enum ice_latency_range latency_range; + u16 itr; +}; + +/* iterator for handling rings in ring container */ +#define ice_for_each_ring(pos, head) \ + for (pos = (head).ring; pos; pos = pos->next) + +bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count); +netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev); +void ice_clean_tx_ring(struct ice_ring *tx_ring); +void ice_clean_rx_ring(struct ice_ring *rx_ring); +int ice_setup_tx_ring(struct ice_ring *tx_ring); +int ice_setup_rx_ring(struct ice_ring *rx_ring); +void ice_free_tx_ring(struct ice_ring *tx_ring); +void ice_free_rx_ring(struct ice_ring *rx_ring); +int ice_napi_poll(struct napi_struct *napi, int budget); + +#endif /* _ICE_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h new file mode 100644 index 000000000000..99c8a9a71b5e --- /dev/null +++ 
b/drivers/net/ethernet/intel/ice/ice_type.h @@ -0,0 +1,394 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_TYPE_H_ +#define _ICE_TYPE_H_ + +#include "ice_status.h" +#include "ice_hw_autogen.h" +#include "ice_osdep.h" +#include "ice_controlq.h" +#include "ice_lan_tx_rx.h" + +#define ICE_BYTES_PER_WORD 2 +#define ICE_BYTES_PER_DWORD 4 + +static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) +{ + return test_bit(tc, (unsigned long *)&bitmap); +} + +/* debug masks - set these bits in hw->debug_mask to control output */ +#define ICE_DBG_INIT BIT_ULL(1) +#define ICE_DBG_LINK BIT_ULL(4) +#define ICE_DBG_QCTX BIT_ULL(6) +#define ICE_DBG_NVM BIT_ULL(7) +#define ICE_DBG_LAN BIT_ULL(8) +#define ICE_DBG_SW BIT_ULL(13) +#define ICE_DBG_SCHED BIT_ULL(14) +#define ICE_DBG_RES BIT_ULL(17) +#define ICE_DBG_AQ_MSG BIT_ULL(24) +#define ICE_DBG_AQ_CMD BIT_ULL(27) +#define ICE_DBG_USER BIT_ULL(31) + +enum ice_aq_res_ids { + ICE_NVM_RES_ID = 1, + ICE_SPD_RES_ID, + ICE_GLOBAL_CFG_LOCK_RES_ID, + ICE_CHANGE_LOCK_RES_ID +}; + +enum ice_aq_res_access_type { + ICE_RES_READ = 1, + ICE_RES_WRITE +}; + +enum ice_fc_mode { + ICE_FC_NONE = 0, + ICE_FC_RX_PAUSE, + ICE_FC_TX_PAUSE, + ICE_FC_FULL, + ICE_FC_PFC, + ICE_FC_DFLT +}; + +enum ice_set_fc_aq_failures { + ICE_SET_FC_AQ_FAIL_NONE = 0, + ICE_SET_FC_AQ_FAIL_GET, + ICE_SET_FC_AQ_FAIL_SET, + ICE_SET_FC_AQ_FAIL_UPDATE +}; + +/* Various MAC types */ +enum ice_mac_type { + ICE_MAC_UNKNOWN = 0, + ICE_MAC_GENERIC, +}; + +/* Media Types */ +enum ice_media_type { + ICE_MEDIA_UNKNOWN = 0, + ICE_MEDIA_FIBER, + ICE_MEDIA_BASET, + ICE_MEDIA_BACKPLANE, + ICE_MEDIA_DA, +}; + +enum ice_vsi_type { + ICE_VSI_PF = 0, +}; + +struct ice_link_status { + /* Refer to ice_aq_phy_type for bits definition */ + u64 phy_type_low; + u16 max_frame_size; + u16 link_speed; + bool lse_ena; /* Link Status Event notification */ + u8 link_info; + u8 an_info; + u8 ext_info; + u8 pacing; + u8 req_speeds; + /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of + * ice_aqc_get_phy_caps structure + */ + u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; +}; + +/* PHY info such as phy_type, etc... 
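ice_is_tc_ena() above treats a u8 as a bitmap of enabled traffic classes, one bit per TC. A quick standalone illustration; is_tc_ena() here is a plain-C stand-in for the test_bit() call, and the 0x05 bitmap is made up.

#include <stdbool.h>
#include <stdio.h>

static bool is_tc_ena(unsigned char bitmap, unsigned char tc)
{
        return bitmap & (1u << tc);     /* bit n set => TC n enabled */
}

int main(void)
{
        unsigned char ena_tc = 0x05;    /* TC0 and TC2 enabled */
        unsigned int tc;

        for (tc = 0; tc < 8; tc++)      /* ICE_MAX_TRAFFIC_CLASS == 8 */
                printf("TC%u: %s\n", tc,
                       is_tc_ena(ena_tc, tc) ? "enabled" : "disabled");
        return 0;
}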
*/ +struct ice_phy_info { + struct ice_link_status link_info; + struct ice_link_status link_info_old; + u64 phy_type_low; + enum ice_media_type media_type; + bool get_link_info; +}; + +/* Common HW capabilities for SW use */ +struct ice_hw_common_caps { + /* TX/RX queues */ + u16 num_rxq; /* Number/Total RX queues */ + u16 rxq_first_id; /* First queue ID for RX queues */ + u16 num_txq; /* Number/Total TX queues */ + u16 txq_first_id; /* First queue ID for TX queues */ + + /* MSI-X vectors */ + u16 num_msix_vectors; + u16 msix_vector_first_id; + + /* Max MTU for function or device */ + u16 max_mtu; + + /* RSS related capabilities */ + u16 rss_table_size; /* 512 for PFs and 64 for VFs */ + u8 rss_table_entry_width; /* RSS Entry width in bits */ +}; + +/* Function specific capabilities */ +struct ice_hw_func_caps { + struct ice_hw_common_caps common_cap; + u32 guaranteed_num_vsi; +}; + +/* Device wide capabilities */ +struct ice_hw_dev_caps { + struct ice_hw_common_caps common_cap; + u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ +}; + +/* MAC info */ +struct ice_mac_info { + u8 lan_addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; +}; + +/* Various RESET request, These are not tied with HW reset types */ +enum ice_reset_req { + ICE_RESET_PFR = 0, + ICE_RESET_CORER = 1, + ICE_RESET_GLOBR = 2, +}; + +/* Bus parameters */ +struct ice_bus_info { + u16 device; + u8 func; +}; + +/* Flow control (FC) parameters */ +struct ice_fc_info { + enum ice_fc_mode current_mode; /* FC mode in effect */ + enum ice_fc_mode req_mode; /* FC mode requested by caller */ +}; + +/* NVM Information */ +struct ice_nvm_info { + u32 eetrack; /* NVM data version */ + u32 oem_ver; /* OEM version info */ + u16 sr_words; /* Shadow RAM size in words */ + u16 ver; /* NVM package version */ + bool blank_nvm_mode; /* is NVM empty (no FW present) */ +}; + +/* Max number of port to queue branches w.r.t topology */ +#define ICE_MAX_TRAFFIC_CLASS 8 +#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS + +struct ice_sched_node { + struct ice_sched_node *parent; + struct ice_sched_node *sibling; /* next sibling in the same layer */ + struct ice_sched_node **children; + struct ice_aqc_txsched_elem_data info; + u32 agg_id; /* aggregator group id */ + u16 vsi_id; + bool in_use; /* suspended or in use */ + u8 tx_sched_layer; /* Logical Layer (1-9) */ + u8 num_children; + u8 tc_num; + u8 owner; +#define ICE_SCHED_NODE_OWNER_LAN 0 +}; + +/* Access Macros for Tx Sched Elements data */ +#define ICE_TXSCHED_GET_NODE_TEID(x) le32_to_cpu((x)->info.node_teid) + +/* The aggregator type determines if identifier is for a VSI group, + * aggregator group, aggregator of queues, or queue group. 
+ */ +enum ice_agg_type { + ICE_AGG_TYPE_UNKNOWN = 0, + ICE_AGG_TYPE_VSI, + ICE_AGG_TYPE_AGG, /* aggregator */ + ICE_AGG_TYPE_Q, + ICE_AGG_TYPE_QG +}; + +#define ICE_SCHED_DFLT_RL_PROF_ID 0 + +/* vsi type list entry to locate corresponding vsi/ag nodes */ +struct ice_sched_vsi_info { + struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS]; + struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS]; + struct list_head list_entry; + u16 max_lanq[ICE_MAX_TRAFFIC_CLASS]; + u16 vsi_id; +}; + +/* driver defines the policy */ +struct ice_sched_tx_policy { + u16 max_num_vsis; + u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS]; + bool rdma_ena; +}; + +struct ice_port_info { + struct ice_sched_node *root; /* Root Node per Port */ + struct ice_hw *hw; /* back pointer to hw instance */ + u32 last_node_teid; /* scheduler last node info */ + u16 sw_id; /* Initial switch ID belongs to port */ + u16 pf_vf_num; + u8 port_state; +#define ICE_SCHED_PORT_STATE_INIT 0x0 +#define ICE_SCHED_PORT_STATE_READY 0x1 + u16 dflt_tx_vsi_rule_id; + u16 dflt_tx_vsi_num; + u16 dflt_rx_vsi_rule_id; + u16 dflt_rx_vsi_num; + struct ice_fc_info fc; + struct ice_mac_info mac; + struct ice_phy_info phy; + struct mutex sched_lock; /* protect access to TXSched tree */ + struct ice_sched_tx_policy sched_policy; + struct list_head vsi_info_list; + struct list_head agg_list; /* lists all aggregator */ + u8 lport; +#define ICE_LPORT_MASK 0xff + bool is_vf; +}; + +struct ice_switch_info { + /* Switch VSI lists to MAC/VLAN translation */ + struct mutex mac_list_lock; /* protect MAC list */ + struct list_head mac_list_head; + struct mutex vlan_list_lock; /* protect VLAN list */ + struct list_head vlan_list_head; + struct mutex eth_m_list_lock; /* protect ethtype list */ + struct list_head eth_m_list_head; + struct mutex promisc_list_lock; /* protect promisc mode list */ + struct list_head promisc_list_head; + struct mutex mac_vlan_list_lock; /* protect MAC-VLAN list */ + struct list_head mac_vlan_list_head; + + struct list_head vsi_list_map_head; +}; + +/* Port hardware description */ +struct ice_hw { + u8 __iomem *hw_addr; + void *back; + struct ice_aqc_layer_props *layer_info; + struct ice_port_info *port_info; + u64 debug_mask; /* bitmap for debug mask */ + enum ice_mac_type mac_type; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + + u8 pf_id; /* device profile info */ + + /* TX Scheduler values */ + u16 num_tx_sched_layers; + u16 num_tx_sched_phys_layers; + u8 flattened_layers; + u8 max_cgds; + u8 sw_entry_point_layer; + + bool evb_veb; /* true for VEB, false for VEPA */ + struct ice_bus_info bus; + struct ice_nvm_info nvm; + struct ice_hw_dev_caps dev_caps; /* device capabilities */ + struct ice_hw_func_caps func_caps; /* function capabilities */ + + struct ice_switch_info *switch_info; /* switch filter lists */ + + /* Control Queue info */ + struct ice_ctl_q_info adminq; + + u8 api_branch; /* API branch version */ + u8 api_maj_ver; /* API major version */ + u8 api_min_ver; /* API minor version */ + u8 api_patch; /* API patch version */ + u8 fw_branch; /* firmware branch version */ + u8 fw_maj_ver; /* firmware major version */ + u8 fw_min_ver; /* firmware minor version */ + u8 fw_patch; /* firmware patch version */ + u32 fw_build; /* firmware build number */ + + /* minimum allowed value for different speeds */ +#define ICE_ITR_GRAN_MIN_200 1 +#define ICE_ITR_GRAN_MIN_100 1 +#define ICE_ITR_GRAN_MIN_50 2 +#define ICE_ITR_GRAN_MIN_25 4 + /* ITR granularity in 
1 us */ + u8 itr_gran_200; + u8 itr_gran_100; + u8 itr_gran_50; + u8 itr_gran_25; + bool ucast_shared; /* true if VSIs can share unicast addr */ + +}; + +/* Statistics collected by each port, VSI, VEB, and S-channel */ +struct ice_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; + +/* Statistics collected by the MAC */ +struct ice_hw_port_stats { + /* eth stats collected by the port */ + struct ice_eth_stats eth; + /* additional port specific stats */ + u64 tx_dropped_link_down; /* tdold */ + u64 crc_errors; /* crcerrs */ + u64 illegal_bytes; /* illerrc */ + u64 error_bytes; /* errbc */ + u64 mac_local_faults; /* mlfc */ + u64 mac_remote_faults; /* mrfc */ + u64 rx_len_errors; /* rlec */ + u64 link_xon_rx; /* lxonrxc */ + u64 link_xoff_rx; /* lxoffrxc */ + u64 link_xon_tx; /* lxontxc */ + u64 link_xoff_tx; /* lxofftxc */ + u64 rx_size_64; /* prc64 */ + u64 rx_size_127; /* prc127 */ + u64 rx_size_255; /* prc255 */ + u64 rx_size_511; /* prc511 */ + u64 rx_size_1023; /* prc1023 */ + u64 rx_size_1522; /* prc1522 */ + u64 rx_size_big; /* prc9522 */ + u64 rx_undersize; /* ruc */ + u64 rx_fragments; /* rfc */ + u64 rx_oversize; /* roc */ + u64 rx_jabber; /* rjc */ + u64 tx_size_64; /* ptc64 */ + u64 tx_size_127; /* ptc127 */ + u64 tx_size_255; /* ptc255 */ + u64 tx_size_511; /* ptc511 */ + u64 tx_size_1023; /* ptc1023 */ + u64 tx_size_1522; /* ptc1522 */ + u64 tx_size_big; /* ptc9522 */ +}; + +/* Checksum and Shadow RAM pointers */ +#define ICE_SR_NVM_DEV_STARTER_VER 0x18 +#define ICE_SR_NVM_EETRACK_LO 0x2D +#define ICE_SR_NVM_EETRACK_HI 0x2E +#define ICE_NVM_VER_LO_SHIFT 0 +#define ICE_NVM_VER_LO_MASK (0xff << ICE_NVM_VER_LO_SHIFT) +#define ICE_NVM_VER_HI_SHIFT 12 +#define ICE_NVM_VER_HI_MASK (0xf << ICE_NVM_VER_HI_SHIFT) +#define ICE_OEM_VER_PATCH_SHIFT 0 +#define ICE_OEM_VER_PATCH_MASK (0xff << ICE_OEM_VER_PATCH_SHIFT) +#define ICE_OEM_VER_BUILD_SHIFT 8 +#define ICE_OEM_VER_BUILD_MASK (0xffff << ICE_OEM_VER_BUILD_SHIFT) +#define ICE_OEM_VER_SHIFT 24 +#define ICE_OEM_VER_MASK (0xff << ICE_OEM_VER_SHIFT) +#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 +#define ICE_SR_WORDS_IN_1KB 512 + +#endif /* _ICE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile index 5bcb2de75933..c48583e98ac1 100644 --- a/drivers/net/ethernet/intel/igb/Makefile +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 ################################################################################ # # Intel 82575 PCI-Express Ethernet Linux driver diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index c37cc8bccf47..dd9b6cac220d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2015 Intel Corporation. 
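The shift/mask pairs above pack the OEM version fields and the NVM version into single words. A standalone sketch of the decode follows; the defines are copied from above minus the ICE_ prefix, and both version values are hypothetical.

#include <stdio.h>

#define NVM_VER_LO_SHIFT    0
#define NVM_VER_LO_MASK     (0xff << NVM_VER_LO_SHIFT)
#define NVM_VER_HI_SHIFT    12
#define NVM_VER_HI_MASK     (0xf << NVM_VER_HI_SHIFT)
#define OEM_VER_PATCH_SHIFT 0
#define OEM_VER_PATCH_MASK  (0xff << OEM_VER_PATCH_SHIFT)
#define OEM_VER_BUILD_SHIFT 8
#define OEM_VER_BUILD_MASK  (0xffff << OEM_VER_BUILD_SHIFT)
#define OEM_VER_SHIFT       24
#define OEM_VER_MASK        (0xffu << OEM_VER_SHIFT)

int main(void)
{
        unsigned int nvm_ver = 0x2040;     /* hypothetical NVM version word */
        unsigned int oem_ver = 0x01002305; /* hypothetical OEM version dword */

        printf("NVM %x.%02x\n",
               (nvm_ver & NVM_VER_HI_MASK) >> NVM_VER_HI_SHIFT,
               (nvm_ver & NVM_VER_LO_MASK) >> NVM_VER_LO_SHIFT);
        printf("OEM %u.%u.%u\n",
               (oem_ver & OEM_VER_MASK) >> OEM_VER_SHIFT,
               (oem_ver & OEM_VER_BUILD_MASK) >> OEM_VER_BUILD_SHIFT,
               (oem_ver & OEM_VER_PATCH_MASK) >> OEM_VER_PATCH_SHIFT);
        /* prints: NVM 2.40 / OEM 1.35.5 */
        return 0;
}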
* diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index acf06051e111..e53ebe97d709 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 83cabff1e0ab..98534f765e0e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 6c9485ab4b57..ff835e1e853d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 07d48f2e3369..6f548247e6d8 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index b2964a2a60b1..56f015ccb206 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 5eff82678f0b..298afa0d9159 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index 90c8893c3eed..04d80c765aee 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index bffd58f7b2a1..ef42f1689b3b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index a62b08e1572e..4f0ecd28354d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. 
* diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index 3582c5cf8843..e4596f151cd4 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h index febc9cdb7391..dde68cd54a53 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.h +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 413025bdcb50..4ec61243da82 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2015 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 9b622b33bb5a..856d2cda0643 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 568c96842f28..e8fa8c6530e0 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 1c6b8d9176a8..8dbc399b345e 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * @@ -109,6 +110,7 @@ struct vf_data_storage { u16 pf_qos; u16 tx_rate; bool spoofchk_enabled; + bool trusted; }; /* Number of unicast MAC filters reserved for the PF in the RAR registers */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 606e6761758f..e77ba0d5866d 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c index 44b6a68f1af7..bebe43b3a836 100644 --- a/drivers/net/ethernet/intel/igb/igb_hwmon.c +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. 
* @@ -147,7 +148,7 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter, &adapter->hw.mac.thermal_sensor_data.sensor[offset]; igb_attr->hw = &adapter->hw; igb_attr->dev_attr.store = NULL; - igb_attr->dev_attr.attr.mode = S_IRUGO; + igb_attr->dev_attr.attr.mode = 0444; igb_attr->dev_attr.attr.name = igb_attr->name; sysfs_attr_init(&igb_attr->dev_attr.attr); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b88fae785369..c1c0bc30a16d 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * @@ -190,6 +191,8 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, static int igb_ndo_set_vf_bw(struct net_device *, int, int, int); static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, + bool setting); static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); static void igb_check_vf_rate_limit(struct igb_adapter *); @@ -774,8 +777,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg) if (!(~value) && (!reg || !(~readl(hw_addr)))) { struct net_device *netdev = igb->netdev; hw->hw_addr = NULL; - netif_device_detach(netdev); - netdev_err(netdev, "PCIe link lost, device now detached\n"); + netdev_err(netdev, "PCIe link lost\n"); } return value; @@ -2527,6 +2529,7 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, .ndo_set_vf_rate = igb_ndo_set_vf_bw, .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, + .ndo_set_vf_trust = igb_ndo_set_vf_trust, .ndo_get_vf_config = igb_ndo_get_vf_config, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = igb_netpoll, @@ -5747,7 +5750,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); - if (adapter->tstamp_config.tx_type & HWTSTAMP_TX_ON && + if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; @@ -6383,6 +6386,9 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf) /* By default spoof check is enabled for all VFs */ adapter->vf_data[vf].spoofchk_enabled = true; + /* By default VFs are not trusted */ + adapter->vf_data[vf].trusted = false; + return 0; } @@ -6940,13 +6946,13 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, } break; case E1000_VF_MAC_FILTER_ADD: - if (vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) { + if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && + !vf_data->trusted) { dev_warn(&pdev->dev, "VF %d requested MAC filter but is administratively denied\n", vf); return -EINVAL; } - if (!is_valid_ether_addr(addr)) { dev_warn(&pdev->dev, "VF %d attempted to set invalid MAC filter\n", @@ -6998,7 +7004,8 @@ static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) int ret = 0; if (!info) { - if (vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) { + if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && + !vf_data->trusted) { dev_warn(&pdev->dev, "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", vf); @@ -8934,6 +8941,22 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, 
return 0; } +static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + if (adapter->vf_data[vf].trusted == setting) + return 0; + + adapter->vf_data[vf].trusted = setting; + + dev_info(&adapter->pdev->dev, "VF %u is %strusted\n", + vf, setting ? "" : "not "); + return 0; +} + static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { @@ -8947,6 +8970,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev, ivi->vlan = adapter->vf_data[vf].pf_vlan; ivi->qos = adapter->vf_data[vf].pf_qos; ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; + ivi->trusted = adapter->vf_data[vf].trusted; return 0; } diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 0746b19ec6d3..7454b9895a65 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580 * * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com> diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile index 044b0ad5fcb9..efe29dae384a 100644 --- a/drivers/net/ethernet/intel/igbvf/Makefile +++ b/drivers/net/ethernet/intel/igbvf/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 ################################################################################ # # Intel(R) 82576 Virtual Function Linux driver diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h index f1789d192e24..04bcfec0641b 100644 --- a/drivers/net/ethernet/intel/igbvf/defines.h +++ b/drivers/net/ethernet/intel/igbvf/defines.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index a127688e83e6..ca39e3cccaeb 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index bf69f01f8467..f5bf248e22eb 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index c9a441632e9f..9195884096f8 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h index 30d58c4a444e..479b062fe9ee 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.h +++ b/drivers/net/ethernet/intel/igbvf/mbx.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: 
GPL-2.0 */ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 4214c1519a87..e2b7502f1953 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h index 86a7c120b574..614e52409f11 100644 --- a/drivers/net/ethernet/intel/igbvf/regs.h +++ b/drivers/net/ethernet/intel/igbvf/regs.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index 9577ccf4b26a..bfe8d8297b2e 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index d213eefb6169..193b50026246 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver diff --git a/drivers/net/ethernet/intel/ixgb/Makefile b/drivers/net/ethernet/intel/ixgb/Makefile index 0b20c5e62ffe..1b42dd554dd2 100644 --- a/drivers/net/ethernet/intel/ixgb/Makefile +++ b/drivers/net/ethernet/intel/ixgb/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 ################################################################################ # # Intel PRO/10GbE Linux driver diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h index 1180cd59b570..92022841755f 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel PRO/10GbE Linux driver diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h index 5680f64314b8..475297a810fe 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel PRO/10GbE Linux driver diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h index 0bd5d72e1af5..19f36d87ef61 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel PRO/10GbE Linux driver diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h index 32c1b302d791..24e849902d60 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h +++ 
b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel PRO/10GbE Linux driver diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h index 8fc905192231..b1710379192e 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel PRO/10GbE Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 8319465eb38d..4cd96c88cb5d 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 ################################################################################ # # Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index c1e3a0039ea5..4f08c712e58e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index a0ebd9ecf243..cb0fe5fedb33 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 4dfc81dbee4b..66a74f4651e8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 61188f343955..633be93f3dbb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 4d4c02366cb3..2b311382167a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver @@ -153,6 +154,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed); +#define IXGBE_FAILED_READ_RETRIES 5 #define IXGBE_FAILED_READ_REG 0xffffffffU #define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU #define IXGBE_FAILED_READ_CFG_WORD 0xffffU diff --git 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h index fc0a2dd52499..73b6362d4327 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index f94c7e82a30b..085130626330 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h index 3164f5453b8f..7edce607f901 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h index 90c370230e20..fa030f0abc18 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 221f15803480..c0e6ab42e0e1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -97,6 +97,7 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = { {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, {"tx_restart_queue", IXGBE_STAT(restart_queue)}, + {"rx_length_errors", IXGBE_STAT(stats.rlec)}, {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, @@ -3059,6 +3060,8 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, for (i = 0; i < reta_entries; i++) adapter->rss_indir_tbl[i] = indir[i]; + + ixgbe_store_reta(adapter); } /* Fill out the rss hash key */ @@ -3067,8 +3070,6 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, ixgbe_store_key(adapter); } - ixgbe_store_reta(adapter); - return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index 38385876effb..cf1919901514 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 93eacddb6704..68af127987bc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ 
-423,15 +423,21 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, const char aes_gcm_name[] = "rfc4106(gcm(aes))"; int key_len; - if (xs->aead) { - key_data = &xs->aead->alg_key[0]; - key_len = xs->aead->alg_key_len; - alg_name = xs->aead->alg_name; - } else { + if (!xs->aead) { netdev_err(dev, "Unsupported IPsec algorithm\n"); return -EINVAL; } + if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) { + netdev_err(dev, "IPsec offload requires %d bit authentication\n", + IXGBE_IPSEC_AUTH_BITS); + return -EINVAL; + } + + key_data = &xs->aead->alg_key[0]; + key_len = xs->aead->alg_key_len; + alg_name = xs->aead->alg_name; + if (strcmp(alg_name, aes_gcm_name)) { netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", aes_gcm_name); @@ -718,23 +724,10 @@ static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) return true; } -/** - * ixgbe_ipsec_free - called by xfrm garbage collections - * @xs: pointer to transformer state struct - * - * We don't have any garbage to collect, so we shouldn't bother - * implementing this function, but the XFRM code doesn't check for - * existence before calling the API callback. - **/ -static void ixgbe_ipsec_free(struct xfrm_state *xs) -{ -} - static const struct xfrmdev_ops ixgbe_xfrmdev_ops = { .xdo_dev_state_add = ixgbe_ipsec_add_sa, .xdo_dev_state_delete = ixgbe_ipsec_del_sa, .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok, - .xdo_dev_state_free = ixgbe_ipsec_free, }; /** @@ -781,13 +774,40 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC; - itd->flags = 0; if (xs->id.proto == IPPROTO_ESP) { + itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP | IXGBE_ADVTXD_TUCMD_L4T_TCP; if (first->protocol == htons(ETH_P_IP)) itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4; - itd->trailer_len = xs->props.trailer_len; + + /* The actual trailer length is authlen (16 bytes) plus + * 2 bytes for the proto and the padlen values, plus + * padlen bytes of padding. This ends up not the same + * as the static value found in xs->props.trailer_len (21). + * + * ... but if we're doing GSO, don't bother as the stack + * doesn't add a trailer for those. + */ + if (!skb_is_gso(first->skb)) { + /* The "correct" way to get the auth length would be + * to use + * authlen = crypto_aead_authsize(xs->data); + * but since we know we only have one size to worry + * about * we can let the compiler use the constant + * and save us a few CPU cycles. 
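To make the trailer arithmetic described above concrete: at the tail of an ESP packet the layout is padding, a pad-length byte, a next-header byte, then the ICV, so the pad-length byte sits authlen + 2 bytes from the end, matching the skb_copy_bits() offset in the code that follows. A standalone sketch; esp_trailer_len() and the toy frame are illustrative, not driver code.

#include <stdio.h>

#define AUTH_BITS 128   /* mirrors IXGBE_IPSEC_AUTH_BITS */

static int esp_trailer_len(const unsigned char *frame, int len)
{
        const int authlen = AUTH_BITS / 8;      /* 16-byte ICV */
        unsigned char padlen = frame[len - (authlen + 2)];

        return authlen + 2 + padlen;
}

int main(void)
{
        /* toy frame: 4 payload bytes, 2 pad bytes, pad len, next hdr, ICV */
        unsigned char frame[4 + 2 + 1 + 1 + 16] = { 0 };
        int len = sizeof(frame);

        frame[len - 18] = 2;    /* the pad-length byte */
        printf("trailer length: %d\n", esp_trailer_len(frame, len)); /* 20 */
        return 0;
}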
+ */ + const int authlen = IXGBE_IPSEC_AUTH_BITS / 8; + struct sk_buff *skb = first->skb; + u8 padlen; + int ret; + + ret = skb_copy_bits(skb, skb->len - (authlen + 2), + &padlen, 1); + if (unlikely(ret)) + return 0; + itd->trailer_len = authlen + 2 + padlen; + } } if (tsa->encrypt) itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN; @@ -909,8 +929,13 @@ void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) ixgbe_ipsec_clear_hw_tables(adapter); adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops; - adapter->netdev->features |= NETIF_F_HW_ESP; - adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP; + +#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \ + NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) + + adapter->netdev->features |= IXGBE_ESP_FEATURES; + adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES; return; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h index da3ce7849e85..4f099f516645 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver @@ -32,6 +33,7 @@ #define IXGBE_IPSEC_MAX_RX_IP_COUNT 128 #define IXGBE_IPSEC_BASE_RX_INDEX 0 #define IXGBE_IPSEC_BASE_TX_INDEX IXGBE_IPSEC_MAX_SA_COUNT +#define IXGBE_IPSEC_AUTH_BITS 128 #define IXGBE_RXTXIDX_IPS_EN 0x00000001 #define IXGBE_RXIDX_TBL_SHIFT 1 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 4242f0213e46..ed4cbe94c355 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -58,7 +58,6 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) return false; /* start at VMDq register offset for SR-IOV enabled setups */ - pool = 0; reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) { /* If we are greater than indices move to next pool */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0da5aa2c8aba..afadba99f7b8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -353,23 +353,32 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw) ixgbe_service_event_schedule(adapter); } -static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) +static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) { + u8 __iomem *reg_addr; u32 value; + int i; - /* The following check not only optimizes a bit by not - * performing a read on the status register when the - * register just read was a status register read that - * returned IXGBE_FAILED_READ_REG. It also blocks any - * potential recursion. + reg_addr = READ_ONCE(hw->hw_addr); + if (ixgbe_removed(reg_addr)) + return IXGBE_FAILED_READ_REG; + + /* Register read of 0xFFFFFFFF can indicate the adapter has been removed, + * so perform several status register reads to determine if the adapter + * has been removed.
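The retry scheme just described, reduced to its core: a handful of status reads separated by short delays distinguish a surprise-removed device, whose reads are stuck at all ones, from a register that legitimately reads as all ones. In this sketch read_reg32() and sleep_ms() are assumed stand-ins for readl() and mdelay(), and the constants mirror IXGBE_FAILED_READ_REG and IXGBE_FAILED_READ_RETRIES.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define FAILED_READ_REG 0xFFFFFFFFu
#define READ_RETRIES    5

/* Stand-in for an MMIO read; a real driver would readl() here. */
static uint32_t read_reg32(unsigned int offset)
{
        (void)offset;
        return FAILED_READ_REG;         /* simulate a removed device */
}

static void sleep_ms(unsigned int ms) { (void)ms; }

static bool adapter_removed(unsigned int status_reg)
{
        int i;

        for (i = 0; i < READ_RETRIES; i++) {
                if (read_reg32(status_reg) != FAILED_READ_REG)
                        return false;   /* sane value: device still present */
                sleep_ms(3);
        }
        return true;    /* status stuck at all ones: treat as removed */
}

int main(void)
{
        printf("removed: %d\n", adapter_removed(0x8)); /* illustrative offset */
        return 0;
}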
*/ - if (reg == IXGBE_STATUS) { - ixgbe_remove_adapter(hw); - return; + for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) { + value = readl(reg_addr + IXGBE_STATUS); + if (value != IXGBE_FAILED_READ_REG) + break; + mdelay(3); } - value = ixgbe_read_reg(hw, IXGBE_STATUS); + if (value == IXGBE_FAILED_READ_REG) ixgbe_remove_adapter(hw); + else + value = readl(reg_addr + reg); + return value; } /** @@ -415,7 +424,7 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) writes_completed: value = readl(reg_addr + reg); if (unlikely(value == IXGBE_FAILED_READ_REG)) - ixgbe_check_remove(hw, reg); + value = ixgbe_check_remove(hw, reg); return value; } @@ -1620,7 +1629,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, bi->dma = dma; bi->page = page; bi->page_offset = ixgbe_rx_offset(rx_ring); - bi->pagecnt_bias = 1; + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; rx_ring->rx_stats.alloc_rx_page++; return true; @@ -1888,6 +1898,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); + } else if (ring_uses_build_skb(rx_ring)) { + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + + dma_sync_single_range_for_cpu(rx_ring->dev, + IXGBE_CB(skb)->dma, + offset, + skb_headlen(skb), + DMA_FROM_DEVICE); } else { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; @@ -2022,8 +2040,8 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. */ - if (unlikely(!pagecnt_bias)) { - page_ref_add(page, USHRT_MAX); + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); rx_buffer->pagecnt_bias = USHRT_MAX; } @@ -7703,7 +7721,8 @@ static void ixgbe_service_task(struct work_struct *work) if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { ixgbe_ptp_overflow_check(adapter); - ixgbe_ptp_rx_hang(adapter); + if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER) + ixgbe_ptp_rx_hang(adapter); ixgbe_ptp_tx_hang(adapter); } @@ -7712,7 +7731,8 @@ static void ixgbe_service_task(struct work_struct *work) static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, - u8 *hdr_len) + u8 *hdr_len, + struct ixgbe_ipsec_tx_data *itd) { u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; @@ -7726,6 +7746,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, unsigned char *hdr; } l4; u32 paylen, l4_offset; + u32 fceof_saidx = 0; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -7751,13 +7772,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, if (ip.v4->version == 4) { unsigned char *csum_start = skb_checksum_start(skb); unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + int len = csum_start - trans_start; /* IP header will have to cancel out any data that - * is not a part of the outer IP header + * is not a part of the outer IP header, so set to + * a reverse csum if needed, else init check to 0. */ - ip.v4->check = csum_fold(csum_partial(trans_start, - csum_start - trans_start, - 0)); + ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? 
+ csum_fold(csum_partial(trans_start, + len, 0)) : 0; type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; ip.v4->tot_len = 0; @@ -7788,12 +7811,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; + fceof_saidx |= itd->sa_idx; + type_tucmd |= itd->flags | itd->trailer_len; + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = l4.hdr - ip.hdr; vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, mss_l4len_idx); return 1; @@ -7855,10 +7881,8 @@ no_csum: vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - if (first->tx_flags & IXGBE_TX_FLAGS_IPSEC) { - fceof_saidx |= itd->sa_idx; - type_tucmd |= itd->flags | itd->trailer_len; - } + fceof_saidx |= itd->sa_idx; + type_tucmd |= itd->flags | itd->trailer_len; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0); } @@ -8486,7 +8510,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif - tso = ixgbe_tso(tx_ring, first, &hdr_len); + tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx); if (tso < 0) goto out_drop; else if (!tso) @@ -9895,15 +9919,15 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, /* We can only support IPV4 TSO in tunnels if we can mangle the * inner IP ID field, so strip TSO if MANGLEID is not supported. + * IPsec offload sets skb->encapsulation but still can handle + * the TSO, so it's the exception.
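The reworked ixgbe_features_check() logic shown below reduces to a small rule: strip TSO for encapsulated frames that cannot mangle the inner IP ID, unless the frame carries IPsec offload state. A boolean sketch of that rule; keep_tso() and its flag parameters are illustrative stand-ins for the skb and feature tests.

#include <stdbool.h>
#include <stdio.h>

static bool keep_tso(bool encapsulated, bool can_mangle_ipid, bool has_ipsec_sp)
{
        if (encapsulated && !can_mangle_ipid)
                return has_ipsec_sp;    /* IPsec offload is the exception */
        return true;
}

int main(void)
{
        printf("%d %d %d\n",
               keep_tso(false, false, false),   /* 1: not encapsulated */
               keep_tso(true, false, false),    /* 0: tunnel, no MANGLEID */
               keep_tso(true, false, true));    /* 1: tunnel, but IPsec */
        return 0;
}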
*/ - if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) - features &= ~NETIF_F_TSO; - -#ifdef CONFIG_XFRM_OFFLOAD - /* IPsec offload doesn't get along well with others *yet* */ - if (skb->sp) - features &= ~(NETIF_F_TSO | NETIF_F_HW_CSUM); + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { +#ifdef CONFIG_XFRM + if (!skb->sp) #endif + features &= ~NETIF_F_TSO; + } return features; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index 811cb4f64a5b..c4628b663590 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index 538a1c5475b6..72446644f9fa 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel 10 Gigabit PCI Express Linux drive diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index b0cac961df3b..d6a7e77348c5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 27a70a52f3c9..008aa073a679 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -831,7 +831,11 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); /* force drop enable for all VF Rx queues */ - ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); + reg = IXGBE_QDE_ENABLE; + if (adapter->vfinfo[vf].pf_vlan) + reg |= IXGBE_QDE_HIDE_VLAN; + + ixgbe_write_qde(adapter, vf, reg); /* enable receive for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index cf67b9b18ed7..e30d1f07e891 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c index ef6df3d6437e..24766e125592 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c @@ -146,7 +146,7 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, &adapter->hw.mac.thermal_sensor_data.sensor[offset]; ixgbe_attr->hw = &adapter->hw; ixgbe_attr->dev_attr.store = NULL; - ixgbe_attr->dev_attr.attr.mode = S_IRUGO; + ixgbe_attr->dev_attr.attr.mode = 0444; ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name; sysfs_attr_init(&ixgbe_attr->dev_attr.attr); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index ca45359686d3..2daa81e6e9b2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h index e21cd48491d3..182d640e9f7a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* * * Intel 10 Gigabit PCI Express Linux driver diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index f470d0204771..3123267dfba9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1847,9 +1847,9 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, (IXGBE_CS4227_EDC_MODE_SR << 1)); if (setup_linear) - reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; else - reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; + reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; ret_val = hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile index 4ce4c97ef5ad..bb47814cfa90 100644 --- a/drivers/net/ethernet/intel/ixgbevf/Makefile +++ b/drivers/net/ethernet/intel/ixgbevf/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 ################################################################################ # # Intel 82599 Virtual Function driver diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 8617cae2f801..71c828842b11 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 82599 Virtual Function driver diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 4400e49090b4..8e7d6c6f5c92 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2018 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -82,6 +82,7 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = { #define IXGBEVF_QUEUE_STATS_LEN ( \ (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \ + ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \ ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \ (sizeof(struct ixgbevf_stats) / sizeof(u64))) #define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats) @@ -94,6 +95,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { #define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) +static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IXGBEVF_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings) + static int ixgbevf_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { @@ -241,6 +249,8 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN; } static void ixgbevf_get_ringparam(struct net_device *netdev, @@ -260,7 +270,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev, struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; u32 new_rx_count, new_tx_count; - int i, err = 0; + int i, j, err = 0; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; @@ -284,15 +294,19 @@ static int ixgbevf_set_ringparam(struct net_device *netdev, if (!netif_running(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++) adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_xdp_queues; i++) + adapter->xdp_ring[i]->count = new_tx_count; for (i = 0; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->count = new_rx_count; adapter->tx_ring_count = new_tx_count; + adapter->xdp_ring_count = new_tx_count; adapter->rx_ring_count = new_rx_count; goto clear_reset; } if (new_tx_count != adapter->tx_ring_count) { - tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring)); + tx_ring = vmalloc((adapter->num_tx_queues + + adapter->num_xdp_queues) * sizeof(*tx_ring)); if (!tx_ring) { err = -ENOMEM; goto clear_reset; @@ -315,6 +329,24 @@ static int ixgbevf_set_ringparam(struct net_device *netdev, goto clear_reset; } } + + for (j = 0; j < adapter->num_xdp_queues; i++, j++) { + /* clone ring and setup updated count */ + tx_ring[i] = *adapter->xdp_ring[j]; + tx_ring[i].count = new_tx_count; + err = ixgbevf_setup_tx_resources(&tx_ring[i]); + if (err) { + while (i) { + i--; + ixgbevf_free_tx_resources(&tx_ring[i]); + } + + vfree(tx_ring); + tx_ring = NULL; + + goto clear_reset; + } + } } if (new_rx_count != adapter->rx_ring_count) { @@ -327,8 +359,13 @@ static int ixgbevf_set_ringparam(struct net_device *netdev, for (i = 0; i < adapter->num_rx_queues; i++) { /* clone ring and setup updated count */ rx_ring[i] = *adapter->rx_ring[i]; + + /* Clear copied XDP RX-queue info */ + memset(&rx_ring[i].xdp_rxq, 0, + sizeof(rx_ring[i].xdp_rxq)); + rx_ring[i].count = new_rx_count; - err = ixgbevf_setup_rx_resources(&rx_ring[i]); + err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); if (err) { while (i) { i--; @@ -354,6 +391,12 @@ static int ixgbevf_set_ringparam(struct net_device *netdev, } 
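ixgbevf_set_ringparam() applies a clone-and-swap discipline: every replacement ring (Tx, XDP and Rx alike) is allocated at the new size first, and the live rings are freed and overwritten only once all allocations have succeeded. A minimal sketch of that pattern, with hypothetical example_* names; the setup and teardown callbacks stand in for ixgbevf_setup_tx_resources() and ixgbevf_free_tx_resources():

#include <linux/errno.h>
#include <linux/vmalloc.h>

struct example_ring {
	int count;		/* descriptor count, plus other ring state */
};

static int example_resize(struct example_ring **live, int n, int new_count,
			  int (*setup)(struct example_ring *),
			  void (*teardown)(struct example_ring *))
{
	struct example_ring *shadow;
	int i, err = 0;

	shadow = vmalloc(n * sizeof(*shadow));
	if (!shadow)
		return -ENOMEM;

	/* stage one: build every replacement before touching live rings */
	for (i = 0; i < n; i++) {
		shadow[i] = *live[i];		/* clone ring state */
		shadow[i].count = new_count;
		err = setup(&shadow[i]);
		if (err) {
			while (i--)		/* unwind what succeeded */
				teardown(&shadow[i]);
			goto out;
		}
	}

	/* stage two: all allocations succeeded, commit the swap */
	for (i = 0; i < n; i++) {
		teardown(live[i]);		/* release old resources */
		*live[i] = shadow[i];
	}
out:
	vfree(shadow);
	return err;
}
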
adapter->tx_ring_count = new_tx_count; + for (j = 0; j < adapter->num_xdp_queues; i++, j++) { + ixgbevf_free_tx_resources(adapter->xdp_ring[j]); + *adapter->xdp_ring[j] = tx_ring[i]; + } + adapter->xdp_ring_count = new_tx_count; + vfree(tx_ring); tx_ring = NULL; } @@ -376,7 +419,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev, clear_reset: /* free Tx resources if Rx error is encountered */ if (tx_ring) { - for (i = 0; i < adapter->num_tx_queues; i++) + for (i = 0; + i < adapter->num_tx_queues + adapter->num_xdp_queues; i++) ixgbevf_free_tx_resources(&tx_ring[i]); vfree(tx_ring); } @@ -392,6 +436,8 @@ static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset) return IXGBEVF_TEST_LEN; case ETH_SS_STATS: return IXGBEVF_STATS_LEN; + case ETH_SS_PRIV_FLAGS: + return IXGBEVF_PRIV_FLAGS_STR_LEN; default: return -EINVAL; } @@ -446,6 +492,23 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, i += 2; } + /* populate XDP queue data */ + for (j = 0; j < adapter->num_xdp_queues; j++) { + ring = adapter->xdp_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; + } + /* populate Rx queue data */ for (j = 0; j < adapter->num_rx_queues; j++) { ring = adapter->rx_ring[j]; @@ -489,6 +552,12 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, sprintf(p, "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; } + for (i = 0; i < adapter->num_xdp_queues; i++) { + sprintf(p, "xdp_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "xdp_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } for (i = 0; i < adapter->num_rx_queues; i++) { sprintf(p, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; @@ -496,6 +565,10 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, p += ETH_GSTRING_LEN; } break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, ixgbevf_priv_flags_strings, + IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; } } @@ -888,6 +961,37 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, return err; } +static u32 ixgbevf_get_priv_flags(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX) + priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IXGBEVF_FLAGS_LEGACY_RX; + if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX) + flags |= IXGBEVF_FLAGS_LEGACY_RX; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + ixgbevf_reinit_locked(adapter); + } + + return 0; +} + static const struct ethtool_ops ixgbevf_ethtool_ops = { .get_drvinfo = ixgbevf_get_drvinfo, .get_regs_len = ixgbevf_get_regs_len, @@ -909,6 +1013,8 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = { .get_rxfh_key_size = ixgbevf_get_rxfh_key_size, .get_rxfh = ixgbevf_get_rxfh, .get_link_ksettings = ixgbevf_get_link_ksettings, + .get_priv_flags = ixgbevf_get_priv_flags, + .set_priv_flags = ixgbevf_set_priv_flags, }; void ixgbevf_set_ethtool_ops(struct net_device *netdev) diff --git 
a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index f6952425c87d..447ce1d5e0e3 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -1,7 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2018 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -34,6 +35,7 @@ #include <linux/netdevice.h> #include <linux/if_vlan.h> #include <linux/u64_stats_sync.h> +#include <net/xdp.h> #include "vf.h" @@ -50,7 +52,11 @@ struct ixgbevf_tx_buffer { union ixgbe_adv_tx_desc *next_to_watch; unsigned long time_stamp; - struct sk_buff *skb; + union { + struct sk_buff *skb; + /* XDP uses address ptr on irq_clean */ + void *data; + }; unsigned int bytecount; unsigned short gso_segs; __be16 protocol; @@ -89,20 +95,25 @@ struct ixgbevf_rx_queue_stats { }; enum ixgbevf_ring_state_t { + __IXGBEVF_RX_3K_BUFFER, + __IXGBEVF_RX_BUILD_SKB_ENABLED, __IXGBEVF_TX_DETECT_HANG, __IXGBEVF_HANG_CHECK_ARMED, + __IXGBEVF_TX_XDP_RING, }; -#define check_for_tx_hang(ring) \ - test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) -#define set_check_for_tx_hang(ring) \ - set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) -#define clear_check_for_tx_hang(ring) \ - clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) +#define ring_is_xdp(ring) \ + test_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state) +#define set_ring_xdp(ring) \ + set_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state) +#define clear_ring_xdp(ring) \ + clear_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state) struct ixgbevf_ring { struct ixgbevf_ring *next; + struct ixgbevf_q_vector *q_vector; /* backpointer to q_vector */ struct net_device *netdev; + struct bpf_prog *xdp_prog; struct device *dev; void *desc; /* descriptor ring memory */ dma_addr_t dma; /* phys. address of descriptor ring */ @@ -123,7 +134,7 @@ struct ixgbevf_ring { struct ixgbevf_tx_queue_stats tx_stats; struct ixgbevf_rx_queue_stats rx_stats; }; - + struct xdp_rxq_info xdp_rxq; u64 hw_csum_rx_error; u8 __iomem *tail; struct sk_buff *skb; @@ -133,13 +144,14 @@ struct ixgbevf_ring { */ u16 reg_idx; int queue_index; /* needed for multiqueue queue management */ -}; +} ____cacheline_internodealigned_in_smp; /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ #define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES +#define MAX_XDP_QUEUES IXGBE_VF_MAX_TX_QUEUES #define IXGBEVF_MAX_RSS_QUEUES 2 #define IXGBEVF_82599_RETA_SIZE 128 /* 128 entries */ #define IXGBEVF_X550_VFRETA_SIZE 64 /* 64 entries */ @@ -156,12 +168,20 @@ struct ixgbevf_ring { /* Supported Rx Buffer Sizes */ #define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ #define IXGBEVF_RXBUFFER_2048 2048 +#define IXGBEVF_RXBUFFER_3072 3072 #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 -#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +#define IXGBEVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#if (PAGE_SIZE < 8192) +#define IXGBEVF_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IXGBEVF_RXBUFFER_2048) - IXGBEVF_SKB_PAD) +#else +#define IXGBEVF_MAX_FRAME_BUILD_SKB IXGBEVF_RXBUFFER_2048 +#endif + #define IXGBE_TX_FLAGS_CSUM BIT(0) #define IXGBE_TX_FLAGS_VLAN BIT(1) #define IXGBE_TX_FLAGS_TSO BIT(2) @@ -170,6 +190,50 @@ struct ixgbevf_ring { #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 +#define ring_uses_large_buffer(ring) \ + test_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state) +#define set_ring_uses_large_buffer(ring) \ + set_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state) + +#define ring_uses_build_skb(ring) \ + test_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state) +#define set_ring_build_skb_enabled(ring) \ + set_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state) +#define clear_ring_build_skb_enabled(ring) \ + clear_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state) + +static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IXGBEVF_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IXGBEVF_MAX_FRAME_BUILD_SKB; +#endif + return IXGBEVF_RXBUFFER_2048; +} + +static inline unsigned int ixgbevf_rx_pg_order(struct ixgbevf_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +#define ixgbevf_rx_pg_size(_ring) (PAGE_SIZE << ixgbevf_rx_pg_order(_ring)) + +#define check_for_tx_hang(ring) \ + test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) + struct ixgbevf_ring_container { struct ixgbevf_ring *ring; /* pointer to linked list of rings */ unsigned int total_bytes; /* total bytes processed this int */ @@ -194,7 +258,11 @@ struct ixgbevf_q_vector { u16 itr; /* Interrupt throttle rate written to EITR */ struct napi_struct napi; struct ixgbevf_ring_container rx, tx; + struct rcu_head rcu; /* to avoid race with update stats on free */ char name[IFNAMSIZ + 9]; + + /* for dynamic allocation of rings associated with this q_vector */ + struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp; #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int state; #define IXGBEVF_QV_STATE_IDLE 0 @@ -284,6 +352,10 @@ struct ixgbevf_adapter { u32 eims_enable_mask; u32 eims_other; + /* XDP */ + int num_xdp_queues; + struct ixgbevf_ring *xdp_ring[MAX_XDP_QUEUES]; + /* TX */ int num_tx_queues; struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */ @@ -304,6 +376,7 @@ struct 
ixgbevf_adapter { /* OS defined structs */ struct net_device *netdev; + struct bpf_prog *xdp_prog; struct pci_dev *pdev; /* structs defined in ixgbe_vf.h */ @@ -317,6 +390,7 @@ struct ixgbevf_adapter { unsigned long state; u64 tx_busy; unsigned int tx_ring_count; + unsigned int xdp_ring_count; unsigned int rx_ring_count; u8 __iomem *io_addr; /* Mainly for iounmap use */ @@ -331,6 +405,8 @@ struct ixgbevf_adapter { u32 *rss_key; u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; + u32 flags; +#define IXGBEVF_FLAGS_LEGACY_RX BIT(1) }; enum ixbgevf_state_t { @@ -388,7 +464,8 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter); void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); void ixgbevf_reset(struct ixgbevf_adapter *adapter); void ixgbevf_set_ethtool_ops(struct net_device *netdev); -int ixgbevf_setup_rx_resources(struct ixgbevf_ring *); +int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *rx_ring); int ixgbevf_setup_tx_resources(struct ixgbevf_ring *); void ixgbevf_free_rx_resources(struct ixgbevf_ring *); void ixgbevf_free_tx_resources(struct ixgbevf_ring *); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 9b3d43d28106..3d9033f26eff 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2018 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -50,6 +50,9 @@ #include <linux/if_vlan.h> #include <linux/prefetch.h> #include <net/mpls.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> +#include <linux/atomic.h> #include "ixgbevf.h" @@ -130,6 +133,9 @@ static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter) static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter); static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); +static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer); +static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *old_buff); static void ixgbevf_remove_adapter(struct ixgbe_hw *hw) { @@ -318,7 +324,10 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, total_packets += tx_buffer->gso_segs; /* free the skb */ - napi_consume_skb(tx_buffer->skb, napi_budget); + if (ring_is_xdp(tx_ring)) + page_frag_free(tx_buffer->data); + else + napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -382,7 +391,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, eop_desc = tx_ring->tx_buffer_info[i].next_to_watch; - pr_err("Detected Tx Unit Hang\n" + pr_err("Detected Tx Unit Hang%s\n" " Tx Queue <%d>\n" " TDH, TDT <%x>, <%x>\n" " next_to_use <%x>\n" @@ -392,6 +401,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, " eop_desc->wb.status <%x>\n" " time_stamp <%lx>\n" " jiffies <%lx>\n", + ring_is_xdp(tx_ring) ? 
" XDP" : "", tx_ring->queue_index, IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)), IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)), @@ -399,7 +409,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, eop_desc, (eop_desc ? eop_desc->wb.status : 0), tx_ring->tx_buffer_info[i].time_stamp, jiffies); - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + if (!ring_is_xdp(tx_ring)) + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); /* schedule immediate reset if we believe we hung */ ixgbevf_tx_timeout_reset(adapter); @@ -407,6 +419,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, return true; } + if (ring_is_xdp(tx_ring)) + return !!budget; + #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { @@ -527,6 +542,51 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, skb->protocol = eth_type_trans(skb, rx_ring->netdev); } +static +struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring, + const unsigned int size) +{ + struct ixgbevf_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ + if (ixgbevf_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + ixgbevf_reuse_rx_page(rx_ring, rx_buffer); + } else { + if (IS_ERR(skb)) + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + ixgbevf_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBEVF_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + /** * ixgbevf_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed @@ -554,32 +614,38 @@ static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring, return true; } +static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
IXGBEVF_SKB_PAD : 0; +} + static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *bi) { struct page *page = bi->page; - dma_addr_t dma = bi->dma; + dma_addr_t dma; /* since we are recycling buffers we should seldom need to alloc */ if (likely(page)) return true; /* alloc new page for storage */ - page = dev_alloc_page(); + page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_rx_page_failed++; return false; } /* map page for use */ - dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE, + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + ixgbevf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { - __free_page(page); + __free_pages(page, ixgbevf_rx_pg_order(rx_ring)); rx_ring->rx_stats.alloc_rx_page_failed++; return false; @@ -587,7 +653,7 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, bi->dma = dma; bi->page = page; - bi->page_offset = 0; + bi->page_offset = ixgbevf_rx_offset(rx_ring); bi->pagecnt_bias = 1; rx_ring->rx_stats.alloc_rx_page++; @@ -621,7 +687,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, - IXGBEVF_RX_BUFSZ, + ixgbevf_rx_bufsz(rx_ring), DMA_FROM_DEVICE); /* Refresh the desc even if pkt_addr didn't change @@ -685,6 +751,10 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + /* verify that the packet does not have any known errors */ if (unlikely(ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { @@ -734,11 +804,10 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } -static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer, - struct page *page, - const unsigned int truesize) +static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer) { - unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--; + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; /* avoid re-using remote pages */ if (unlikely(ixgbevf_page_is_reserved(page))) @@ -746,17 +815,13 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer, #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely(page_ref_count(page) != pagecnt_bias)) + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) return false; - - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ; - #else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; +#define IXGBEVF_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048) - if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ)) + if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET) return false; #endif @@ -765,7 +830,7 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer, * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. 
*/ - if (unlikely(pagecnt_bias == 1)) { + if (unlikely(!pagecnt_bias)) { page_ref_add(page, USHRT_MAX); rx_buffer->pagecnt_bias = USHRT_MAX; } @@ -777,136 +842,268 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer, * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add - * @rx_desc: descriptor containing length of buffer written by hardware * @skb: sk_buff to place the data into + * @size: size of buffer to be added * * This function will add the data contained in rx_buffer->page to the skb. - * This is done either through a direct copy if the data in the buffer is - * less than the skb header size, otherwise it will just attach the page as - * a frag to the skb. - * - * The function will then update the page offset if necessary and return - * true if the buffer can be reused by the adapter. **/ -static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, +static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *rx_buffer, - u16 size, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int size) { - struct page *page = rx_buffer->page; - unsigned char *va = page_address(page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) - unsigned int truesize = IXGBEVF_RX_BUFSZ; + unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) : + SKB_DATA_ALIGN(size); #endif - unsigned int pull_len; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} - if (unlikely(skb_is_nonlinear(skb))) - goto add_tail_frag;
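Both ixgbevf_add_rx_frag() above and the skb-construction paths below recycle receive pages instead of freeing them: on PAGE_SIZE < 8192 systems each page is split in half and page_offset ping-pongs between the halves with an XOR, while on larger pages the offset walks forward until the last usable chunk is consumed. A condensed sketch of that arithmetic, where ex_buf is a hypothetical stand-in for the driver's rx_buffer bookkeeping and last_offset plays the role of IXGBEVF_LAST_OFFSET:

#include <linux/mm.h>

struct ex_buf {
	struct page *page;
	unsigned int page_offset;
	unsigned short pagecnt_bias;
};

/* advance to the other half-page (order-0) or the next chunk (large pages) */
static void ex_flip(struct ex_buf *buf, unsigned int truesize)
{
#if (PAGE_SIZE < 8192)
	buf->page_offset ^= truesize;
#else
	buf->page_offset += truesize;
#endif
}

/* reusable only while this CPU is the sole owner and space remains */
static bool ex_can_reuse(struct ex_buf *buf, unsigned int last_offset)
{
#if (PAGE_SIZE < 8192)
	return (page_ref_count(buf->page) - buf->pagecnt_bias) <= 1;
#else
	return buf->page_offset <= last_offset;
#endif
}

+static +struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union ixgbe_adv_rx_desc *rx_desc) +{ + unsigned int size = xdp->data_end - xdp->data; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + unsigned int headlen; + struct sk_buff *skb; - if (likely(size <= IXGBEVF_RX_HDR_SIZE)) { - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + /* prefetch first cache line of first page */ + prefetch(xdp->data); +#if L1_CACHE_BYTES < 128 + prefetch(xdp->data + L1_CACHE_BYTES); +#endif + /* Note, we get here by enabling legacy-rx via: + * + * ethtool --set-priv-flags <dev> legacy-rx on + * + * In this mode, we currently get 0 extra XDP headroom as + * opposed to having legacy-rx off, where we process XDP + * packets going to stack via ixgbevf_build_skb(). + * + * For ixgbevf_construct_skb() mode it means that the + * xdp->data_meta will always point to xdp->data, since + * the helper cannot expand the head. Should this ever + * change in future for legacy-rx mode on, then let's also + * add xdp->data_meta handling here.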
+ */ - /* page is not reserved, we can reuse buffer as is */ - if (likely(!ixgbevf_page_is_reserved(page))) - return true; + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; - /* this page cannot be reused so discard it */ - return false; - } - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IXGBEVF_RX_HDR_SIZE) + headlen = eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + memcpy(__skb_put(skb, headlen), xdp->data, + ALIGN(headlen, sizeof(long))); /* update all of the pointers */ - va += pull_len; - size -= pull_len; + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (xdp->data + headlen) - + page_address(rx_buffer->page), + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + rx_buffer->pagecnt_bias++; + } -add_tail_frag: - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - (unsigned long)va & ~PAGE_MASK, size, truesize); + return skb; +} + +static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, + u32 qmask) +{ + struct ixgbe_hw *hw = &adapter->hw; - return ixgbevf_can_reuse_rx_page(rx_buffer, page, truesize); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); } -static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) +static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union ixgbe_adv_rx_desc *rx_desc) { - struct ixgbevf_rx_buffer *rx_buffer; - struct page *page; - u16 size = le16_to_cpu(rx_desc->wb.upper.length); + unsigned int metasize = xdp->data - xdp->data_meta; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + struct sk_buff *skb; - rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - page = rx_buffer->page; - prefetchw(page); + /* Prefetch first cache line of first page. If xdp->data_meta + * is unused, this points to xdp->data, otherwise, we likely + * have a consumer accessing first few bytes of meta data, + * and then actual data. 
+ */ + prefetch(xdp->data_meta); +#if L1_CACHE_BYTES < 128 + prefetch(xdp->data_meta + L1_CACHE_BYTES); +#endif - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - size, - DMA_FROM_DEVICE); + /* build an skb around the page buffer */ + skb = build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; - if (likely(!skb)) { - void *page_addr = page_address(page) + - rx_buffer->page_offset; + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); + if (metasize) + skb_metadata_set(skb, metasize); - /* prefetch first cache line of first page */ - prefetch(page_addr); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; #endif - /* allocate a skb to store the frags */ - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - IXGBEVF_RX_HDR_SIZE); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - return NULL; - } + return skb; +} - /* we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); - } +#define IXGBEVF_XDP_PASS 0 +#define IXGBEVF_XDP_CONSUMED 1 +#define IXGBEVF_XDP_TX 2 - /* pull page into skb */ - if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) { - /* hand second half of page back to the ring */ - ixgbevf_reuse_rx_page(rx_ring, rx_buffer); - } else { - /* We are not reusing the buffer so unmap it and free - * any references we are holding to it - */ - dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, - PAGE_SIZE, DMA_FROM_DEVICE, - IXGBEVF_RX_DMA_ATTR); - __page_frag_cache_drain(page, rx_buffer->pagecnt_bias); - } +static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, + struct xdp_buff *xdp) +{ + struct ixgbevf_tx_buffer *tx_buffer; + union ixgbe_adv_tx_desc *tx_desc; + u32 len, cmd_type; + dma_addr_t dma; + u16 i; - /* clear contents of buffer_info */ - rx_buffer->dma = 0; - rx_buffer->page = NULL; + len = xdp->data_end - xdp->data; - return skb; + if (unlikely(!ixgbevf_desc_unused(ring))) + return IXGBEVF_XDP_CONSUMED; + + dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) + return IXGBEVF_XDP_CONSUMED; + + /* record the location of the first descriptor for this packet */ + tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; + tx_buffer->bytecount = len; + tx_buffer->gso_segs = 1; + tx_buffer->protocol = 0; + + i = ring->next_to_use; + tx_desc = IXGBEVF_TX_DESC(ring, i); + + dma_unmap_len_set(tx_buffer, len, len); + dma_unmap_addr_set(tx_buffer, dma, dma); + tx_buffer->data = xdp->data; + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + /* put descriptor type bits */ + cmd_type = IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_DEXT | + IXGBE_ADVTXD_DCMD_IFCS; + cmd_type |= len | IXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) | + IXGBE_ADVTXD_CC); + + /* Avoid any potential race with cleanup */ + smp_wmb(); + + /* set next_to_watch value indicating a packet is present */ + i++; + if (i == ring->count) + i = 0; + + tx_buffer->next_to_watch = tx_desc; + ring->next_to_use = i; + + return IXGBEVF_XDP_TX; } -static inline void 
ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, - u32 qmask) +static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *rx_ring, + struct xdp_buff *xdp) { - struct ixgbe_hw *hw = &adapter->hw; + int result = IXGBEVF_XDP_PASS; + struct ixgbevf_ring *xdp_ring; + struct bpf_prog *xdp_prog; + u32 act; - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + if (!xdp_prog) + goto xdp_out; + + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdp_ring = adapter->xdp_ring[rx_ring->queue_index]; + result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp); + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fallthrough */ + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping packet */ + case XDP_DROP: + result = IXGBEVF_XDP_CONSUMED; + break; + } +xdp_out: + rcu_read_unlock(); + return ERR_PTR(-result); +} + +static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *rx_buffer, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; + + rx_buffer->page_offset ^= truesize; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) : + SKB_DATA_ALIGN(size); + + rx_buffer->page_offset += truesize; +#endif } static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, @@ -914,11 +1111,18 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct ixgbevf_adapter *adapter = q_vector->adapter; u16 cleaned_count = ixgbevf_desc_unused(rx_ring); struct sk_buff *skb = rx_ring->skb; + bool xdp_xmit = false; + struct xdp_buff xdp; + + xdp.rxq = &rx_ring->xdp_rxq; while (likely(total_rx_packets < budget)) { + struct ixgbevf_rx_buffer *rx_buffer; union ixgbe_adv_rx_desc *rx_desc; + unsigned int size; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { @@ -927,8 +1131,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, } rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); - - if (!rx_desc->wb.upper.length) + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) break; /* This memory barrier is needed to keep us from reading @@ -937,15 +1141,48 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, */ rmb(); + rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size); + /* retrieve a buffer from the ring */ - skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb); + if (!skb) { + xdp.data = page_address(rx_buffer->page) + + rx_buffer->page_offset; + xdp.data_meta = xdp.data; + xdp.data_hard_start = xdp.data - + ixgbevf_rx_offset(rx_ring); + xdp.data_end = xdp.data + size; + + skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp); + } + + if (IS_ERR(skb)) { + if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) { + xdp_xmit = true; + ixgbevf_rx_buffer_flip(rx_ring, rx_buffer, + size); + } else { + rx_buffer->pagecnt_bias++; + } + total_rx_packets++; + total_rx_bytes += size; + } else if (skb) { + ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size); + } else if (ring_uses_build_skb(rx_ring)) { + skb = ixgbevf_build_skb(rx_ring, rx_buffer, + &xdp, rx_desc); + } else { + skb = ixgbevf_construct_skb(rx_ring, rx_buffer, + &xdp, rx_desc); + } /* exit if we failed to retrieve a buffer */ if (!skb) { 
rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; break; } + ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb); cleaned_count++; /* fetch next buffer in frame if non-eop */ @@ -987,6 +1224,17 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, /* place incomplete frames back on ring for completion */ rx_ring->skb = skb; + if (xdp_xmit) { + struct ixgbevf_ring *xdp_ring = + adapter->xdp_ring[rx_ring->queue_index]; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. + */ + wmb(); + ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use); + } + u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -1260,85 +1508,6 @@ static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) return IRQ_HANDLED; } -static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, - int r_idx) -{ - struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; - - a->rx_ring[r_idx]->next = q_vector->rx.ring; - q_vector->rx.ring = a->rx_ring[r_idx]; - q_vector->rx.count++; -} - -static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, - int t_idx) -{ - struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; - - a->tx_ring[t_idx]->next = q_vector->tx.ring; - q_vector->tx.ring = a->tx_ring[t_idx]; - q_vector->tx.count++; -} - -/** - * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors - * @adapter: board private structure to initialize - * - * This function maps descriptor rings to the queue-specific vectors - * we were allotted through the MSI-X enabling code. Ideally, we'd have - * one vector per ring/queue, but on a constrained vector budget, we - * group the rings as "efficiently" as possible. You would add new - * mapping configurations in here. - **/ -static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) -{ - int q_vectors; - int v_start = 0; - int rxr_idx = 0, txr_idx = 0; - int rxr_remaining = adapter->num_rx_queues; - int txr_remaining = adapter->num_tx_queues; - int i, j; - int rqpv, tqpv; - - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - /* The ideal configuration... - * We have enough vectors to map one per queue. - */ - if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { - for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) - map_vector_to_rxq(adapter, v_start, rxr_idx); - - for (; txr_idx < txr_remaining; v_start++, txr_idx++) - map_vector_to_txq(adapter, v_start, txr_idx); - return 0; - } - - /* If we don't have enough vectors for a 1-to-1 - * mapping, we'll have to group them so there are - * multiple queues per vector. - */ - /* Re-adjusting *qpv takes care of the remainder. 
*/ - for (i = v_start; i < q_vectors; i++) { - rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); - for (j = 0; j < rqpv; j++) { - map_vector_to_rxq(adapter, i, rxr_idx); - rxr_idx++; - rxr_remaining--; - } - } - for (i = v_start; i < q_vectors; i++) { - tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); - for (j = 0; j < tqpv; j++) { - map_vector_to_txq(adapter, i, txr_idx); - txr_idx++; - txr_remaining--; - } - } - - return 0; -} - /** * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts * @adapter: board private structure @@ -1411,20 +1580,6 @@ free_queue_irqs: return err; } -static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) -{ - int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - for (i = 0; i < q_vectors; i++) { - struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; - - q_vector->rx.ring = NULL; - q_vector->tx.ring = NULL; - q_vector->rx.count = 0; - q_vector->tx.count = 0; - } -} - /** * ixgbevf_request_irq - initialize interrupts * @adapter: board private structure @@ -1464,8 +1619,6 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) free_irq(adapter->msix_entries[i].vector, adapter->q_vector[i]); } - - ixgbevf_reset_q_vectors(adapter); } /** @@ -1583,11 +1736,14 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) /* Setup the HW Tx Head and Tail descriptor pointers */ for (i = 0; i < adapter->num_tx_queues; i++) ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]); } #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 -static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) +static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *ring, int index) { struct ixgbe_hw *hw = &adapter->hw; u32 srrctl; @@ -1595,7 +1751,10 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) srrctl = IXGBE_SRRCTL_DROP_EN; srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; - srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + if (ring_uses_large_buffer(ring)) + srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); @@ -1767,10 +1926,21 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, ring->next_to_use = 0; ring->next_to_alloc = 0; - ixgbevf_configure_srrctl(adapter, reg_idx); + ixgbevf_configure_srrctl(adapter, ring, reg_idx); + + /* RXDCTL.RLPML does not work on 82599 */ + if (adapter->hw.mac.type != ixgbe_mac_82599_vf) { + rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | + IXGBE_RXDCTL_RLPML_EN); - /* allow any size packet since we can handle overflow */ - rxdctl &= ~IXGBE_RXDCTL_RLPML_EN; +#if (PAGE_SIZE < 8192) + /* Limit the maximum frame size so we don't overrun the skb */ + if (ring_uses_build_skb(ring) && + !ring_uses_large_buffer(ring)) + rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB | + IXGBE_RXDCTL_RLPML_EN; +#endif + } rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); @@ -1779,6 +1949,29 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring)); } +static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *rx_ring) +{ + struct net_device *netdev = 
adapter->netdev; + unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + + /* set build_skb and buffer size flags */ + clear_ring_build_skb_enabled(rx_ring); + clear_ring_uses_large_buffer(rx_ring); + + if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX) + return; + + set_ring_build_skb_enabled(rx_ring); + + if (PAGE_SIZE < 8192) { + if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB) + return; + + set_ring_uses_large_buffer(rx_ring); + } +} + /** * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset * @adapter: board private structure @@ -1806,8 +1999,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ - for (i = 0; i < adapter->num_rx_queues; i++) - ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; + + ixgbevf_set_rx_buffer_len(adapter, rx_ring); + ixgbevf_configure_rx_ring(adapter, rx_ring); + } } static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, @@ -2136,13 +2333,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - IXGBEVF_RX_BUFSZ, + ixgbevf_rx_bufsz(rx_ring), DMA_FROM_DEVICE); /* free resources associated with mapping */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, - PAGE_SIZE, + ixgbevf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR); @@ -2172,7 +2369,10 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) union ixgbe_adv_tx_desc *eop_desc, *tx_desc; /* Free all the Tx ring sk_buffs */ - dev_kfree_skb_any(tx_buffer->skb); + if (ring_is_xdp(tx_ring)) + page_frag_free(tx_buffer->data); + else + dev_kfree_skb_any(tx_buffer->skb); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -2240,6 +2440,8 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) ixgbevf_clean_tx_ring(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + ixgbevf_clean_tx_ring(adapter->xdp_ring[i]); } void ixgbevf_down(struct ixgbevf_adapter *adapter) @@ -2278,6 +2480,13 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) IXGBE_TXDCTL_SWFLSH); } + for (i = 0; i < adapter->num_xdp_queues; i++) { + u8 reg_idx = adapter->xdp_ring[i]->reg_idx; + + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), + IXGBE_TXDCTL_SWFLSH); + } + if (!pci_channel_offline(adapter->pdev)) ixgbevf_reset(adapter); @@ -2375,6 +2584,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) /* Start with base case */ adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; + adapter->num_xdp_queues = 0; spin_lock_bh(&adapter->mbx_lock); @@ -2396,8 +2606,13 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + if (adapter->xdp_prog && + hw->mac.max_tx_queues == rss) + rss = rss > 3 ? 2 : 1; + adapter->num_rx_queues = rss; adapter->num_tx_queues = rss; + adapter->num_xdp_queues = adapter->xdp_prog ? 
rss : 0; default: break; } @@ -2405,105 +2620,209 @@ } /** - * ixgbevf_alloc_queues - Allocate memory for all rings + * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported * @adapter: board private structure to initialize * - * We allocate one ring per queue at run-time since we don't know the - * number of queues at compile-time. The polling_netdev array is - * intended for Multiqueue, but should work fine with a single queue. + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. **/ -static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) +static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) { + int vector, v_budget; + + /* It's easy to be greedy for MSI-X vectors, but it really + * doesn't do us much good if we have a lot more vectors + * than CPUs. So let's be conservative and only ask for + * (roughly) the same number of vectors as there are CPUs. + * The default is to use pairs of vectors. + */ + v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); + v_budget = min_t(int, v_budget, num_online_cpus()); + v_budget += NON_Q_VECTORS; + + adapter->msix_entries = kcalloc(v_budget, + sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + /* A failure in MSI-X entry allocation isn't fatal, but the VF driver + * does not support any other modes, so we will simply fail here. Note + * that we clean up the msix_entries pointer elsewhere. + */ + return ixgbevf_acquire_msix_vectors(adapter, v_budget); +} + +static void ixgbevf_add_ring(struct ixgbevf_ring *ring, + struct ixgbevf_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +}
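ixgbevf_alloc_q_vector(), documented next, packs an interrupt vector and all of its rings into one allocation: struct ixgbevf_q_vector ends in a zero-length ring[0] array (see the header change earlier), and a single kzalloc() is sized for the vector plus ring_count rings. The idiom reduced to essentials, with hypothetical ex_* names; newer code would declare the member as a C99 flexible array, as below:

#include <linux/slab.h>

struct ex_ring {
	void *desc;
	int count;
};

struct ex_q_vector {
	int v_idx;
	struct ex_ring ring[];	/* rings live inside the same allocation */
};

static struct ex_q_vector *ex_alloc_q_vector(int v_idx, int ring_count)
{
	struct ex_q_vector *qv;
	size_t size = sizeof(*qv) + ring_count * sizeof(qv->ring[0]);

	qv = kzalloc(size, GFP_KERNEL);	/* vector and rings in one block */
	if (!qv)
		return NULL;

	qv->v_idx = v_idx;
	return qv;
}

One kfree() then releases the vector together with every ring, which is what lets ixgbevf_free_q_vector() defer the whole block with kfree_rcu() while stats readers may still be walking the rings.

+ +/** + * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_idx: index of vector in adapter struct + * @txr_count: number of Tx rings for q vector + * @txr_idx: index of first Tx ring to assign + * @xdp_count: total number of XDP rings to allocate + * @xdp_idx: index of first XDP ring to allocate + * @rxr_count: number of Rx rings for q vector + * @rxr_idx: index of first Rx ring to assign + * + * We allocate one q_vector. If allocation fails we return -ENOMEM.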
+ **/ +static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx, + int txr_count, int txr_idx, + int xdp_count, int xdp_idx, + int rxr_count, int rxr_idx) +{ + struct ixgbevf_q_vector *q_vector; + int reg_idx = txr_idx + xdp_idx; struct ixgbevf_ring *ring; - int rx = 0, tx = 0; + int ring_count, size; + + ring_count = txr_count + xdp_count + rxr_count; + size = sizeof(*q_vector) + (sizeof(*ring) * ring_count); + + /* allocate q_vector and rings */ + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64); - for (; tx < adapter->num_tx_queues; tx++) { - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (!ring) - goto err_allocation; + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + /* initialize pointer to rings */ + ring = q_vector->ring; + + while (txr_count) { + /* assign generic ring traits */ ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ixgbevf_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ ring->count = adapter->tx_ring_count; - ring->queue_index = tx; - ring->reg_idx = tx; + ring->queue_index = txr_idx; + ring->reg_idx = reg_idx; - adapter->tx_ring[tx] = ring; + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx++; + reg_idx++; + + /* push pointer to next ring */ + ring++; } - for (; rx < adapter->num_rx_queues; rx++) { - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (!ring) - goto err_allocation; + while (xdp_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ixgbevf_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = xdp_idx; + ring->reg_idx = reg_idx; + set_ring_xdp(ring); + + /* assign ring to adapter */ + adapter->xdp_ring[xdp_idx] = ring; + + /* update count and index */ + xdp_count--; + xdp_idx++; + reg_idx++; + + /* push pointer to next ring */ + ring++; + } + + while (rxr_count) { + /* assign generic ring traits */ ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + ixgbevf_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ ring->count = adapter->rx_ring_count; - ring->queue_index = rx; - ring->reg_idx = rx; + ring->queue_index = rxr_idx; + ring->reg_idx = rxr_idx; - adapter->rx_ring[rx] = ring; - } + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; - return 0; + /* update count and index */ + rxr_count--; + rxr_idx++; -err_allocation: - while (tx) { - kfree(adapter->tx_ring[--tx]); - adapter->tx_ring[tx] = NULL; + /* push pointer to next ring */ + ring++; } - while (rx) { - kfree(adapter->rx_ring[--rx]); - adapter->rx_ring[rx] = NULL; - } - return -ENOMEM; + return 0; } /** - * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported + * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector * @adapter: board private structure to initialize + * @v_idx: index of vector in adapter struct * - * Attempt to configure the 
interrupts using the best available - * capabilities of the hardware and the kernel. + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. **/ -static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) +static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx) { - struct net_device *netdev = adapter->netdev; - int err; - int vector, v_budget; - - /* It's easy to be greedy for MSI-X vectors, but it really - * doesn't do us much good if we have a lot more vectors - * than CPU's. So let's be conservative and only ask for - * (roughly) the same number of vectors as there are CPU's. - * The default is to use pairs of vectors. - */ - v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); - v_budget = min_t(int, v_budget, num_online_cpus()); - v_budget += NON_Q_VECTORS; - - /* A failure in MSI-X entry allocation isn't fatal, but it does - * mean we disable MSI-X capabilities of the adapter. - */ - adapter->msix_entries = kcalloc(v_budget, - sizeof(struct msix_entry), GFP_KERNEL); - if (!adapter->msix_entries) - return -ENOMEM; + struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ixgbevf_ring *ring; - for (vector = 0; vector < v_budget; vector++) - adapter->msix_entries[vector].entry = vector; + ixgbevf_for_each_ring(ring, q_vector->tx) { + if (ring_is_xdp(ring)) + adapter->xdp_ring[ring->queue_index] = NULL; + else + adapter->tx_ring[ring->queue_index] = NULL; + } - err = ixgbevf_acquire_msix_vectors(adapter, v_budget); - if (err) - return err; + ixgbevf_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; - err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); - if (err) - return err; + adapter->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); - return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + /* ixgbevf_get_stats() might access the rings on this vector, + * we must wait a grace period before freeing it. 
+ */ + kfree_rcu(q_vector, rcu); } /** @@ -2515,35 +2834,58 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) **/ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) { - int q_idx, num_q_vectors; - struct ixgbevf_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int xdp_remaining = adapter->num_xdp_queues; + int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) { + for (; rxr_remaining; v_idx++, q_vectors--) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); - num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + err = ixgbevf_alloc_q_vector(adapter, v_idx, + 0, 0, 0, 0, rqpv, rxr_idx); + if (err) + goto err_out; - for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); - if (!q_vector) + /* update counts and index */ + rxr_remaining -= rqpv; + rxr_idx += rqpv; + } + } + + for (; q_vectors; v_idx++, q_vectors--) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors); + int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors); + + err = ixgbevf_alloc_q_vector(adapter, v_idx, + tqpv, txr_idx, + xqpv, xdp_idx, + rqpv, rxr_idx); + + if (err) goto err_out; - q_vector->adapter = adapter; - q_vector->v_idx = q_idx; - netif_napi_add(adapter->netdev, &q_vector->napi, - ixgbevf_poll, 64); - adapter->q_vector[q_idx] = q_vector; + + /* update counts and index */ + rxr_remaining -= rqpv; + rxr_idx += rqpv; + txr_remaining -= tqpv; + txr_idx += tqpv; + xdp_remaining -= xqpv; + xdp_idx += xqpv; } return 0; err_out: - while (q_idx) { - q_idx--; - q_vector = adapter->q_vector[q_idx]; -#ifdef CONFIG_NET_RX_BUSY_POLL - napi_hash_del(&q_vector->napi); -#endif - netif_napi_del(&q_vector->napi); - kfree(q_vector); - adapter->q_vector[q_idx] = NULL; + while (v_idx) { + v_idx--; + ixgbevf_free_q_vector(adapter, v_idx); } + return -ENOMEM; } @@ -2557,17 +2899,11 @@ err_out: **/ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) { - int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - adapter->q_vector[q_idx] = NULL; -#ifdef CONFIG_NET_RX_BUSY_POLL - napi_hash_del(&q_vector->napi); -#endif - netif_napi_del(&q_vector->napi); - kfree(q_vector); + while (q_vectors) { + q_vectors--; + ixgbevf_free_q_vector(adapter, q_vectors); } } @@ -2611,21 +2947,14 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) goto err_alloc_q_vectors; } - err = ixgbevf_alloc_queues(adapter); - if (err) { - pr_err("Unable to allocate memory for queues\n"); - goto err_alloc_queues; - } - - hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", - (adapter->num_rx_queues > 1) ? "Enabled" : - "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); + hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n", + (adapter->num_rx_queues > 1) ? 
"Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues, + adapter->num_xdp_queues); set_bit(__IXGBEVF_DOWN, &adapter->state); return 0; -err_alloc_queues: - ixgbevf_free_q_vectors(adapter); err_alloc_q_vectors: ixgbevf_reset_interrupt_capability(adapter); err_set_interrupt: @@ -2641,18 +2970,8 @@ err_set_interrupt: **/ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) { - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) { - kfree(adapter->tx_ring[i]); - adapter->tx_ring[i] = NULL; - } - for (i = 0; i < adapter->num_rx_queues; i++) { - kfree(adapter->rx_ring[i]); - adapter->rx_ring[i] = NULL; - } - adapter->num_tx_queues = 0; + adapter->num_xdp_queues = 0; adapter->num_rx_queues = 0; ixgbevf_free_q_vectors(adapter); @@ -2860,6 +3179,8 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) if (netif_carrier_ok(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++) set_check_for_tx_hang(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + set_check_for_tx_hang(adapter->xdp_ring[i]); } /* get one bit for every active Tx/Rx interrupt vector */ @@ -3031,6 +3352,9 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tx_ring[i]->desc) ixgbevf_free_tx_resources(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + if (adapter->xdp_ring[i]->desc) + ixgbevf_free_tx_resources(adapter->xdp_ring[i]); } /** @@ -3081,26 +3405,44 @@ err: **/ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) { - int i, err = 0; + int i, j = 0, err = 0; for (i = 0; i < adapter->num_tx_queues; i++) { err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); if (!err) continue; hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + for (j = 0; j < adapter->num_xdp_queues; j++) { + err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]); + if (!err) + continue; + hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); break; } + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (j--) + ixgbevf_free_tx_resources(adapter->xdp_ring[j]); + while (i--) + ixgbevf_free_tx_resources(adapter->tx_ring[i]); + return err; } /** * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure * @rx_ring: Rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ -int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring) +int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *rx_ring) { int size; @@ -3121,6 +3463,13 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring) if (!rx_ring->desc) goto err; + /* XDP RX-queue info */ + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, + rx_ring->queue_index) < 0) + goto err; + + rx_ring->xdp_prog = adapter->xdp_prog; + return 0; err: vfree(rx_ring->rx_buffer_info); @@ -3144,12 +3493,18 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) int i, err = 0; for (i = 0; i < adapter->num_rx_queues; i++) { - err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]); + err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]); if (!err) continue; hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); - break; + goto err_setup_rx; } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + 
ixgbevf_free_rx_resources(adapter->rx_ring[i]); return err; } @@ -3163,6 +3518,8 @@ void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring) { ixgbevf_clean_rx_ring(rx_ring); + rx_ring->xdp_prog = NULL; + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; @@ -3244,28 +3601,31 @@ int ixgbevf_open(struct net_device *netdev) ixgbevf_configure(adapter); - /* Map the Tx/Rx rings to the vectors we were allotted. - * if request_irq will be called in this function map_rings - * must be called *before* up_complete - */ - ixgbevf_map_rings_to_vectors(adapter); - err = ixgbevf_request_irq(adapter); if (err) goto err_req_irq; + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + ixgbevf_up_complete(adapter); return 0; +err_set_queues: + ixgbevf_free_irq(adapter); err_req_irq: - ixgbevf_down(adapter); -err_setup_rx: ixgbevf_free_all_rx_resources(adapter); -err_setup_tx: +err_setup_rx: ixgbevf_free_all_tx_resources(adapter); +err_setup_tx: ixgbevf_reset(adapter); - err_setup_reset: return err; @@ -3707,11 +4067,10 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) return __ixgbevf_maybe_stop_tx(tx_ring, size); } -static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, + struct ixgbevf_ring *tx_ring) { - struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_tx_buffer *first; - struct ixgbevf_ring *tx_ring; int tso; u32 tx_flags = 0; u16 count = TXD_USE_COUNT(skb_headlen(skb)); @@ -3726,8 +4085,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } - tx_ring = adapter->tx_ring[skb->queue_mapping]; - /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, @@ -3780,6 +4137,29 @@ out_drop: return NETDEV_TX_OK; } +static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_ring *tx_ring; + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. 
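+ * Note that skb_padto() frees the skb itself on allocation failure, so + * returning NETDEV_TX_OK in that path does not leak the buffer.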
+ */ + if (skb->len < 17) { + if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; + } + + tx_ring = adapter->tx_ring[skb->queue_mapping]; + return ixgbevf_xmit_frame_ring(skb, tx_ring); +} + /** * ixgbevf_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure @@ -3826,6 +4206,12 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; int ret; + /* prevent MTU being changed to a size unsupported by XDP */ + if (adapter->xdp_prog) { + dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n"); + return -EPERM; + } + spin_lock_bh(&adapter->mbx_lock); /* notify the PF of our intent to use this size of frame */ ret = hw->mac.ops.set_rlpml(hw, max_frame); @@ -3839,6 +4225,9 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; + if (netif_running(netdev)) + ixgbevf_reinit_locked(adapter); + return 0; } @@ -3917,17 +4306,11 @@ static int ixgbevf_resume(struct pci_dev *pdev) rtnl_lock(); err = ixgbevf_init_interrupt_scheme(adapter); + if (!err && netif_running(netdev)) + err = ixgbevf_open(netdev); rtnl_unlock(); - if (err) { - dev_err(&pdev->dev, "Cannot initialize interrupts\n"); + if (err) return err; - } - - if (netif_running(netdev)) { - err = ixgbevf_open(netdev); - if (err) - return err; - } netif_device_attach(netdev); @@ -3940,6 +4323,23 @@ static void ixgbevf_shutdown(struct pci_dev *pdev) ixgbevf_suspend(pdev, PMSG_SUSPEND); } +static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats, + const struct ixgbevf_ring *ring) +{ + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + bytes = ring->stats.bytes; + packets = ring->stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->tx_bytes += bytes; + stats->tx_packets += packets; + } +} + static void ixgbevf_get_stats(struct net_device *netdev, struct rtnl_link_stats64 *stats) { @@ -3953,6 +4353,7 @@ static void ixgbevf_get_stats(struct net_device *netdev, stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; + rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { ring = adapter->rx_ring[i]; do { @@ -3966,14 +4367,14 @@ static void ixgbevf_get_stats(struct net_device *netdev, for (i = 0; i < adapter->num_tx_queues; i++) { ring = adapter->tx_ring[i]; - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - bytes = ring->stats.bytes; - packets = ring->stats.packets; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - stats->tx_bytes += bytes; - stats->tx_packets += packets; + ixgbevf_get_tx_ring_stats(stats, ring); + } + + for (i = 0; i < adapter->num_xdp_queues; i++) { + ring = adapter->xdp_ring[i]; + ixgbevf_get_tx_ring_stats(stats, ring); } + rcu_read_unlock(); } #define IXGBEVF_MAX_MAC_HDR_LEN 127 @@ -4010,6 +4411,64 @@ ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog) +{ + int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct ixgbevf_adapter *adapter = netdev_priv(dev); + struct bpf_prog *old_prog; + + /* verify ixgbevf ring attributes are sufficient for XDP */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbevf_ring *ring = adapter->rx_ring[i]; + + if (frame_size > ixgbevf_rx_bufsz(ring)) + return -EINVAL; + } + + 
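/* Swapping the program pointer with xchg() before comparing the old and + * new enable states lets the driver tell an enable/disable transition + * (full queue reconfiguration below) apart from replacing one loaded + * program with another (per-ring pointer update in the else branch). + */ +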
old_prog = xchg(&adapter->xdp_prog, prog); + + /* If transitioning XDP modes reconfigure rings */ + if (!!prog != !!old_prog) { + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. + */ + if (netif_running(dev)) + ixgbevf_close(dev); + + ixgbevf_clear_interrupt_scheme(adapter); + ixgbevf_init_interrupt_scheme(adapter); + + if (netif_running(dev)) + ixgbevf_open(dev); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); + } + + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct ixgbevf_adapter *adapter = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return ixgbevf_xdp_setup(dev, xdp->prog); + case XDP_QUERY_PROG: + xdp->prog_attached = !!(adapter->xdp_prog); + xdp->prog_id = adapter->xdp_prog ? + adapter->xdp_prog->aux->id : 0; + return 0; + default: + return -EINVAL; + } +} + static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_open = ixgbevf_open, .ndo_stop = ixgbevf_close, @@ -4026,6 +4485,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_poll_controller = ixgbevf_netpoll, #endif .ndo_features_check = ixgbevf_features_check, + .ndo_bpf = ixgbevf_xdp, }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index bc0442acae78..5ec947fe3d09 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 82599 Virtual Function driver diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h index 2764fd16261f..278f73980501 100644 --- a/drivers/net/ethernet/intel/ixgbevf/regs.h +++ b/drivers/net/ethernet/intel/ixgbevf/regs.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 82599 Virtual Function driver diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index c651fefcc3d2..194fbdaa4519 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /******************************************************************************* Intel 82599 Virtual Function driver diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 25e9a551cc8c..17a904cc6a5e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1132,6 +1132,7 @@ static void mvneta_port_up(struct mvneta_port *pp) } mvreg_write(pp, MVNETA_TXQ_CMD, q_map); + q_map = 0; /* Enable all initialized RXQs. 
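 * (q_map is cleared just above, so this loop rebuilds the map from * scratch instead of reusing the TXQ bits written to MVNETA_TXQ_CMD.)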
*/ for (queue = 0; queue < rxq_number; queue++) { struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; @@ -1555,7 +1556,6 @@ static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, { mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), value | MVNETA_RXQ_NON_OCCUPIED(0)); - rxq->pkts_coal = value; } /* Set the time delay in usec before RX interrupt will be generated by @@ -1571,7 +1571,6 @@ static void mvneta_rx_time_coal_set(struct mvneta_port *pp, val = (clk_rate / 1000000) * value; mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); - rxq->time_coal = value; } /* Set threshold for TX_DONE pkts coalescing */ @@ -1586,8 +1585,6 @@ static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, val |= MVNETA_TXQ_SENT_THRESH_MASK(value); mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); - - txq->done_pkts_coal = value; } /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */ @@ -2800,10 +2797,8 @@ static void mvneta_rx_reset(struct mvneta_port *pp) /* Rx/Tx queue initialization/cleanup methods */ -/* Create a specified RX queue */ -static int mvneta_rxq_init(struct mvneta_port *pp, - struct mvneta_rx_queue *rxq) - +static int mvneta_rxq_sw_init(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) { rxq->size = pp->rx_ring_size; @@ -2816,6 +2811,12 @@ static int mvneta_rxq_init(struct mvneta_port *pp, rxq->last_desc = rxq->size - 1; + return 0; +} + +static void mvneta_rxq_hw_init(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ /* Set Rx descriptors queue starting address */ mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); @@ -2839,6 +2840,20 @@ static int mvneta_rxq_init(struct mvneta_port *pp, mvneta_rxq_short_pool_set(pp, rxq); mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); } +} + +/* Create a specified RX queue */ +static int mvneta_rxq_init(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) + +{ + int ret; + + ret = mvneta_rxq_sw_init(pp, rxq); + if (ret < 0) + return ret; + + mvneta_rxq_hw_init(pp, rxq); return 0; } @@ -2861,9 +2876,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp, rxq->descs_phys = 0; } -/* Create and initialize a tx queue */ -static int mvneta_txq_init(struct mvneta_port *pp, - struct mvneta_tx_queue *txq) +static int mvneta_txq_sw_init(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) { int cpu; @@ -2876,7 +2890,6 @@ static int mvneta_txq_init(struct mvneta_port *pp, txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; txq->tx_wake_threshold = txq->tx_stop_threshold / 2; - /* Allocate memory for TX descriptors */ txq->descs = dma_alloc_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, @@ -2886,14 +2899,6 @@ static int mvneta_txq_init(struct mvneta_port *pp, txq->last_desc = txq->size - 1; - /* Set maximum bandwidth for enabled TXQs */ - mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); - mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); - - /* Set Tx descriptors queue starting address */ - mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); - mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); - txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb), GFP_KERNEL); if (!txq->tx_skb) { @@ -2914,7 +2919,6 @@ static int mvneta_txq_init(struct mvneta_port *pp, txq->descs, txq->descs_phys); return -ENOMEM; } - mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); /* Setup XPS mapping */ if (txq_number > 1) @@ -2927,9 +2931,38 @@ static int 
mvneta_txq_init(struct mvneta_port *pp, return 0; } +static void mvneta_txq_hw_init(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + /* Set maximum bandwidth for enabled TXQs */ + mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); + mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); + + /* Set Tx descriptors queue starting address */ + mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); + mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); + + mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); +} + +/* Create and initialize a tx queue */ +static int mvneta_txq_init(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + int ret; + + ret = mvneta_txq_sw_init(pp, txq); + if (ret < 0) + return ret; + + mvneta_txq_hw_init(pp, txq); + + return 0; +} + /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ -static void mvneta_txq_deinit(struct mvneta_port *pp, - struct mvneta_tx_queue *txq) +static void mvneta_txq_sw_deinit(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) { struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); @@ -2950,7 +2983,11 @@ static void mvneta_txq_deinit(struct mvneta_port *pp, txq->last_desc = 0; txq->next_desc_to_proc = 0; txq->descs_phys = 0; +} +static void mvneta_txq_hw_deinit(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ /* Set minimum bandwidth for disabled TXQs */ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); @@ -2960,6 +2997,13 @@ static void mvneta_txq_deinit(struct mvneta_port *pp, mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); } +static void mvneta_txq_deinit(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + mvneta_txq_sw_deinit(pp, txq); + mvneta_txq_hw_deinit(pp, txq); +} + /* Cleanup all Tx queues */ static void mvneta_cleanup_txqs(struct mvneta_port *pp) { @@ -3396,7 +3440,8 @@ static void mvneta_set_eee(struct mvneta_port *pp, bool enable) mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); } -static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode) +static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode, + phy_interface_t interface) { struct mvneta_port *pp = netdev_priv(ndev); u32 val; @@ -3415,6 +3460,7 @@ static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode) } static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode, + phy_interface_t interface, struct phy_device *phy) { struct mvneta_port *pp = netdev_priv(ndev); @@ -4073,22 +4119,6 @@ static int mvneta_ethtool_set_wol(struct net_device *dev, return ret; } -static int mvneta_ethtool_get_module_info(struct net_device *dev, - struct ethtool_modinfo *modinfo) -{ - struct mvneta_port *pp = netdev_priv(dev); - - return phylink_ethtool_get_module_info(pp->phylink, modinfo); -} - -static int mvneta_ethtool_get_module_eeprom(struct net_device *dev, - struct ethtool_eeprom *ee, u8 *buf) -{ - struct mvneta_port *pp = netdev_priv(dev); - - return phylink_ethtool_get_module_eeprom(pp->phylink, ee, buf); -} - static int mvneta_ethtool_get_eee(struct net_device *dev, struct ethtool_eee *eee) { @@ -4163,8 +4193,6 @@ static const struct ethtool_ops mvneta_eth_tool_ops = { .set_link_ksettings = mvneta_ethtool_set_link_ksettings, .get_wol = mvneta_ethtool_get_wol, .set_wol = mvneta_ethtool_set_wol, - .get_module_info = mvneta_ethtool_get_module_info, - .get_module_eeprom = mvneta_ethtool_get_module_eeprom, .get_eee = mvneta_ethtool_get_eee, 
.set_eee = mvneta_ethtool_set_eee, }; @@ -4543,16 +4571,45 @@ static int mvneta_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int mvneta_suspend(struct device *device) { + int queue; struct net_device *dev = dev_get_drvdata(device); struct mvneta_port *pp = netdev_priv(dev); + if (!netif_running(dev)) + goto clean_exit; + + if (!pp->neta_armada3700) { + spin_lock(&pp->lock); + pp->is_stopped = true; + spin_unlock(&pp->lock); + + cpuhp_state_remove_instance_nocalls(online_hpstate, + &pp->node_online); + cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, + &pp->node_dead); + } + rtnl_lock(); - if (netif_running(dev)) - mvneta_stop(dev); + mvneta_stop_dev(pp); rtnl_unlock(); + + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + + mvneta_rxq_drop_pkts(pp, rxq); + } + + for (queue = 0; queue < txq_number; queue++) { + struct mvneta_tx_queue *txq = &pp->txqs[queue]; + + mvneta_txq_hw_deinit(pp, txq); + } + +clean_exit: netif_device_detach(dev); clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); + return 0; } @@ -4561,7 +4618,7 @@ static int mvneta_resume(struct device *device) struct platform_device *pdev = to_platform_device(device); struct net_device *dev = dev_get_drvdata(device); struct mvneta_port *pp = netdev_priv(dev); - int err; + int err, queue; clk_prepare_enable(pp->clk); if (!IS_ERR(pp->clk_bus)) @@ -4583,12 +4640,38 @@ static int mvneta_resume(struct device *device) } netif_device_attach(dev); - rtnl_lock(); - if (netif_running(dev)) { - mvneta_open(dev); - mvneta_set_rx_mode(dev); + + if (!netif_running(dev)) + return 0; + + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + + rxq->next_desc_to_proc = 0; + mvneta_rxq_hw_init(pp, rxq); + } + + for (queue = 0; queue < txq_number; queue++) { + struct mvneta_tx_queue *txq = &pp->txqs[queue]; + + txq->next_desc_to_proc = 0; + mvneta_txq_hw_init(pp, txq); + } + + if (!pp->neta_armada3700) { + spin_lock(&pp->lock); + pp->is_stopped = false; + spin_unlock(&pp->lock); + cpuhp_state_add_instance_nocalls(online_hpstate, + &pp->node_online); + cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, + &pp->node_dead); } + + rtnl_lock(); + mvneta_start_dev(pp); rtnl_unlock(); + mvneta_set_rx_mode(dev); return 0; } @@ -4655,8 +4738,8 @@ MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); MODULE_LICENSE("GPL"); -module_param(rxq_number, int, S_IRUGO); -module_param(txq_number, int, S_IRUGO); +module_param(rxq_number, int, 0444); +module_param(txq_number, int, 0444); -module_param(rxq_def, int, S_IRUGO); -module_param(rx_copybreak, int, S_IRUGO | S_IWUSR); +module_param(rxq_def, int, 0444); +module_param(rx_copybreak, int, 0644); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index a1d7b88cf083..7fc1bbf51c44 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -44,6 +44,7 @@ #define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) #define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 #define MVPP2_RX_FIFO_INIT_REG 0x64 +#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port)) #define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port)) /* RX DMA Top Registers */ @@ -65,6 +66,10 @@ #define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 #define MVPP2_RXQ_DISABLE_MASK BIT(31) +/* Top Registers */ +#define 
MVPP2_MH_REG(port) (0x5040 + 4 * (port)) +#define MVPP2_DSA_EXTENDED BIT(5) + /* Parser Registers */ #define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 #define MVPP2_PRS_PORT_LU_MAX 0xf @@ -254,6 +259,7 @@ #define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) #define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff +#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8 #define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) #define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) #define MVPP2_BM_START_MASK BIT(0) @@ -473,6 +479,7 @@ #define MVPP2_ETH_TYPE_LEN 2 #define MVPP2_PPPOE_HDR_SIZE 8 #define MVPP2_VLAN_TAG_LEN 4 +#define MVPP2_VLAN_TAG_EDSA_LEN 8 /* Lbtd 802.3 type */ #define MVPP2_IP_LBDT_TYPE 0xfffa @@ -536,6 +543,11 @@ /* TX FIFO constants */ #define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa #define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3 +#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 +#define MVPP2_TX_FIFO_THRESHOLD_10KB \ + (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) +#define MVPP2_TX_FIFO_THRESHOLD_3KB \ + (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) /* RX buffer constants */ #define MVPP2_SKB_SHINFO_SIZE \ @@ -589,6 +601,9 @@ enum mvpp2_tag_type { #define MVPP2_PRS_TCAM_PROTO_MASK 0xff #define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f #define MVPP2_PRS_DBL_VLANS_MAX 100 +#define MVPP2_PRS_CAST_MASK BIT(0) +#define MVPP2_PRS_MCAST_VAL BIT(0) +#define MVPP2_PRS_UCAST_VAL 0x0 /* Tcam structure: * - lookup ID - 4 bits @@ -609,35 +624,81 @@ enum mvpp2_tag_type { #define MVPP2_PRS_TCAM_LU_BYTE 20 #define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) #define MVPP2_PRS_TCAM_INV_WORD 5 + +#define MVPP2_PRS_VID_TCAM_BYTE 2 + +/* TCAM range for unicast and multicast filtering. We have 25 entries per port, + * with 4 dedicated to UC filtering and the rest to multicast filtering. + * Additionnally we reserve one entry for the broadcast address, and one for + * each port's own address. 
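+ * In other words, each port gets 25 - 4 = 21 multicast filter entries + * (MVPP2_PRS_MAC_MC_FILT_MAX below).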
+ */ +#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25 +#define MVPP2_PRS_MAC_RANGE_SIZE 80 + +/* Number of entries per port dedicated to UC and MC filtering */ +#define MVPP2_PRS_MAC_UC_FILT_MAX 4 +#define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \ + MVPP2_PRS_MAC_UC_FILT_MAX) + +/* There is a TCAM range reserved for VLAN filtering entries, range size is 33 + * 10 VLAN ID filter entries per port + * 1 default VLAN filter entry per port + * It is assumed that there are 3 ports for filter, not including loopback port + */ +#define MVPP2_PRS_VLAN_FILT_MAX 11 +#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33 + +#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2) +#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1) + /* Tcam entries ID */ #define MVPP2_PE_DROP_ALL 0 #define MVPP2_PE_FIRST_FREE_TID 1 -#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31) + +/* MAC filtering range */ +#define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1) +#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \ + MVPP2_PRS_MAC_RANGE_SIZE + 1) +/* VLAN filtering range */ +#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) +#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ + MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) +#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1) #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) -#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29) -#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) -#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27) -#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26) -#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19) -#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) -#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) -#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) -#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) -#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) -#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13) -#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12) -#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11) -#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10) -#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9) -#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8) -#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7) -#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6) -#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5) -#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4) -#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3) -#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) +#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) +#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) +#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27) +#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22) +#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21) +#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20) +#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19) +#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) +#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) +#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) +#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) 
+#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) +#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13) +#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12) +#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11) +#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10) +#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9) +#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8) +#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7) +#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6) +#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5) +/* reserved */ +#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3) +#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) #define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) +#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \ + ((port) * MVPP2_PRS_VLAN_FILT_MAX)) +#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ + + MVPP2_PRS_VLAN_FILT_MAX_ENTRY) +/* Index of default vid filter for given port */ +#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ + + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY) + /* Sram structure * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). */ @@ -725,6 +786,7 @@ enum mvpp2_tag_type { #define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) #define MVPP2_PRS_SINGLE_VLAN_AI 0 #define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) +#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0) /* DSA/EDSA type */ #define MVPP2_PRS_TAGGED true @@ -747,6 +809,7 @@ enum mvpp2_prs_lookup { MVPP2_PRS_LU_MAC, MVPP2_PRS_LU_DSA, MVPP2_PRS_LU_VLAN, + MVPP2_PRS_LU_VID, MVPP2_PRS_LU_L2, MVPP2_PRS_LU_PPPOE, MVPP2_PRS_LU_IP4, @@ -755,6 +818,12 @@ enum mvpp2_prs_lookup { MVPP2_PRS_LU_LAST, }; +/* L2 cast enum */ +enum mvpp2_prs_l2_cast { + MVPP2_PRS_L2_UNI_CAST, + MVPP2_PRS_L2_MULTI_CAST, +}; + /* L3 cast enum */ enum mvpp2_prs_l3_cast { MVPP2_PRS_L3_UNI_CAST, @@ -772,23 +841,26 @@ enum mvpp2_prs_l3_cast { #define MVPP22_RSS_TABLE_ENTRIES 32 /* BM constants */ -#define MVPP2_BM_POOLS_NUM 8 +#define MVPP2_BM_JUMBO_BUF_NUM 512 #define MVPP2_BM_LONG_BUF_NUM 1024 #define MVPP2_BM_SHORT_BUF_NUM 2048 #define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) #define MVPP2_BM_POOL_PTR_ALIGN 128 -#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 
2 : port) -#define MVPP2_BM_SWF_SHORT_POOL 3 /* BM cookie (32 bits) definition */ #define MVPP2_BM_COOKIE_POOL_OFFS 8 #define MVPP2_BM_COOKIE_CPU_OFFS 24 +#define MVPP2_BM_SHORT_FRAME_SIZE 512 +#define MVPP2_BM_LONG_FRAME_SIZE 2048 +#define MVPP2_BM_JUMBO_FRAME_SIZE 10240 /* BM short pool packet size * These value assure that for SWF the total number * of bytes allocated for each buffer will be 512 */ -#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512) +#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE) +#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE) +#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE) #define MVPP21_ADDR_SPACE_SZ 0 #define MVPP22_ADDR_SPACE_SZ SZ_64K @@ -796,12 +868,18 @@ enum mvpp2_prs_l3_cast { #define MVPP2_MAX_THREADS 8 #define MVPP2_MAX_QVECS MVPP2_MAX_THREADS -enum mvpp2_bm_type { - MVPP2_BM_FREE, - MVPP2_BM_SWF_LONG, - MVPP2_BM_SWF_SHORT +enum mvpp2_bm_pool_log_num { + MVPP2_BM_SHORT, + MVPP2_BM_LONG, + MVPP2_BM_JUMBO, + MVPP2_BM_POOLS_NUM }; +static struct { + int pkt_size; + int buf_num; +} mvpp2_pools[MVPP2_BM_POOLS_NUM]; + /* GMAC MIB Counters register definitions */ #define MVPP21_MIB_COUNTERS_OFFSET 0x1000 #define MVPP21_MIB_COUNTERS_PORT_SZ 0x400 @@ -1230,7 +1308,6 @@ struct mvpp2_cls_lookup_entry { struct mvpp2_bm_pool { /* Pool number in the range 0-7 */ int id; - enum mvpp2_bm_type type; /* Buffer Pointers Pool External (BPPE) size */ int size; @@ -1282,6 +1359,10 @@ static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) return readl(priv->swth_base[0] + offset); } +static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) +{ + return readl_relaxed(priv->swth_base[0] + offset); +} /* These accessors should be used to access: * * - per-CPU registers, where each CPU has its own copy of the @@ -1330,6 +1411,18 @@ static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, return readl(priv->swth_base[cpu] + offset); } +static void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, + u32 offset, u32 data) +{ + writel_relaxed(data, priv->swth_base[cpu] + offset); +} + +static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, + u32 offset) +{ + return readl_relaxed(priv->swth_base[cpu] + offset); +} + static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { @@ -1505,14 +1598,18 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) return 0; } -/* Read tcam entry from hw */ -static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) +/* Initialize tcam entry from hw */ +static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, + struct mvpp2_prs_entry *pe, int tid) { int i; if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; + memset(pe, 0, sizeof(*pe)); + pe->index = tid; + /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); @@ -1662,6 +1759,14 @@ static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); } +/* Set vid in tcam sw entry */ +static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, + unsigned short vid) +{ + mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf); + mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff); +} + /* Set bits in sram sw entry */ static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, int val) @@ -1828,16 +1933,11 @@ static void 
mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, } /* Find parser flow entry */ -static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) +static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) { - struct mvpp2_prs_entry *pe; + struct mvpp2_prs_entry pe; int tid; - pe = kzalloc(sizeof(*pe), GFP_KERNEL); - if (!pe) - return NULL; - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); - /* Go through the all entires with MVPP2_PRS_LU_FLOWS */ for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { u8 bits; @@ -1846,17 +1946,15 @@ static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) continue; - pe->index = tid; - mvpp2_prs_hw_read(priv, pe); - bits = mvpp2_prs_sram_ai_get(pe); + mvpp2_prs_init_from_hw(priv, &pe, tid); + bits = mvpp2_prs_sram_ai_get(&pe); /* Sram store classification lookup ID in AI bits [5:0] */ if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) - return pe; + return tid; } - kfree(pe); - return NULL; + return -ENOENT; } /* Return first free tcam index, seeking from start to end */ @@ -1886,8 +1984,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { /* Entry exist - update port only */ - pe.index = MVPP2_PE_DROP_ALL; - mvpp2_prs_hw_read(priv, &pe); + mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -1914,78 +2011,42 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) mvpp2_prs_hw_write(priv, &pe); } -/* Set port to promiscuous mode */ -static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add) +/* Set port to unicast or multicast promiscuous mode */ +static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add) { struct mvpp2_prs_entry pe; + unsigned char cast_match; + unsigned int ri; + int tid; - /* Promiscuous mode - Accept unknown packets */ - - if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) { - /* Entry exist - update port only */ - pe.index = MVPP2_PE_MAC_PROMISCUOUS; - mvpp2_prs_hw_read(priv, &pe); + if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { + cast_match = MVPP2_PRS_UCAST_VAL; + tid = MVPP2_PE_MAC_UC_PROMISCUOUS; + ri = MVPP2_PRS_RI_L2_UCAST; } else { - /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - pe.index = MVPP2_PE_MAC_PROMISCUOUS; - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); - - /* Set result info bits */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST, - MVPP2_PRS_RI_L2_CAST_MASK); - - /* Shift to ethertype */ - mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + cast_match = MVPP2_PRS_MCAST_VAL; + tid = MVPP2_PE_MAC_MC_PROMISCUOUS; + ri = MVPP2_PRS_RI_L2_MCAST; } - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Accept multicast */ -static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index, - bool add) -{ - struct mvpp2_prs_entry pe; - unsigned char da_mc; - - /* Ethernet multicast address first byte is - * 0x01 for IPv4 and 0x33 for IPv6 - */ - da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 
0x01 : 0x33; - - if (priv->prs_shadow[index].valid) { - /* Entry exist - update port only */ - pe.index = index; - mvpp2_prs_hw_read(priv, &pe); + /* promiscuous mode - Accept unknown unicast or multicast packets */ + if (priv->prs_shadow[tid].valid) { + mvpp2_prs_init_from_hw(priv, &pe, tid); } else { - /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - pe.index = index; + pe.index = tid; /* Continue - set next lookup */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); /* Set result info bits */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST, - MVPP2_PRS_RI_L2_CAST_MASK); + mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK); - /* Update tcam entry data first byte */ - mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff); + /* Match UC or MC addresses */ + mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match, + MVPP2_PRS_CAST_MASK); /* Shift to ethertype */ mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, @@ -2021,32 +2082,37 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - pe.index = tid; - mvpp2_prs_hw_read(priv, &pe); + mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); pe.index = tid; - /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/ - mvpp2_prs_sram_shift_set(&pe, shift, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); if (tagged) { /* Set tagged bit in DSA tag */ mvpp2_prs_tcam_data_byte_set(&pe, 0, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT); - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, - MVPP2_PRS_SRAM_AI_MASK); - /* If packet is tagged continue check vlans */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + MVPP2_PRS_TCAM_DSA_TAGGED_BIT, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT); + + /* Set ai bits for next iteration */ + if (extend) + mvpp2_prs_sram_ai_update(&pe, 1, + MVPP2_PRS_SRAM_AI_MASK); + else + mvpp2_prs_sram_ai_update(&pe, 0, + MVPP2_PRS_SRAM_AI_MASK); + + /* If packet is tagged continue check vid filtering */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); } else { + /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/ + mvpp2_prs_sram_shift_set(&pe, shift, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set result info bits to 'no vlans' */ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK); @@ -2084,8 +2150,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - pe.index = tid; - mvpp2_prs_hw_read(priv, &pe); + mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -2133,17 +2198,11 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, } /* Search for existing single/triple vlan entry */ -static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv, - unsigned short tpid, int ai) +static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) { - struct mvpp2_prs_entry *pe; + struct mvpp2_prs_entry pe; int tid; - pe = kzalloc(sizeof(*pe), GFP_KERNEL); - if (!pe) - return NULL; - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); - /* Go through the all entries with MVPP2_PRS_LU_VLAN */ for (tid = MVPP2_PE_FIRST_FREE_TID; tid 
<= MVPP2_PE_LAST_FREE_TID; tid++) { @@ -2154,19 +2213,17 @@ static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv, priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - pe->index = tid; - - mvpp2_prs_hw_read(priv, pe); - match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid)); + mvpp2_prs_init_from_hw(priv, &pe, tid); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid)); if (!match) continue; /* Get vlan type */ - ri_bits = mvpp2_prs_sram_ri_get(pe); + ri_bits = mvpp2_prs_sram_ri_get(&pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; /* Get current ai value from tcam */ - ai_bits = mvpp2_prs_tcam_ai_get(pe); + ai_bits = mvpp2_prs_tcam_ai_get(&pe); /* Clear double vlan bit */ ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; @@ -2175,34 +2232,31 @@ static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv, if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) - return pe; + return tid; } - kfree(pe); - return NULL; + return -ENOENT; } /* Add/update single/triple vlan entry */ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, unsigned int port_map) { - struct mvpp2_prs_entry *pe; + struct mvpp2_prs_entry pe; int tid_aux, tid; int ret = 0; - pe = mvpp2_prs_vlan_find(priv, tpid, ai); + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_vlan_find(priv, tpid, ai); - if (!pe) { + if (tid < 0) { /* Create new tcam entry */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); if (tid < 0) return tid; - pe = kzalloc(sizeof(*pe), GFP_KERNEL); - if (!pe) - return -ENOMEM; - /* Get last double vlan tid */ for (tid_aux = MVPP2_PE_LAST_FREE_TID; tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) { @@ -2212,50 +2266,46 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - pe->index = tid_aux; - mvpp2_prs_hw_read(priv, pe); - ri_bits = mvpp2_prs_sram_ri_get(pe); + mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + ri_bits = mvpp2_prs_sram_ri_get(&pe); if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_DOUBLE) break; } - if (tid <= tid_aux) { - ret = -EINVAL; - goto free_pe; - } + if (tid <= tid_aux) + return -EINVAL; + + memset(&pe, 0, sizeof(pe)); + pe.index = tid; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - memset(pe, 0, sizeof(*pe)); - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); - pe->index = tid; + mvpp2_prs_match_etype(&pe, 0, tpid); - mvpp2_prs_match_etype(pe, 0, tpid); + /* VLAN tag detected, proceed with VID filtering */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); - mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2); - /* Shift 4 bytes - skip 1 vlan tag */ - mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK); + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); if (ai == MVPP2_PRS_SINGLE_VLAN_AI) { - mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE, + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, MVPP2_PRS_RI_VLAN_MASK); } else { ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; - mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE, + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE, MVPP2_PRS_RI_VLAN_MASK); } - mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK); + mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK); - mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + } 
else { + mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ - mvpp2_prs_tcam_port_map_set(pe, port_map); + mvpp2_prs_tcam_port_map_set(&pe, port_map); - mvpp2_prs_hw_write(priv, pe); -free_pe: - kfree(pe); + mvpp2_prs_hw_write(priv, &pe); return ret; } @@ -2274,18 +2324,12 @@ static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv) } /* Search for existing double vlan entry */ -static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv, - unsigned short tpid1, - unsigned short tpid2) +static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, + unsigned short tpid2) { - struct mvpp2_prs_entry *pe; + struct mvpp2_prs_entry pe; int tid; - pe = kzalloc(sizeof(*pe), GFP_KERNEL); - if (!pe) - return NULL; - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); - /* Go through the all entries with MVPP2_PRS_LU_VLAN */ for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) { @@ -2296,22 +2340,20 @@ static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv, priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - pe->index = tid; - mvpp2_prs_hw_read(priv, pe); + mvpp2_prs_init_from_hw(priv, &pe, tid); - match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) - && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2)); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) && + mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2)); if (!match) continue; - ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK; + ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK; if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE) - return pe; + return tid; } - kfree(pe); - return NULL; + return -ENOENT; } /* Add or update double vlan entry */ @@ -2319,28 +2361,24 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, unsigned short tpid2, unsigned int port_map) { - struct mvpp2_prs_entry *pe; int tid_aux, tid, ai, ret = 0; + struct mvpp2_prs_entry pe; - pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); + memset(&pe, 0, sizeof(pe)); - if (!pe) { + tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); + + if (tid < 0) { /* Create new tcam entry */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; - pe = kzalloc(sizeof(*pe), GFP_KERNEL); - if (!pe) - return -ENOMEM; - /* Set ai value for new double vlan entry */ ai = mvpp2_prs_double_vlan_ai_free_get(priv); - if (ai < 0) { - ret = ai; - goto free_pe; - } + if (ai < 0) + return ai; /* Get first single/triple vlan tid */ for (tid_aux = MVPP2_PE_FIRST_FREE_TID; @@ -2351,46 +2389,44 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - pe->index = tid_aux; - mvpp2_prs_hw_read(priv, pe); - ri_bits = mvpp2_prs_sram_ri_get(pe); + mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + ri_bits = mvpp2_prs_sram_ri_get(&pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) break; } - if (tid >= tid_aux) { - ret = -ERANGE; - goto free_pe; - } + if (tid >= tid_aux) + return -ERANGE; - memset(pe, 0, sizeof(*pe)); - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); - pe->index = tid; + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = tid; priv->prs_double_vlans[ai] = true; - mvpp2_prs_match_etype(pe, 0, tpid1); - mvpp2_prs_match_etype(pe, 4, tpid2); + mvpp2_prs_match_etype(&pe, 0, tpid1); + mvpp2_prs_match_etype(&pe, 4, 
tpid2); - mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN); - /* Shift 8 bytes - skip 2 vlan tags */ - mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN, + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + /* Shift 4 bytes - skip outer vlan tag */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE, + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, MVPP2_PRS_RI_VLAN_MASK); - mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, + mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, MVPP2_PRS_SRAM_AI_MASK); - mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ - mvpp2_prs_tcam_port_map_set(pe, port_map); - mvpp2_prs_hw_write(priv, pe); -free_pe: - kfree(pe); + mvpp2_prs_tcam_port_map_set(&pe, port_map); + mvpp2_prs_hw_write(priv, &pe); + return ret; } @@ -2694,11 +2730,10 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv) mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); mvpp2_prs_hw_write(priv, &pe); - /* place holders only - no ports */ + /* Create dummy entries for drop all and promiscuous modes */ mvpp2_prs_mac_drop_all_set(priv, 0, false); - mvpp2_prs_mac_promisc_set(priv, 0, false); - mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false); - mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false); + mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); + mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); } /* Set default entries for various types of dsa packets */ @@ -2755,6 +2790,62 @@ static void mvpp2_prs_dsa_init(struct mvpp2 *priv) mvpp2_prs_hw_write(priv, &pe); } +/* Initialize parser entries for VID filtering */ +static void mvpp2_prs_vid_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + /* Set default vid entry */ + pe.index = MVPP2_PE_VID_FLTR_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT); + + /* Skip VLAN header - Set offset to 4 bytes */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); + + /* Set default vid entry for extended DSA*/ + memset(&pe, 0, sizeof(pe)); + + /* Set default vid entry */ + pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT, + MVPP2_PRS_EDSA_VID_AI_BIT); + + /* Skip VLAN header - Set offset to 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); +} + /* Match basic ethertypes */ static int 
mvpp2_prs_etype_init(struct mvpp2 *priv) { @@ -3023,7 +3114,8 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); pe.index = MVPP2_PE_VLAN_DBL; - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); + /* Clear ai for next iterations */ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, @@ -3386,6 +3478,192 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv) return 0; } +/* Find tcam entry with matched pair <vid,port> */ +static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, + u16 mask) +{ + unsigned char byte[2], enable[2]; + struct mvpp2_prs_entry pe; + u16 rvid, rmask; + int tid; + + /* Go through the all entries with MVPP2_PRS_LU_VID */ + for (tid = MVPP2_PE_VID_FILT_RANGE_START; + tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) { + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); + mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); + + rvid = ((byte[0] & 0xf) << 8) + byte[1]; + rmask = ((enable[0] & 0xf) << 8) + enable[1]; + + if (rvid != vid || rmask != mask) + continue; + + return tid; + } + + return -ENOENT; +} + +/* Write parser entry for VID filtering */ +static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) +{ + unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START + + port->id * MVPP2_PRS_VLAN_FILT_MAX; + unsigned int mask = 0xfff, reg_val, shift; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + /* Scan TCAM and see if entry with this <vid,port> already exist */ + tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask); + + reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); + if (reg_val & MVPP2_DSA_EXTENDED) + shift = MVPP2_VLAN_TAG_EDSA_LEN; + else + shift = MVPP2_VLAN_TAG_LEN; + + /* No such entry */ + if (tid < 0) { + + /* Go through all entries from first to last in vlan range */ + tid = mvpp2_prs_tcam_first_free(priv, vid_start, + vid_start + + MVPP2_PRS_VLAN_FILT_MAX_ENTRY); + + /* There isn't room for a new VID filter */ + if (tid < 0) + return tid; + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + pe.index = tid; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + /* Enable the current port */ + mvpp2_prs_tcam_port_set(&pe, port->id, true); + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Skip VLAN header - Set offset to 4 or 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Set match on VID */ + mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Write parser entry for VID filtering */ +static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) +{ + struct mvpp2 *priv = port->priv; + int tid; + + /* Scan TCAM and see if entry with this <vid,port> already exist */ + tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff); + + /* No such entry */ + if (tid < 0) + return; + + 
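/* Invalidate the matching TCAM entry and release its slot in the + * shadow table so it can be reused by a later VID filter entry. + */ +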
mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; +} + +/* Remove all existing VID filters on this port */ +static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + int tid; + + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + if (priv->prs_shadow[tid].valid) + mvpp2_prs_vid_entry_remove(port, tid); + } +} + +/* Remove VID filering entry for this port */ +static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) +{ + unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); + struct mvpp2 *priv = port->priv; + + /* Invalidate the guard entry */ + mvpp2_prs_hw_inv(priv, tid); + + priv->prs_shadow[tid].valid = false; +} + +/* Add guard entry that drops packets when no VID is matched on this port */ +static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) +{ + unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); + struct mvpp2 *priv = port->priv; + unsigned int reg_val, shift; + struct mvpp2_prs_entry pe; + + if (priv->prs_shadow[tid].valid) + return; + + memset(&pe, 0, sizeof(pe)); + + pe.index = tid; + + reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); + if (reg_val & MVPP2_DSA_EXTENDED) + shift = MVPP2_VLAN_TAG_EDSA_LEN; + else + shift = MVPP2_VLAN_TAG_LEN; + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port->id, true); + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Skip VLAN header - Set offset to 4 or 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Drop VLAN packets that don't belong to any VIDs on this port */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); +} + /* Parser default initialization */ static int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) @@ -3429,6 +3707,8 @@ static int mvpp2_prs_default_init(struct platform_device *pdev, mvpp2_prs_dsa_init(priv); + mvpp2_prs_vid_init(priv); + err = mvpp2_prs_etype_init(priv); if (err) return err; @@ -3472,21 +3752,16 @@ static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, } /* Find tcam entry with matched pair <MAC DA, port> */ -static struct mvpp2_prs_entry * +static int mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, unsigned char *mask, int udf_type) { - struct mvpp2_prs_entry *pe; + struct mvpp2_prs_entry pe; int tid; - pe = kzalloc(sizeof(*pe), GFP_ATOMIC); - if (!pe) - return NULL; - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); - /* Go through the all entires with MVPP2_PRS_LU_MAC */ - for (tid = MVPP2_PE_FIRST_FREE_TID; - tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned int entry_pmap; if (!priv->prs_shadow[tid].valid || @@ -3494,110 +3769,103 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, (priv->prs_shadow[tid].udf != udf_type)) continue; - pe->index = tid; - mvpp2_prs_hw_read(priv, pe); - entry_pmap = mvpp2_prs_tcam_port_map_get(pe); + mvpp2_prs_init_from_hw(priv, &pe, tid); + entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); - if 
(mvpp2_prs_mac_range_equals(pe, da, mask) && + if (mvpp2_prs_mac_range_equals(&pe, da, mask) && entry_pmap == pmap) - return pe; + return tid; } - kfree(pe); - return NULL; + return -ENOENT; } /* Update parser's mac da entry */ -static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, - const u8 *da, bool add) +static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, + bool add) { - struct mvpp2_prs_entry *pe; - unsigned int pmap, len, ri; unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + struct mvpp2 *priv = port->priv; + unsigned int pmap, len, ri; + struct mvpp2_prs_entry pe; int tid; + memset(&pe, 0, sizeof(pe)); + /* Scan TCAM and see if entry with this <MAC DA, port> already exist */ - pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask, - MVPP2_PRS_UDF_MAC_DEF); + tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask, + MVPP2_PRS_UDF_MAC_DEF); /* No such entry */ - if (!pe) { + if (tid < 0) { if (!add) return 0; /* Create new TCAM entry */ - /* Find first range mac entry*/ - for (tid = MVPP2_PE_FIRST_FREE_TID; - tid <= MVPP2_PE_LAST_FREE_TID; tid++) - if (priv->prs_shadow[tid].valid && - (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) && - (priv->prs_shadow[tid].udf == - MVPP2_PRS_UDF_MAC_RANGE)) - break; - /* Go through the all entries from first to last */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - tid - 1); + tid = mvpp2_prs_tcam_first_free(priv, + MVPP2_PE_MAC_RANGE_START, + MVPP2_PE_MAC_RANGE_END); if (tid < 0) return tid; - pe = kzalloc(sizeof(*pe), GFP_ATOMIC); - if (!pe) - return -ENOMEM; - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); - pe->index = tid; + pe.index = tid; /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(pe, 0); + mvpp2_prs_tcam_port_map_set(&pe, 0); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); } + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + /* Update port mask */ - mvpp2_prs_tcam_port_set(pe, port, add); + mvpp2_prs_tcam_port_set(&pe, port->id, add); /* Invalidate the entry if no ports are left enabled */ - pmap = mvpp2_prs_tcam_port_map_get(pe); + pmap = mvpp2_prs_tcam_port_map_get(&pe); if (pmap == 0) { - if (add) { - kfree(pe); + if (add) return -EINVAL; - } - mvpp2_prs_hw_inv(priv, pe->index); - priv->prs_shadow[pe->index].valid = false; - kfree(pe); + + mvpp2_prs_hw_inv(priv, pe.index); + priv->prs_shadow[pe.index].valid = false; return 0; } /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); /* Set match on DA */ len = ETH_ALEN; while (len--) - mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff); + mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff); /* Set result info bits */ - if (is_broadcast_ether_addr(da)) + if (is_broadcast_ether_addr(da)) { ri = MVPP2_PRS_RI_L2_BCAST; - else if (is_multicast_ether_addr(da)) + } else if (is_multicast_ether_addr(da)) { ri = MVPP2_PRS_RI_L2_MCAST; - else - ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK; + } else { + ri = MVPP2_PRS_RI_L2_UCAST; + + if (ether_addr_equal(da, port->dev->dev_addr)) + ri |= MVPP2_PRS_RI_MAC_ME_MASK; + } - mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | + mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | MVPP2_PRS_RI_MAC_ME_MASK); - mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK | + mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK | MVPP2_PRS_RI_MAC_ME_MASK); /* Shift to ethertype */ - mvpp2_prs_sram_shift_set(pe, 2 * 
ETH_ALEN, + mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Update shadow table and hw entry */ - priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF; - mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC); - mvpp2_prs_hw_write(priv, pe); - - kfree(pe); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF; + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, &pe); return 0; } @@ -3608,13 +3876,12 @@ static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) int err; /* Remove old parser entry */ - err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr, - false); + err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false); if (err) return err; /* Add new parser entry */ - err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true); + err = mvpp2_prs_mac_da_accept(port, da, true); if (err) return err; @@ -3624,14 +3891,15 @@ static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) return 0; } -/* Delete all port's multicast simple (not range) entries */ -static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port) +static void mvpp2_prs_mac_del_all(struct mvpp2_port *port) { + struct mvpp2 *priv = port->priv; struct mvpp2_prs_entry pe; + unsigned long pmap; int index, tid; - for (tid = MVPP2_PE_FIRST_FREE_TID; - tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; if (!priv->prs_shadow[tid].valid || @@ -3639,18 +3907,28 @@ static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port) (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) continue; - /* Only simple mac entries */ - pe.index = tid; - mvpp2_prs_hw_read(priv, &pe); + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + /* We only want entries active on this port */ + if (!test_bit(port->id, &pmap)) + continue; /* Read mac addr from entry */ for (index = 0; index < ETH_ALEN; index++) mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], &da_mask[index]); - if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da)) - /* Delete this entry */ - mvpp2_prs_mac_da_accept(priv, port, da, false); + /* Special cases : Don't remove broadcast and port's own + * address + */ + if (is_broadcast_ether_addr(da) || + ether_addr_equal(da, port->dev->dev_addr)) + continue; + + /* Remove entry from TCAM */ + mvpp2_prs_mac_da_accept(port, da, false); } } @@ -3707,13 +3985,15 @@ static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) /* Set prs flow for the port */ static int mvpp2_prs_def_flow(struct mvpp2_port *port) { - struct mvpp2_prs_entry *pe; + struct mvpp2_prs_entry pe; int tid; - pe = mvpp2_prs_flow_find(port->priv, port->id); + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_flow_find(port->priv, port->id); /* Such entry not exist */ - if (!pe) { + if (tid < 0) { /* Go through the all entires from last to first */ tid = mvpp2_prs_tcam_first_free(port->priv, MVPP2_PE_LAST_FREE_TID, @@ -3721,24 +4001,21 @@ static int mvpp2_prs_def_flow(struct mvpp2_port *port) if (tid < 0) return tid; - pe = kzalloc(sizeof(*pe), GFP_KERNEL); - if (!pe) - return -ENOMEM; - - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); - pe->index = tid; + pe.index = tid; /* Set flow ID*/ - mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + mvpp2_prs_sram_ai_update(&pe, port->id, 
MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); /* Update shadow table */ - mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); + } else { + mvpp2_prs_init_from_hw(port->priv, &pe, tid); } - mvpp2_prs_tcam_port_map_set(pe, (1 << port->id)); - mvpp2_prs_hw_write(port->priv, pe); - kfree(pe); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); + mvpp2_prs_hw_write(port->priv, &pe); return 0; } @@ -3901,7 +4178,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev, val |= MVPP2_BM_START_MASK; mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); - bm_pool->type = MVPP2_BM_FREE; bm_pool->size = size; bm_pool->pkt_size = 0; bm_pool->buf_num = 0; @@ -3954,11 +4230,17 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, /* Free all buffers from the pool */ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, - struct mvpp2_bm_pool *bm_pool) + struct mvpp2_bm_pool *bm_pool, int buf_num) { int i; - for (i = 0; i < bm_pool->buf_num; i++) { + if (buf_num > bm_pool->buf_num) { + WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n", + bm_pool->id, buf_num); + buf_num = bm_pool->buf_num; + } + + for (i = 0; i < buf_num; i++) { dma_addr_t buf_dma_addr; phys_addr_t buf_phys_addr; void *data; @@ -3980,16 +4262,39 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, bm_pool->buf_num -= i; } +/* Check number of buffers in BM pool */ +static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) +{ + int buf_num = 0; + + buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) & + MVPP22_BM_POOL_PTRS_NUM_MASK; + buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) & + MVPP2_BM_BPPI_PTR_NUM_MASK; + + /* HW has one buffer ready which is not reflected in the counters */ + if (buf_num) + buf_num += 1; + + return buf_num; +} + /* Cleanup pool */ static int mvpp2_bm_pool_destroy(struct platform_device *pdev, struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) { + int buf_num; u32 val; - mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool); - if (bm_pool->buf_num) { - WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); + buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); + mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num); + + /* Check buffer counters after free */ + buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); + if (buf_num) { + WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n", + bm_pool->id, bm_pool->buf_num); return 0; } @@ -4051,6 +4356,21 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv) return 0; } +static void mvpp2_setup_bm_pool(void) +{ + /* Short pool */ + mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM; + mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE; + + /* Long pool */ + mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM; + mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE; + + /* Jumbo pool */ + mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM; + mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE; +} + /* Attach long pool to rxq */ static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, int lrxq, int long_pool) @@ -4138,8 +4458,8 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & 
MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; - mvpp2_percpu_write(port->priv, cpu, - MVPP22_BM_ADDR_HIGH_RLS_REG, val); + mvpp2_percpu_write_relaxed(port->priv, cpu, + MVPP22_BM_ADDR_HIGH_RLS_REG, val); } /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply @@ -4147,10 +4467,10 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, * descriptor. Instead of storing the virtual address, we * store the physical address */ - mvpp2_percpu_write(port->priv, cpu, - MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); - mvpp2_percpu_write(port->priv, cpu, - MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); + mvpp2_percpu_write_relaxed(port->priv, cpu, + MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); + mvpp2_percpu_write_relaxed(port->priv, cpu, + MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); put_cpu(); } @@ -4189,13 +4509,11 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, bm_pool->buf_num += i; netdev_dbg(port->dev, - "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n", - bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long", + "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n", bm_pool->id, bm_pool->pkt_size, buf_size, total_size); netdev_dbg(port->dev, - "%s pool %d: %d of %d buffers added\n", - bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long", + "pool %d: %d of %d buffers added\n", bm_pool->id, i, buf_num); return i; } @@ -4204,25 +4522,20 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, * pool pointer on success */ static struct mvpp2_bm_pool * -mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, - int pkt_size) +mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size) { struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; int num; - if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) { - netdev_err(port->dev, "mixing pool types is forbidden\n"); + if (pool >= MVPP2_BM_POOLS_NUM) { + netdev_err(port->dev, "Invalid pool %d\n", pool); return NULL; } - if (new_pool->type == MVPP2_BM_FREE) - new_pool->type = type; - /* Allocate buffers in case BM pool is used as long pool, but packet * size doesn't match MTU or BM pool hasn't being used yet */ - if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) || - (new_pool->pkt_size == 0)) { + if (new_pool->pkt_size == 0) { int pkts_num; /* Set default buffer number or free all the buffers in case @@ -4230,12 +4543,10 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, */ pkts_num = new_pool->buf_num; if (pkts_num == 0) - pkts_num = type == MVPP2_BM_SWF_LONG ? 
- MVPP2_BM_LONG_BUF_NUM : - MVPP2_BM_SHORT_BUF_NUM; + pkts_num = mvpp2_pools[pool].buf_num; else mvpp2_bm_bufs_free(port->dev->dev.parent, - port->priv, new_pool); + port->priv, new_pool, pkts_num); new_pool->pkt_size = pkt_size; new_pool->frag_size = @@ -4261,16 +4572,28 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) { int rxq; + enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool; + + /* If port pkt_size is higher than 1518B: + * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool + * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool + */ + if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { + long_log_pool = MVPP2_BM_JUMBO; + short_log_pool = MVPP2_BM_LONG; + } else { + long_log_pool = MVPP2_BM_LONG; + short_log_pool = MVPP2_BM_SHORT; + } if (!port->pool_long) { port->pool_long = - mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id), - MVPP2_BM_SWF_LONG, - port->pkt_size); + mvpp2_bm_pool_use(port, long_log_pool, + mvpp2_pools[long_log_pool].pkt_size); if (!port->pool_long) return -ENOMEM; - port->pool_long->port_map |= (1 << port->id); + port->pool_long->port_map |= BIT(port->id); for (rxq = 0; rxq < port->nrxqs; rxq++) mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); @@ -4278,13 +4601,12 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) if (!port->pool_short) { port->pool_short = - mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL, - MVPP2_BM_SWF_SHORT, - MVPP2_BM_SHORT_PKT_SIZE); + mvpp2_bm_pool_use(port, short_log_pool, + mvpp2_pools[short_log_pool].pkt_size); if (!port->pool_short) return -ENOMEM; - port->pool_short->port_map |= (1 << port->id); + port->pool_short->port_map |= BIT(port->id); for (rxq = 0; rxq < port->nrxqs; rxq++) mvpp2_rxq_short_pool_set(port, rxq, @@ -4297,30 +4619,49 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) { struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_bm_pool *port_pool = port->pool_long; - int num, pkts_num = port_pool->buf_num; + enum mvpp2_bm_pool_log_num new_long_pool; int pkt_size = MVPP2_RX_PKT_SIZE(mtu); - /* Update BM pool with new buffer size */ - mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool); - if (port_pool->buf_num) { - WARN(1, "cannot free all buffers in pool %d\n", port_pool->id); - return -EIO; - } - - port_pool->pkt_size = pkt_size; - port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + - MVPP2_SKB_SHINFO_SIZE; - num = mvpp2_bm_bufs_add(port, port_pool, pkts_num); - if (num != pkts_num) { - WARN(1, "pool %d: %d of %d allocated\n", - port_pool->id, num, pkts_num); - return -EIO; + /* If port MTU is higher than 1518B: + * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool + * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool + */ + if (pkt_size > MVPP2_BM_LONG_PKT_SIZE) + new_long_pool = MVPP2_BM_JUMBO; + else + new_long_pool = MVPP2_BM_LONG; + + if (new_long_pool != port->pool_long->id) { + /* Remove port from old short & long pool */ + port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id, + port->pool_long->pkt_size); + port->pool_long->port_map &= ~BIT(port->id); + port->pool_long = NULL; + + port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id, + port->pool_short->pkt_size); + port->pool_short->port_map &= ~BIT(port->id); + port->pool_short = NULL; + + port->pkt_size = pkt_size; + + /* Add port to new short & long pool */ + 
mvpp2_swf_bm_pool_init(port); + + /* Update L4 checksum when jumbo enable/disable on port */ + if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + dev->hw_features &= ~(NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + } else { + dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + } } - mvpp2_bm_pool_bufsize_set(port->priv, port_pool, - MVPP2_RX_BUF_SIZE(port_pool->pkt_size)); dev->mtu = mtu; + dev->wanted_features = dev->features; + netdev_update_features(dev); return 0; } @@ -5221,7 +5562,8 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { /* Update number of occupied aggregated Tx descriptors */ int cpu = smp_processor_id(); - u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu)); + u32 val = mvpp2_read_relaxed(priv, + MVPP2_AGGR_TXQ_STATUS_REG(cpu)); aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; } @@ -5245,9 +5587,9 @@ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, int cpu = smp_processor_id(); val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; - mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); + mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); - val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); + val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); return val & MVPP2_TXQ_RSVD_RSLT_MASK; } @@ -5352,8 +5694,8 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, u32 val; /* Reading status reg resets transmitted descriptor counter */ - val = mvpp2_percpu_read(port->priv, smp_processor_id(), - MVPP2_TXQ_SENT_REG(txq->id)); + val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(), + MVPP2_TXQ_SENT_REG(txq->id)); return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> MVPP2_TRANSMITTED_COUNT_OFFSET; @@ -6719,8 +7061,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) * * Each CPU has its own Rx/Tx cause register */ - cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id, - MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); + cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id, + MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; if (cause_misc) { @@ -7007,15 +7349,14 @@ static int mvpp2_open(struct net_device *dev) 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; int err; - err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true); + err = mvpp2_prs_mac_da_accept(port, mac_bcast, true); if (err) { netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); return err; } - err = mvpp2_prs_mac_da_accept(port->priv, port->id, - dev->dev_addr, true); + err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true); if (err) { - netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n"); + netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n"); return err; } err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); @@ -7129,25 +7470,64 @@ static int mvpp2_stop(struct net_device *dev) return 0; } +static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port, + struct netdev_hw_addr_list *list) +{ + struct netdev_hw_addr *ha; + int ret; + + netdev_hw_addr_list_for_each(ha, list) { + ret = mvpp2_prs_mac_da_accept(port, ha->addr, true); + if (ret) + return ret; + } + + return 0; +} + +static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable) +{ + if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + 
mvpp2_prs_vid_enable_filtering(port); + else + mvpp2_prs_vid_disable_filtering(port); + + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_UNI_CAST, enable); + + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_MULTI_CAST, enable); +} + static void mvpp2_set_rx_mode(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2 *priv = port->priv; - struct netdev_hw_addr *ha; - int id = port->id; - bool allmulti = dev->flags & IFF_ALLMULTI; - mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); - mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); - mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); + /* Clear the whole UC and MC list */ + mvpp2_prs_mac_del_all(port); - /* Remove all port->id's mcast enries */ - mvpp2_prs_mcast_del_all(priv, id); + if (dev->flags & IFF_PROMISC) { + mvpp2_set_rx_promisc(port, true); + return; + } + + mvpp2_set_rx_promisc(port, false); - if (allmulti && !netdev_mc_empty(dev)) { - netdev_for_each_mc_addr(ha, dev) - mvpp2_prs_mac_da_accept(priv, id, ha->addr, true); + if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX || + mvpp2_prs_mac_da_accept_list(port, &dev->uc)) + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_UNI_CAST, true); + + if (dev->flags & IFF_ALLMULTI) { + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_MULTI_CAST, true); + return; } + + if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX || + mvpp2_prs_mac_da_accept_list(port, &dev->mc)) + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_MULTI_CAST, true); } static int mvpp2_set_mac_address(struct net_device *dev, void *p) @@ -7287,6 +7667,48 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return ret; } +static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret; + + ret = mvpp2_prs_vid_entry_add(port, vid); + if (ret) + netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n", + MVPP2_PRS_VLAN_FILT_MAX - 1); + return ret; +} + +static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct mvpp2_port *port = netdev_priv(dev); + + mvpp2_prs_vid_entry_remove(port, vid); + return 0; +} + +static int mvpp2_set_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + struct mvpp2_port *port = netdev_priv(dev); + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { + mvpp2_prs_vid_enable_filtering(port); + } else { + /* Invalidate all registered VID filters for this + * port + */ + mvpp2_prs_vid_remove_all(port); + + mvpp2_prs_vid_disable_filtering(port); + } + } + + return 0; +} + /* Ethtool methods */ /* Set interrupt coalescing for ethtools */ @@ -7428,6 +7850,9 @@ static const struct net_device_ops mvpp2_netdev_ops = { .ndo_change_mtu = mvpp2_change_mtu, .ndo_get_stats64 = mvpp2_get_stats64, .ndo_do_ioctl = mvpp2_ioctl, + .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, + .ndo_set_features = mvpp2_set_features, }; static const struct ethtool_ops mvpp2_eth_tool_ops = { @@ -7938,16 +8363,25 @@ static int mvpp2_port_probe(struct platform_device *pdev, } } - features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO; dev->features = features | NETIF_F_RXCSUM; - 
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO; + dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | + NETIF_F_HW_VLAN_CTAG_FILTER; + + if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) { + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + } + dev->vlan_features |= features; dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; + dev->priv_flags |= IFF_UNICAST_FLT; - /* MTU range: 68 - 9676 */ + /* MTU range: 68 - 9704 */ dev->min_mtu = ETH_MIN_MTU; - /* 9676 == 9700 - 20 and rounding to 8 */ - dev->max_mtu = 9676; + /* 9704 == 9728 - 20 and rounding to 8 */ + dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; err = register_netdev(dev); if (err < 0) { @@ -8078,14 +8512,25 @@ static void mvpp22_rx_fifo_init(struct mvpp2 *priv) mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); } -/* Initialize Tx FIFO's */ +/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G + * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G, + * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB. + */ static void mvpp22_tx_fifo_init(struct mvpp2 *priv) { - int port; + int port, size, thrs; - for (port = 0; port < MVPP2_MAX_PORTS; port++) - mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), - MVPP22_TX_FIFO_DATA_SIZE_3KB); + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + if (port == 0) { + size = MVPP22_TX_FIFO_DATA_SIZE_10KB; + thrs = MVPP2_TX_FIFO_THRESHOLD_10KB; + } else { + size = MVPP22_TX_FIFO_DATA_SIZE_3KB; + thrs = MVPP2_TX_FIFO_THRESHOLD_3KB; + } + mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size); + mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs); + } } static void mvpp2_axi_init(struct mvpp2 *priv) @@ -8279,6 +8724,8 @@ static int mvpp2_probe(struct platform_device *pdev) priv->sysctrl_base = NULL; } + mvpp2_setup_bm_pool(); + for (i = 0; i < MVPP2_MAX_THREADS; i++) { u32 addr_space_sz; diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 31efc47c847e..9c08c3650c02 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3783,7 +3783,7 @@ static int skge_device_event(struct notifier_block *unused, break; case NETDEV_UP: - d = debugfs_create_file(dev->name, S_IRUGO, + d = debugfs_create_file(dev->name, 0444, skge_debug, dev, &skge_debug_fops); if (!d || IS_ERR(d)) diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 9fe85300e7b6..697d9b374f5e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4667,7 +4667,7 @@ static int sky2_device_event(struct notifier_block *unused, break; case NETDEV_UP: - sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO, + sky2->debugfs = debugfs_create_file(dev->name, 0444, sky2_debug, dev, &sky2_debug_fops); if (IS_ERR(sky2->debugfs)) @@ -5087,7 +5087,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&hw->restart_work, sky2_restart); pci_set_drvdata(pdev, hw); - pdev->d3_delay = 150; + pdev->d3_delay = 200; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index 1a0c3bf86ead..752a72499b4f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -156,57 +156,63 @@ static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev) 
{ struct mlx4_en_priv *priv = netdev_priv(netdev); + struct mlx4_en_port_profile *prof = priv->prof; struct mlx4_en_dev *mdev = priv->mdev; + u8 tx_pause, tx_ppp, rx_pause, rx_ppp; if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return 1; if (priv->cee_config.pfc_state) { int tc; + rx_ppp = prof->rx_ppp; + tx_ppp = prof->tx_ppp; - priv->prof->rx_pause = 0; - priv->prof->tx_pause = 0; for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) { u8 tc_mask = 1 << tc; switch (priv->cee_config.dcb_pfc[tc]) { case pfc_disabled: - priv->prof->tx_ppp &= ~tc_mask; - priv->prof->rx_ppp &= ~tc_mask; + tx_ppp &= ~tc_mask; + rx_ppp &= ~tc_mask; break; case pfc_enabled_full: - priv->prof->tx_ppp |= tc_mask; - priv->prof->rx_ppp |= tc_mask; + tx_ppp |= tc_mask; + rx_ppp |= tc_mask; break; case pfc_enabled_tx: - priv->prof->tx_ppp |= tc_mask; - priv->prof->rx_ppp &= ~tc_mask; + tx_ppp |= tc_mask; + rx_ppp &= ~tc_mask; break; case pfc_enabled_rx: - priv->prof->tx_ppp &= ~tc_mask; - priv->prof->rx_ppp |= tc_mask; + tx_ppp &= ~tc_mask; + rx_ppp |= tc_mask; break; default: break; } } - en_dbg(DRV, priv, "Set pfc on\n"); + rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause; + tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause; } else { - priv->prof->rx_pause = 1; - priv->prof->tx_pause = 1; - en_dbg(DRV, priv, "Set pfc off\n"); + rx_ppp = 0; + tx_ppp = 0; + rx_pause = prof->rx_pause; + tx_pause = prof->tx_pause; } if (mlx4_SET_PORT_general(mdev->dev, priv->port, priv->rx_skb_size + ETH_FCS_LEN, - priv->prof->tx_pause, - priv->prof->tx_ppp, - priv->prof->rx_pause, - priv->prof->rx_ppp)) { + tx_pause, tx_ppp, rx_pause, rx_ppp)) { en_err(priv, "Failed setting pause params\n"); return 1; } + prof->tx_ppp = tx_ppp; + prof->rx_ppp = rx_ppp; + prof->tx_pause = tx_pause; + prof->rx_pause = rx_pause; + return 0; } @@ -408,6 +414,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_port_profile *prof = priv->prof; struct mlx4_en_dev *mdev = priv->mdev; + u32 tx_pause, tx_ppp, rx_pause, rx_ppp; int err; en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n", @@ -416,23 +423,26 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, pfc->mbc, pfc->delay); - prof->rx_pause = !pfc->pfc_en; - prof->tx_pause = !pfc->pfc_en; - prof->rx_ppp = pfc->pfc_en; - prof->tx_ppp = pfc->pfc_en; + rx_pause = prof->rx_pause && !pfc->pfc_en; + tx_pause = prof->tx_pause && !pfc->pfc_en; + rx_ppp = pfc->pfc_en; + tx_ppp = pfc->pfc_en; err = mlx4_SET_PORT_general(mdev->dev, priv->port, priv->rx_skb_size + ETH_FCS_LEN, - prof->tx_pause, - prof->tx_ppp, - prof->rx_pause, - prof->rx_ppp); - if (err) + tx_pause, tx_ppp, rx_pause, rx_ppp); + if (err) { en_err(priv, "Failed setting pause params\n"); - else - mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, - prof->rx_ppp, prof->rx_pause, - prof->tx_ppp, prof->tx_pause); + return err; + } + + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, + rx_ppp, rx_pause, tx_ppp, tx_pause); + + prof->tx_ppp = tx_ppp; + prof->rx_ppp = rx_ppp; + prof->rx_pause = rx_pause; + prof->tx_pause = tx_pause; return err; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ebc1f566a4d9..a30a2e95d13f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -199,6 +199,10 @@ static const char main_strings[][ETH_GSTRING_LEN] = { "rx_xdp_drop", "rx_xdp_tx", "rx_xdp_tx_full", + + /* phy statistics */ 
+ "rx_packets_phy", "rx_bytes_phy", + "tx_packets_phy", "tx_bytes_phy", }; static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= { @@ -411,6 +415,10 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, if (bitmap_iterator_test(&it)) data[index++] = ((unsigned long *)&priv->xdp_stats)[i]; + for (i = 0; i < NUM_PHY_STATS; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((unsigned long *)&priv->phy_stats)[i]; + for (i = 0; i < priv->tx_ring_num[TX]; i++) { data[index++] = priv->tx_ring[TX][i]->packets; data[index++] = priv->tx_ring[TX][i]->bytes; @@ -490,6 +498,12 @@ static void mlx4_en_get_strings(struct net_device *dev, strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[strings]); + for (i = 0; i < NUM_PHY_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[strings]); + for (i = 0; i < priv->tx_ring_num[TX]; i++) { sprintf(data + (index++) * ETH_GSTRING_LEN, "tx%d_packets", i); @@ -1046,27 +1060,32 @@ static int mlx4_en_set_pauseparam(struct net_device *dev, { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + u8 tx_pause, tx_ppp, rx_pause, rx_ppp; int err; if (pause->autoneg) return -EINVAL; - priv->prof->tx_pause = pause->tx_pause != 0; - priv->prof->rx_pause = pause->rx_pause != 0; + tx_pause = !!(pause->tx_pause); + rx_pause = !!(pause->rx_pause); + rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause); + tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause); + err = mlx4_SET_PORT_general(mdev->dev, priv->port, priv->rx_skb_size + ETH_FCS_LEN, - priv->prof->tx_pause, - priv->prof->tx_ppp, - priv->prof->rx_pause, - priv->prof->rx_ppp); - if (err) - en_err(priv, "Failed setting pause params\n"); - else - mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, - priv->prof->rx_ppp, - priv->prof->rx_pause, - priv->prof->tx_ppp, - priv->prof->tx_pause); + tx_pause, tx_ppp, rx_pause, rx_ppp); + if (err) { + en_err(priv, "Failed setting pause params, err = %d\n", err); + return err; + } + + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, + rx_ppp, rx_pause, tx_ppp, tx_pause); + + priv->prof->tx_pause = tx_pause; + priv->prof->rx_pause = rx_pause; + priv->prof->tx_ppp = tx_ppp; + priv->prof->rx_ppp = rx_ppp; return err; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 2c2965497ed3..d25e16d2c319 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -163,9 +163,9 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev) params->udp_rss = 0; } for (i = 1; i <= MLX4_MAX_PORTS; i++) { - params->prof[i].rx_pause = 1; + params->prof[i].rx_pause = !(pfcrx || pfctx); params->prof[i].rx_ppp = pfcrx; - params->prof[i].tx_pause = 1; + params->prof[i].tx_pause = !(pfcrx || pfctx); params->prof[i].tx_ppp = pfctx; params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 8fc51bc29003..e0adac4a9a19 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -3256,6 +3256,10 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS); last_i += NUM_XDP_STATS; + + if (!mlx4_is_slave(dev)) + 
bitmap_set(stats_bitmap->bitmap, last_i, NUM_PHY_STATS); + last_i += NUM_PHY_STATS; } int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, @@ -3630,10 +3634,6 @@ int mlx4_en_reset_config(struct net_device *dev, mlx4_en_stop_port(dev, 1); } - en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n", - ts_config.rx_filter, - !!(features & NETIF_F_HW_VLAN_CTAG_RX)); - mlx4_en_safe_replace_resources(priv, tmp); if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 1fa4849a6f56..0158b88bea5b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -275,19 +275,31 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.xmit_more += READ_ONCE(ring->xmit_more); } - if (mlx4_is_master(mdev->dev)) { - stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0, - &mlx4_en_stats->RTOT_prio_1, - NUM_PRIORITIES); - stats->tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0, - &mlx4_en_stats->TTOT_prio_1, - NUM_PRIORITIES); - stats->rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0, - &mlx4_en_stats->ROCT_prio_1, - NUM_PRIORITIES); - stats->tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0, - &mlx4_en_stats->TOCT_prio_1, - NUM_PRIORITIES); + if (!mlx4_is_slave(mdev->dev)) { + struct mlx4_en_phy_stats *p_stats = &priv->phy_stats; + + p_stats->rx_packets_phy = + en_stats_adder(&mlx4_en_stats->RTOT_prio_0, + &mlx4_en_stats->RTOT_prio_1, + NUM_PRIORITIES); + p_stats->tx_packets_phy = + en_stats_adder(&mlx4_en_stats->TTOT_prio_0, + &mlx4_en_stats->TTOT_prio_1, + NUM_PRIORITIES); + p_stats->rx_bytes_phy = + en_stats_adder(&mlx4_en_stats->ROCT_prio_0, + &mlx4_en_stats->ROCT_prio_1, + NUM_PRIORITIES); + p_stats->tx_bytes_phy = + en_stats_adder(&mlx4_en_stats->TOCT_prio_0, + &mlx4_en_stats->TOCT_prio_1, + NUM_PRIORITIES); + if (mlx4_is_master(mdev->dev)) { + stats->rx_packets = p_stats->rx_packets_phy; + stats->tx_packets = p_stats->tx_packets_phy; + stats->rx_bytes = p_stats->rx_bytes_phy; + stats->tx_bytes = p_stats->tx_bytes_phy; + } } /* net device stats */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index b4d144e67514..5c613c6663da 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -291,13 +291,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * sizeof(struct mlx4_en_rx_alloc)); - ring->rx_info = vzalloc_node(tmp, node); + ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node); if (!ring->rx_info) { - ring->rx_info = vzalloc(tmp); - if (!ring->rx_info) { - err = -ENOMEM; - goto err_xdp_info; - } + err = -ENOMEM; + goto err_xdp_info; } en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", @@ -318,7 +315,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, return 0; err_info: - vfree(ring->rx_info); + kvfree(ring->rx_info); ring->rx_info = NULL; err_xdp_info: xdp_rxq_info_unreg(&ring->xdp_rxq); @@ -447,7 +444,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, bpf_prog_put(old_prog); xdp_rxq_info_unreg(&ring->xdp_rxq); mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); - vfree(ring->rx_info); + kvfree(ring->rx_info); ring->rx_info = NULL; kfree(ring); *pring = NULL; @@ -649,6 +646,12 @@ static int check_csum(struct mlx4_cqe *cqe, struct 
sk_buff *skb, void *va, return get_fixed_ipv4_csum(hw_checksum, skb, hdr); } +#if IS_ENABLED(CONFIG_IPV6) +#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV6) +#else +#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4) +#endif + int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -662,12 +665,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud int polled = 0; int index; - if (unlikely(!priv->port_up)) + if (unlikely(!priv->port_up || budget <= 0)) return 0; - if (unlikely(budget <= 0)) - return polled; - ring = priv->rx_ring[cq_ring]; /* Protect accesses to: ring->xdp_prog, priv->mac_hash list */ @@ -821,14 +821,12 @@ xdp_drop_no_cnt: skb_record_rx_queue(skb, cq_ring); if (likely(dev->features & NETIF_F_RXCSUM)) { - if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | - MLX4_CQE_STATUS_UDP)) { + if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | + MLX4_CQE_STATUS_UDP)) && + (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && + cqe->checksum == cpu_to_be16(0xffff)) { bool l2_tunnel; - if (!((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && - cqe->checksum == cpu_to_be16(0xffff))) - goto csum_none; - l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); ip_summed = CHECKSUM_UNNECESSARY; @@ -838,12 +836,7 @@ xdp_drop_no_cnt: ring->csum_ok++; } else { if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP && - (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | -#if IS_ENABLED(CONFIG_IPV6) - MLX4_CQE_STATUS_IPV6)))) -#else - 0)))) -#endif + (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IP_ANY)))) goto csum_none; if (check_csum(cqe, skb, va, dev->features)) goto csum_none; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 634f603f941c..de6b3d416148 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -37,6 +37,7 @@ #include <linux/module.h> #include <linux/cache.h> #include <linux/kernel.h> +#include <uapi/rdma/mlx4-abi.h> #include "fw.h" #include "icm.h" diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 30cacac54e69..bfef69235d71 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -46,6 +46,7 @@ #include <linux/etherdevice.h> #include <net/devlink.h> +#include <uapi/rdma/mlx4-abi.h> #include <linux/mlx4/device.h> #include <linux/mlx4/doorbell.h> @@ -2914,10 +2915,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) sprintf(info->dev_name, "mlx4_port%d", port); info->port_attr.attr.name = info->dev_name; - if (mlx4_is_mfunc(dev)) - info->port_attr.attr.mode = S_IRUGO; - else { - info->port_attr.attr.mode = S_IRUGO | S_IWUSR; + if (mlx4_is_mfunc(dev)) { + info->port_attr.attr.mode = 0444; + } else { + info->port_attr.attr.mode = 0644; info->port_attr.store = set_port_type; } info->port_attr.show = show_port_type; @@ -2932,10 +2933,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); info->port_mtu_attr.attr.name = info->dev_mtu_name; - if (mlx4_is_mfunc(dev)) - info->port_mtu_attr.attr.mode = S_IRUGO; - else { - info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; + if (mlx4_is_mfunc(dev)) { + info->port_mtu_attr.attr.mode = 0444; + } else { + info->port_mtu_attr.attr.mode = 0644; info->port_mtu_attr.store = 
set_port_ib_mtu; } info->port_mtu_attr.show = show_port_ib_mtu; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index f470ae37d937..f7c81133594f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -608,6 +608,7 @@ struct mlx4_en_priv { struct mlx4_en_flow_stats_tx tx_flowstats; struct mlx4_en_port_stats port_stats; struct mlx4_en_xdp_stats xdp_stats; + struct mlx4_en_phy_stats phy_stats; struct mlx4_en_stats_bitmap stats_bitmap; struct list_head mc_list; struct list_head curr_list; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h index aab28eb27a30..86b6051da8ec 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h @@ -63,6 +63,14 @@ struct mlx4_en_xdp_stats { #define NUM_XDP_STATS 3 }; +struct mlx4_en_phy_stats { + unsigned long rx_packets_phy; + unsigned long rx_bytes_phy; + unsigned long tx_packets_phy; + unsigned long tx_bytes_phy; +#define NUM_PHY_STATS 4 +}; + #define NUM_MAIN_STATS 21 #define MLX4_NUM_PRIORITIES 8 @@ -116,7 +124,7 @@ enum { #define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \ NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS + \ - NUM_XDP_STATS) + NUM_XDP_STATS + NUM_PHY_STATS) #define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \ sizeof(((struct net_device_stats *)0)->n)) diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 606a0e0beeae..29e50f787349 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -5088,6 +5088,7 @@ static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave) &tracker->res_tree[RES_FS_RULE]); list_del(&fs_rule->com.list); spin_unlock_irq(mlx4_tlock(dev)); + kfree(fs_rule->mirr_mbox); kfree(fs_rule); state = 0; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 25deaa5a534c..c032319f1cb9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -46,7 +46,7 @@ config MLX5_MPFS config MLX5_ESWITCH bool "Mellanox Technologies MLX5 SRIOV E-Switch support" - depends on MLX5_CORE_EN + depends on MLX5_CORE_EN && NET_SWITCHDEV default y ---help--- Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC. 
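Across the three mlx4 flow-control hunks above (mlx4_en_dcbnl_set_all, mlx4_en_dcbnl_ieee_setpfc and mlx4_en_set_pauseparam) the same two-part fix recurs: global link pause and per-priority pause (PFC) are made mutually exclusive, and the software profile is committed only after mlx4_SET_PORT_general succeeds, so a failed firmware call no longer leaves prof->* claiming settings the port never applied. Below is a minimal standalone sketch of that "stage locals, program HW, commit on success" pattern; the names (port_profile, hw_set_flow_ctrl, port_set_pauseparam) are hypothetical stand-ins, not the driver's real API.

```c
/*
 * Sketch of the staging pattern used by the mlx4 pause/PFC hunks above.
 * Hypothetical names throughout; the real driver uses
 * mlx4_en_port_profile and mlx4_SET_PORT_general.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct port_profile {
	uint8_t tx_pause, rx_pause;	/* link-level (global) flow control */
	uint8_t tx_ppp, rx_ppp;		/* per-priority pause bitmaps */
};

/* Stub for the firmware call; assume 0 on success, negative on error. */
static int hw_set_flow_ctrl(uint8_t tx_pause, uint8_t tx_ppp,
			    uint8_t rx_pause, uint8_t rx_ppp)
{
	printf("HW: pause tx/rx=%d/%d ppp tx/rx=0x%02x/0x%02x\n",
	       tx_pause, rx_pause, (unsigned)tx_ppp, (unsigned)rx_ppp);
	return 0;
}

static int port_set_pauseparam(struct port_profile *prof,
			       bool req_tx_pause, bool req_rx_pause)
{
	/* Stage the requested state in locals; *prof is untouched so far. */
	uint8_t tx_pause = req_tx_pause;
	uint8_t rx_pause = req_rx_pause;
	/* Global pause and per-priority pause are mutually exclusive. */
	uint8_t tx_ppp = (tx_pause || rx_pause) ? 0 : prof->tx_ppp;
	uint8_t rx_ppp = (tx_pause || rx_pause) ? 0 : prof->rx_ppp;
	int err;

	err = hw_set_flow_ctrl(tx_pause, tx_ppp, rx_pause, rx_ppp);
	if (err)
		return err;	/* software profile still matches the device */

	/* Commit only once the device has accepted the new settings. */
	prof->tx_pause = tx_pause;
	prof->rx_pause = rx_pause;
	prof->tx_ppp = tx_ppp;
	prof->rx_ppp = rx_ppp;
	return 0;
}

int main(void)
{
	struct port_profile prof = { .tx_ppp = 0x0f, .rx_ppp = 0x0f };

	/* Enabling global pause clears the per-priority bitmaps. */
	return port_set_pauseparam(&prof, true, true);
}
```

The ordering is the point of the fix: on error the function returns before touching the profile, so the software state never drifts from what the hardware actually accepted.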
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c index 53e69edaedde..9f1b1939716a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c @@ -37,24 +37,11 @@ #include "mlx5_core.h" #include "fpga/ipsec.h" -void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd) -{ - if (!MLX5_IPSEC_DEV(mdev)) - return ERR_PTR(-EOPNOTSUPP); - - return mlx5_fpga_ipsec_sa_cmd_exec(mdev, cmd); -} - -int mlx5_accel_ipsec_sa_cmd_wait(void *ctx) -{ - return mlx5_fpga_ipsec_sa_cmd_wait(ctx); -} - u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return mlx5_fpga_ipsec_device_caps(mdev); } +EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps); unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev) { @@ -67,6 +54,21 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, return mlx5_fpga_ipsec_counters_read(mdev, counters, count); } +void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6) +{ + return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr, daddr, + spi, is_ipv6); +} + +void mlx5_accel_esp_free_hw_context(void *context) +{ + mlx5_fpga_ipsec_delete_sa_ctx(context); +} + int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) { return mlx5_fpga_ipsec_init(mdev); @@ -76,3 +78,32 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) { mlx5_fpga_ipsec_cleanup(mdev); } + +struct mlx5_accel_esp_xfrm * +mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags) +{ + struct mlx5_accel_esp_xfrm *xfrm; + + xfrm = mlx5_fpga_esp_create_xfrm(mdev, attrs, flags); + if (IS_ERR(xfrm)) + return xfrm; + + xfrm->mdev = mdev; + return xfrm; +} +EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm); + +void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) +{ + mlx5_fpga_esp_destroy_xfrm(xfrm); +} +EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm); + +int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + return mlx5_fpga_esp_modify_xfrm(xfrm, attrs); +} +EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h index d6e20fea9554..024dbd22a89b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h @@ -34,89 +34,25 @@ #ifndef __MLX5_ACCEL_IPSEC_H__ #define __MLX5_ACCEL_IPSEC_H__ -#ifdef CONFIG_MLX5_ACCEL - #include <linux/mlx5/driver.h> +#include <linux/mlx5/accel.h> -enum { - MLX5_ACCEL_IPSEC_DEVICE = BIT(1), - MLX5_ACCEL_IPSEC_IPV6 = BIT(2), - MLX5_ACCEL_IPSEC_ESP = BIT(3), - MLX5_ACCEL_IPSEC_LSO = BIT(4), -}; - -#define MLX5_IPSEC_SADB_IP_AH BIT(7) -#define MLX5_IPSEC_SADB_IP_ESP BIT(6) -#define MLX5_IPSEC_SADB_SA_VALID BIT(5) -#define MLX5_IPSEC_SADB_SPI_EN BIT(4) -#define MLX5_IPSEC_SADB_DIR_SX BIT(3) -#define MLX5_IPSEC_SADB_IPV6 BIT(2) - -enum { - MLX5_IPSEC_CMD_ADD_SA = 0, - MLX5_IPSEC_CMD_DEL_SA = 1, -}; - -enum mlx5_accel_ipsec_enc_mode { - MLX5_IPSEC_SADB_MODE_NONE = 0, - MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128 = 1, - MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128 = 3, -}; +#ifdef CONFIG_MLX5_ACCEL #define MLX5_IPSEC_DEV(mdev) 
(mlx5_accel_ipsec_device_caps(mdev) & \ - MLX5_ACCEL_IPSEC_DEVICE) - -struct mlx5_accel_ipsec_sa { - __be32 cmd; - u8 key_enc[32]; - u8 key_auth[32]; - __be32 sip[4]; - __be32 dip[4]; - union { - struct { - __be32 reserved; - u8 salt_iv[8]; - __be32 salt; - } __packed gcm; - struct { - u8 salt[16]; - } __packed cbc; - }; - __be32 spi; - __be32 sw_sa_handle; - __be16 tfclen; - u8 enc_mode; - u8 sip_masklen; - u8 dip_masklen; - u8 flags; - u8 reserved[2]; -} __packed; - -/** - * mlx5_accel_ipsec_sa_cmd_exec - Execute an IPSec SADB command - * @mdev: mlx5 device - * @cmd: command to execute - * May be called from atomic context. Returns context pointer, or error - * Caller must eventually call mlx5_accel_ipsec_sa_cmd_wait from non-atomic - * context, to cleanup the context pointer - */ -void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd); - -/** - * mlx5_accel_ipsec_sa_cmd_wait - Wait for command execution completion - * @context: Context pointer returned from call to mlx5_accel_ipsec_sa_cmd_exec - * Sleeps (killable) until command execution is complete. - * Returns the command result, or -EINTR if killed - */ -int mlx5_accel_ipsec_sa_cmd_wait(void *context); - -u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); + MLX5_ACCEL_IPSEC_CAP_DEVICE) unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev); int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count); +void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6); +void mlx5_accel_esp_free_hw_context(void *context); + int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); @@ -124,6 +60,20 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); #define MLX5_IPSEC_DEV(mdev) false +static inline void * +mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6) +{ + return NULL; +} + +static inline void mlx5_accel_esp_free_hw_context(void *context) +{ +} + static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) { return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 47239bf7bf43..323ffe8bf7e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -71,19 +71,24 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, } int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, - struct mlx5_buf *buf, int node) + struct mlx5_frag_buf *buf, int node) { dma_addr_t t; buf->size = size; buf->npages = 1; buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; - buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size, - &t, node); - if (!buf->direct.buf) + + buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL); + if (!buf->frags) return -ENOMEM; - buf->direct.map = t; + buf->frags->buf = mlx5_dma_zalloc_coherent_node(dev, size, + &t, node); + if (!buf->frags->buf) + goto err_out; + + buf->frags->map = t; while (t & ((1 << buf->page_shift) - 1)) { --buf->page_shift; @@ -91,18 +96,24 @@ int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, } return 0; +err_out: + kfree(buf->frags); + return -ENOMEM; } -int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct 
mlx5_buf *buf) +int mlx5_buf_alloc(struct mlx5_core_dev *dev, + int size, struct mlx5_frag_buf *buf) { return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node); } -EXPORT_SYMBOL_GPL(mlx5_buf_alloc); +EXPORT_SYMBOL(mlx5_buf_alloc); -void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) +void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) { - dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, - buf->direct.map); + dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf, + buf->frags->map); + + kfree(buf->frags); } EXPORT_SYMBOL_GPL(mlx5_buf_free); @@ -147,6 +158,7 @@ err_free_buf: err_out: return -ENOMEM; } +EXPORT_SYMBOL_GPL(mlx5_frag_buf_alloc_node); void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) { @@ -162,6 +174,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) } kfree(buf->frags); } +EXPORT_SYMBOL_GPL(mlx5_frag_buf_free); static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, int node) @@ -275,13 +288,13 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) } EXPORT_SYMBOL_GPL(mlx5_db_free); -void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) +void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas) { u64 addr; int i; for (i = 0; i < buf->npages; i++) { - addr = buf->direct.map + (i << buf->page_shift); + addr = buf->frags->map + (i << buf->page_shift); pas[i] = cpu_to_be64(addr); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index e9a1fbcc4adf..21cd1703a862 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -359,6 +359,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: + case MLX5_CMD_OP_QUERY_VNIC_ENV: case MLX5_CMD_OP_QUERY_VPORT_COUNTER: case MLX5_CMD_OP_ALLOC_Q_COUNTER: case MLX5_CMD_OP_QUERY_Q_COUNTER: @@ -501,6 +502,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT); MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID); MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY); + MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV); MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER); MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); @@ -1802,7 +1804,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) cmd->checksum_disabled = 1; cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; - cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; + cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1; cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; if (cmd->cmdif_rev > CMD_IF_REV) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 1016e05c7ec7..a4179122a279 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -58,8 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data) tasklet_ctx.list) { list_del_init(&mcq->tasklet_ctx.list); mcq->tasklet_ctx.comp(mcq); - if (refcount_dec_and_test(&mcq->refcount)) - complete(&mcq->free); + mlx5_cq_put(mcq); if (time_after(jiffies, end)) break; } @@ -80,69 +79,19 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq) * still arrive. 
*/ if (list_empty_careful(&cq->tasklet_ctx.list)) { - refcount_inc(&cq->refcount); + mlx5_cq_hold(cq); list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); } spin_unlock_irqrestore(&tasklet_ctx->lock, flags); } -void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) -{ - struct mlx5_core_cq *cq; - struct mlx5_cq_table *table = &dev->priv.cq_table; - - spin_lock(&table->lock); - cq = radix_tree_lookup(&table->tree, cqn); - if (likely(cq)) - refcount_inc(&cq->refcount); - spin_unlock(&table->lock); - - if (!cq) { - mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn); - return; - } - - ++cq->arm_sn; - - cq->comp(cq); - - if (refcount_dec_and_test(&cq->refcount)) - complete(&cq->free); -} - -void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) -{ - struct mlx5_cq_table *table = &dev->priv.cq_table; - struct mlx5_core_cq *cq; - - spin_lock(&table->lock); - - cq = radix_tree_lookup(&table->tree, cqn); - if (cq) - refcount_inc(&cq->refcount); - - spin_unlock(&table->lock); - - if (!cq) { - mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn); - return; - } - - cq->event(cq, event_type); - - if (refcount_dec_and_test(&cq->refcount)) - complete(&cq->free); -} - int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen) { - struct mlx5_cq_table *table = &dev->priv.cq_table; + int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn); + u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)]; u32 out[MLX5_ST_SZ_DW(create_cq_out)]; u32 din[MLX5_ST_SZ_DW(destroy_cq_in)]; - u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)]; - int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), - c_eqn); struct mlx5_eq *eq; int err; @@ -159,6 +108,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, cq->cqn = MLX5_GET(create_cq_out, out, cqn); cq->cons_index = 0; cq->arm_sn = 0; + cq->eq = eq; refcount_set(&cq->refcount, 1); init_completion(&cq->free); if (!cq->comp) @@ -167,12 +117,16 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, cq->tasklet_ctx.priv = &eq->tasklet_ctx; INIT_LIST_HEAD(&cq->tasklet_ctx.list); - spin_lock_irq(&table->lock); - err = radix_tree_insert(&table->tree, cq->cqn, cq); - spin_unlock_irq(&table->lock); + /* Add to comp EQ CQ tree to recv comp events */ + err = mlx5_eq_add_cq(eq, cq); if (err) goto err_cmd; + /* Add to async EQ CQ tree to recv async events */ + err = mlx5_eq_add_cq(&dev->priv.eq_table.async_eq, cq); + if (err) + goto err_cq_add; + cq->pid = current->pid; err = mlx5_debug_cq_add(dev, cq); if (err) @@ -183,6 +137,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, return 0; +err_cq_add: + mlx5_eq_del_cq(eq, cq); err_cmd: memset(din, 0, sizeof(din)); memset(dout, 0, sizeof(dout)); @@ -195,23 +151,17 @@ EXPORT_SYMBOL(mlx5_core_create_cq); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) { - struct mlx5_cq_table *table = &dev->priv.cq_table; u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0}; u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0}; - struct mlx5_core_cq *tmp; int err; - spin_lock_irq(&table->lock); - tmp = radix_tree_delete(&table->tree, cq->cqn); - spin_unlock_irq(&table->lock); - if (!tmp) { - mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn); - return -EINVAL; - } - if (tmp != cq) { - mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn); - return -EINVAL; - } + err = mlx5_eq_del_cq(&dev->priv.eq_table.async_eq, cq); + if (err) + return err; + + err = 
mlx5_eq_del_cq(cq->eq, cq); + if (err) + return err; MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ); MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); @@ -222,8 +172,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) synchronize_irq(cq->irqn); mlx5_debug_cq_remove(dev, cq); - if (refcount_dec_and_test(&cq->refcount)) - complete(&cq->free); + mlx5_cq_put(cq); wait_for_completion(&cq->free); return 0; @@ -270,21 +219,3 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, return mlx5_core_modify_cq(dev, cq, in, sizeof(in)); } EXPORT_SYMBOL(mlx5_core_modify_cq_moderation); - -int mlx5_init_cq_table(struct mlx5_core_dev *dev) -{ - struct mlx5_cq_table *table = &dev->priv.cq_table; - int err; - - memset(table, 0, sizeof(*table)); - spin_lock_init(&table->lock); - INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); - err = mlx5_cq_debugfs_init(dev); - - return err; -} - -void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev) -{ - mlx5_cq_debugfs_cleanup(dev); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 17b723218b0c..b994b80d5714 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -337,6 +337,14 @@ void mlx5_unregister_interface(struct mlx5_interface *intf) } EXPORT_SYMBOL(mlx5_unregister_interface); +void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol) +{ + mutex_lock(&mlx5_intf_mutex); + mlx5_remove_dev_by_protocol(mdev, protocol); + mlx5_add_dev_by_protocol(mdev, protocol); + mutex_unlock(&mlx5_intf_mutex); +} + void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol) { struct mlx5_priv *priv = &mdev->priv; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index 0be4575b58a2..d93ff567b40d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c @@ -96,10 +96,10 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p, "%pI4"); } else if (ethertype.v == ETH_P_IPV6) { static const struct in6_addr full_ones = { - .in6_u.u6_addr32 = {htonl(0xffffffff), - htonl(0xffffffff), - htonl(0xffffffff), - htonl(0xffffffff)}, + .in6_u.u6_addr32 = {__constant_htonl(0xffffffff), + __constant_htonl(0xffffffff), + __constant_htonl(0xffffffff), + __constant_htonl(0xffffffff)}, }; DECLARE_MASK_VAL(struct in6_addr, src_ipv6); DECLARE_MASK_VAL(struct in6_addr, dst_ipv6); @@ -246,6 +246,9 @@ const char *parse_fs_dst(struct trace_seq *p, case MLX5_FLOW_DESTINATION_TYPE_COUNTER: trace_seq_printf(p, "counter_id=%u\n", counter_id); break; + case MLX5_FLOW_DESTINATION_TYPE_PORT: + trace_seq_printf(p, "port\n"); + break; } trace_seq_putc(p, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h index 80eef4163f52..09f178a3fcab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h @@ -136,6 +136,8 @@ TRACE_EVENT(mlx5_fs_del_fg, {MLX5_FLOW_CONTEXT_ACTION_ENCAP, "ENCAP"},\ {MLX5_FLOW_CONTEXT_ACTION_DECAP, "DECAP"},\ {MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\ + {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\ + {MLX5_FLOW_CONTEXT_ACTION_VLAN_POP, "VLAN_POP"},\ {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"} TRACE_EVENT(mlx5_fs_set_fte, @@ -163,9 +165,9 @@ 
TRACE_EVENT(mlx5_fs_set_fte, fs_get_obj(__entry->fg, fte->node.parent); __entry->group_index = __entry->fg->id; __entry->index = fte->index; - __entry->action = fte->action; + __entry->action = fte->action.action; __entry->mask_enable = __entry->fg->mask.match_criteria_enable; - __entry->flow_tag = fte->flow_tag; + __entry->flow_tag = fte->action.flow_tag; memcpy(__entry->mask_outer, MLX5_ADDR_OF(fte_match_param, &__entry->fg->mask.match_criteria, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 4c9360b25532..30cad07be2b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -57,24 +57,12 @@ #define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) -#define MLX5E_HW2SW_MTU(priv, hwmtu) ((hwmtu) - ((priv)->hard_mtu)) -#define MLX5E_SW2HW_MTU(priv, swmtu) ((swmtu) + ((priv)->hard_mtu)) +#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu)) +#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu)) #define MLX5E_MAX_DSCP 64 #define MLX5E_MAX_NUM_TC 8 -#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 -#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa -#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd - -#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1 -#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa -#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd - -#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 -#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3 -#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6 - #define MLX5_RX_HEADROOM NET_SKB_PAD #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) @@ -93,15 +81,31 @@ #define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? 
\ MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0) #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) -#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \ - MLX5_MPWRQ_WQE_PAGE_ORDER) #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) -#define MLX5E_REQUIRED_MTTS(wqes) \ - (wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) -#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX) +#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) +#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS)) +#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS) +#define MLX5E_MAX_RQ_NUM_MTTS \ + ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ +#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024)) +#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \ + (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS)) +#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \ + (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \ + (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU)) + +#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 +#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa +#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd + +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1 +#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa +#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \ + MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW) + +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 -#define MLX5_UMR_ALIGN (2048) #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256) #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) @@ -124,9 +128,15 @@ #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ +#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ -#define MLX5E_ICOSQ_MAX_WQEBBS \ - (DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB)) +#define MLX5E_UMR_WQE_INLINE_SZ \ + (sizeof(struct mlx5e_umr_wqe) + \ + ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \ + MLX5_UMR_MTT_ALIGNMENT)) +#define MLX5E_UMR_WQEBBS \ + (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB)) +#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) #define MLX5E_XDP_TX_DS_COUNT \ @@ -156,26 +166,6 @@ static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) } } -static inline int mlx5_min_log_rq_size(int wq_type) -{ - switch (wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW; - default: - return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; - } -} - -static inline int mlx5_max_log_rq_size(int wq_type) -{ - switch (wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW; - default: - return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE; - } -} - static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) { return is_kdump_kernel() ? 
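/* Worked arithmetic for the new compile-time MTT bound above: with
 * MLX5E_MAX_RQ_NUM_MTTS = (1 << 16) * 2 = 131072 and
 * MLX5_MTT_OCTW(n) = ALIGN(n, 8) / 2, the octword count is
 * 131072 / 2 = 65536, so MLX5_MTT_OCTW(num_mtts) - 1 = 65535 = U16_MAX
 * still fits the 16-bit xlt_octowords field; this is exactly the
 * property the removed runtime MLX5E_VALID_NUM_MTTS() check used to
 * enforce. Assuming 4K pages and 64 pages per WQE (so
 * MLX5E_REQUIRED_WQE_MTTS = ALIGN(64, 8) = 64),
 * MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW = ilog2(131072 / 64) = 11,
 * guaranteeing the UMR MTT space for a full striding-RQ ring can never
 * overflow.
 */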
@@ -198,7 +188,7 @@ struct mlx5e_umr_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_umr_ctrl_seg uctrl; struct mlx5_mkey_seg mkc; - struct mlx5_wqe_data_seg data; + struct mlx5_mtt inline_mtts[0]; }; extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; @@ -207,12 +197,14 @@ static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { "rx_cqe_moder", "tx_cqe_moder", "rx_cqe_compress", + "rx_striding_rq", }; enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1), MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2), + MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3), }; #define MLX5E_SET_PFLAG(params, pflag, enable) \ @@ -232,10 +224,7 @@ enum mlx5e_priv_flag { struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; - u16 rq_headroom; - u8 mpwqe_log_stride_sz; - u8 mpwqe_log_num_strides; - u8 log_rq_size; + u8 log_rq_mtu_frames; u16 num_channels; u8 num_tc; bool rx_cqe_compress_def; @@ -243,7 +232,6 @@ struct mlx5e_params { struct net_dim_cq_moder tx_cq_moderation; bool lro_en; u32 lro_wqe_sz; - u16 tx_max_inline; u8 tx_min_inline_mode; u8 rss_hfunc; u8 toeplitz_hash_key[40]; @@ -254,6 +242,8 @@ struct mlx5e_params { u32 lro_timeout; u32 pflags; struct bpf_prog *xdp_prog; + unsigned int sw_mtu; + int hard_mtu; }; #ifdef CONFIG_MLX5_CORE_EN_DCB @@ -336,6 +326,7 @@ struct mlx5e_sq_dma { enum { MLX5E_SQ_STATE_ENABLED, + MLX5E_SQ_STATE_RECOVERING, MLX5E_SQ_STATE_IPSEC, }; @@ -369,7 +360,6 @@ struct mlx5e_txqsq { void __iomem *uar_map; struct netdev_queue *txq; u32 sqn; - u16 max_inline; u8 min_inline_mode; u16 edge; struct device *pdev; @@ -383,6 +373,10 @@ struct mlx5e_txqsq { struct mlx5e_channel *channel; int txq_ix; u32 rate_limit; + struct mlx5e_txqsq_recover { + struct work_struct recover_work; + u64 last_recover; + } recover; } ____cacheline_aligned_in_smp; struct mlx5e_xdpsq { @@ -432,7 +426,6 @@ struct mlx5e_icosq { void __iomem *uar_map; u32 sqn; u16 edge; - __be32 mkey_be; unsigned long state; /* control path */ @@ -457,16 +450,13 @@ struct mlx5e_wqe_frag_info { }; struct mlx5e_umr_dma_info { - __be64 *mtt; - dma_addr_t mtt_addr; struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE]; - struct mlx5e_umr_wqe wqe; }; struct mlx5e_mpw_info { struct mlx5e_umr_dma_info umr; u16 consumed_strides; - u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE]; + DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); }; /* a single cache unit is capable to serve one napi call (for non-striding rq) @@ -483,9 +473,16 @@ struct mlx5e_page_cache { struct mlx5e_rq; typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*); +typedef struct sk_buff * +(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + u16 cqe_bcnt, u32 head_offset, u32 page_idx); typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); +enum mlx5e_rq_flag { + MLX5E_RQ_FLAG_XDP_XMIT = BIT(0), +}; + struct mlx5e_rq { /* data path */ struct mlx5_wq_ll wq; @@ -496,12 +493,12 @@ struct mlx5e_rq { u32 frag_sz; /* max possible skb frag_sz */ union { bool page_reuse; - bool xdp_xmit; }; } wqe; struct { + struct mlx5e_umr_wqe umr_wqe; struct mlx5e_mpw_info *info; - void *mtt_no_align; + mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq; u16 num_strides; u8 log_stride_sz; bool umr_in_progress; @@ -533,7 +530,9 @@ struct mlx5e_rq { /* XDP */ struct bpf_prog *xdp_prog; + unsigned int hw_mtu; struct mlx5e_xdpsq xdpsq; + DECLARE_BITMAP(flags, 8); /* control */ struct mlx5_wq_ctrl wq_ctrl; @@ -766,7 +765,6 @@ struct 
mlx5e_priv { struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; u32 tx_rates[MLX5E_MAX_NUM_SQS]; - int hard_mtu; struct mlx5e_flow_steering fs; struct mlx5e_vxlan_db vxlan; @@ -781,7 +779,8 @@ struct mlx5e_priv { struct net_device *netdev; struct mlx5e_stats stats; struct hwtstamp_config tstamp; - u16 q_counter; + u16 q_counter; + u16 drop_rq_q_counter; #ifdef CONFIG_MLX5_CORE_EN_DCB struct mlx5e_dcbx dcbx; #endif @@ -831,6 +830,10 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq); +bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); +bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); + void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, bool recycle); void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); @@ -840,6 +843,12 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); +struct sk_buff * +mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + u16 cqe_bcnt, u32 head_offset, u32 page_idx); +struct sk_buff * +mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + u16 cqe_bcnt, u32 head_offset, u32 page_idx); void mlx5e_update_stats(struct mlx5e_priv *priv); @@ -916,9 +925,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); +void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, - struct mlx5e_params *params, - u8 rq_type); + struct mlx5e_params *params); static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) { @@ -970,11 +979,6 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc); } -static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) -{ - return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); -} - extern const struct ethtool_ops mlx5e_ethtool_ops; #ifdef CONFIG_MLX5_CORE_EN_DCB extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; @@ -1010,7 +1014,6 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id); #endif -u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev); int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in, int inlen); void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, @@ -1061,7 +1064,6 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv); int mlx5e_close(struct net_device *netdev); int mlx5e_open(struct net_device *netdev); void mlx5e_update_stats_work(struct work_struct *work); -u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); int mlx5e_bits_invert(unsigned long a, int size); @@ -1102,7 +1104,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv); void mlx5e_destroy_netdev(struct mlx5e_priv *priv); void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, - u16 max_channels); + u16 max_channels, u16 mtu); u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); void mlx5e_rx_dim_work(struct work_struct *work); #endif 
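/* Usage note for the reworked MTU macros at the top of this header:
 * hard_mtu and the new sw_mtu now live in struct mlx5e_params, so MTU
 * math no longer needs a priv pointer. E.g., later in this diff,
 * mlx5e_alloc_rq() does:
 *
 *   rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 *
 * For the plain Ethernet profile, MLX5E_ETH_HARD_MTU = ETH_HLEN +
 * VLAN_HLEN + ETH_FCS_LEN = 22 bytes, so a 1500-byte software MTU maps
 * to a 1522-byte wire-side budget.
 */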
/* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index bac5103efad3..cf58c9637904 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -38,17 +38,24 @@ #include <linux/module.h> #include "en.h" -#include "accel/ipsec.h" #include "en_accel/ipsec.h" #include "en_accel/ipsec_rxtx.h" -struct mlx5e_ipsec_sa_entry { - struct hlist_node hlist; /* Item in SADB_RX hashtable */ - unsigned int handle; /* Handle in SADB_RX */ - struct xfrm_state *x; - struct mlx5e_ipsec *ipsec; - void *context; -}; + +static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x) +{ + struct mlx5e_ipsec_sa_entry *sa; + + if (!x) + return NULL; + + sa = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; + if (!sa) + return NULL; + + WARN_ON(sa->x != x); + return sa; +} struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec, unsigned int handle) @@ -74,18 +81,16 @@ static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry) unsigned long flags; int ret; - spin_lock_irqsave(&ipsec->sadb_rx_lock, flags); ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL); if (ret < 0) - goto out; + return ret; + spin_lock_irqsave(&ipsec->sadb_rx_lock, flags); sa_entry->handle = ret; hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle); - ret = 0; - -out: spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags); - return ret; + + return 0; } static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry) @@ -101,87 +106,99 @@ static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry) static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry) { struct mlx5e_ipsec *ipsec = sa_entry->ipsec; - unsigned long flags; - /* Wait for the hash_del_rcu call in sadb_rx_del to affect data path */ - synchronize_rcu(); - spin_lock_irqsave(&ipsec->sadb_rx_lock, flags); + /* xfrm already doing sync rcu between del and free callbacks */ + ida_simple_remove(&ipsec->halloc, sa_entry->handle); - spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags); } -static enum mlx5_accel_ipsec_enc_mode mlx5e_ipsec_enc_mode(struct xfrm_state *x) +static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) { - unsigned int key_len = (x->aead->alg_key_len + 7) / 8 - 4; - - switch (key_len) { - case 16: - return MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128; - case 32: - return MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128; - default: - netdev_warn(x->xso.dev, "Bad key len: %d for alg %s\n", - key_len, x->aead->alg_name); - return -1; + struct xfrm_replay_state_esn *replay_esn; + u32 seq_bottom; + u8 overlap; + u32 *esn; + + if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) { + sa_entry->esn_state.trigger = 0; + return false; + } + + replay_esn = sa_entry->x->replay_esn; + seq_bottom = replay_esn->seq - replay_esn->replay_window + 1; + overlap = sa_entry->esn_state.overlap; + + sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x, + htonl(seq_bottom)); + esn = &sa_entry->esn_state.esn; + + sa_entry->esn_state.trigger = 1; + if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) { + ++(*esn); + sa_entry->esn_state.overlap = 0; + return true; + } else if (unlikely(!overlap && + (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) { + sa_entry->esn_state.overlap = 1; + return true; } + + return false; } -static void mlx5e_ipsec_build_hw_sa(u32 op, struct mlx5e_ipsec_sa_entry *sa_entry, - 
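/* The reasoning in mlx5e_ipsec_update_esn_state() above, spelled out:
 * MLX5E_IPSEC_ESN_SCOPE_MID (0x80000000) splits the 32-bit low-order
 * sequence space in half, and the overlap bit remembers which half the
 * bottom of the replay window (seq_bottom = replay_esn->seq -
 * replay_esn->replay_window + 1) was last seen in:
 *
 *   overlap == 1 and seq_bottom < MID: the window wrapped past 2^32,
 *   so the high 32 bits (esn) are incremented and overlap is cleared;
 *
 *   overlap == 0 and seq_bottom >= MID: we entered the upper half, so
 *   overlap is set to catch the next wrap.
 *
 * Either transition returns true, which the add/advance paths use to
 * push an updated ESN to the hardware xfrm context.
 */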
struct mlx5_accel_ipsec_sa *hw_sa) +static void +mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_accel_esp_xfrm_attrs *attrs) { struct xfrm_state *x = sa_entry->x; + struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm; struct aead_geniv_ctx *geniv_ctx; - unsigned int crypto_data_len; struct crypto_aead *aead; - unsigned int key_len; + unsigned int crypto_data_len, key_len; int ivsize; - memset(hw_sa, 0, sizeof(*hw_sa)); - - if (op == MLX5_IPSEC_CMD_ADD_SA) { - crypto_data_len = (x->aead->alg_key_len + 7) / 8; - key_len = crypto_data_len - 4; /* 4 bytes salt at end */ - aead = x->data; - geniv_ctx = crypto_aead_ctx(aead); - ivsize = crypto_aead_ivsize(aead); - - memcpy(&hw_sa->key_enc, x->aead->alg_key, key_len); - /* Duplicate 128 bit key twice according to HW layout */ - if (key_len == 16) - memcpy(&hw_sa->key_enc[16], x->aead->alg_key, key_len); - memcpy(&hw_sa->gcm.salt_iv, geniv_ctx->salt, ivsize); - hw_sa->gcm.salt = *((__be32 *)(x->aead->alg_key + key_len)); - } + memset(attrs, 0, sizeof(*attrs)); - hw_sa->cmd = htonl(op); - hw_sa->flags |= MLX5_IPSEC_SADB_SA_VALID | MLX5_IPSEC_SADB_SPI_EN; - if (x->props.family == AF_INET) { - hw_sa->sip[3] = x->props.saddr.a4; - hw_sa->dip[3] = x->id.daddr.a4; - hw_sa->sip_masklen = 32; - hw_sa->dip_masklen = 32; - } else { - memcpy(hw_sa->sip, x->props.saddr.a6, sizeof(hw_sa->sip)); - memcpy(hw_sa->dip, x->id.daddr.a6, sizeof(hw_sa->dip)); - hw_sa->sip_masklen = 128; - hw_sa->dip_masklen = 128; - hw_sa->flags |= MLX5_IPSEC_SADB_IPV6; - } - hw_sa->spi = x->id.spi; - hw_sa->sw_sa_handle = htonl(sa_entry->handle); - switch (x->id.proto) { - case IPPROTO_ESP: - hw_sa->flags |= MLX5_IPSEC_SADB_IP_ESP; - break; - case IPPROTO_AH: - hw_sa->flags |= MLX5_IPSEC_SADB_IP_AH; - break; - default: - break; + /* key */ + crypto_data_len = (x->aead->alg_key_len + 7) / 8; + key_len = crypto_data_len - 4; /* 4 bytes salt at end */ + + memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len); + aes_gcm->key_len = key_len * 8; + + /* salt and seq_iv */ + aead = x->data; + geniv_ctx = crypto_aead_ctx(aead); + ivsize = crypto_aead_ivsize(aead); + memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize); + memcpy(&aes_gcm->salt, x->aead->alg_key + key_len, + sizeof(aes_gcm->salt)); + + /* iv len */ + aes_gcm->icv_len = x->aead->alg_icv_len; + + /* esn */ + if (sa_entry->esn_state.trigger) { + attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED; + attrs->esn = sa_entry->esn_state.esn; + if (sa_entry->esn_state.overlap) + attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; } - hw_sa->enc_mode = mlx5e_ipsec_enc_mode(x); - if (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) - hw_sa->flags |= MLX5_IPSEC_SADB_DIR_SX; + + /* rx handle */ + attrs->sa_handle = sa_entry->handle; + + /* algo type */ + attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM; + + /* action */ + attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ? + MLX5_ACCEL_ESP_ACTION_ENCRYPT : + MLX5_ACCEL_ESP_ACTION_DECRYPT; + /* flags */ + attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ? 
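/* Worked example for the keymat split above, taking AES-GCM-128 as
 * negotiated via xfrm (rfc4106 key material is the cipher key plus a
 * 4-byte salt): x->aead->alg_key_len = 160 bits, so crypto_data_len =
 * (160 + 7) / 8 = 20 bytes, key_len = 20 - 4 = 16 bytes of AES key,
 * and the trailing 4 bytes become aes_gcm->salt; key_len is reported
 * in bits, 16 * 8 = 128. For AES-GCM-256 the same math gives
 * (288 + 7) / 8 = 36 and key_len = 32, i.e. 256 bits, matching the two
 * key sizes the removed mlx5e_ipsec_enc_mode() used to accept.
 */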
+ MLX5_ACCEL_ESP_FLAGS_TRANSPORT : + MLX5_ACCEL_ESP_FLAGS_TUNNEL; } static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) @@ -203,7 +220,9 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) netdev_info(netdev, "Cannot offload compressed xfrm states\n"); return -EINVAL; } - if (x->props.flags & XFRM_STATE_ESN) { + if (x->props.flags & XFRM_STATE_ESN && + !(mlx5_accel_ipsec_device_caps(priv->mdev) & + MLX5_ACCEL_IPSEC_CAP_ESN)) { netdev_info(netdev, "Cannot offload ESN xfrm states\n"); return -EINVAL; } @@ -251,7 +270,8 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) return -EINVAL; } if (x->props.family == AF_INET6 && - !(mlx5_accel_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_IPV6)) { + !(mlx5_accel_ipsec_device_caps(priv->mdev) & + MLX5_ACCEL_IPSEC_CAP_IPV6)) { netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n"); return -EINVAL; } @@ -262,9 +282,10 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = NULL; struct net_device *netdev = x->xso.dev; - struct mlx5_accel_ipsec_sa hw_sa; + struct mlx5_accel_esp_xfrm_attrs attrs; struct mlx5e_priv *priv; - void *context; + __be32 saddr[4] = {0}, daddr[4] = {0}, spi; + bool is_ipv6 = false; int err; priv = netdev_priv(netdev); @@ -291,22 +312,49 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err); goto err_entry; } + } else { + sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ? + mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv; } - mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_ADD_SA, sa_entry, &hw_sa); - context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa); - if (IS_ERR(context)) { - err = PTR_ERR(context); + /* check esn */ + mlx5e_ipsec_update_esn_state(sa_entry); + + /* create xfrm */ + mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs); + sa_entry->xfrm = + mlx5_accel_esp_create_xfrm(priv->mdev, &attrs, + MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA); + if (IS_ERR(sa_entry->xfrm)) { + err = PTR_ERR(sa_entry->xfrm); goto err_sadb_rx; } - err = mlx5_accel_ipsec_sa_cmd_wait(context); - if (err) - goto err_sadb_rx; + /* create hw context */ + if (x->props.family == AF_INET) { + saddr[3] = x->props.saddr.a4; + daddr[3] = x->id.daddr.a4; + } else { + memcpy(saddr, x->props.saddr.a6, sizeof(saddr)); + memcpy(daddr, x->id.daddr.a6, sizeof(daddr)); + is_ipv6 = true; + } + spi = x->id.spi; + sa_entry->hw_context = + mlx5_accel_esp_create_hw_context(priv->mdev, + sa_entry->xfrm, + saddr, daddr, spi, + is_ipv6); + if (IS_ERR(sa_entry->hw_context)) { + err = PTR_ERR(sa_entry->hw_context); + goto err_xfrm; + } x->xso.offload_handle = (unsigned long)sa_entry; goto out; +err_xfrm: + mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); err_sadb_rx: if (x->xso.flags & XFRM_OFFLOAD_INBOUND) { mlx5e_ipsec_sadb_rx_del(sa_entry); @@ -320,43 +368,26 @@ out: static void mlx5e_xfrm_del_state(struct xfrm_state *x) { - struct mlx5e_ipsec_sa_entry *sa_entry; - struct mlx5_accel_ipsec_sa hw_sa; - void *context; + struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); - if (!x->xso.offload_handle) + if (!sa_entry) return; - sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; - WARN_ON(sa_entry->x != x); - if (x->xso.flags & XFRM_OFFLOAD_INBOUND) mlx5e_ipsec_sadb_rx_del(sa_entry); - - mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_DEL_SA, sa_entry, &hw_sa); - context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa); - if (IS_ERR(context)) - 
return; - - sa_entry->context = context; } static void mlx5e_xfrm_free_state(struct xfrm_state *x) { - struct mlx5e_ipsec_sa_entry *sa_entry; - int res; + struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); - if (!x->xso.offload_handle) + if (!sa_entry) return; - sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; - WARN_ON(sa_entry->x != x); - - res = mlx5_accel_ipsec_sa_cmd_wait(sa_entry->context); - sa_entry->context = NULL; - if (res) { - /* Leftover object will leak */ - return; + if (sa_entry->hw_context) { + flush_workqueue(sa_entry->ipsec->wq); + mlx5_accel_esp_free_hw_context(sa_entry->hw_context); + mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); } if (x->xso.flags & XFRM_OFFLOAD_INBOUND) @@ -383,6 +414,14 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) ida_init(&ipsec->halloc); ipsec->en_priv = priv; ipsec->en_priv->ipsec = ipsec; + ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) & + MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER); + ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0, + priv->netdev->name); + if (!ipsec->wq) { + kfree(ipsec); + return -ENOMEM; + } netdev_dbg(priv->netdev, "IPSec attached to netdevice\n"); return 0; } @@ -394,6 +433,9 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv) if (!ipsec) return; + drain_workqueue(ipsec->wq); + destroy_workqueue(ipsec->wq); + ida_destroy(&ipsec->halloc); kfree(ipsec); priv->ipsec = NULL; @@ -414,11 +456,58 @@ static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) return true; } +struct mlx5e_ipsec_modify_state_work { + struct work_struct work; + struct mlx5_accel_esp_xfrm_attrs attrs; + struct mlx5e_ipsec_sa_entry *sa_entry; +}; + +static void _update_xfrm_state(struct work_struct *work) +{ + int ret; + struct mlx5e_ipsec_modify_state_work *modify_work = + container_of(work, struct mlx5e_ipsec_modify_state_work, work); + struct mlx5e_ipsec_sa_entry *sa_entry = modify_work->sa_entry; + + ret = mlx5_accel_esp_modify_xfrm(sa_entry->xfrm, + &modify_work->attrs); + if (ret) + netdev_warn(sa_entry->ipsec->en_priv->netdev, + "Not an IPSec offload device\n"); + + kfree(modify_work); +} + +static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x) +{ + struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); + struct mlx5e_ipsec_modify_state_work *modify_work; + bool need_update; + + if (!sa_entry) + return; + + need_update = mlx5e_ipsec_update_esn_state(sa_entry); + if (!need_update) + return; + + modify_work = kzalloc(sizeof(*modify_work), GFP_ATOMIC); + if (!modify_work) + return; + + mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs); + modify_work->sa_entry = sa_entry; + + INIT_WORK(&modify_work->work, _update_xfrm_state); + WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work)); +} + static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = { .xdo_dev_state_add = mlx5e_xfrm_add_state, .xdo_dev_state_delete = mlx5e_xfrm_del_state, .xdo_dev_state_free = mlx5e_xfrm_free_state, .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok, + .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state, }; void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) @@ -429,7 +518,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) if (!priv->ipsec) return; - if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_ESP) || + if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) || !MLX5_CAP_ETH(mdev, swp)) { mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n"); return; @@ -448,7 +537,7 @@ void mlx5e_ipsec_build_netdev(struct 
mlx5e_priv *priv) netdev->features |= NETIF_F_HW_ESP_TX_CSUM; netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM; - if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_LSO) || + if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) || !MLX5_CAP_ETH(mdev, swp_lso)) { mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n"); return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 56e00baf16cc..1198fc1eba4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -40,7 +40,11 @@ #include <net/xfrm.h> #include <linux/idr.h> +#include "accel/ipsec.h" + #define MLX5E_IPSEC_SADB_RX_BITS 10 +#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L + #define MLX5E_METADATA_ETHER_TYPE (0x8CE4) #define MLX5E_METADATA_ETHER_LEN 8 @@ -77,10 +81,30 @@ struct mlx5e_ipsec_stats { struct mlx5e_ipsec { struct mlx5e_priv *en_priv; DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS); + bool no_trailer; spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */ struct ida halloc; struct mlx5e_ipsec_sw_stats sw_stats; struct mlx5e_ipsec_stats stats; + struct workqueue_struct *wq; +}; + +struct mlx5e_ipsec_esn_state { + u32 esn; + u8 trigger: 1; + u8 overlap: 1; +}; + +struct mlx5e_ipsec_sa_entry { + struct hlist_node hlist; /* Item in SADB_RX hashtable */ + struct mlx5e_ipsec_esn_state esn_state; + unsigned int handle; /* Handle in SADB_RX */ + struct xfrm_state *x; + struct mlx5e_ipsec *ipsec; + struct mlx5_accel_esp_xfrm *xfrm; + void *hw_context; + void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x, + struct xfrm_offload *xo); }; void mlx5e_ipsec_build_inverse_table(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 6a7c8b04447e..c245d8e78509 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -42,10 +42,11 @@ enum { MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11, MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12, + MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17, }; struct mlx5e_ipsec_rx_metadata { - unsigned char reserved; + unsigned char nexthdr; __be32 sa_handle; } __packed; @@ -175,7 +176,30 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb, } } -static void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_offload *xo) +void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x, + struct xfrm_offload *xo) +{ + struct xfrm_replay_state_esn *replay_esn = x->replay_esn; + __u32 oseq = replay_esn->oseq; + int iv_offset; + __be64 seqno; + u32 seq_hi; + + if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID && + MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) { + seq_hi = xo->seq.hi - 1; + } else { + seq_hi = xo->seq.hi; + } + + /* Place the SN in the IV field */ + seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32)); + iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr); + skb_store_bits(skb, iv_offset, &seqno, 8); +} + +void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x, + struct xfrm_offload *xo) { int iv_offset; __be64 seqno; @@ -227,6 +251,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev, struct mlx5e_priv *priv = netdev_priv(netdev); struct xfrm_offload *xo = xfrm_offload(skb); struct mlx5e_ipsec_metadata *mdata; + struct mlx5e_ipsec_sa_entry 
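/* A note on mlx5e_ipsec_set_iv_esn() above: the full 64-bit ESN is
 * (seq_hi << 32) | xo->seq.low, and it is stored into the 8-byte IV
 * slot right after the ESP header, hence iv_offset =
 * skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) (SPI plus
 * 32-bit sequence number, 8 bytes). The GSO branch covers a segmented
 * super-packet that straddles a 2^32 epoch boundary: if oseq has
 * already wrapped below MLX5E_IPSEC_ESN_SCOPE_MID while
 * oseq - gso_segs still lies above it, the earlier segments belong to
 * the previous epoch and seq_hi - 1 must be used for them.
 */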
*sa_entry; struct xfrm_state *x; if (!xo) @@ -261,7 +286,8 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev, goto drop; } mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo); - mlx5e_ipsec_set_iv(skb, xo); + sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; + sa_entry->set_iv_op(skb, x, xo); mlx5e_ipsec_set_metadata(skb, mdata, xo); return skb; @@ -301,10 +327,17 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb, switch (mdata->syndrome) { case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED: xo->status = CRYPTO_SUCCESS; + if (likely(priv->ipsec->no_trailer)) { + xo->flags |= XFRM_ESP_NO_TRAILER; + xo->proto = mdata->content.rx.nexthdr; + } break; case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED: xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED; break; + case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO: + xo->status = CRYPTO_INVALID_PROTOCOL; + break; default: atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome); return NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index e37ae2598dbb..2bfbbef1b054 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -37,6 +37,7 @@ #ifdef CONFIG_MLX5_EN_IPSEC #include <linux/skbuff.h> +#include <net/xfrm.h> #include "en.h" struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, @@ -46,6 +47,10 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_ipsec_inverse_table_init(void); bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features); +void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x, + struct xfrm_offload *xo); +void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x, + struct xfrm_offload *xo); struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev, struct mlx5e_tx_wqe *wqe, struct sk_buff *skb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 784e282803db..db3278cc052b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -70,7 +70,7 @@ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, return -ENOMEM; mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); - MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA); + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA); MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lr, 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index cc8048f68f11..37fd0245b6c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -203,9 +203,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, { int i, idx = 0; - if (!data) - return; - mutex_lock(&priv->state_lock); mlx5e_update_stats(priv); mutex_unlock(&priv->state_lock); @@ -223,60 +220,12 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, mlx5e_ethtool_get_ethtool_stats(priv, stats, data); } -static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type, - int num_wqe) -{ - int packets_per_wqe; - int stride_size; - int num_strides; - int wqe_size; - - if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - return num_wqe; - - stride_size = 1 << 
priv->channels.params.mpwqe_log_stride_sz; - num_strides = 1 << priv->channels.params.mpwqe_log_num_strides; - wqe_size = stride_size * num_strides; - - packets_per_wqe = wqe_size / - ALIGN(ETH_DATA_LEN, stride_size); - return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1)); -} - -static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type, - int num_packets) -{ - int packets_per_wqe; - int stride_size; - int num_strides; - int wqe_size; - int num_wqes; - - if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - return num_packets; - - stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz; - num_strides = 1 << priv->channels.params.mpwqe_log_num_strides; - wqe_size = stride_size * num_strides; - - num_packets = (1 << order_base_2(num_packets)); - - packets_per_wqe = wqe_size / - ALIGN(ETH_DATA_LEN, stride_size); - num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe); - return 1 << (order_base_2(num_wqes)); -} - void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv, struct ethtool_ringparam *param) { - int rq_wq_type = priv->channels.params.rq_wq_type; - - param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, - 1 << mlx5_max_log_rq_size(rq_wq_type)); + param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE; param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; - param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, - 1 << priv->channels.params.log_rq_size); + param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames; param->tx_pending = 1 << priv->channels.params.log_sq_size; } @@ -291,13 +240,9 @@ static void mlx5e_get_ringparam(struct net_device *dev, int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, struct ethtool_ringparam *param) { - int rq_wq_type = priv->channels.params.rq_wq_type; struct mlx5e_channels new_channels = {}; - u32 rx_pending_wqes; - u32 min_rq_size; u8 log_rq_size; u8 log_sq_size; - u32 num_mtts; int err = 0; if (param->rx_jumbo_pending) { @@ -311,23 +256,10 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, return -EINVAL; } - min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, - 1 << mlx5_min_log_rq_size(rq_wq_type)); - rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type, - param->rx_pending); - - if (param->rx_pending < min_rq_size) { + if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) { netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n", __func__, param->rx_pending, - min_rq_size); - return -EINVAL; - } - - num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes); - if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && - !MLX5E_VALID_NUM_MTTS(num_mtts)) { - netdev_info(priv->netdev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n", - __func__, param->rx_pending); + 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE); return -EINVAL; } @@ -338,17 +270,17 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, return -EINVAL; } - log_rq_size = order_base_2(rx_pending_wqes); + log_rq_size = order_base_2(param->rx_pending); log_sq_size = order_base_2(param->tx_pending); - if (log_rq_size == priv->channels.params.log_rq_size && + if (log_rq_size == priv->channels.params.log_rq_mtu_frames && log_sq_size == priv->channels.params.log_sq_size) return 0; mutex_lock(&priv->state_lock); new_channels.params = priv->channels.params; - new_channels.params.log_rq_size = log_rq_size; + new_channels.params.log_rq_mtu_frames = log_rq_size; new_channels.params.log_sq_size = log_sq_size; if (!test_bit(MLX5E_STATE_OPENED, 
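/* Note on the simplification above: ring sizes from ethtool are now
 * stored directly as log_rq_mtu_frames, i.e. in MTU-sized frames
 * rather than in RQ WQEs, which is what made the wqes-to-packets
 * conversion helpers removable. For example (interface name
 * illustrative):
 *
 *   ethtool -G eth0 rx 1024
 *
 * yields log_rq_mtu_frames = order_base_2(1024) = 10 regardless of RQ
 * type; for a striding RQ the actual WQE ring size is derived later
 * via mlx5e_mpwqe_get_log_rq_size(), which divides out the
 * packets-per-WQE factor.
 */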
&priv->state)) { @@ -477,6 +409,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev, return mlx5e_ethtool_get_coalesce(priv, coal); } +#define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD +#define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT + static void mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) { @@ -511,6 +446,20 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, if (!MLX5_CAP_GEN(mdev, cq_moderation)) return -EOPNOTSUPP; + if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME || + coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) { + netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n", + __func__, MLX5E_MAX_COAL_TIME); + return -ERANGE; + } + + if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES || + coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) { + netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n", + __func__, MLX5E_MAX_COAL_FRAMES); + return -ERANGE; + } + mutex_lock(&priv->state_lock); new_channels.params = priv->channels.params; @@ -1066,16 +1015,66 @@ static int mlx5e_get_rxnfc(struct net_device *netdev, return err; } +#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100 +#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000 +#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85 +#define MLX5E_PFC_PREVEN_TOUT_MIN_MSEC 80 +#define MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout) \ + max_t(u16, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC, \ + (critical_tout * MLX5E_PFC_PREVEN_MINOR_PRECENT) / 100) + +static int mlx5e_get_pfc_prevention_tout(struct net_device *netdev, + u16 *pfc_prevention_tout) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + + if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) || + !MLX5_CAP_DEBUG((priv)->mdev, stall_detect)) + return -EOPNOTSUPP; + + return mlx5_query_port_stall_watermark(mdev, pfc_prevention_tout, NULL); +} + +static int mlx5e_set_pfc_prevention_tout(struct net_device *netdev, + u16 pfc_preven) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 critical_tout; + u16 minor; + + if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) || + !MLX5_CAP_DEBUG((priv)->mdev, stall_detect)) + return -EOPNOTSUPP; + + critical_tout = (pfc_preven == PFC_STORM_PREVENTION_AUTO) ? 
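/* Worked example for the stall watermark macros above, assuming a
 * device that exposes the pfcc_mask/stall_detect capabilities: a
 * user-requested timeout of 100 msec gives
 *
 *   minor = MLX5E_DEVICE_STALL_MINOR_WATERMARK(100)
 *         = max(80, (100 * 85) / 100) = max(80, 85) = 85 msec,
 *
 * and PFC_STORM_PREVENTION_AUTO is first mapped to the same 100 msec
 * default. Anything outside the 80..8000 msec window, other than the
 * explicit disable value, is rejected with -EINVAL before any firmware
 * access.
 */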
+ MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC : + pfc_preven; + + if (critical_tout != PFC_STORM_PREVENTION_DISABLE && + (critical_tout > MLX5E_PFC_PREVEN_TOUT_MAX_MSEC || + critical_tout < MLX5E_PFC_PREVEN_TOUT_MIN_MSEC)) { + netdev_info(netdev, "%s: pfc prevention tout not in range (%d-%d)\n", + __func__, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC, + MLX5E_PFC_PREVEN_TOUT_MAX_MSEC); + return -EINVAL; + } + + minor = MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout); + return mlx5_set_port_stall_watermark(mdev, critical_tout, + minor); +} + static int mlx5e_get_tunable(struct net_device *dev, const struct ethtool_tunable *tuna, void *data) { - const struct mlx5e_priv *priv = netdev_priv(dev); - int err = 0; + int err; switch (tuna->id) { - case ETHTOOL_TX_COPYBREAK: - *(u32 *)data = priv->channels.params.tx_max_inline; + case ETHTOOL_PFC_PREVENTION_TOUT: + err = mlx5e_get_pfc_prevention_tout(dev, data); break; default: err = -EINVAL; @@ -1090,34 +1089,13 @@ static int mlx5e_set_tunable(struct net_device *dev, const void *data) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; - int err = 0; - u32 val; + int err; mutex_lock(&priv->state_lock); switch (tuna->id) { - case ETHTOOL_TX_COPYBREAK: - val = *(u32 *)data; - if (val > mlx5e_get_max_inline_cap(mdev)) { - err = -EINVAL; - break; - } - - new_channels.params = priv->channels.params; - new_channels.params.tx_max_inline = val; - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - break; - } - - err = mlx5e_open_channels(priv, &new_channels); - if (err) - break; - mlx5e_switch_priv_channels(priv, &new_channels, NULL); - + case ETHTOOL_PFC_PREVENTION_TOUT: + err = mlx5e_set_pfc_prevention_tout(dev, *(u16 *)data); break; default: err = -EINVAL; @@ -1507,11 +1485,6 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val new_channels.params = priv->channels.params; MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); - new_channels.params.mpwqe_log_stride_sz = - MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val); - new_channels.params.mpwqe_log_num_strides = - MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; return 0; @@ -1549,6 +1522,38 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, return 0; } +static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_channels new_channels = {}; + int err; + + if (enable) { + if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) + return -EOPNOTSUPP; + if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params)) + return -EINVAL; + } + + new_channels.params = priv->channels.params; + + MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_STRIDING_RQ, enable); + mlx5e_set_rq_type(mdev, &new_channels.params); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + return 0; + } + + err = mlx5e_open_channels(priv, &new_channels); + if (err) + return err; + + mlx5e_switch_priv_channels(priv, &new_channels, NULL); + return 0; +} + static int mlx5e_handle_pflag(struct net_device *netdev, u32 wanted_flags, enum mlx5e_priv_flag flag, @@ -1594,6 +1599,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) err = mlx5e_handle_pflag(netdev, 
pflags, MLX5E_PFLAG_RX_CQE_COMPRESS, set_pflag_rx_cqe_compress); + if (err) + goto out; + + err = mlx5e_handle_pflag(netdev, pflags, + MLX5E_PFLAG_RX_STRIDING_RQ, + set_pflag_rx_striding_rq); out: mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 93291ec4a3d1..b29c1d93f058 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -71,56 +71,145 @@ struct mlx5e_channel_param { struct mlx5e_cq_param icosq_cq; }; -static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) +bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { - return MLX5_CAP_GEN(mdev, striding_rq) && + bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) && MLX5_CAP_ETH(mdev, reg_umr_sq); + u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq); + bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap; + + if (!striding_rq_umr) + return false; + if (!inline_umr) { + mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n", + (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap); + return false; + } + return true; +} + +static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params) +{ + if (!params->xdp_prog) { + u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN; + + return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu); + } + + return PAGE_SIZE; +} + +static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params) +{ + u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params); + + return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz); +} + +static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + u32 frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params); + s8 signed_log_num_strides_param; + u8 log_num_strides; + + if (params->lro_en || frag_sz > PAGE_SIZE) + return false; + + if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) + return true; + + log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz); + signed_log_num_strides_param = + (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE; + + return signed_log_num_strides_param >= 0; +} + +static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params) +{ + if (params->log_rq_mtu_frames < + mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW) + return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW; + + return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params); +} + +static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) + return order_base_2(mlx5e_mpwqe_get_linear_frag_sz(params)); + + return MLX5E_MPWQE_STRIDE_SZ(mdev, + MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); +} + +static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + return MLX5_MPWRQ_LOG_WQE_SZ - + mlx5e_mpwqe_get_log_stride_size(mdev, params); +} + +static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + u16 linear_rq_headroom = params->xdp_prog ? 
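/* Worked example for mlx5e_mpwqe_get_linear_frag_sz() above, on a
 * typical x86-64 configuration (4K pages, NET_IP_ALIGN = 0, 64-byte
 * cache lines; exact constants vary by config, so treat the numbers as
 * illustrative): for sw_mtu = 1500 without XDP,
 *
 *   hw_mtu  = MLX5E_SW2HW_MTU(params, 1500) = 1522
 *   frag_sz = SKB_DATA_ALIGN(headroom + 1522)
 *             + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * which lands well under PAGE_SIZE, so (absent LRO) the RX path can
 * build the skb linearly inside a single MPWQE stride of
 * order_base_2(frag_sz) bytes instead of copying; with an XDP program
 * attached the fragment is pinned to a full page.
 */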
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; + + linear_rq_headroom += NET_IP_ALIGN; + + if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST) + return linear_rq_headroom; + + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) + return linear_rq_headroom; + + return 0; } void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, - struct mlx5e_params *params, u8 rq_type) + struct mlx5e_params *params) { - params->rq_wq_type = rq_type; params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + params->log_rq_mtu_frames = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - params->log_rq_size = is_kdump_kernel() ? - MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW : - MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; - params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev, - MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); - params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - - params->mpwqe_log_stride_sz; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ - params->log_rq_size = is_kdump_kernel() ? - MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : - MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; - params->rq_headroom = params->xdp_prog ? - XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; - params->rq_headroom += NET_IP_ALIGN; - /* Extra room needed for build_skb */ - params->lro_wqe_sz -= params->rq_headroom + + params->lro_wqe_sz -= mlx5e_get_rq_headroom(mdev, params) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); } mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, - BIT(params->log_rq_size), - BIT(params->mpwqe_log_stride_sz), + params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ? + BIT(mlx5e_mpwqe_get_log_rq_size(params)) : + BIT(params->log_rq_mtu_frames), + BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)), MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); } -static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, +bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { - u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) && - !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ? - MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : - MLX5_WQ_TYPE_LINKED_LIST; - mlx5e_init_rq_type_params(mdev, params, rq_type); + return mlx5e_check_fragmented_striding_rq_cap(mdev) && + !MLX5_IPSEC_DEV(mdev) && + !(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params)); +} + +void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params) +{ + params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) && + MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ? 
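/* The MLX5E_GET_PFLAG() test here is fed by the new rx_striding_rq
 * private flag (see the pflag table in en.h and
 * set_pflag_rx_striding_rq() in en_ethtool.c earlier in this diff), so
 * the RQ type becomes user-selectable at runtime, e.g. (interface name
 * a placeholder):
 *
 *   ethtool --set-priv-flags <ifname> rx_striding_rq on
 *   ethtool --show-priv-flags <ifname>
 *
 * The flag only takes effect when mlx5e_striding_rq_possible() also
 * holds, i.e. the device has the striding/UMR capabilities and any
 * attached XDP program is compatible with the linear MPWQE layout.
 */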
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : + MLX5_WQ_TYPE_LINKED_LIST; } static void mlx5e_update_carrier(struct mlx5e_priv *priv) @@ -153,26 +242,6 @@ static void mlx5e_update_carrier_work(struct work_struct *work) mutex_unlock(&priv->state_lock); } -static void mlx5e_tx_timeout_work(struct work_struct *work) -{ - struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, - tx_timeout_work); - int err; - - rtnl_lock(); - mutex_lock(&priv->state_lock); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - goto unlock; - mlx5e_close_locked(priv->netdev); - err = mlx5e_open_locked(priv->netdev); - if (err) - netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", - err); -unlock: - mutex_unlock(&priv->state_lock); - rtnl_unlock(); -} - void mlx5e_update_stats(struct mlx5e_priv *priv) { int i; @@ -235,107 +304,38 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv) synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC)); } -static inline int mlx5e_get_wqe_mtt_sz(void) -{ - /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes. - * To avoid copying garbage after the mtt array, we allocate - * a little more. - */ - return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64), - MLX5_UMR_MTT_ALIGNMENT); -} - static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, - struct mlx5e_umr_wqe *wqe, - u16 ix) + struct mlx5e_umr_wqe *wqe) { struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; - struct mlx5_wqe_data_seg *dseg = &wqe->data; - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; - u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS); - u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix); + u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS); cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt); cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; cseg->imm = rq->mkey_be; - ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN; + ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; ucseg->xlt_octowords = cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE)); - ucseg->bsf_octowords = - cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset)); ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); - - dseg->lkey = sq->mkey_be; - dseg->addr = cpu_to_be64(wi->umr.mtt_addr); } static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, struct mlx5e_channel *c) { int wq_sz = mlx5_wq_ll_get_size(&rq->wq); - int mtt_sz = mlx5e_get_wqe_mtt_sz(); - int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1; - int i; rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->mpwqe.info) - goto err_out; - - /* We allocate more than mtt_sz as we will align the pointer */ - rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL, - cpu_to_node(c->cpu)); - if (unlikely(!rq->mpwqe.mtt_no_align)) - goto err_free_wqe_info; - - for (i = 0; i < wq_sz; i++) { - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i]; - - wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc, - MLX5_UMR_ALIGN); - wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz, - PCI_DMA_TODEVICE); - if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr))) - goto err_unmap_mtts; + return -ENOMEM; - mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i); - } + mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe); return 0; - -err_unmap_mtts: - while (--i >= 0) { - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i]; - - 
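/* Sizing sketch for the inline-MTT UMR above, assuming
 * MLX5_MPWRQ_LOG_WQE_SZ = 18 and 4K pages (the usual defaults, stated
 * here as an assumption since neither constant appears in this hunk):
 * one 256KB MPWQE spans 2^(18 - 12) = 64 pages, so the inline payload
 * is 64 * sizeof(struct mlx5_mtt) = 512 bytes, already a multiple of
 * MLX5_UMR_MTT_ALIGNMENT. Adding the fixed ctrl/UMR-ctrl/mkey segments
 * keeps MLX5E_UMR_WQE_INLINE_SZ well under the common 1KB
 * max_wqe_sz_sq cap checked in
 * mlx5e_check_fragmented_striding_rq_cap() earlier in this file, which
 * is why the per-WQE MTT dma_map_single()/PTR_ALIGN bookkeeping being
 * deleted here is no longer needed: the HCA reads the MTTs directly
 * from the WQE (MLX5_UMR_INLINE).
 */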
dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz, - PCI_DMA_TODEVICE); - } - kfree(rq->mpwqe.mtt_no_align); -err_free_wqe_info: - kfree(rq->mpwqe.info); - -err_out: - return -ENOMEM; -} - -static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq) -{ - int wq_sz = mlx5_wq_ll_get_size(&rq->wq); - int mtt_sz = mlx5e_get_wqe_mtt_sz(); - int i; - - for (i = 0; i < wq_sz; i++) { - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i]; - - dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, - PCI_DMA_TODEVICE); - } - kfree(rq->mpwqe.mtt_no_align); - kfree(rq->mpwqe.info); } static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, @@ -347,9 +347,6 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, u32 *in; int err; - if (!MLX5E_VALID_NUM_MTTS(npages)) - return -EINVAL; - in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -360,7 +357,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, MLX5_SET(mkc, mkc, umr_en, 1); MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lr, 1); - MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT); + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); MLX5_SET(mkc, mkc, qpn, 0xffffff); MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn); @@ -382,6 +379,11 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey); } +static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix) +{ + return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT; +} + static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_rq_param *rqp, @@ -415,6 +417,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->channel = c; rq->ix = c->ix; rq->mdev = mdev; + rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; if (IS_ERR(rq->xdp_prog)) { @@ -428,11 +431,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; - rq->buff.headroom = params->rq_headroom; + rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params); switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; @@ -450,8 +452,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; } - rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz; - rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides); + rq->mpwqe.skb_from_cqe_mpwrq = + mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ? + mlx5e_skb_from_cqe_mpwrq_linear : + mlx5e_skb_from_cqe_mpwrq_nonlinear; + rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params); + rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params)); byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; @@ -490,7 +496,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, byte_count = params->lro_en ? 
params->lro_wqe_sz : - MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu); + MLX5E_SW2HW_MTU(params, params->sw_mtu); #ifdef CONFIG_MLX5_EN_IPSEC if (MLX5_IPSEC_DEV(mdev)) byte_count += MLX5E_METADATA_ETHER_LEN; @@ -510,9 +516,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { - u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT; + u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i); - wqe->data.addr = cpu_to_be64(dma_offset); + wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom); } wqe->data.byte_count = cpu_to_be32(byte_count); @@ -558,7 +564,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - mlx5e_rq_free_mpwqe_info(rq); + kfree(rq->mpwqe.info); mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ @@ -615,8 +621,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq, static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) { - struct mlx5e_channel *c = rq->channel; - struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_core_dev *mdev = rq->mdev; void *in; void *rqc; @@ -898,7 +903,6 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, struct mlx5_core_dev *mdev = c->mdev; int err; - sq->mkey_be = c->mkey_be; sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map; @@ -953,6 +957,7 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) return 0; } +static void mlx5e_sq_recover(struct work_struct *work); static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, @@ -970,8 +975,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->channel = c; sq->txq_ix = txq_ix; sq->uar_map = mdev->mlx5e_res.bfreg.map; - sq->max_inline = params->tx_max_inline; sq->min_inline_mode = params->tx_min_inline_mode; + INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); @@ -1038,6 +1043,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode); MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); + MLX5_SET(sqc, sqc, flush_in_error_en, 1); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index); @@ -1156,9 +1162,20 @@ err_free_txqsq: return err; } +static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) +{ + WARN_ONCE(sq->cc != sq->pc, + "SQ 0x%x: cc (0x%x) != pc (0x%x)\n", + sq->sqn, sq->cc, sq->pc); + sq->cc = 0; + sq->dma_fifo_cc = 0; + sq->pc = 0; +} + static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) { sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); + clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); netdev_tx_reset_queue(sq->txq); netif_tx_start_queue(sq->txq); @@ -1195,14 +1212,118 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) { struct mlx5e_channel *c = sq->channel; struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_rate_limit rl = {0}; mlx5e_destroy_sq(mdev, sq->sqn); - if (sq->rate_limit) - mlx5_rl_remove_rate(mdev, sq->rate_limit); + if (sq->rate_limit) { + rl.rate = sq->rate_limit; + mlx5_rl_remove_rate(mdev, &rl); + } mlx5e_free_txqsq_descs(sq); mlx5e_free_txqsq(sq); } +static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) +{ + unsigned long exp_time = jiffies + msecs_to_jiffies(2000); + + while 
(time_before(jiffies, exp_time)) { + if (sq->cc == sq->pc) + return 0; + + msleep(20); + } + + netdev_err(sq->channel->netdev, + "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n", + sq->sqn, sq->cc, sq->pc); + + return -ETIMEDOUT; +} + +static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state) +{ + struct mlx5_core_dev *mdev = sq->channel->mdev; + struct net_device *dev = sq->channel->netdev; + struct mlx5e_modify_sq_param msp = {0}; + int err; + + msp.curr_state = curr_state; + msp.next_state = MLX5_SQC_STATE_RST; + + err = mlx5e_modify_sq(mdev, sq->sqn, &msp); + if (err) { + netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn); + return err; + } + + memset(&msp, 0, sizeof(msp)); + msp.curr_state = MLX5_SQC_STATE_RST; + msp.next_state = MLX5_SQC_STATE_RDY; + + err = mlx5e_modify_sq(mdev, sq->sqn, &msp); + if (err) { + netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn); + return err; + } + + return 0; +} + +static void mlx5e_sq_recover(struct work_struct *work) +{ + struct mlx5e_txqsq_recover *recover = + container_of(work, struct mlx5e_txqsq_recover, + recover_work); + struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq, + recover); + struct mlx5_core_dev *mdev = sq->channel->mdev; + struct net_device *dev = sq->channel->netdev; + u8 state; + int err; + + err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); + if (err) { + netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", + sq->sqn, err); + return; + } + + if (state != MLX5_RQC_STATE_ERR) { + netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); + return; + } + + netif_tx_disable_queue(sq->txq); + + if (mlx5e_wait_for_sq_flush(sq)) + return; + + /* If the interval between two consecutive recovers per SQ is too + * short, don't recover to avoid infinite loop of ERR_CQE -> recover. + * If we reached this state, there is probably a bug that needs to be + * fixed. let's keep the queue close and let tx timeout cleanup. + */ + if (jiffies_to_msecs(jiffies - recover->last_recover) < + MLX5E_SQ_RECOVER_MIN_INTERVAL) { + netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n", + sq->sqn); + return; + } + + /* At this point, no new packets will arrive from the stack as TXQ is + * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all + * pending WQEs. SQ can safely reset the SQ. 
+ */ + if (mlx5e_sq_to_ready(sq, state)) + return; + + mlx5e_reset_txqsq_cc_pc(sq); + sq->stats.recover++; + recover->last_recover = jiffies; + mlx5e_activate_txqsq(sq); +} + static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, @@ -1528,6 +1649,7 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_modify_sq_param msp = {0}; + struct mlx5_rate_limit rl = {0}; u16 rl_index = 0; int err; @@ -1535,14 +1657,17 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev, /* nothing to do */ return 0; - if (sq->rate_limit) + if (sq->rate_limit) { + rl.rate = sq->rate_limit; /* remove current rl index to free space to next ones */ - mlx5_rl_remove_rate(mdev, sq->rate_limit); + mlx5_rl_remove_rate(mdev, &rl); + } sq->rate_limit = 0; if (rate) { - err = mlx5_rl_add_rate(mdev, rate, &rl_index); + rl.rate = rate; + err = mlx5_rl_add_rate(mdev, &rl_index, &rl); if (err) { netdev_err(dev, "Failed configuring rate %u: %d\n", rate, err); @@ -1560,7 +1685,7 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev, rate, err); /* remove the rate from the table */ if (rate) - mlx5_rl_remove_rate(mdev, rate); + mlx5_rl_remove_rate(mdev, &rl); return err; } @@ -1743,38 +1868,49 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_rq_param *param) { + struct mlx5_core_dev *mdev = priv->mdev; void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9); - MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6); + MLX5_SET(wq, wq, log_wqe_num_of_strides, + mlx5e_mpwqe_get_log_num_strides(mdev, params) - + MLX5_MPWQE_LOG_NUM_STRIDES_BASE); + MLX5_SET(wq, wq, log_wqe_stride_size, + mlx5e_mpwqe_get_log_stride_size(mdev, params) - + MLX5_MPWQE_LOG_STRIDE_SZ_BASE); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); + MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params)); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); } MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); - MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size); - MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); + MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn); MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en); - param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); + param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); param->wq.linear = 1; } -static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) +static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, + struct mlx5e_rq_param *param) { + struct mlx5_core_dev *mdev = priv->mdev; void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); + MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter); + + param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); } static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, @@ -1813,15 +1949,17 @@ static void 
mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_cq_param *param) { + struct mlx5_core_dev *mdev = priv->mdev; void *cqc = param->cqc; u8 log_cq_size; switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides; + log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) + + mlx5e_mpwqe_get_log_num_strides(mdev, params); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ - log_cq_size = params->log_rq_size; + log_cq_size = params->log_rq_mtu_frames; } MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); @@ -2372,10 +2510,10 @@ static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv, mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true); } -static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) +static int mlx5e_set_mtu(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, u16 mtu) { - struct mlx5_core_dev *mdev = priv->mdev; - u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu); + u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu); int err; err = mlx5_set_port_mtu(mdev, hw_mtu, 1); @@ -2387,9 +2525,9 @@ static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) return 0; } -static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu) +static void mlx5e_query_mtu(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, u16 *mtu) { - struct mlx5_core_dev *mdev = priv->mdev; u16 hw_mtu = 0; int err; @@ -2397,25 +2535,27 @@ static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu) if (err || !hw_mtu) /* fallback to port oper mtu */ mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); - *mtu = MLX5E_HW2SW_MTU(priv, hw_mtu); + *mtu = MLX5E_HW2SW_MTU(params, hw_mtu); } static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) { + struct mlx5e_params *params = &priv->channels.params; struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; u16 mtu; int err; - err = mlx5e_set_mtu(priv, netdev->mtu); + err = mlx5e_set_mtu(mdev, params, params->sw_mtu); if (err) return err; - mlx5e_query_mtu(priv, &mtu); - if (mtu != netdev->mtu) + mlx5e_query_mtu(mdev, params, &mtu); + if (mtu != params->sw_mtu) netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n", - __func__, mtu, netdev->mtu); + __func__, mtu, params->sw_mtu); - netdev->mtu = mtu; + params->sw_mtu = mtu; return 0; } @@ -2569,6 +2709,9 @@ int mlx5e_open(struct net_device *netdev) mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); + if (mlx5e_vxlan_allowed(priv->mdev)) + udp_tunnel_get_rx_info(netdev); + return err; } @@ -2634,18 +2777,22 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, struct mlx5e_cq *cq, struct mlx5e_cq_param *param) { + param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); + param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev); + return mlx5e_alloc_cq_common(mdev, param, cq); } -static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev, +static int mlx5e_open_drop_rq(struct mlx5e_priv *priv, struct mlx5e_rq *drop_rq) { + struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_cq_param cq_param = {}; struct mlx5e_rq_param rq_param = {}; struct mlx5e_cq *cq = &drop_rq->cq; int err; - mlx5e_build_drop_rq_param(&rq_param); + mlx5e_build_drop_rq_param(priv, &rq_param); err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); if (err) @@ -2663,6 +2810,10 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev, if (err) goto err_free_rq; + err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); + if 
(err) + mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err); + return 0; err_free_rq: @@ -2994,8 +3145,8 @@ static int mlx5e_setup_tc_block(struct net_device *dev, } #endif -int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data) +static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) { switch (type) { #ifdef CONFIG_MLX5_ESWITCH @@ -3088,20 +3239,28 @@ typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable); static int set_feature_lro(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channels new_channels = {}; + struct mlx5e_params *old_params; int err = 0; bool reset; mutex_lock(&priv->state_lock); - reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST); - reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state); + old_params = &priv->channels.params; + reset = test_bit(MLX5E_STATE_OPENED, &priv->state); - new_channels.params = priv->channels.params; + new_channels.params = *old_params; new_channels.params.lro_en = enable; + if (old_params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) { + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) == + mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params)) + reset = false; + } + if (!reset) { - priv->channels.params = new_channels.params; + *old_params = new_channels.params; err = mlx5e_modify_tirs_lro(priv); goto out; } @@ -3230,24 +3389,20 @@ static int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t oper_features = netdev->features; - int err; + int err = 0; - err = mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_LRO, set_feature_lro); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_HW_VLAN_CTAG_FILTER, +#define MLX5E_HANDLE_FEATURE(feature, handler) \ + mlx5e_handle_feature(netdev, &oper_features, features, feature, handler) + + err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, set_feature_cvlan_filter); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_HW_TC, set_feature_tc_num_filters); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_RXALL, set_feature_rx_all); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_RXFCS, set_feature_rx_fcs); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); #ifdef CONFIG_RFS_ACCEL - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_NTUPLE, set_feature_arfs); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs); #endif if (err) { @@ -3281,34 +3436,40 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_channels new_channels = {}; - int curr_mtu; + struct mlx5e_params *params; int err = 0; bool reset; mutex_lock(&priv->state_lock); - reset = !priv->channels.params.lro_en && - (priv->channels.params.rq_wq_type != - MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); + params = &priv->channels.params; + 
reset = !params->lro_en; reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state); - curr_mtu = netdev->mtu; - netdev->mtu = new_mtu; + new_channels.params = *params; + new_channels.params.sw_mtu = new_mtu; + + if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) { + u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); + u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); + + reset = reset && (ppw_old != ppw_new); + } if (!reset) { + params->sw_mtu = new_mtu; mlx5e_set_dev_port_mtu(priv); + netdev->mtu = params->sw_mtu; goto out; } - new_channels.params = priv->channels.params; err = mlx5e_open_channels(priv, &new_channels); - if (err) { - netdev->mtu = curr_mtu; + if (err) goto out; - } mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu); + netdev->mtu = new_channels.params.sw_mtu; out: mutex_unlock(&priv->state_lock); @@ -3598,21 +3759,11 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb, static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev, struct mlx5e_txqsq *sq) { - struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5_core_dev *mdev = priv->mdev; - int irqn_not_used, eqn; - struct mlx5_eq *eq; + struct mlx5_eq *eq = sq->cq.mcq.eq; u32 eqe_count; - if (mlx5_vector2eqn(mdev, sq->cq.mcq.vector, &eqn, &irqn_not_used)) - return false; - - eq = mlx5_eqn2eq(mdev, eqn); - if (IS_ERR(eq)) - return false; - netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", - eqn, eq->cons_index, eq->irqn); + eq->eqn, eq->cons_index, eq->irqn); eqe_count = mlx5_eq_poll_irq_disabled(eq); if (!eqe_count) @@ -3623,13 +3774,19 @@ static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev, return true; } -static void mlx5e_tx_timeout(struct net_device *dev) +static void mlx5e_tx_timeout_work(struct work_struct *work) { - struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + tx_timeout_work); + struct net_device *dev = priv->netdev; bool reopen_channels = false; - int i; + int i, err; - netdev_err(dev, "TX timeout detected\n"); + rtnl_lock(); + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto unlock; for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) { struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i); @@ -3637,7 +3794,9 @@ static void mlx5e_tx_timeout(struct net_device *dev) if (!netif_xmit_stopped(dev_queue)) continue; - netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", + + netdev_err(dev, + "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, jiffies_to_usecs(jiffies - dev_queue->trans_start)); @@ -3650,8 +3809,27 @@ static void mlx5e_tx_timeout(struct net_device *dev) } } - if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state)) - schedule_work(&priv->tx_timeout_work); + if (!reopen_channels) + goto unlock; + + mlx5e_close_locked(dev); + err = mlx5e_open_locked(dev); + if (err) + netdev_err(priv->netdev, + "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", + err); + +unlock: + mutex_unlock(&priv->state_lock); + rtnl_unlock(); +} + +static void mlx5e_tx_timeout(struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + netdev_err(dev, "TX timeout detected\n"); + queue_work(priv->wq, &priv->tx_timeout_work); } static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) @@ -3701,7 
+3879,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) bpf_prog_put(old_prog); if (reset) /* change RQ type according to priv->xdp_prog */ - mlx5e_set_rq_params(priv->mdev, &priv->channels.params); + mlx5e_set_rq_type(priv->mdev, &priv->channels.params); if (was_opened && reset) mlx5e_open_locked(netdev); @@ -3846,15 +4024,6 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) return 0; } -u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) -{ - int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; - - return bf_buf_size - - sizeof(struct mlx5e_tx_wqe) + - 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; -} - void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, int num_channels) { @@ -3864,16 +4033,20 @@ void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, indirection_rqt[i] = i % num_channels; } -static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw) +static bool slow_pci_heuristic(struct mlx5_core_dev *mdev) { - return (link_speed && pci_bw && - (pci_bw < 40000) && (pci_bw < link_speed)); -} + u32 link_speed = 0; + u32 pci_bw = 0; -static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw) -{ - return !(link_speed && pci_bw && - (pci_bw <= 16000) && (pci_bw < link_speed)); + mlx5e_get_max_linkspeed(mdev, &link_speed); + pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL); + mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n", + link_speed, pci_bw); + +#define MLX5E_SLOW_PCI_RATIO (2) + + return link_speed && pci_bw && + link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw; } void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) @@ -3925,7 +4098,7 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) MLX5_CQ_PERIOD_MODE_START_FROM_CQE); } -u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) +static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) { int i; @@ -3939,20 +4112,15 @@ u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, - u16 max_channels) + u16 max_channels, u16 mtu) { - u8 cq_period_mode = 0; - u32 link_speed = 0; - u32 pci_bw = 0; + u8 rx_cq_period_mode; + params->sw_mtu = mtu; + params->hard_mtu = MLX5E_ETH_HARD_MTU; params->num_channels = max_channels; params->num_tc = 1; - mlx5e_get_max_linkspeed(mdev, &link_speed); - pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL); - mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n", - link_speed, pci_bw); - /* SQ */ params->log_sq_size = is_kdump_kernel() ? 
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : @@ -3962,30 +4130,34 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, params->rx_cqe_compress_def = false; if (MLX5_CAP_GEN(mdev, cqe_compression) && MLX5_CAP_GEN(mdev, vport_group_manager)) - params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw); + params->rx_cqe_compress_def = slow_pci_heuristic(mdev); MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); /* RQ */ - mlx5e_set_rq_params(mdev, params); + if (mlx5e_striding_rq_possible(mdev, params)) + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, + !slow_pci_heuristic(mdev)); + mlx5e_set_rq_type(mdev, params); + mlx5e_init_rq_type_params(mdev, params); /* HW LRO */ /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - params->lro_en = hw_lro_heuristic(link_speed, pci_bw); + if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) + params->lro_en = !slow_pci_heuristic(mdev); params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); /* CQ moderation params */ - cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? + rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); - mlx5e_set_rx_cq_mode_params(params, cq_period_mode); - mlx5e_set_tx_cq_mode_params(params, cq_period_mode); + mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode); + mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); /* TX inline */ - params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev); /* RSS */ @@ -4007,9 +4179,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->profile = profile; priv->ppriv = ppriv; priv->msglevel = MLX5E_MSG_LEVEL; - priv->hard_mtu = MLX5E_ETH_HARD_MTU; - mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); + mlx5e_build_nic_params(mdev, &priv->channels.params, + profile->max_nch(mdev), netdev->mtu); mutex_init(&priv->state_lock); @@ -4033,7 +4205,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) } } -#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) +#if IS_ENABLED(CONFIG_MLX5_ESWITCH) static const struct switchdev_ops mlx5e_switchdev_ops = { .switchdev_port_attr_get = mlx5e_attr_get, }; @@ -4068,6 +4240,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->vlan_features |= NETIF_F_RXCSUM; netdev->vlan_features |= NETIF_F_RXHASH; + netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX; + if (!!MLX5_CAP_ETH(mdev, lro_cap)) netdev->vlan_features |= NETIF_F_LRO; @@ -4139,7 +4314,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_set_netdev_dev_addr(netdev); -#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) +#if IS_ENABLED(CONFIG_MLX5_ESWITCH) if (MLX5_VPORT_MANAGER(mdev)) netdev->switchdev_ops = &mlx5e_switchdev_ops; #endif @@ -4147,7 +4322,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_ipsec_build_netdev(priv); } -static void mlx5e_create_q_counter(struct mlx5e_priv *priv) +static void mlx5e_create_q_counters(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; int err; @@ -4157,14 +4332,21 @@ static void mlx5e_create_q_counter(struct mlx5e_priv *priv) mlx5_core_warn(mdev, "alloc queue 
counter failed, %d\n", err); priv->q_counter = 0; } + + err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter); + if (err) { + mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err); + priv->drop_rq_q_counter = 0; + } } -static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv) +static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) { - if (!priv->q_counter) - return; + if (priv->q_counter) + mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); - mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); + if (priv->drop_rq_q_counter) + mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter); } static void mlx5e_nic_init(struct mlx5_core_dev *mdev, @@ -4276,7 +4458,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) /* MTU range: 68 - hw-specific max */ netdev->min_mtu = ETH_MIN_MTU; mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1); - netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu); + netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); mlx5e_set_dev_port_mtu(priv); mlx5_lag_add(mdev, netdev); @@ -4291,12 +4473,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) #ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_dcbnl_init_app(priv); #endif - /* Device already registered: sync netdev system state */ - if (mlx5e_vxlan_allowed(mdev)) { - rtnl_lock(); - udp_tunnel_get_rx_info(netdev); - rtnl_unlock(); - } queue_work(priv->wq, &priv->set_rx_mode_work); @@ -4403,18 +4579,18 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) if (err) goto out; - err = mlx5e_open_drop_rq(mdev, &priv->drop_rq); + mlx5e_create_q_counters(priv); + + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); if (err) { mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_cleanup_tx; + goto err_destroy_q_counters; } err = profile->init_rx(priv); if (err) goto err_close_drop_rq; - mlx5e_create_q_counter(priv); - if (profile->enable) profile->enable(priv); @@ -4423,7 +4599,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); -err_cleanup_tx: +err_destroy_q_counters: + mlx5e_destroy_q_counters(priv); profile->cleanup_tx(priv); out: @@ -4440,9 +4617,9 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv) profile->disable(priv); flush_workqueue(priv->wq); - mlx5e_destroy_q_counter(priv); profile->cleanup_rx(priv); mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_destroy_q_counters(priv); profile->cleanup_tx(priv); cancel_delayed_work_sync(&priv->update_stats_work); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 363d8dcb7f17..d8f68e4d1018 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -44,6 +44,11 @@ #include "en_tc.h" #include "fs_core.h" +#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \ + max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) +#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \ + max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE) + static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; static void mlx5e_rep_get_drvinfo(struct net_device *dev, @@ -209,7 +214,7 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw, static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep, - u16 *sqns_array, int sqns_num) + u32 *sqns_array, int sqns_num) { struct mlx5_flow_handle *flow_rule; struct mlx5e_rep_priv *rpriv; @@ -255,9 +260,9 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) struct mlx5e_channel *c; int n, tc, num_sqs = 0; int err = -ENOMEM; - u16 *sqs; + 
u32 *sqs; - sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL); + sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL); if (!sqs) goto out; @@ -288,7 +293,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv) { #if IS_ENABLED(CONFIG_IPV6) - unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms, + unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME); #else unsigned long ipv6_interval = ~0UL; @@ -424,7 +429,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb, case NETEVENT_NEIGH_UPDATE: n = ptr; #if IS_ENABLED(CONFIG_IPV6) - if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl) + if (n->tbl != &nd_tbl && n->tbl != &arp_tbl) #else if (n->tbl != &arp_tbl) #endif @@ -472,7 +477,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb, * done per device delay prob time parameter. */ #if IS_ENABLED(CONFIG_IPV6) - if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl)) + if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl)) #else if (!p->dev || p->tbl != &arp_tbl) #endif @@ -668,7 +673,6 @@ static int mlx5e_rep_open(struct net_device *dev) struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int err; mutex_lock(&priv->state_lock); @@ -676,8 +680,9 @@ static int mlx5e_rep_open(struct net_device *dev) if (err) goto unlock; - if (!mlx5_eswitch_set_vport_state(esw, rep->vport, - MLX5_ESW_VPORT_ADMIN_STATE_UP)) + if (!mlx5_modify_vport_admin_state(priv->mdev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP)) netif_carrier_on(dev); unlock: @@ -690,11 +695,12 @@ static int mlx5e_rep_close(struct net_device *dev) struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int ret; mutex_lock(&priv->state_lock); - (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); + mlx5_modify_vport_admin_state(priv->mdev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); ret = mlx5e_close_locked(dev); mutex_unlock(&priv->state_lock); return ret; @@ -877,14 +883,14 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; - params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; + params->hard_mtu = MLX5E_ETH_HARD_MTU; + params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; - params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; + params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE; params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); - params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); params->num_tc = 1; params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; @@ -899,9 +905,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; -#ifdef CONFIG_NET_SWITCHDEV netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; -#endif netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; netdev->hw_features |= NETIF_F_HW_TC; @@ -927,8 +931,6 @@ static void 
mlx5e_init_rep(struct mlx5_core_dev *mdev, priv->channels.params.num_channels = profile->max_nch(mdev); - priv->hard_mtu = MLX5E_ETH_HARD_MTU; - mlx5e_build_rep_params(mdev, &priv->channels.params); mlx5e_build_rep_netdev(netdev); @@ -1156,6 +1158,15 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) kfree(ppriv); /* mlx5e_rep_priv */ } +static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_rep_priv *rpriv; + + rpriv = mlx5e_rep_to_rep_priv(rep); + + return rpriv->netdev; +} + static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -1168,6 +1179,7 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv) rep_if.load = mlx5e_vport_rep_load; rep_if.unload = mlx5e_vport_rep_unload; + rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev; mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH); } } @@ -1195,6 +1207,7 @@ void mlx5e_register_vport_reps(struct mlx5e_priv *priv) rep_if.load = mlx5e_nic_rep_load; rep_if.unload = mlx5e_nic_rep_unload; + rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev; rep_if.priv = rpriv; INIT_LIST_HEAD(&rpriv->vport_sqs_list); mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport*/ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 0d4bb0688faa..176645762e49 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -36,6 +36,7 @@ #include <linux/tcp.h> #include <linux/bpf_trace.h> #include <net/busy_poll.h> +#include <net/ip6_checksum.h> #include "en.h" #include "en_tc.h" #include "eswitch.h" @@ -52,7 +53,7 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc, void *data) { - u32 ci = cqcc & cq->wq.sz_m1; + u32 ci = cqcc & cq->wq.fbc.sz_m1; memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64)); } @@ -74,9 +75,10 @@ static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc) static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n) { - u8 op_own = (cqcc >> cq->wq.log_sz) & 1; - u32 wq_sz = 1 << cq->wq.log_sz; - u32 ci = cqcc & cq->wq.sz_m1; + struct mlx5_frag_buf_ctrl *fbc = &cq->wq.fbc; + u8 op_own = (cqcc >> fbc->log_sz) & 1; + u32 wq_sz = 1 << fbc->log_sz; + u32 ci = cqcc & fbc->sz_m1; u32 ci_top = min_t(u32, wq_sz, ci + n); for (; ci < ci_top; ci++, n--) { @@ -101,7 +103,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt; cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum; cq->title.op_own &= 0xf0; - cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz); + cq->title.op_own |= 0x01 & (cqcc >> cq->wq.fbc.log_sz); cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter); if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) @@ -294,46 +296,36 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) mlx5e_free_rx_wqe(rq, wi); } -static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq) -{ - return rq->mpwqe.num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER; -} - static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq, struct sk_buff *skb, - struct mlx5e_mpw_info *wi, - u32 page_idx, u32 frag_offset, - u32 len) + struct mlx5e_dma_info *di, + u32 frag_offset, u32 len) { unsigned int truesize = ALIGN(len, BIT(rq->mpwqe.log_stride_sz)); 
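/* Note: truesize here is rounded up to a whole number of RX strides
 * (BIT(rq->mpwqe.log_stride_sz)), so the skb's socket memory accounting
 * is charged for the full buffer space the fragment occupies, not just
 * the received bytes.
 */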
dma_sync_single_for_cpu(rq->pdev, - wi->umr.dma_info[page_idx].addr + frag_offset, + di->addr + frag_offset, len, DMA_FROM_DEVICE); - wi->skbs_frags[page_idx]++; + page_ref_inc(di->page); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - wi->umr.dma_info[page_idx].page, frag_offset, - len, truesize); + di->page, frag_offset, len, truesize); } static inline void mlx5e_copy_skb_header_mpwqe(struct device *pdev, struct sk_buff *skb, - struct mlx5e_mpw_info *wi, - u32 page_idx, u32 offset, - u32 headlen) + struct mlx5e_dma_info *dma_info, + u32 offset, u32 headlen) { u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset); - struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx]; unsigned int len; /* Aligning len to sizeof(long) optimizes memcpy performance */ len = ALIGN(headlen_pg, sizeof(long)); dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len, DMA_FROM_DEVICE); - skb_copy_to_linear_data_offset(skb, 0, - page_address(dma_info->page) + offset, - len); + skb_copy_to_linear_data(skb, page_address(dma_info->page) + offset, len); + if (unlikely(offset + headlen > PAGE_SIZE)) { dma_info++; headlen_pg = len; @@ -346,14 +338,49 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev, } } -static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix) +void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) +{ + const bool no_xdp_xmit = + bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); + struct mlx5e_dma_info *dma_info = wi->umr.dma_info; + int i; + + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) + if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap)) + mlx5e_page_release(rq, &dma_info[i], true); +} + +static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + + rq->mpwqe.umr_in_progress = false; + + mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); + + /* ensure wqes are visible to device before updating doorbell record */ + dma_wmb(); + + mlx5_wq_ll_update_db_record(wq); +} + +static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq) +{ + return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; +} + +static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; + struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; struct mlx5e_icosq *sq = &rq->channel->icosq; struct mlx5_wq_cyc *wq = &sq->wq; - struct mlx5e_umr_wqe *wqe; - u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB); + struct mlx5e_umr_wqe *umr_wqe; + u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); + int err; u16 pi; + int i; /* fill sq edge with nops to avoid wqe wrap around */ while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { @@ -361,90 +388,44 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix) mlx5e_post_nop(wq, sq->sqn, &sq->pc); } - wqe = mlx5_wq_cyc_get_wqe(wq, pi); - memcpy(wqe, &wi->umr.wqe, sizeof(*wqe)); - wqe->ctrl.opmod_idx_opcode = - cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | - MLX5_OPCODE_UMR); - - sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; - sq->pc += num_wqebbs; - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl); -} - -static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq, - u16 ix) -{ - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; - int pg_strides = mlx5e_mpwqe_strides_per_page(rq); - struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; - int err; - int i; + umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); + if (unlikely(mlx5e_icosq_wrap_cnt(sq) < 2)) + 
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, + offsetof(struct mlx5e_umr_wqe, inline_mtts)); for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { err = mlx5e_page_alloc_mapped(rq, dma_info); if (unlikely(err)) goto err_unmap; - wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR); - page_ref_add(dma_info->page, pg_strides); + umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR); } - memset(wi->skbs_frags, 0, sizeof(*wi->skbs_frags) * MLX5_MPWRQ_PAGES_PER_WQE); + bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); wi->consumed_strides = 0; + rq->mpwqe.umr_in_progress = true; + + umr_wqe->ctrl.opmod_idx_opcode = + cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | + MLX5_OPCODE_UMR); + umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset); + + sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; + sq->pc += MLX5E_UMR_WQEBBS; + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl); + return 0; err_unmap: while (--i >= 0) { dma_info--; - page_ref_sub(dma_info->page, pg_strides); mlx5e_page_release(rq, dma_info, true); } + rq->stats.buff_alloc_err++; return err; } -void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) -{ - int pg_strides = mlx5e_mpwqe_strides_per_page(rq); - struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; - int i; - - for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { - page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]); - mlx5e_page_release(rq, dma_info, true); - } -} - -static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) -{ - struct mlx5_wq_ll *wq = &rq->wq; - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); - - rq->mpwqe.umr_in_progress = false; - - mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); - - /* ensure wqes are visible to device before updating doorbell record */ - dma_wmb(); - - mlx5_wq_ll_update_db_record(wq); -} - -static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) -{ - int err; - - err = mlx5e_alloc_rx_umr_mpwqe(rq, ix); - if (unlikely(err)) { - rq->stats.buff_alloc_err++; - return err; - } - rq->mpwqe.umr_in_progress = true; - mlx5e_post_umr_wqe(rq, ix); - return 0; -} - void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; @@ -543,7 +524,23 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) if (!rq->mpwqe.umr_in_progress) mlx5e_alloc_rx_mpwqe(rq, wq->head); - return true; + return false; +} + +static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp) +{ + u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); + u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || + (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); + + tcp->check = 0; + tcp->psh = get_cqe_lro_tcppsh(cqe); + + if (tcp_ack) { + tcp->ack = 1; + tcp->ack_seq = cqe->lro_ack_seq_num; + tcp->window = cqe->lro_tcp_win; + } } static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, @@ -552,14 +549,11 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, struct ethhdr *eth = (struct ethhdr *)(skb->data); struct tcphdr *tcp; int network_depth = 0; + __wsum check; __be16 proto; u16 tot_len; void *ip_p; - u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); - u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || - (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); - proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); tot_len = cqe_bcnt - network_depth; @@ -576,23 +570,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, 
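/* The hunk below rebuilds the TCP checksum of an LRO-aggregated packet:
 * the CQE checksum is used as the seed, the rewritten TCP header is
 * summed on top of it with csum_partial(), and the pseudo-header is
 * folded in last. A condensed sketch of the IPv4 branch -- the helper
 * name and the `seed` argument are illustrative, not part of the driver
 * (assumes <linux/ip.h>, <linux/tcp.h> and <net/checksum.h>):
 */
static void lro_rebuild_tcp_csum_v4(struct iphdr *ipv4, struct tcphdr *tcp,
				    u16 tot_len, __sum16 seed)
{
	__wsum check;

	tcp->check = 0;	/* header must be summed with a zeroed check field */
	check = csum_partial(tcp, tcp->doff * 4, csum_unfold(seed));
	tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
				       tot_len - sizeof(struct iphdr),
				       IPPROTO_TCP, check);
}
/* The IPv6 branch in the hunk has the same shape, except that
 * csum_ipv6_magic() folds the saddr/daddr/payload_len pseudo-header
 * instead.
 */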
ipv4->check = 0; ipv4->check = ip_fast_csum((unsigned char *)ipv4, ipv4->ihl); + + mlx5e_lro_update_tcp_hdr(cqe, tcp); + check = csum_partial(tcp, tcp->doff * 4, + csum_unfold((__force __sum16)cqe->check_sum)); + /* Almost done, don't forget the pseudo header */ + tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr, + tot_len - sizeof(struct iphdr), + IPPROTO_TCP, check); } else { + u16 payload_len = tot_len - sizeof(struct ipv6hdr); struct ipv6hdr *ipv6 = ip_p; tcp = ip_p + sizeof(struct ipv6hdr); skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; ipv6->hop_limit = cqe->lro_min_ttl; - ipv6->payload_len = cpu_to_be16(tot_len - - sizeof(struct ipv6hdr)); - } - - tcp->psh = get_cqe_lro_tcppsh(cqe); - - if (tcp_ack) { - tcp->ack = 1; - tcp->ack_seq = cqe->lro_ack_seq_num; - tcp->window = cqe->lro_tcp_win; + ipv6->payload_len = cpu_to_be16(payload_len); + + mlx5e_lro_update_tcp_hdr(cqe, tcp); + check = csum_partial(tcp, tcp->doff * 4, + csum_unfold((__force __sum16)cqe->check_sum)); + /* Almost done, don't forget the pseudo header */ + tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len, + IPPROTO_TCP, check); } } @@ -745,8 +746,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, prefetchw(wqe); - if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || - MLX5E_SW2HW_MTU(rq->channel->priv, rq->netdev->mtu) < dma_len)) { + if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) { rq->stats.xdp_drop++; return false; } @@ -785,7 +785,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, /* move page to reference to sq responsibility, * and mark so it's not put back in page-cache. */ - rq->wqe.xdp_xmit = true; + __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */ sq->db.di[pi] = *di; sq->pc++; @@ -834,6 +834,24 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, } static inline +struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, + u32 frag_size, u16 headroom, + u32 cqe_bcnt) +{ + struct sk_buff *skb = build_skb(va, frag_size); + + if (unlikely(!skb)) { + rq->stats.buff_alloc_err++; + return NULL; + } + + skb_reserve(skb, headroom); + skb_put(skb, cqe_bcnt); + + return skb; +} + +static inline struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) { @@ -848,10 +866,8 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, data = va + rx_headroom; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); - dma_sync_single_range_for_cpu(rq->pdev, - di->addr + wi->offset, - 0, frag_size, - DMA_FROM_DEVICE); + dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset, + frag_size, DMA_FROM_DEVICE); prefetch(data); wi->offset += frag_size; @@ -866,18 +882,13 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, if (consumed) return NULL; /* page/packet was consumed by XDP */ - skb = build_skb(va, frag_size); - if (unlikely(!skb)) { - rq->stats.buff_alloc_err++; + skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt); + if (unlikely(!skb)) return NULL; - } /* queue up for recycling/reuse */ page_ref_inc(di->page); - skb_reserve(skb, rx_headroom); - skb_put(skb, cqe_bcnt); - return skb; } @@ -899,9 +910,8 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) { /* probably for XDP */ - if (rq->wqe.xdp_xmit) { + if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { wi->di.page = NULL; - rq->wqe.xdp_xmit = false; /* do not 
return page to cache, it will be returned on XDP_TX completion */ goto wq_ll_pop; } @@ -941,9 +951,8 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) { - if (rq->wqe.xdp_xmit) { + if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { wi->di.page = NULL; - rq->wqe.xdp_xmit = false; /* do not return page to cache, it will be returned on XDP_TX completion */ goto wq_ll_pop; } @@ -966,23 +975,28 @@ wq_ll_pop: } #endif -static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq, - struct mlx5_cqe64 *cqe, - struct mlx5e_mpw_info *wi, - u32 cqe_bcnt, - struct sk_buff *skb) +struct sk_buff * +mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + u16 cqe_bcnt, u32 head_offset, u32 page_idx) { - u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); - u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; - u32 head_offset = wqe_offset & (PAGE_SIZE - 1); - u32 page_idx = wqe_offset >> PAGE_SHIFT; - u32 head_page_idx = page_idx; u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt); + struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx]; u32 frag_offset = head_offset + headlen; - u16 byte_cnt = cqe_bcnt - headlen; + u32 byte_cnt = cqe_bcnt - headlen; + struct mlx5e_dma_info *head_di = di; + struct sk_buff *skb; + + skb = napi_alloc_skb(rq->cq.napi, + ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long))); + if (unlikely(!skb)) { + rq->stats.buff_alloc_err++; + return NULL; + } + + prefetchw(skb->data); if (unlikely(frag_offset >= PAGE_SIZE)) { - page_idx++; + di++; frag_offset -= PAGE_SIZE; } @@ -990,18 +1004,59 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq, u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); - mlx5e_add_skb_frag_mpwqe(rq, skb, wi, page_idx, frag_offset, + mlx5e_add_skb_frag_mpwqe(rq, skb, di, frag_offset, pg_consumed_bytes); byte_cnt -= pg_consumed_bytes; frag_offset = 0; - page_idx++; + di++; } /* copy header */ - mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, wi, head_page_idx, + mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, head_di, head_offset, headlen); /* skb linear part was allocated with headlen and aligned to long */ skb->tail += headlen; skb->len += headlen; + + return skb; +} + +struct sk_buff * +mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + u16 cqe_bcnt, u32 head_offset, u32 page_idx) +{ + struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx]; + u16 rx_headroom = rq->buff.headroom; + u32 cqe_bcnt32 = cqe_bcnt; + struct sk_buff *skb; + void *va, *data; + u32 frag_size; + bool consumed; + + va = page_address(di->page) + head_offset; + data = va + rx_headroom; + frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); + + dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset, + frag_size, DMA_FROM_DEVICE); + prefetch(data); + + rcu_read_lock(); + consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32); + rcu_read_unlock(); + if (consumed) { + if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) + __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */ + return NULL; /* page/packet was consumed by XDP */ + } + + skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32); + if (unlikely(!skb)) + return NULL; + + /* queue up for recycling/reuse */ + page_ref_inc(di->page); + + return skb; } void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) @@ -1009,7 +1064,11 @@ void mlx5e_handle_rx_cqe_mpwrq(struct 
mlx5e_rq *rq, struct mlx5_cqe64 *cqe) u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); u16 wqe_id = be16_to_cpu(cqe->wqe_id); struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id]; - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id); + u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); + u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; + u32 head_offset = wqe_offset & (PAGE_SIZE - 1); + u32 page_idx = wqe_offset >> PAGE_SHIFT; + struct mlx5e_rx_wqe *wqe; struct sk_buff *skb; u16 cqe_bcnt; @@ -1025,18 +1084,13 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) goto mpwrq_cqe_out; } - skb = napi_alloc_skb(rq->cq.napi, - ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, - sizeof(long))); - if (unlikely(!skb)) { - rq->stats.buff_alloc_err++; - goto mpwrq_cqe_out; - } - - prefetchw(skb->data); cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); - mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb); + skb = rq->mpwqe.skb_from_cqe_mpwrq(rq, wi, cqe_bcnt, head_offset, + page_idx); + if (!skb) + goto mpwrq_cqe_out; + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); napi_gro_receive(rq->cq.napi, skb); @@ -1044,6 +1098,7 @@ mpwrq_cqe_out: if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) return; + wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id); mlx5e_free_rx_mpwqe(rq, wi); mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 5a4608281f38..707976482c09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb, if (iph->protocol != IPPROTO_UDP) goto out; - udph = udp_hdr(skb); + /* Don't assume skb_transport_header() was set */ + udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl); if (udph->dest != htons(9)) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 5f0f3493d747..b08c94422907 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -60,6 +60,8 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, @@ -153,6 +155,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; s->tx_queue_dropped += sq_stats->dropped; + s->tx_cqe_err += sq_stats->cqe_err; + s->tx_recover += sq_stats->recover; s->tx_xmit_more += sq_stats->xmit_more; s->tx_csum_partial_inner += sq_stats->csum_partial_inner; s->tx_csum_none += sq_stats->csum_none; @@ -170,11 +174,24 @@ static const struct counter_desc q_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) }, }; +static const struct counter_desc drop_rq_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) }, +}; + #define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc) +#define 
NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc) static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv) { - return priv->q_counter ? NUM_Q_COUNTERS : 0; + int num_stats = 0; + + if (priv->q_counter) + num_stats += NUM_Q_COUNTERS; + + if (priv->drop_rq_q_counter) + num_stats += NUM_DROP_RQ_COUNTERS; + + return num_stats; } static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) @@ -182,7 +199,13 @@ static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) int i; for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format); + strcpy(data + (idx++) * ETH_GSTRING_LEN, + q_stats_desc[i].format); + + for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + drop_rq_stats_desc[i].format); + return idx; } @@ -191,7 +214,11 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) int i; for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++) - data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, q_stats_desc, i); + data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, + q_stats_desc, i); + for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++) + data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, + drop_rq_stats_desc, i); return idx; } @@ -199,16 +226,76 @@ static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv) { struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; u32 out[MLX5_ST_SZ_DW(query_q_counter_out)]; - int err; - if (!priv->q_counter) - return; + if (priv->q_counter && + !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, + sizeof(out))) + qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, + out, out_of_buffer); + if (priv->drop_rq_q_counter && + !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0, + out, sizeof(out))) + qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out, + out_of_buffer); +} + +#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c) +static const struct counter_desc vnic_env_stats_desc[] = { + { "rx_steer_missed_packets", + VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) }, +}; + +#define NUM_VNIC_ENV_COUNTERS ARRAY_SIZE(vnic_env_stats_desc) + +static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv) +{ + return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ? 
+ NUM_VNIC_ENV_COUNTERS : 0; +} + +static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + int i; + + if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) + return idx; + + for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + vnic_env_stats_desc[i].format); + return idx; +} + +static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data, + int idx) +{ + int i; + + if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) + return idx; - err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out)); - if (err) + for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out, + vnic_env_stats_desc, i); + return idx; +} + +static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv) +{ + u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out; + int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out); + u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0}; + struct mlx5_core_dev *mdev = priv->mdev; + + if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) return; - qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer); + MLX5_SET(query_vnic_env_in, in, opcode, + MLX5_CMD_OP_QUERY_VNIC_ENV); + MLX5_SET(query_vnic_env_in, in, op_mod, 0); + MLX5_SET(query_vnic_env_in, in, other_vport, 0); + mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); } #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c) @@ -754,7 +841,15 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; +static const struct counter_desc pport_pfc_stall_stats_desc[] = { + { "tx_pause_storm_warning_events ", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) }, + { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) }, +}; + #define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc) +#define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \ + MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \ + MLX5_CAP_DEBUG((priv)->mdev, stall_detect)) static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) { @@ -790,7 +885,8 @@ static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv) { return (mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * - NUM_PPORT_PER_PRIO_PFC_COUNTERS; + NUM_PPORT_PER_PRIO_PFC_COUNTERS + + NUM_PPORT_PFC_STALL_COUNTERS(priv); } static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv, @@ -818,6 +914,10 @@ static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv, } } + for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_pfc_stall_stats_desc[i].format); + return idx; } @@ -845,6 +945,10 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv, } } + for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0], + pport_pfc_stall_stats_desc, i); + return idx; } @@ -1003,6 +1107,8 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, + { MLX5E_DECLARE_TX_STAT(struct 
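NUM_PPORT_PFC_STALL_COUNTERS above exploits the fact that single-bit capability fields read as 0 or 1: multiplying the descriptor count by both bits yields either the full set or zero, so the string and stats fill loops need no extra branch. The idiom in isolation (the parameters are illustrative stand-ins for the PCAM and debug capability reads):

    /* pfcc_mask and stall_detect are 0/1 capability bits; the product is
     * either the full descriptor count or 0.
     */
    static inline int num_pfc_stall_counters(int ndesc, int pfcc_mask,
                                             int stall_detect)
    {
            return ndesc * !!pfcc_mask * !!stall_detect;
    }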
mlx5e_sq_stats, recover) }, }; static const struct counter_desc ch_stats_desc[] = { @@ -1095,6 +1201,12 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .update_stats = mlx5e_grp_q_update_stats, }, { + .get_num_stats = mlx5e_grp_vnic_env_get_num_stats, + .fill_strings = mlx5e_grp_vnic_env_fill_strings, + .fill_stats = mlx5e_grp_vnic_env_fill_stats, + .update_stats = mlx5e_grp_vnic_env_update_stats, + }, + { .get_num_stats = mlx5e_grp_vport_get_num_stats, .fill_strings = mlx5e_grp_vport_fill_strings, .fill_stats = mlx5e_grp_vport_fill_stats, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 0b3320a2b072..53111a2df587 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -78,6 +78,8 @@ struct mlx5e_sw_stats { u64 tx_queue_wake; u64 tx_queue_dropped; u64 tx_xmit_more; + u64 tx_cqe_err; + u64 tx_recover; u64 rx_wqe_err; u64 rx_mpwqe_filler; u64 rx_buff_alloc_err; @@ -97,6 +99,11 @@ struct mlx5e_sw_stats { struct mlx5e_qcounter_stats { u32 rx_out_of_buffer; + u32 rx_if_down_packets; +}; + +struct mlx5e_vnic_env_stats { + __be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)]; }; #define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \ @@ -192,6 +199,8 @@ struct mlx5e_sq_stats { u64 stopped; u64 wake; u64 dropped; + u64 cqe_err; + u64 recover; }; struct mlx5e_ch_stats { @@ -201,6 +210,7 @@ struct mlx5e_ch_stats { struct mlx5e_stats { struct mlx5e_sw_stats sw; struct mlx5e_qcounter_stats qcnt; + struct mlx5e_vnic_env_stats vnic; struct mlx5e_vport_stats vport; struct mlx5e_pport_stats pport; struct rtnl_link_stats64 vf_vport; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index fd98b0dc610f..4197001f9801 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -675,6 +675,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, struct mlx5_flow_destination dest[2] = {}; struct mlx5_flow_act flow_act = { .action = attr->action, + .has_flow_tag = true, .flow_tag = attr->flow_tag, .encap_id = 0, }; @@ -963,7 +964,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) tbl = &arp_tbl; #if IS_ENABLED(CONFIG_IPV6) else if (m_neigh->family == AF_INET6) - tbl = ipv6_stub->nd_tbl; + tbl = &nd_tbl; #endif else return; @@ -2529,11 +2530,17 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { - if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) - return -EOPNOTSUPP; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; - attr->vlan = tcf_vlan_push_vid(a); + attr->vlan_vid = tcf_vlan_push_vid(a); + if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) { + attr->vlan_prio = tcf_vlan_push_prio(a); + attr->vlan_proto = tcf_vlan_push_proto(a); + if (!attr->vlan_proto) + attr->vlan_proto = htons(ETH_P_8021Q); + } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || + tcf_vlan_push_prio(a)) { + return -EOPNOTSUPP; + } } else { /* action is TCA_VLAN_ACT_MODIFY */ return -EOPNOTSUPP; } @@ -2607,19 +2614,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, if (err != -EAGAIN) flow->flags |= MLX5E_TC_FLOW_OFFLOADED; + if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) || + !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)) 
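The vlan-push parsing above defaults an unset push protocol to 802.1Q and accepts a non-default protocol or priority only when the device offloads VLAN actions natively; otherwise such rules are rejected on the emulated path. A compact sketch of that decision with hypothetical names (the hw_vlan_actions flag plays the role of mlx5_eswitch_vlan_actions_supported()):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <arpa/inet.h> /* htons */

    #define DEMO_ETH_P_8021Q 0x8100

    struct demo_vlan_attr {
            uint16_t proto_be; /* network byte order */
            uint16_t vid;
            uint8_t  prio;
    };

    static int demo_parse_vlan_push(bool hw_vlan_actions, uint16_t req_proto_be,
                                    uint16_t vid, uint8_t prio,
                                    struct demo_vlan_attr *attr)
    {
            attr->vid = vid;
            if (hw_vlan_actions) {
                    attr->prio = prio;
                    /* an unset protocol defaults to 802.1Q */
                    attr->proto_be = req_proto_be ? req_proto_be
                                                  : htons(DEMO_ETH_P_8021Q);
                    return 0;
            }
            /* emulated path: plain 802.1Q with priority 0 only */
            if (req_proto_be != htons(DEMO_ETH_P_8021Q) || prio)
                    return -EOPNOTSUPP;
            attr->proto_be = req_proto_be;
            return 0;
    }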
+ kvfree(parse_attr); + err = rhashtable_insert_fast(&tc->ht, &flow->node, tc->ht_params); - if (err) - goto err_del_rule; + if (err) { + mlx5e_tc_del_flow(priv, flow); + kfree(flow); + } - if (flow->flags & MLX5E_TC_FLOW_ESWITCH && - !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)) - kvfree(parse_attr); return err; -err_del_rule: - mlx5e_tc_del_flow(priv, flow); - err_free: kvfree(parse_attr); kfree(flow); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 569b42a01026..20297108528a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -176,7 +176,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode, default: hlen = mlx5e_skb_l2_header_offset(skb); } - return min_t(u16, hlen, skb->len); + return min_t(u16, hlen, skb_headlen(skb)); } static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, @@ -417,6 +417,18 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) return mlx5e_sq_xmit(sq, skb, wqe, pi); } +static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq, + struct mlx5_err_cqe *err_cqe) +{ + u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq); + + netdev_err(sq->channel->netdev, + "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n", + sq->cq.mcq.cqn, ci, sq->sqn, err_cqe->syndrome, + err_cqe->vendor_err_synd); + mlx5_dump_err_cqe(sq->cq.mdev, err_cqe); +} + bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { struct mlx5e_txqsq *sq; @@ -456,6 +468,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) wqe_counter = be16_to_cpu(cqe->wqe_counter); + if (unlikely(cqe->op_own >> 4 == MLX5_CQE_REQ_ERR)) { + if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, + &sq->state)) { + mlx5e_dump_error_cqe(sq, + (struct mlx5_err_cqe *)cqe); + queue_work(cq->channel->priv->wq, + &sq->recover.recover_work); + } + sq->stats.cqe_err++; + } + do { struct mlx5e_tx_wqe_info *wi; struct sk_buff *skb; @@ -509,7 +532,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) netdev_tx_completed_queue(sq->txq, npkts, nbytes); if (netif_tx_queue_stopped(sq->txq) && - mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) { + mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, + MLX5E_SQ_STOP_ROOM) && + !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { netif_tx_wake_queue(sq->txq); sq->stats.wake++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 25106e996a96..c1c94974e16b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -393,6 +393,51 @@ static void general_event_handler(struct mlx5_core_dev *dev, } } +/* caller must eventually call mlx5_cq_put on the returned cq */ +static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn) +{ + struct mlx5_cq_table *table = &eq->cq_table; + struct mlx5_core_cq *cq = NULL; + + spin_lock(&table->lock); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + mlx5_cq_hold(cq); + spin_unlock(&table->lock); + + return cq; +} + +static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn) +{ + struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn); + + if (unlikely(!cq)) { + mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn); + return; + } + + ++cq->arm_sn; + + cq->comp(cq); + + mlx5_cq_put(cq); +} + +static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type) +{ + 
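The error-CQE branch added to mlx5e_poll_tx_cq() above arms recovery exactly once: test_and_set_bit() is atomic, so when several error CQEs are polled only the first transition queues the recovery work and the rest merely bump the counter. The same one-shot idiom in a kernel-style sketch (types and names are illustrative):

    #include <linux/bitops.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct demo_sq {
            unsigned long state;            /* bit 0: recovering */
            struct work_struct recover_work;
            u64 cqe_err;
    };

    #define DEMO_SQ_RECOVERING 0

    static void demo_on_error_cqe(struct demo_sq *sq)
    {
            /* only the first caller sees the bit clear and queues the work;
             * the recovery handler clears the bit again when it is done
             */
            if (!test_and_set_bit(DEMO_SQ_RECOVERING, &sq->state))
                    schedule_work(&sq->recover_work);
            sq->cqe_err++;
    }

Leaving the bit set also keeps the TX queue from being woken, which is exactly how the extended wake check in mlx5e_poll_tx_cq() above uses it.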
struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn); + + if (unlikely(!cq)) { + mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn); + return; + } + + cq->event(cq, event_type); + + mlx5_cq_put(cq); +} + static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) { struct mlx5_eq *eq = eq_ptr; @@ -415,7 +460,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) switch (eqe->type) { case MLX5_EVENT_TYPE_COMP: cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; - mlx5_cq_completion(dev, cqn); + mlx5_eq_cq_completion(eq, cqn); break; case MLX5_EVENT_TYPE_DCT_DRAINED: rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; @@ -472,7 +517,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n", cqn, eqe->data.cq_err.syndrome); - mlx5_cq_event(dev, cqn, eqe->type); + mlx5_eq_cq_event(eq, cqn, eqe->type); break; case MLX5_EVENT_TYPE_PAGE_REQUEST: @@ -567,6 +612,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, enum mlx5_eq_type type) { + struct mlx5_cq_table *cq_table = &eq->cq_table; u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; struct mlx5_priv *priv = &dev->priv; irq_handler_t handler; @@ -576,6 +622,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, u32 *in; int err; + /* Init CQ table */ + memset(cq_table, 0, sizeof(*cq_table)); + spin_lock_init(&cq_table->lock); + INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); + eq->type = type; eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); eq->cons_index = 0; @@ -669,7 +720,6 @@ err_buf: mlx5_buf_free(dev, &eq->buf); return err; } -EXPORT_SYMBOL_GPL(mlx5_create_map_eq); int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) { @@ -696,7 +746,40 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) return err; } -EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq); + +int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) +{ + struct mlx5_cq_table *table = &eq->cq_table; + int err; + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, cq->cqn, cq); + spin_unlock_irq(&table->lock); + + return err; +} + +int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) +{ + struct mlx5_cq_table *table = &eq->cq_table; + struct mlx5_core_cq *tmp; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, cq->cqn); + spin_unlock_irq(&table->lock); + + if (!tmp) { + mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", cq->cqn, eq->eqn); + return -ENOENT; + } + + if (tmp != cq) { + mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", cq->cqn, eq->eqn); + return -EINVAL; + } + + return 0; +} int mlx5_eq_init(struct mlx5_core_dev *dev) { @@ -840,4 +923,3 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, MLX5_SET(query_eq_in, in, eq_number, eq->eqn); return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } -EXPORT_SYMBOL_GPL(mlx5_core_eq_query); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 5ecf2cddc16d..332bc56306bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1529,6 +1529,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); + /* Create steering drop counters for ingress and egress ACLs */ + if
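mlx5_eq_cq_get() above takes a reference on the CQ before the table spinlock is dropped, so the completion or event callback can run safely after the lock is released; the caller pays it back with mlx5_cq_put(). The lookup-and-hold shape in isolation (kernel-style sketch; the demo types stand in for mlx5_core_cq and mlx5_cq_table):

    #include <linux/radix-tree.h>
    #include <linux/refcount.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_cq {
            refcount_t refcount;
            u32 cqn;
    };

    struct demo_cq_table {
            spinlock_t lock;                /* protects tree */
            struct radix_tree_root tree;
    };

    /* caller must eventually drop the reference it gets here */
    static struct demo_cq *demo_cq_get(struct demo_cq_table *table, u32 cqn)
    {
            struct demo_cq *cq;

            spin_lock(&table->lock);
            cq = radix_tree_lookup(&table->tree, cqn);
            if (cq)
                    refcount_inc(&cq->refcount);
            spin_unlock(&table->lock);

            return cq;
    }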
(vport_num && esw->mode == SRIOV_LEGACY) + esw_vport_create_drop_counters(vport); + /* Restore old vport configuration */ esw_apply_vport_conf(esw, vport); @@ -1545,10 +1549,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, if (!vport_num) vport->info.trusted = true; - /* create steering drop counters for ingress and egress ACLs */ - if (vport_num && esw->mode == SRIOV_LEGACY) - esw_vport_create_drop_counters(vport); - esw_vport_change_handle_locked(vport); esw->enabled_vports++; @@ -1619,10 +1619,14 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode); esw->mode = mode; - if (mode == SRIOV_LEGACY) + if (mode == SRIOV_LEGACY) { err = esw_create_legacy_fdb_table(esw, nvfs + 1); - else + } else { + mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); + err = esw_offloads_init(esw, nvfs + 1); + } + if (err) goto abort; @@ -1644,12 +1648,17 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) abort: esw->mode = SRIOV_NONE; + + if (mode == SRIOV_OFFLOADS) + mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); + return err; } void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) { struct esw_mc_addr *mc_promisc; + int old_mode; int nvports; int i; @@ -1675,7 +1684,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) else if (esw->mode == SRIOV_OFFLOADS) esw_offloads_cleanup(esw, nvports); + old_mode = esw->mode; esw->mode = SRIOV_NONE; + + if (old_mode == SRIOV_OFFLOADS) + mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); } int mlx5_eswitch_init(struct mlx5_core_dev *dev) @@ -2083,17 +2096,19 @@ unlock: return err; } -static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, - int vport_idx, - struct mlx5_vport_drop_stats *stats) +static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, + int vport_idx, + struct mlx5_vport_drop_stats *stats) { struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5_vport *vport = &esw->vports[vport_idx]; + u64 rx_discard_vport_down, tx_discard_vport_down; u64 bytes = 0; u16 idx = 0; + int err = 0; if (!vport->enabled || esw->mode != SRIOV_LEGACY) - return; + return 0; if (vport->egress.drop_counter) { idx = vport->egress.drop_counter->id; @@ -2104,6 +2119,23 @@ static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, idx = vport->ingress.drop_counter->id; mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes); } + + if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) && + !MLX5_CAP_GEN(dev, transmit_discard_vport_down)) + return 0; + + err = mlx5_query_vport_down_stats(dev, vport_idx, + &rx_discard_vport_down, + &tx_discard_vport_down); + if (err) + return err; + + if (MLX5_CAP_GEN(dev, receive_discard_vport_down)) + stats->rx_dropped += rx_discard_vport_down; + if (MLX5_CAP_GEN(dev, transmit_discard_vport_down)) + stats->tx_dropped += tx_discard_vport_down; + + return 0; } int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, @@ -2167,7 +2199,9 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, vf_stats->broadcast = MLX5_GET_CTR(out, received_eth_broadcast.packets); - mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats); + err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats); + if (err) + goto free_out; vf_stats->rx_dropped = stats.rx_dropped; vf_stats->tx_dropped = stats.tx_dropped; @@ -2175,3 +2209,9 @@ free_out: kvfree(out); return err; } + +u8 mlx5_eswitch_mode(struct 
mlx5_eswitch *esw) +{ + return esw->mode; +} +EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 2fa037066b2f..4cd773fa55e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -37,19 +37,9 @@ #include <linux/if_link.h> #include <net/devlink.h> #include <linux/mlx5/device.h> +#include <linux/mlx5/eswitch.h> #include "lib/mpfs.h" -enum { - SRIOV_NONE, - SRIOV_LEGACY, - SRIOV_OFFLOADS -}; - -enum { - REP_ETH, - NUM_REP_TYPES, -}; - #ifdef CONFIG_MLX5_ESWITCH #define MLX5_MAX_UC_PER_VPORT(dev) \ @@ -139,29 +129,13 @@ struct mlx5_eswitch_fdb { struct mlx5_flow_table *fdb; struct mlx5_flow_group *send_to_vport_grp; struct mlx5_flow_group *miss_grp; - struct mlx5_flow_handle *miss_rule; + struct mlx5_flow_handle *miss_rule_uni; + struct mlx5_flow_handle *miss_rule_multi; int vlan_push_pop_refcount; } offloads; }; }; -struct mlx5_eswitch_rep; -struct mlx5_eswitch_rep_if { - int (*load)(struct mlx5_core_dev *dev, - struct mlx5_eswitch_rep *rep); - void (*unload)(struct mlx5_eswitch_rep *rep); - void *priv; - bool valid; -}; - -struct mlx5_eswitch_rep { - struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES]; - u16 vport; - u8 hw_id[ETH_ALEN]; - u16 vlan; - u32 vlan_refcount; -}; - struct mlx5_esw_offload { struct mlx5_flow_table *ft_offloads; struct mlx5_flow_group *vport_rx_group; @@ -231,9 +205,6 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, int vport, struct ifla_vf_stats *vf_stats); -struct mlx5_flow_handle * -mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, - u32 sqn); void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule); struct mlx5_flow_spec; @@ -256,15 +227,14 @@ enum { SET_VLAN_INSERT = BIT(1) }; -#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x4000 -#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x8000 - struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *in_rep; struct mlx5_eswitch_rep *out_rep; int action; - u16 vlan; + __be16 vlan_proto; + u16 vlan_vid; + u8 vlan_prio; bool vlan_handled; u32 encap_id; u32 mod_hdr_id; @@ -278,13 +248,6 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode); int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode); int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap); int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap); -void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, - int vport_index, - struct mlx5_eswitch_rep_if *rep_if, - u8 rep_type); -void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, - int vport_index, - u8 rep_type); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, @@ -294,6 +257,12 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, int vport, u16 vlan, u8 qos, u8 set_flags); +static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev) +{ + return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) && + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan); +} + #define MLX5_DEBUG_ESWITCH_MASK BIT(3) #define esw_info(dev, format, ...) 
\ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 99f583a15cc3..35e256eb2f6e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -58,8 +58,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (esw->mode != SRIOV_OFFLOADS) return ERR_PTR(-EOPNOTSUPP); - /* per flow vlan pop/push is emulated, don't set that into the firmware */ - flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); + flow_act.action = attr->action; + /* if per flow vlan pop/push is emulated, don't set that into the firmware */ + if (!mlx5_eswitch_vlan_actions_supported(esw->dev)) + flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | + MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); + else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { + flow_act.vlan.ethtype = ntohs(attr->vlan_proto); + flow_act.vlan.vid = attr->vlan_vid; + flow_act.vlan.prio = attr->vlan_prio; + } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; @@ -88,10 +96,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) flow_act.modify_id = attr->mod_hdr_id; - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) flow_act.encap_id = attr->encap_id; rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb, @@ -185,7 +193,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, /* protects against (1) setting rules with different vlans to push and * (2) setting rules w.o vlans (attr->vlan = 0) && w. 
vlans to push (!= 0) */ - if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan)) + if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid)) goto out_notsupp; return 0; @@ -202,6 +210,10 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, bool push, pop, fwd; int err = 0; + /* nop if we're on the vlan push/pop non emulation mode */ + if (mlx5_eswitch_vlan_actions_supported(esw->dev)) + return 0; + push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH); pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); @@ -239,11 +251,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, if (vport->vlan_refcount) goto skip_set_push; - err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0, + err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0, SET_VLAN_INSERT | SET_VLAN_STRIP); if (err) goto out; - vport->vlan = attr->vlan; + vport->vlan = attr->vlan_vid; skip_set_push: vport->vlan_refcount++; } @@ -261,6 +273,10 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, bool push, pop, fwd; int err = 0; + /* nop if we're on the vlan push/pop non emulation mode */ + if (mlx5_eswitch_vlan_actions_supported(esw->dev)) + return 0; + if (!attr->vlan_handled) return 0; @@ -338,6 +354,7 @@ out: kvfree(spec); return flow_rule; } +EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule); void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) { @@ -350,7 +367,11 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) struct mlx5_flow_destination dest = {}; struct mlx5_flow_handle *flow_rule = NULL; struct mlx5_flow_spec *spec; + void *headers_c; + void *headers_v; int err = 0; + u8 *dmac_c; + u8 *dmac_v; spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { @@ -358,6 +379,13 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) goto out; } + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers); + dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, + outer_headers.dmac_47_16); + dmac_c[0] = 0x01; + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.vport_num = 0; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; @@ -366,11 +394,28 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); - esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err); + esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err); goto out; } - esw->fdb_table.offloads.miss_rule = flow_rule; + esw->fdb_table.offloads.miss_rule_uni = flow_rule; + + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers); + dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, + outer_headers.dmac_47_16); + dmac_v[0] = 0x01; + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, + &flow_act, &dest, 1); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err); + mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); + goto out; + } + + esw->fdb_table.offloads.miss_rule_multi = flow_rule; + out: kvfree(spec); return err; @@ -426,6 +471,7 @@ static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw) } #define MAX_PF_SQ 256 +#define MAX_SQ_NVPORTS 32 static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) { @@ -438,6 +484,7 @@ static 
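The pair of slow-path miss rules above keys only on the Ethernet group bit: the mask sets 0x01 in the first DMAC byte, the unicast rule leaves the value clear and the multicast rule sets it. That is the plain I/G-bit test, the same check the kernel's is_multicast_ether_addr() performs:

    #include <stdbool.h>
    #include <stdint.h>

    /* same test the FDB miss rules encode: I/G bit of the destination MAC */
    static bool demo_dmac_is_multicast(const uint8_t dmac[6])
    {
            return dmac[0] & 0x01;
    }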
int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) struct mlx5_flow_group *g; void *match_criteria; u32 *flow_group_in; + u8 *dmac; esw_debug(esw->dev, "Create offloads FDB Tables\n"); flow_group_in = kvzalloc(inlen, GFP_KERNEL); @@ -455,7 +502,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) if (err) goto fast_fdb_err; - table_size = nvports + MAX_PF_SQ + 1; + table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2; ft_attr.max_fte = table_size; ft_attr.prio = FDB_SLOW_PATH; @@ -478,7 +525,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); - ix = nvports + MAX_PF_SQ; + ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ; MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1); @@ -492,10 +539,16 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) /* create miss group */ memset(flow_group_in, 0, inlen); - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_OUTER_HEADERS); + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, + match_criteria); + dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers.dmac_47_16); + dmac[0] = 0x01; MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2); g = mlx5_create_flow_group(fdb, flow_group_in); if (IS_ERR(g)) { @@ -531,7 +584,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) return; esw_debug(esw->dev, "Destroy offloads FDB Tables\n"); - mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule); + mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi); + mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); @@ -789,14 +843,9 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) { int err; - /* disable PF RoCE so missed packets don't go through RoCE steering */ - mlx5_dev_list_lock(); - mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - mlx5_dev_list_unlock(); - err = esw_create_offloads_fdb_tables(esw, nvports); if (err) - goto create_fdb_err; + return err; err = esw_create_offloads_table(esw); if (err) @@ -821,12 +870,6 @@ create_fg_err: create_ft_err: esw_destroy_offloads_fdb_tables(esw); -create_fdb_err: - /* enable back PF RoCE */ - mlx5_dev_list_lock(); - mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - mlx5_dev_list_unlock(); - return err; } @@ -844,9 +887,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw) } /* enable back PF RoCE */ - mlx5_dev_list_lock(); - mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - mlx5_dev_list_unlock(); + mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); return err; } @@ -1160,10 +1201,12 @@ void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, rep_if->load = __rep_if->load; rep_if->unload = __rep_if->unload; + rep_if->get_proto_dev = __rep_if->get_proto_dev; rep_if->priv = __rep_if->priv; rep_if->valid = true; } 
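The slow-path FDB sizing above reserves MAX_SQ_NVPORTS send-to-vport entries per vport, MAX_PF_SQ entries for the PF, and two extra entries for the unicast and multicast miss rules. A worked instance of the arithmetic (the nvports value is just a sample):

    #include <stdio.h>

    #define MAX_SQ_NVPORTS 32
    #define MAX_PF_SQ 256

    int main(void)
    {
            int nvports = 4; /* sample SR-IOV configuration */

            /* per-vport SQ rules + PF SQ rules + unicast/multicast miss */
            int table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;

            printf("%d\n", table_size); /* 4 * 32 + 256 + 2 = 386 */
            return 0;
    }

The +2 replaces the old +1 because the miss path now needs both a unicast and a multicast catch-all entry, which is also why the miss group's end_flow_index grows to ix + 2.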
+EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep); void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, int vport_index, u8 rep_type) @@ -1178,6 +1221,7 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, rep->rep_if[rep_type].valid = false; } +EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) { @@ -1188,3 +1232,35 @@ void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) rep = &offloads->vport_reps[UPLINK_REP_INDEX]; return rep->rep_if[rep_type].priv; } + +void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, + int vport, + u8 rep_type) +{ + struct mlx5_esw_offload *offloads = &esw->offloads; + struct mlx5_eswitch_rep *rep; + + if (vport == FDB_UPLINK_VPORT) + vport = UPLINK_REP_INDEX; + + rep = &offloads->vport_reps[vport]; + + if (rep->rep_if[rep_type].valid && + rep->rep_if[rep_type].get_proto_dev) + return rep->rep_if[rep_type].get_proto_dev(rep); + return NULL; +} +EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev); + +void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type) +{ + return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type); +} +EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev); + +struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, + int vport) +{ + return &esw->offloads.vport_reps[vport]; +} +EXPORT_SYMBOL(mlx5_eswitch_vport_rep); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index e6175f8ac0e4..de7fe087d6fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -232,7 +232,7 @@ static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, return -ENOMEM; mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); - MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA); + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA); MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lr, 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 35d0e33381ca..0f5da499a223 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -31,49 +31,91 @@ * */ +#include <linux/rhashtable.h> #include <linux/mlx5/driver.h> +#include <linux/mlx5/fs_helpers.h> +#include <linux/mlx5/fs.h> +#include <linux/rbtree.h> #include "mlx5_core.h" +#include "fs_cmd.h" #include "fpga/ipsec.h" #include "fpga/sdk.h" #include "fpga/core.h" #define SBU_QP_QUEUE_SIZE 8 +#define MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC (60 * 1000) -enum mlx5_ipsec_response_syndrome { - MLX5_IPSEC_RESPONSE_SUCCESS = 0, - MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1, - MLX5_IPSEC_RESPONSE_SADB_ISSUE = 2, - MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3, +enum mlx5_fpga_ipsec_cmd_status { + MLX5_FPGA_IPSEC_CMD_PENDING, + MLX5_FPGA_IPSEC_CMD_SEND_FAIL, + MLX5_FPGA_IPSEC_CMD_COMPLETE, }; -enum mlx5_fpga_ipsec_sacmd_status { - MLX5_FPGA_IPSEC_SACMD_PENDING, - MLX5_FPGA_IPSEC_SACMD_SEND_FAIL, - MLX5_FPGA_IPSEC_SACMD_COMPLETE, -}; - -struct mlx5_ipsec_command_context { +struct mlx5_fpga_ipsec_cmd_context { struct mlx5_fpga_dma_buf buf; - struct mlx5_accel_ipsec_sa sa; - enum mlx5_fpga_ipsec_sacmd_status status; + enum mlx5_fpga_ipsec_cmd_status status; + struct mlx5_ifc_fpga_ipsec_cmd_resp resp; int status_code; struct completion complete; struct mlx5_fpga_device *dev; struct 
list_head list; /* Item in pending_cmds */ + u8 command[0]; +}; + +struct mlx5_fpga_esp_xfrm; + +struct mlx5_fpga_ipsec_sa_ctx { + struct rhash_head hash; + struct mlx5_ifc_fpga_ipsec_sa hw_sa; + struct mlx5_core_dev *dev; + struct mlx5_fpga_esp_xfrm *fpga_xfrm; +}; + +struct mlx5_fpga_esp_xfrm { + unsigned int num_rules; + struct mlx5_fpga_ipsec_sa_ctx *sa_ctx; + struct mutex lock; /* xfrm lock */ + struct mlx5_accel_esp_xfrm accel_xfrm; +}; + +struct mlx5_fpga_ipsec_rule { + struct rb_node node; + struct fs_fte *fte; + struct mlx5_fpga_ipsec_sa_ctx *ctx; }; -struct mlx5_ipsec_sadb_resp { - __be32 syndrome; - __be32 sw_sa_handle; - u8 reserved[24]; -} __packed; +static const struct rhashtable_params rhash_sa = { + .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), + .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), + .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), + .automatic_shrinking = true, + .min_size = 1, +}; struct mlx5_fpga_ipsec { + struct mlx5_fpga_device *fdev; struct list_head pending_cmds; spinlock_t pending_cmds_lock; /* Protects pending_cmds */ u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)]; struct mlx5_fpga_conn *conn; + + struct notifier_block fs_notifier_ingress_bypass; + struct notifier_block fs_notifier_egress; + + /* Map hardware SA --> SA context + * (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx) + * We will use this hash to avoid SAs duplication in fpga which + * aren't allowed + */ + struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */ + struct mutex sa_hash_lock; + + /* Tree holding all rules for this fpga device + * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id) + */ + struct rb_root rules_rb; + struct mutex rules_rb_lock; /* rules lock */ }; static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) @@ -97,28 +139,29 @@ static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn, struct mlx5_fpga_dma_buf *buf, u8 status) { - struct mlx5_ipsec_command_context *context; + struct mlx5_fpga_ipsec_cmd_context *context; if (status) { - context = container_of(buf, struct mlx5_ipsec_command_context, + context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context, buf); mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n", status); - context->status = MLX5_FPGA_IPSEC_SACMD_SEND_FAIL; + context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL; complete(&context->complete); } } -static inline int syndrome_to_errno(enum mlx5_ipsec_response_syndrome syndrome) +static inline +int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome) { switch (syndrome) { - case MLX5_IPSEC_RESPONSE_SUCCESS: + case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS: return 0; - case MLX5_IPSEC_RESPONSE_SADB_ISSUE: + case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE: return -EEXIST; - case MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST: + case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST: return -EINVAL; - case MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE: + case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE: return -EIO; } return -EIO; @@ -126,9 +169,9 @@ static inline int syndrome_to_errno(enum mlx5_ipsec_response_syndrome syndrome) static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) { - struct mlx5_ipsec_sadb_resp *resp = buf->sg[0].data; - struct mlx5_ipsec_command_context *context; - enum mlx5_ipsec_response_syndrome syndrome; + struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data; + struct mlx5_fpga_ipsec_cmd_context *context; + enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome; struct mlx5_fpga_device 
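rhash_sa above keys the hash table on the entire hw_sa blob embedded in the SA context: key_len and key_offset describe the key bytes inside the object and head_offset locates the rhash_head link. A reduced sketch of the same parameter wiring, using an illustrative entry type with a u32 key:

    #include <linux/rhashtable.h>
    #include <linux/types.h>

    struct demo_entry {
            struct rhash_head hash; /* linkage used by the table */
            u32 key;                /* stands in for the hw_sa blob */
    };

    static const struct rhashtable_params demo_params = {
            .key_len             = sizeof(u32),
            .key_offset          = offsetof(struct demo_entry, key),
            .head_offset         = offsetof(struct demo_entry, hash),
            .automatic_shrinking = true,
            .min_size            = 1,
    };

With params like these, rhashtable_lookup_insert_fast() gives insert-or-EEXIST semantics in one step, which is how the code below refuses duplicate hardware SAs.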
*fdev = cb_arg; unsigned long flags; @@ -138,12 +181,12 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) return; } - mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x sa_id %x\n", - ntohl(resp->syndrome), ntohl(resp->sw_sa_handle)); + mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n", + ntohl(resp->syndrome)); spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); context = list_first_entry_or_null(&fdev->ipsec->pending_cmds, - struct mlx5_ipsec_command_context, + struct mlx5_fpga_ipsec_cmd_context, list); if (context) list_del(&context->list); @@ -155,51 +198,48 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) } mlx5_fpga_dbg(fdev, "Handling response for %p\n", context); - if (context->sa.sw_sa_handle != resp->sw_sa_handle) { - mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n", - ntohl(context->sa.sw_sa_handle), - ntohl(resp->sw_sa_handle)); - return; - } - syndrome = ntohl(resp->syndrome); context->status_code = syndrome_to_errno(syndrome); - context->status = MLX5_FPGA_IPSEC_SACMD_COMPLETE; + context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE; + memcpy(&context->resp, resp, sizeof(*resp)); if (context->status_code) - mlx5_fpga_warn(fdev, "IPSec SADB command failed with syndrome %08x\n", + mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n", syndrome); + complete(&context->complete); } -void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd) +static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev, + const void *cmd, int cmd_size) { - struct mlx5_ipsec_command_context *context; + struct mlx5_fpga_ipsec_cmd_context *context; struct mlx5_fpga_device *fdev = mdev->fpga; unsigned long flags; - int res = 0; + int res; - BUILD_BUG_ON((sizeof(struct mlx5_accel_ipsec_sa) & 3) != 0); if (!fdev || !fdev->ipsec) return ERR_PTR(-EOPNOTSUPP); - context = kzalloc(sizeof(*context), GFP_ATOMIC); + if (cmd_size & 3) + return ERR_PTR(-EINVAL); + + context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC); if (!context) return ERR_PTR(-ENOMEM); - memcpy(&context->sa, cmd, sizeof(*cmd)); + context->status = MLX5_FPGA_IPSEC_CMD_PENDING; + context->dev = fdev; context->buf.complete = mlx5_fpga_ipsec_send_complete; - context->buf.sg[0].size = sizeof(context->sa); - context->buf.sg[0].data = &context->sa; init_completion(&context->complete); - context->dev = fdev; + memcpy(&context->command, cmd, cmd_size); + context->buf.sg[0].size = cmd_size; + context->buf.sg[0].data = &context->command; + spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); list_add_tail(&context->list, &fdev->ipsec->pending_cmds); spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); - context->status = MLX5_FPGA_IPSEC_SACMD_PENDING; - res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf); if (res) { mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n", @@ -214,47 +254,103 @@ void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, return context; } -int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx) +static int mlx5_fpga_ipsec_cmd_wait(void *ctx) { - struct mlx5_ipsec_command_context *context = ctx; + struct mlx5_fpga_ipsec_cmd_context *context = ctx; + unsigned long timeout = + msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC); int res; - res = wait_for_completion_killable(&context->complete); - if (res) { + res = wait_for_completion_timeout(&context->complete, timeout); + if (!res) { mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command 
response\n"); - return -EINTR; + return -ETIMEDOUT; } - if (context->status == MLX5_FPGA_IPSEC_SACMD_COMPLETE) + if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE) res = context->status_code; else res = -EIO; - kfree(context); return res; } +static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec) +{ + if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command)) + return true; + return false; +} + +static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev, + struct mlx5_ifc_fpga_ipsec_sa *hw_sa, + int opcode) +{ + struct mlx5_core_dev *dev = fdev->mdev; + struct mlx5_ifc_fpga_ipsec_sa *sa; + struct mlx5_fpga_ipsec_cmd_context *cmd_context; + size_t sa_cmd_size; + int err; + + hw_sa->ipsec_sa_v1.cmd = htonl(opcode); + if (is_v2_sadb_supported(fdev->ipsec)) + sa_cmd_size = sizeof(*hw_sa); + else + sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1); + + cmd_context = (struct mlx5_fpga_ipsec_cmd_context *) + mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size); + if (IS_ERR(cmd_context)) + return PTR_ERR(cmd_context); + + err = mlx5_fpga_ipsec_cmd_wait(cmd_context); + if (err) + goto out; + + sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command; + if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) { + mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n", + ntohl(sa->ipsec_sa_v1.sw_sa_handle), + ntohl(cmd_context->resp.sw_sa_handle)); + err = -EIO; + } + +out: + kfree(cmd_context); + return err; +} + u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { struct mlx5_fpga_device *fdev = mdev->fpga; u32 ret = 0; - if (mlx5_fpga_is_ipsec_device(mdev)) - ret |= MLX5_ACCEL_IPSEC_DEVICE; - else + if (mlx5_fpga_is_ipsec_device(mdev)) { + ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE; + ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA; + } else { return ret; + } if (!fdev->ipsec) return ret; if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp)) - ret |= MLX5_ACCEL_IPSEC_ESP; + ret |= MLX5_ACCEL_IPSEC_CAP_ESP; if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6)) - ret |= MLX5_ACCEL_IPSEC_IPV6; + ret |= MLX5_ACCEL_IPSEC_CAP_IPV6; if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso)) - ret |= MLX5_ACCEL_IPSEC_LSO; + ret |= MLX5_ACCEL_IPSEC_CAP_LSO; + + if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer)) + ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER; + + if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) { + ret |= MLX5_ACCEL_IPSEC_CAP_ESN; + ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN; + } return ret; } @@ -318,6 +414,829 @@ out: return ret; } +static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags) +{ + struct mlx5_fpga_ipsec_cmd_context *context; + struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0}; + int err; + + cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP); + cmd.flags = htonl(flags); + context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd)); + if (IS_ERR(context)) { + err = PTR_ERR(context); + goto out; + } + + err = mlx5_fpga_ipsec_cmd_wait(context); + if (err) + goto out; + + if ((context->resp.flags & cmd.flags) != cmd.flags) { + mlx5_fpga_err(context->dev, "Failed to set capabilities. 
cmd 0x%08x vs resp 0x%08x\n", + cmd.flags, + context->resp.flags); + err = -EIO; + } + +out: + return err; +} + +static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev) +{ + u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev); + u32 flags = 0; + + if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER) + flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER; + + return mlx5_fpga_ipsec_set_caps(mdev, flags); +} + +static void +mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs, + struct mlx5_ifc_fpga_ipsec_sa *hw_sa) +{ + const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm; + + /* key */ + memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key, + aes_gcm->key_len / 8); + /* Duplicate 128 bit key twice according to HW layout */ + if (aes_gcm->key_len == 128) + memcpy(&hw_sa->ipsec_sa_v1.key_enc[16], + aes_gcm->aes_key, aes_gcm->key_len / 8); + + /* salt and seq_iv */ + memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv, + sizeof(aes_gcm->seq_iv)); + memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt, + sizeof(aes_gcm->salt)); + + /* esn */ + if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) { + hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN; + hw_sa->ipsec_sa_v1.flags |= + (xfrm_attrs->flags & + MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ? + MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0; + hw_sa->esn = htonl(xfrm_attrs->esn); + } else { + hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN; + hw_sa->ipsec_sa_v1.flags &= + ~(xfrm_attrs->flags & + MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ? + MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0; + hw_sa->esn = 0; + } + + /* rx handle */ + hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle); + + /* enc mode */ + switch (aes_gcm->key_len) { + case 128: + hw_sa->ipsec_sa_v1.enc_mode = + MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128; + break; + case 256: + hw_sa->ipsec_sa_v1.enc_mode = + MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128; + break; + } + + /* flags */ + hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID | + MLX5_FPGA_IPSEC_SA_SPI_EN | + MLX5_FPGA_IPSEC_SA_IP_ESP; + + if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT) + hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX; + else + hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX; +} + +static void +mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6, + struct mlx5_ifc_fpga_ipsec_sa *hw_sa) +{ + mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa); + + /* IPs */ + memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip)); + memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip)); + + /* SPI */ + hw_sa->ipsec_sa_v1.spi = spi; + + /* flags */ + if (is_ipv6) + hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6; +} + +static bool is_full_mask(const void *p, size_t len) +{ + WARN_ON(len % 4); + + return !memchr_inv(p, 0xff, len); +} + +static bool validate_fpga_full_mask(struct mlx5_core_dev *dev, + const u32 *match_c, + const u32 *match_v) +{ + const void *misc_params_c = MLX5_ADDR_OF(fte_match_param, + match_c, + misc_parameters); + const void *headers_c = MLX5_ADDR_OF(fte_match_param, + match_c, + outer_headers); + const void *headers_v = MLX5_ADDR_OF(fte_match_param, + match_v, + outer_headers); + + if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) { + const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, + 
headers_c, + src_ipv4_src_ipv6.ipv4_layout.ipv4); + const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, + headers_c, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + + if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout, + ipv4)) || + !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout, + ipv4))) + return false; + } else { + const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, + headers_c, + src_ipv4_src_ipv6.ipv6_layout.ipv6); + const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, + headers_c, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6); + + if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout, + ipv6)) || + !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout, + ipv6))) + return false; + } + + if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c, + outer_esp_spi), + MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi))) + return false; + + return true; +} + +static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev, + u8 match_criteria_enable, + const u32 *match_c, + const u32 *match_v) +{ + u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev); + bool ipv6_flow; + + ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v); + + if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) || + mlx5_fs_is_outer_udp_flow(match_c, match_v) || + mlx5_fs_is_outer_tcp_flow(match_c, match_v) || + mlx5_fs_is_vxlan_flow(match_c) || + !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) || + ipv6_flow)) + return false; + + if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE)) + return false; + + if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) && + mlx5_fs_is_outer_ipsec_flow(match_c)) + return false; + + if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) && + ipv6_flow) + return false; + + if (!validate_fpga_full_mask(dev, match_c, match_v)) + return false; + + return true; +} + +static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev, + u8 match_criteria_enable, + const u32 *match_c, + const u32 *match_v, + struct mlx5_flow_act *flow_act) +{ + const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c, + outer_headers); + bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) || + MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0); + bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) || + MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0); + int ret; + + ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c, + match_v); + if (!ret) + return ret; + + if (is_dmac || is_smac || + (match_criteria_enable & + ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) || + (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) || + flow_act->has_flow_tag) + return false; + + return true; +} + +void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *accel_xfrm, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6) +{ + struct mlx5_fpga_ipsec_sa_ctx *sa_ctx; + struct mlx5_fpga_esp_xfrm *fpga_xfrm = + container_of(accel_xfrm, typeof(*fpga_xfrm), + accel_xfrm); + struct mlx5_fpga_device *fdev = mdev->fpga; + struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; + int opcode, err; + void *context; + + /* alloc SA */ + sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL); + if (!sa_ctx) + return ERR_PTR(-ENOMEM); + + sa_ctx->dev = mdev; + + /* build candidate SA */ + mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs, + saddr, daddr, spi, is_ipv6, + &sa_ctx->hw_sa); + + mutex_lock(&fpga_xfrm->lock); + + if (fpga_xfrm->sa_ctx) { /* 
multiple rules for same accel_xfrm */ + /* all rules must be with same IPs and SPI */ + if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa, + sizeof(sa_ctx->hw_sa))) { + context = ERR_PTR(-EINVAL); + goto exists; + } + + ++fpga_xfrm->num_rules; + context = fpga_xfrm->sa_ctx; + goto exists; + } + + /* This is unbounded fpga_xfrm, try to add to hash */ + mutex_lock(&fipsec->sa_hash_lock); + + err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash, + rhash_sa); + if (err) { + /* Can't bound different accel_xfrm to already existing sa_ctx. + * This is because we can't support multiple ketmats for + * same IPs and SPI + */ + context = ERR_PTR(-EEXIST); + goto unlock_hash; + } + + /* Bound accel_xfrm to sa_ctx */ + opcode = is_v2_sadb_supported(fdev->ipsec) ? + MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 : + MLX5_FPGA_IPSEC_CMD_OP_ADD_SA; + err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode); + sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; + if (err) { + context = ERR_PTR(err); + goto delete_hash; + } + + mutex_unlock(&fipsec->sa_hash_lock); + + ++fpga_xfrm->num_rules; + fpga_xfrm->sa_ctx = sa_ctx; + sa_ctx->fpga_xfrm = fpga_xfrm; + + mutex_unlock(&fpga_xfrm->lock); + + return sa_ctx; + +delete_hash: + WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash, + rhash_sa)); +unlock_hash: + mutex_unlock(&fipsec->sa_hash_lock); + +exists: + mutex_unlock(&fpga_xfrm->lock); + kfree(sa_ctx); + return context; +} + +static void * +mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev, + struct fs_fte *fte, + bool is_egress) +{ + struct mlx5_accel_esp_xfrm *accel_xfrm; + __be32 saddr[4], daddr[4], spi; + struct mlx5_flow_group *fg; + bool is_ipv6 = false; + + fs_get_obj(fg, fte->node.parent); + /* validate */ + if (is_egress && + !mlx5_is_fpga_egress_ipsec_rule(mdev, + fg->mask.match_criteria_enable, + fg->mask.match_criteria, + fte->val, + &fte->action)) + return ERR_PTR(-EINVAL); + else if (!mlx5_is_fpga_ipsec_rule(mdev, + fg->mask.match_criteria_enable, + fg->mask.match_criteria, + fte->val)) + return ERR_PTR(-EINVAL); + + /* get xfrm context */ + accel_xfrm = + (struct mlx5_accel_esp_xfrm *)fte->action.esp_id; + + /* IPs */ + if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria, + fte->val)) { + memcpy(&saddr[3], + MLX5_ADDR_OF(fte_match_set_lyr_2_4, + fte->val, + src_ipv4_src_ipv6.ipv4_layout.ipv4), + sizeof(saddr[3])); + memcpy(&daddr[3], + MLX5_ADDR_OF(fte_match_set_lyr_2_4, + fte->val, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + sizeof(daddr[3])); + } else { + memcpy(saddr, + MLX5_ADDR_OF(fte_match_param, + fte->val, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + sizeof(saddr)); + memcpy(daddr, + MLX5_ADDR_OF(fte_match_param, + fte->val, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + sizeof(daddr)); + is_ipv6 = true; + } + + /* SPI */ + spi = MLX5_GET_BE(typeof(spi), + fte_match_param, fte->val, + misc_parameters.outer_esp_spi); + + /* create */ + return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm, + saddr, daddr, + spi, is_ipv6); +} + +static void +mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx) +{ + struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga; + struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; + int opcode = is_v2_sadb_supported(fdev->ipsec) ? 
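The create path above shares one hardware SA among all rules of an xfrm by bumping num_rules under the xfrm mutex; mlx5_fpga_ipsec_delete_sa_ctx(), just below, releases the SA only when the count drops to zero. The last-reference teardown in a reduced form (the types are illustrative and demo_release_sa() is a hypothetical stand-in for the release helper):

    #include <linux/mutex.h>

    struct demo_sa_ctx;
    void demo_release_sa(struct demo_sa_ctx *sa_ctx); /* hypothetical helper */

    struct demo_xfrm {
            struct mutex lock;       /* protects num_rules and sa_ctx */
            unsigned int num_rules;
            struct demo_sa_ctx *sa_ctx;
    };

    static void demo_put_rule(struct demo_xfrm *xfrm)
    {
            mutex_lock(&xfrm->lock);
            if (!--xfrm->num_rules) { /* last rule: tear down the HW SA */
                    demo_release_sa(xfrm->sa_ctx);
                    xfrm->sa_ctx = NULL;
            }
            mutex_unlock(&xfrm->lock);
    }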
+ MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 : + MLX5_FPGA_IPSEC_CMD_OP_DEL_SA; + int err; + + err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode); + sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; + if (err) { + WARN_ON(err); + return; + } + + mutex_lock(&fipsec->sa_hash_lock); + WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash, + rhash_sa)); + mutex_unlock(&fipsec->sa_hash_lock); +} + +void mlx5_fpga_ipsec_delete_sa_ctx(void *context) +{ + struct mlx5_fpga_esp_xfrm *fpga_xfrm = + ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm; + + mutex_lock(&fpga_xfrm->lock); + if (!--fpga_xfrm->num_rules) { + mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx); + fpga_xfrm->sa_ctx = NULL; + } + mutex_unlock(&fpga_xfrm->lock); +} + +static inline struct mlx5_fpga_ipsec_rule * +_rule_search(struct rb_root *root, struct fs_fte *fte) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct mlx5_fpga_ipsec_rule *rule = + container_of(node, struct mlx5_fpga_ipsec_rule, + node); + + if (fte < rule->fte) + node = node->rb_left; + else if (fte > rule->fte) + node = node->rb_right; + else + return rule; + } + return NULL; +} + +static struct mlx5_fpga_ipsec_rule * +rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte) +{ + struct mlx5_fpga_ipsec_rule *rule; + + mutex_lock(&ipsec_dev->rules_rb_lock); + rule = _rule_search(&ipsec_dev->rules_rb, fte); + mutex_unlock(&ipsec_dev->rules_rb_lock); + + return rule; +} + +static inline int _rule_insert(struct rb_root *root, + struct mlx5_fpga_ipsec_rule *rule) +{ + struct rb_node **new = &root->rb_node, *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct mlx5_fpga_ipsec_rule *this = + container_of(*new, struct mlx5_fpga_ipsec_rule, + node); + + parent = *new; + if (rule->fte < this->fte) + new = &((*new)->rb_left); + else if (rule->fte > this->fte) + new = &((*new)->rb_right); + else + return -EEXIST; + } + + /* Add new node and rebalance tree.
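The (fte, rule) map above is an rbtree ordered by raw pointer value, which is enough because it is only ever probed for exact matches; the one invariant is that search and insert must descend with the same comparison, smaller keys to the left. The descend loop in a reduced kernel-style sketch (the demo types are illustrative):

    #include <linux/rbtree.h>

    struct demo_rule {
            struct rb_node node;
            const void *key; /* stands in for struct fs_fte * */
    };

    static struct demo_rule *demo_search(struct rb_root *root, const void *key)
    {
            struct rb_node *n = root->rb_node;

            while (n) {
                    struct demo_rule *rule = rb_entry(n, struct demo_rule, node);

                    /* smaller keys live in the left subtree, same as on insert */
                    if (key < rule->key)
                            n = n->rb_left;
                    else if (key > rule->key)
                            n = n->rb_right;
                    else
                            return rule;
            }
            return NULL;
    }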
*/ + rb_link_node(&rule->node, parent, new); + rb_insert_color(&rule->node, root); + + return 0; +} + +static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev, + struct mlx5_fpga_ipsec_rule *rule) +{ + int ret; + + mutex_lock(&ipsec_dev->rules_rb_lock); + ret = _rule_insert(&ipsec_dev->rules_rb, rule); + mutex_unlock(&ipsec_dev->rules_rb_lock); + + return ret; +} + +static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev, + struct mlx5_fpga_ipsec_rule *rule) +{ + struct rb_root *root = &ipsec_dev->rules_rb; + + mutex_lock(&ipsec_dev->rules_rb_lock); + rb_erase(&rule->node, root); + mutex_unlock(&ipsec_dev->rules_rb_lock); +} + +static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev, + struct mlx5_fpga_ipsec_rule *rule) +{ + _rule_delete(ipsec_dev, rule); + kfree(rule); +} + +struct mailbox_mod { + uintptr_t saved_esp_id; + u32 saved_action; + u32 saved_outer_esp_spi_value; +}; + +static void restore_spec_mailbox(struct fs_fte *fte, + struct mailbox_mod *mbox_mod) +{ + char *misc_params_v = MLX5_ADDR_OF(fte_match_param, + fte->val, + misc_parameters); + + MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, + mbox_mod->saved_outer_esp_spi_value); + fte->action.action |= mbox_mod->saved_action; + fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id; +} + +static void modify_spec_mailbox(struct mlx5_core_dev *mdev, + struct fs_fte *fte, + struct mailbox_mod *mbox_mod) +{ + char *misc_params_v = MLX5_ADDR_OF(fte_match_param, + fte->val, + misc_parameters); + + mbox_mod->saved_esp_id = fte->action.esp_id; + mbox_mod->saved_action = fte->action.action & + (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_DECRYPT); + mbox_mod->saved_outer_esp_spi_value = + MLX5_GET(fte_match_set_misc, misc_params_v, + outer_esp_spi); + + fte->action.esp_id = 0; + fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_DECRYPT); + if (!MLX5_CAP_FLOWTABLE(mdev, + flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) + MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0); +} + +static enum fs_flow_table_type egress_to_fs_ft(bool egress) +{ + return egress ? 
FS_FT_NIC_TX : FS_FT_NIC_RX; +} + +static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id, + bool is_egress) +{ + int (*create_flow_group)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, u32 *in, + unsigned int *group_id) = + mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group; + char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in, + match_criteria.misc_parameters); + u32 saved_outer_esp_spi_mask; + u8 match_criteria_enable; + int ret; + + if (MLX5_CAP_FLOWTABLE(dev, + flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) + return create_flow_group(dev, ft, in, group_id); + + match_criteria_enable = + MLX5_GET(create_flow_group_in, in, match_criteria_enable); + saved_outer_esp_spi_mask = + MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); + if (!match_criteria_enable || !saved_outer_esp_spi_mask) + return create_flow_group(dev, ft, in, group_id); + + MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0); + + if (!(*misc_params_c) && + !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1)) + MLX5_SET(create_flow_group_in, in, match_criteria_enable, + match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS); + + ret = create_flow_group(dev, ft, in, group_id); + + MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask); + MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable); + + return ret; +} + +static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg, + struct fs_fte *fte, + bool is_egress) +{ + int (*create_fte)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg, + struct fs_fte *fte) = + mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte; + struct mlx5_fpga_device *fdev = dev->fpga; + struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; + struct mlx5_fpga_ipsec_rule *rule; + bool is_esp = fte->action.esp_id; + struct mailbox_mod mbox_mod; + int ret; + + if (!is_esp || + !(fte->action.action & + (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) + return create_fte(dev, ft, fg, fte); + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress); + if (IS_ERR(rule->ctx)) { + int err = PTR_ERR(rule->ctx); + kfree(rule); + return err; + } + + rule->fte = fte; + WARN_ON(rule_insert(fipsec, rule)); + + modify_spec_mailbox(dev, fte, &mbox_mod); + ret = create_fte(dev, ft, fg, fte); + restore_spec_mailbox(fte, &mbox_mod); + if (ret) { + _rule_delete(fipsec, rule); + mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx); + kfree(rule); + } + + return ret; +} + +static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte, + bool is_egress) +{ + int (*update_fte)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte) = + mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte; + bool is_esp = fte->action.esp_id; + struct mailbox_mod mbox_mod; + int ret; + + if (!is_esp || + !(fte->action.action & + (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) + return update_fte(dev, ft, group_id, modify_mask, fte); + + modify_spec_mailbox(dev, fte, &mbox_mod); + ret = update_fte(dev, ft, 
group_id, modify_mask, fte); + restore_spec_mailbox(fte, &mbox_mod); + + return ret; +} + +static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_fte *fte, + bool is_egress) +{ + int (*delete_fte)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_fte *fte) = + mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte; + struct mlx5_fpga_device *fdev = dev->fpga; + struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; + struct mlx5_fpga_ipsec_rule *rule; + bool is_esp = fte->action.esp_id; + struct mailbox_mod mbox_mod; + int ret; + + if (!is_esp || + !(fte->action.action & + (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) + return delete_fte(dev, ft, fte); + + rule = rule_search(fipsec, fte); + if (!rule) + return -ENOENT; + + mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx); + rule_delete(fipsec, rule); + + modify_spec_mailbox(dev, fte, &mbox_mod); + ret = delete_fte(dev, ft, fte); + restore_spec_mailbox(fte, &mbox_mod); + + return ret; +} + +static int +mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id) +{ + return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true); +} + +static int +mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg, + struct fs_fte *fte) +{ + return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true); +} + +static int +mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte) +{ + return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte, + true); +} + +static int +mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_fte *fte) +{ + return fpga_ipsec_fs_delete_fte(dev, ft, fte, true); +} + +static int +mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id) +{ + return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false); +} + +static int +mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg, + struct fs_fte *fte) +{ + return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false); +} + +static int +mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte) +{ + return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte, + false); +} + +static int +mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_fte *fte) +{ + return fpga_ipsec_fs_delete_fte(dev, ft, fte, false); +} + +static struct mlx5_flow_cmds fpga_ipsec_ingress; +static struct mlx5_flow_cmds fpga_ipsec_egress; + +const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type) +{ + switch (type) { + case FS_FT_NIC_RX: + return &fpga_ipsec_ingress; + case FS_FT_NIC_TX: + return &fpga_ipsec_egress; + default: + WARN_ON(true); + return NULL; + } +} + int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) { struct mlx5_fpga_conn_attr init_attr = {0}; @@ -332,6 +1251,8 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) if (!fdev->ipsec) return -ENOMEM; + fdev->ipsec->fdev = fdev; + err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps), 
fdev->ipsec->caps); if (err) { @@ -355,14 +1276,47 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) goto error; } fdev->ipsec->conn = conn; + + err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa); + if (err) + goto err_destroy_conn; + mutex_init(&fdev->ipsec->sa_hash_lock); + + fdev->ipsec->rules_rb = RB_ROOT; + mutex_init(&fdev->ipsec->rules_rb_lock); + + err = mlx5_fpga_ipsec_enable_supported_caps(mdev); + if (err) { + mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n", + err); + goto err_destroy_hash; + } + return 0; +err_destroy_hash: + rhashtable_destroy(&fdev->ipsec->sa_hash); + +err_destroy_conn: + mlx5_fpga_sbu_conn_destroy(conn); + error: kfree(fdev->ipsec); fdev->ipsec = NULL; return err; } +static void destroy_rules_rb(struct rb_root *root) +{ + struct mlx5_fpga_ipsec_rule *r, *tmp; + + rbtree_postorder_for_each_entry_safe(r, tmp, root, node) { + rb_erase(&r->node, root); + mlx5_fpga_ipsec_delete_sa_ctx(r->ctx); + kfree(r); + } +} + void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) { struct mlx5_fpga_device *fdev = mdev->fpga; @@ -370,7 +1324,209 @@ void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) if (!mlx5_fpga_is_ipsec_device(mdev)) return; + destroy_rules_rb(&fdev->ipsec->rules_rb); + rhashtable_destroy(&fdev->ipsec->sa_hash); + mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn); kfree(fdev->ipsec); fdev->ipsec = NULL; } + +void mlx5_fpga_ipsec_build_fs_cmds(void) +{ + /* ingress */ + fpga_ipsec_ingress.create_flow_table = + mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table; + fpga_ipsec_ingress.destroy_flow_table = + mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table; + fpga_ipsec_ingress.modify_flow_table = + mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table; + fpga_ipsec_ingress.create_flow_group = + mlx5_fpga_ipsec_fs_create_flow_group_ingress; + fpga_ipsec_ingress.destroy_flow_group = + mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group; + fpga_ipsec_ingress.create_fte = + mlx5_fpga_ipsec_fs_create_fte_ingress; + fpga_ipsec_ingress.update_fte = + mlx5_fpga_ipsec_fs_update_fte_ingress; + fpga_ipsec_ingress.delete_fte = + mlx5_fpga_ipsec_fs_delete_fte_ingress; + fpga_ipsec_ingress.update_root_ft = + mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft; + + /* egress */ + fpga_ipsec_egress.create_flow_table = + mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table; + fpga_ipsec_egress.destroy_flow_table = + mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table; + fpga_ipsec_egress.modify_flow_table = + mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table; + fpga_ipsec_egress.create_flow_group = + mlx5_fpga_ipsec_fs_create_flow_group_egress; + fpga_ipsec_egress.destroy_flow_group = + mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group; + fpga_ipsec_egress.create_fte = + mlx5_fpga_ipsec_fs_create_fte_egress; + fpga_ipsec_egress.update_fte = + mlx5_fpga_ipsec_fs_update_fte_egress; + fpga_ipsec_egress.delete_fte = + mlx5_fpga_ipsec_fs_delete_fte_egress; + fpga_ipsec_egress.update_root_ft = + mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft; +} + +static int +mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + if (attrs->tfc_pad) { + mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n"); + return -EOPNOTSUPP; + } + + if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) { + mlx5_core_err(mdev, "Cannot offload xfrm 
states with anti replay\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
+ mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attrs->keymat.aes_gcm.iv_algo !=
+ MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
+ mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attrs->keymat.aes_gcm.icv_len != 128) {
+ mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attrs->keymat.aes_gcm.key_len != 128 &&
+ attrs->keymat.aes_gcm.key_len != 256) {
+ mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
+ (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
+ v2_command))) {
+ mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered without v2 command support\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+struct mlx5_accel_esp_xfrm *
+mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs,
+ u32 flags)
+{
+ struct mlx5_fpga_esp_xfrm *fpga_xfrm;
+
+ if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
+ mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
+ mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
+ if (!fpga_xfrm)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&fpga_xfrm->lock);
+ memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
+ sizeof(fpga_xfrm->accel_xfrm.attrs));
+
+ return &fpga_xfrm->accel_xfrm;
+}
+
+void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
+{
+ struct mlx5_fpga_esp_xfrm *fpga_xfrm =
+ container_of(xfrm, struct mlx5_fpga_esp_xfrm,
+ accel_xfrm);
+ /* assuming no sa_ctx are connected to this xfrm_ctx */
+ kfree(fpga_xfrm);
+}
+
+int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ struct mlx5_core_dev *mdev = xfrm->mdev;
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
+ struct mlx5_fpga_esp_xfrm *fpga_xfrm;
+ struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
+
+ int err = 0;
+
+ if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
+ return 0;
+
+ if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
+ mlx5_core_warn(mdev, "Tried to modify an esp with unsupported attrs\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 below is a v2 SADB command */
+ if (!is_v2_sadb_supported(fipsec)) {
+ mlx5_core_warn(mdev, "Modify esp is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);
+
+ mutex_lock(&fpga_xfrm->lock);
+
+ if (!fpga_xfrm->sa_ctx)
+ /* Unbounded xfrm, change only sw attrs */
+ goto change_sw_xfrm_attrs;
+
+ /* copy original hw sa */
+ memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
+ mutex_lock(&fipsec->sa_hash_lock);
+ /* remove original hw sa from hash */
+ WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
+ &fpga_xfrm->sa_ctx->hash, rhash_sa));
+ /* update hw_sa with new xfrm attrs */
+ mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
+ &fpga_xfrm->sa_ctx->hw_sa);
+ /* try to insert new hw_sa to hash */
+ err = rhashtable_insert_fast(&fipsec->sa_hash,
+ &fpga_xfrm->sa_ctx->hash, rhash_sa);
+ if (err)
+ goto rollback_sa;
+
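+ /* At this point the hash already maps to the new hw_sa while the
+ * device still holds the original SA, so the MODIFY command below
+ * is the commit point: any failure after it must restore both the
+ * hash entry and the cached hw_sa from org_hw_sa (see rollback_sa).
+ */
+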
/* modify device with new hw_sa */ + err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa, + MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2); + fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; + if (err) + WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, + &fpga_xfrm->sa_ctx->hash, + rhash_sa)); +rollback_sa: + if (err) { + /* return original hw_sa to hash */ + memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa, + sizeof(org_hw_sa)); + WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash, + &fpga_xfrm->sa_ctx->hash, + rhash_sa)); + } + mutex_unlock(&fipsec->sa_hash_lock); + +change_sw_xfrm_attrs: + if (!err) + memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs)); + mutex_unlock(&fpga_xfrm->lock); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h index 26a3e4b56972..2b5e63b0d4d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h @@ -35,33 +35,38 @@ #define __MLX5_FPGA_IPSEC_H__ #include "accel/ipsec.h" +#include "fs_cmd.h" #ifdef CONFIG_MLX5_FPGA -void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd); -int mlx5_fpga_ipsec_sa_cmd_wait(void *context); - u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev); unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev); int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, unsigned int counters_count); +void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *accel_xfrm, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6); +void mlx5_fpga_ipsec_delete_sa_ctx(void *context); + int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev); void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev); +void mlx5_fpga_ipsec_build_fs_cmds(void); -#else +struct mlx5_accel_esp_xfrm * +mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags); +void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); +int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs); -static inline void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd) -{ - return ERR_PTR(-EOPNOTSUPP); -} +const struct mlx5_flow_cmds * +mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type); -static inline int mlx5_fpga_ipsec_sa_cmd_wait(void *context) -{ - return -EOPNOTSUPP; -} +#else static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { @@ -80,6 +85,20 @@ static inline int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, return 0; } +static inline void * +mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *accel_xfrm, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6) +{ + return NULL; +} + +static inline void mlx5_fpga_ipsec_delete_sa_ctx(void *context) +{ +} + static inline int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) { return 0; @@ -89,6 +108,35 @@ static inline void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) { } +static inline void mlx5_fpga_ipsec_build_fs_cmds(void) +{ +} + +static inline struct mlx5_accel_esp_xfrm * +mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void 
mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) +{ +} + +static inline int +mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + return -EOPNOTSUPP; +} + +static inline const struct mlx5_flow_cmds * +mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type) +{ + return mlx5_fs_cmd_get_default(type); +} + #endif /* CONFIG_MLX5_FPGA */ #endif /* __MLX5_FPGA_SADB_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 881e2e55840c..ef5afd7c9325 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -39,9 +39,81 @@ #include "mlx5_core.h" #include "eswitch.h" -int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, u32 underlay_qpn, - bool disconnect) +static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 underlay_qpn, + bool disconnect) +{ + return 0; +} + +static int mlx5_cmd_stub_create_flow_table(struct mlx5_core_dev *dev, + u16 vport, + enum fs_flow_table_op_mod op_mod, + enum fs_flow_table_type type, + unsigned int level, + unsigned int log_size, + struct mlx5_flow_table *next_ft, + unsigned int *table_id, u32 flags) +{ + return 0; +} + +static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft) +{ + return 0; +} + +static int mlx5_cmd_stub_modify_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft) +{ + return 0; +} + +static int mlx5_cmd_stub_create_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id) +{ + return 0; +} + +static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id) +{ + return 0; +} + +static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *group, + struct fs_fte *fte) +{ + return 0; +} + +static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte) +{ + return -EOPNOTSUPP; +} + +static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_fte *fte) +{ + return 0; +} + +static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, u32 underlay_qpn, + bool disconnect) { u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; @@ -71,12 +143,14 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, - u16 vport, - enum fs_flow_table_op_mod op_mod, - enum fs_flow_table_type type, unsigned int level, - unsigned int log_size, struct mlx5_flow_table - *next_ft, unsigned int *table_id, u32 flags) +static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, + u16 vport, + enum fs_flow_table_op_mod op_mod, + enum fs_flow_table_type type, + unsigned int level, + unsigned int log_size, + struct mlx5_flow_table *next_ft, + unsigned int *table_id, u32 flags) { int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN); u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; @@ -125,8 +199,8 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, return err; } -int 
mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft) +static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft) { u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0}; u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0}; @@ -143,9 +217,9 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - struct mlx5_flow_table *next_ft) +static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft) { u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0}; u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0}; @@ -188,10 +262,10 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - u32 *in, - unsigned int *group_id) +static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id) { u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0}; int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); @@ -213,9 +287,9 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, return err; } -int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned int group_id) +static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id) { u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0}; u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0}; @@ -243,7 +317,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct); u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0}; struct mlx5_flow_rule *dst; - void *in_flow_context; + void *in_flow_context, *vlan; void *in_match_value; void *in_dests; u32 *in; @@ -266,16 +340,25 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); MLX5_SET(flow_context, in_flow_context, group_id, group_id); - MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag); - MLX5_SET(flow_context, in_flow_context, action, fte->action); - MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id); - MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->modify_id); + + MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag); + MLX5_SET(flow_context, in_flow_context, action, fte->action.action); + MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id); + MLX5_SET(flow_context, in_flow_context, modify_header_id, + fte->action.modify_id); + + vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan); + + MLX5_SET(vlan, vlan, ethtype, fte->action.vlan.ethtype); + MLX5_SET(vlan, vlan, vid, fte->action.vlan.vid); + MLX5_SET(vlan, vlan, prio, fte->action.vlan.prio); + in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, match_value); memcpy(in_match_value, &fte->val, sizeof(fte->val)); in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); - if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { int list_size = 0; list_for_each_entry(dst, &fte->node.children, node.list) { @@ -301,7 +384,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, list_size); } - if 
(fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev, log_max_flow_counter, ft->type)); @@ -332,19 +415,21 @@ err_out: return err; } -int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned group_id, - struct fs_fte *fte) +static int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *group, + struct fs_fte *fte) { + unsigned int group_id = group->id; + return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte); } -int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned group_id, - int modify_mask, - struct fs_fte *fte) +static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte) { int opmod; int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev, @@ -357,9 +442,9 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte); } -int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned int index) +static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_fte *fte) { u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0}; u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0}; @@ -367,7 +452,7 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); MLX5_SET(delete_fte_in, in, table_type, ft->type); MLX5_SET(delete_fte_in, in, table_id, ft->id); - MLX5_SET(delete_fte_in, in, flow_index, index); + MLX5_SET(delete_fte_in, in, flow_index, fte->index); if (ft->vport) { MLX5_SET(delete_fte_in, in, vport_number, ft->vport); MLX5_SET(delete_fte_in, in, other_vport, 1); @@ -610,3 +695,53 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id) mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } + +static const struct mlx5_flow_cmds mlx5_flow_cmds = { + .create_flow_table = mlx5_cmd_create_flow_table, + .destroy_flow_table = mlx5_cmd_destroy_flow_table, + .modify_flow_table = mlx5_cmd_modify_flow_table, + .create_flow_group = mlx5_cmd_create_flow_group, + .destroy_flow_group = mlx5_cmd_destroy_flow_group, + .create_fte = mlx5_cmd_create_fte, + .update_fte = mlx5_cmd_update_fte, + .delete_fte = mlx5_cmd_delete_fte, + .update_root_ft = mlx5_cmd_update_root_ft, +}; + +static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = { + .create_flow_table = mlx5_cmd_stub_create_flow_table, + .destroy_flow_table = mlx5_cmd_stub_destroy_flow_table, + .modify_flow_table = mlx5_cmd_stub_modify_flow_table, + .create_flow_group = mlx5_cmd_stub_create_flow_group, + .destroy_flow_group = mlx5_cmd_stub_destroy_flow_group, + .create_fte = mlx5_cmd_stub_create_fte, + .update_fte = mlx5_cmd_stub_update_fte, + .delete_fte = mlx5_cmd_stub_delete_fte, + .update_root_ft = mlx5_cmd_stub_update_root_ft, +}; + +static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void) +{ + return &mlx5_flow_cmds; +} + +static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void) +{ + return &mlx5_flow_cmd_stubs; +} + +const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type) +{ + switch (type) { + case FS_FT_NIC_RX: + case FS_FT_ESW_EGRESS_ACL: + case FS_FT_ESW_INGRESS_ACL: + case FS_FT_FDB: + case FS_FT_SNIFFER_RX: + case FS_FT_SNIFFER_TX: + return mlx5_fs_cmd_get_fw_cmds(); + case 
FS_FT_NIC_TX: + default: + return mlx5_fs_cmd_get_stub_cmds(); + } +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 71e2d0f37ad9..6228ba7bfa1a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -33,46 +33,52 @@ #ifndef _MLX5_FS_CMD_ #define _MLX5_FS_CMD_ -int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, - u16 vport, - enum fs_flow_table_op_mod op_mod, - enum fs_flow_table_type type, unsigned int level, - unsigned int log_size, struct mlx5_flow_table - *next_ft, unsigned int *table_id, u32 flags); +#include "fs_core.h" -int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft); +struct mlx5_flow_cmds { + int (*create_flow_table)(struct mlx5_core_dev *dev, + u16 vport, + enum fs_flow_table_op_mod op_mod, + enum fs_flow_table_type type, + unsigned int level, unsigned int log_size, + struct mlx5_flow_table *next_ft, + unsigned int *table_id, u32 flags); + int (*destroy_flow_table)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft); -int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - struct mlx5_flow_table *next_ft); + int (*modify_flow_table)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft); -int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - u32 *in, unsigned int *group_id); + int (*create_flow_group)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id); -int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned int group_id); + int (*destroy_flow_group)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id); -int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned group_id, - struct fs_fte *fte); + int (*create_fte)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg, + struct fs_fte *fte); -int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned group_id, - int modify_mask, - struct fs_fte *fte); + int (*update_fte)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte); -int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned int index); + int (*delete_fte)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_fte *fte); -int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, u32 underlay_qpn, - bool disconnect); + int (*update_root_ft)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 underlay_qpn, + bool disconnect); +}; int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id); int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id); @@ -90,4 +96,6 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b, u32 id, u64 *packets, u64 *bytes); +const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type); + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index c025c98700e4..de51e7c39bc8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -37,6 +37,8 @@ #include "fs_core.h" #include "fs_cmd.h" #include "diag/fs_tracepoint.h" 
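+/* Flow-steering hardware access is now dispatched through the
+ * per-root-namespace mlx5_flow_cmds table (root->cmds); the accel/fpga
+ * headers below let the NIC RX/TX root namespaces pick the FPGA IPSec
+ * command set when the device reports IPSec offload capabilities.
+ */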
+#include "accel/ipsec.h" +#include "fpga/ipsec.h" #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ sizeof(struct init_tree_node)) @@ -425,15 +427,17 @@ static void del_sw_prio(struct fs_node *node) static void del_hw_flow_table(struct fs_node *node) { + struct mlx5_flow_root_namespace *root; struct mlx5_flow_table *ft; struct mlx5_core_dev *dev; int err; fs_get_obj(ft, node); dev = get_dev(&ft->node); + root = find_root(&ft->node); if (node->active) { - err = mlx5_cmd_destroy_flow_table(dev, ft); + err = root->cmds->destroy_flow_table(dev, ft); if (err) mlx5_core_warn(dev, "flow steering can't destroy ft\n"); } @@ -454,6 +458,7 @@ static void del_sw_flow_table(struct fs_node *node) static void del_sw_hw_rule(struct fs_node *node) { + struct mlx5_flow_root_namespace *root; struct mlx5_flow_rule *rule; struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; @@ -477,19 +482,20 @@ static void del_sw_hw_rule(struct fs_node *node) if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && --fte->dests_size) { modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); - fte->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; + fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; update_fte = true; goto out; } - if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && + if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && --fte->dests_size) { modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST), update_fte = true; } out: + root = find_root(&ft->node); if (update_fte && fte->dests_size) { - err = mlx5_cmd_update_fte(dev, ft, fg->id, modify_mask, fte); + err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte); if (err) mlx5_core_warn(dev, "%s can't del rule fg id=%d fte_index=%d\n", @@ -500,6 +506,7 @@ out: static void del_hw_fte(struct fs_node *node) { + struct mlx5_flow_root_namespace *root; struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; struct mlx5_core_dev *dev; @@ -512,9 +519,9 @@ static void del_hw_fte(struct fs_node *node) trace_mlx5_fs_del_fte(fte); dev = get_dev(&ft->node); + root = find_root(&ft->node); if (node->active) { - err = mlx5_cmd_delete_fte(dev, ft, - fte->index); + err = root->cmds->delete_fte(dev, ft, fte); if (err) mlx5_core_warn(dev, "flow steering can't delete fte in index %d of flow group id %d\n", @@ -542,6 +549,7 @@ static void del_sw_fte(struct fs_node *node) static void del_hw_flow_group(struct fs_node *node) { + struct mlx5_flow_root_namespace *root; struct mlx5_flow_group *fg; struct mlx5_flow_table *ft; struct mlx5_core_dev *dev; @@ -551,7 +559,8 @@ static void del_hw_flow_group(struct fs_node *node) dev = get_dev(&ft->node); trace_mlx5_fs_del_fg(fg); - if (fg->node.active && mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) + root = find_root(&ft->node); + if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id)) mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", fg->id, ft->id); } @@ -615,10 +624,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft, memcpy(fte->val, match_value, sizeof(fte->val)); fte->node.type = FS_TYPE_FLOW_ENTRY; - fte->flow_tag = flow_act->flow_tag; - fte->action = flow_act->action; - fte->encap_id = flow_act->encap_id; - fte->modify_id = flow_act->modify_id; + fte->action = *flow_act; tree_init_node(&fte->node, del_hw_fte, del_sw_fte); @@ -797,15 +803,14 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev, struct fs_prio *prio, struct mlx5_flow_table *ft) { + struct mlx5_flow_root_namespace *root = 
find_root(&prio->node); struct mlx5_flow_table *iter; int i = 0; int err; fs_for_each_ft(iter, prio) { i++; - err = mlx5_cmd_modify_flow_table(dev, - iter, - ft); + err = root->cmds->modify_flow_table(dev, iter, ft); if (err) { mlx5_core_warn(dev, "Failed to modify flow table %d\n", iter->id); @@ -853,12 +858,12 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio if (list_empty(&root->underlay_qpns)) { /* Don't set any QPN (zero) in case QPN list is empty */ qpn = 0; - err = mlx5_cmd_update_root_ft(root->dev, ft, qpn, false); + err = root->cmds->update_root_ft(root->dev, ft, qpn, false); } else { list_for_each_entry(uqp, &root->underlay_qpns, list) { qpn = uqp->qpn; - err = mlx5_cmd_update_root_ft(root->dev, ft, qpn, - false); + err = root->cmds->update_root_ft(root->dev, ft, + qpn, false); if (err) break; } @@ -877,6 +882,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, struct mlx5_flow_destination *dest) { + struct mlx5_flow_root_namespace *root; struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; struct fs_fte *fte; @@ -884,17 +890,16 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, int err = 0; fs_get_obj(fte, rule->node.parent); - if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) + if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) return -EINVAL; down_write_ref_node(&fte->node); fs_get_obj(fg, fte->node.parent); fs_get_obj(ft, fg->node.parent); memcpy(&rule->dest_attr, dest, sizeof(*dest)); - err = mlx5_cmd_update_fte(get_dev(&ft->node), - ft, fg->id, - modify_mask, - fte); + root = find_root(&ft->node); + err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id, + modify_mask, fte); up_write_ref_node(&fte->node); return err; @@ -1035,9 +1040,9 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table); log_table_sz = ft->max_fte ? 
ilog2(ft->max_fte) : 0; next_ft = find_next_chained_ft(fs_prio); - err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type, - ft->level, log_table_sz, next_ft, &ft->id, - ft->flags); + err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod, + ft->type, ft->level, log_table_sz, + next_ft, &ft->id, ft->flags); if (err) goto free_ft; @@ -1053,7 +1058,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa mutex_unlock(&root->chain_lock); return ft; destroy_ft: - mlx5_cmd_destroy_flow_table(root->dev, ft); + root->cmds->destroy_flow_table(root->dev, ft); free_ft: kfree(ft); unlock_root: @@ -1125,6 +1130,7 @@ EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table); struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *fg_in) { + struct mlx5_flow_root_namespace *root = find_root(&ft->node); void *match_criteria = MLX5_ADDR_OF(create_flow_group_in, fg_in, match_criteria); u8 match_criteria_enable = MLX5_GET(create_flow_group_in, @@ -1152,7 +1158,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, if (IS_ERR(fg)) return fg; - err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id); + err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id); if (err) { tree_put_node(&fg->node); return ERR_PTR(err); @@ -1275,6 +1281,7 @@ add_rule_fte(struct fs_fte *fte, int dest_num, bool update_action) { + struct mlx5_flow_root_namespace *root; struct mlx5_flow_handle *handle; struct mlx5_flow_table *ft; int modify_mask = 0; @@ -1290,12 +1297,13 @@ add_rule_fte(struct fs_fte *fte, modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); fs_get_obj(ft, fg->node.parent); + root = find_root(&fg->node); if (!(fte->status & FS_FTE_STATUS_EXISTING)) - err = mlx5_cmd_create_fte(get_dev(&ft->node), - ft, fg->id, fte); + err = root->cmds->create_fte(get_dev(&ft->node), + ft, fg, fte); else - err = mlx5_cmd_update_fte(get_dev(&ft->node), - ft, fg->id, modify_mask, fte); + err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id, + modify_mask, fte); if (err) goto free_handle; @@ -1360,6 +1368,7 @@ out: static int create_auto_flow_group(struct mlx5_flow_table *ft, struct mlx5_flow_group *fg) { + struct mlx5_flow_root_namespace *root = find_root(&ft->node); struct mlx5_core_dev *dev = get_dev(&ft->node); int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); void *match_criteria_addr; @@ -1380,7 +1389,7 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft, memcpy(match_criteria_addr, fg->mask.match_criteria, sizeof(fg->mask.match_criteria)); - err = mlx5_cmd_create_flow_group(dev, ft, in, &fg->id); + err = root->cmds->create_flow_group(dev, ft, in, &fg->id); if (!err) { fg->node.active = true; trace_mlx5_fs_add_fg(fg); @@ -1429,7 +1438,10 @@ static bool check_conflicting_actions(u32 action1, u32 action2) if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_ENCAP | - MLX5_FLOW_CONTEXT_ACTION_DECAP)) + MLX5_FLOW_CONTEXT_ACTION_DECAP | + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | + MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)) return true; return false; @@ -1437,16 +1449,17 @@ static bool check_conflicting_actions(u32 action1, u32 action2) static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act) { - if (check_conflicting_actions(flow_act->action, fte->action)) { + if (check_conflicting_actions(flow_act->action, fte->action.action)) { mlx5_core_warn(get_dev(&fte->node), "Found two FTEs with conflicting actions\n"); 
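+ /* The two requests differ in at least one of the DROP,
+ * ENCAP/DECAP, MOD_HDR or VLAN push/pop action bits, which
+ * check_conflicting_actions() treats as impossible to combine
+ * in a single FTE.
+ */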
return -EEXIST; } - if (fte->flow_tag != flow_act->flow_tag) { + if (flow_act->has_flow_tag && + fte->action.flow_tag != flow_act->flow_tag) { mlx5_core_warn(get_dev(&fte->node), "FTE flow tag %u already exists with different flow tag %u\n", - fte->flow_tag, + fte->action.flow_tag, flow_act->flow_tag); return -EEXIST; } @@ -1470,12 +1483,12 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, if (ret) return ERR_PTR(ret); - old_action = fte->action; - fte->action |= flow_act->action; + old_action = fte->action.action; + fte->action.action |= flow_act->action; handle = add_rule_fte(fte, fg, dest, dest_num, old_action != flow_act->action); if (IS_ERR(handle)) { - fte->action = old_action; + fte->action.action = old_action; return handle; } trace_mlx5_fs_set_fte(fte, false); @@ -1636,7 +1649,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, list_for_each_entry(iter, match_head, list) { nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT); - ida_pre_get(&iter->g->fte_allocator, GFP_KERNEL); } search_again_locked: @@ -1758,8 +1770,11 @@ search_again_locked: /* Collect all fgs which has a matching match_criteria */ err = build_match_list(&match_head, ft, spec); - if (err) + if (err) { + if (take_write) + up_write_ref_node(&ft->node); return ERR_PTR(err); + } if (!take_write) up_read_ref_node(&ft->node); @@ -1768,8 +1783,11 @@ search_again_locked: dest_num, version); free_match_list(&match_head); if (!IS_ERR(rule) || - (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) + (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) { + if (take_write) + up_write_ref_node(&ft->node); return rule; + } if (!take_write) { nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT); @@ -1912,7 +1930,6 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft) return 0; new_root_ft = find_next_ft(ft); - if (!new_root_ft) { root->root_ft = NULL; return 0; @@ -1921,13 +1938,14 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft) if (list_empty(&root->underlay_qpns)) { /* Don't set any QPN (zero) in case QPN list is empty */ qpn = 0; - err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, qpn, - false); + err = root->cmds->update_root_ft(root->dev, new_root_ft, + qpn, false); } else { list_for_each_entry(uqp, &root->underlay_qpns, list) { qpn = uqp->qpn; - err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, - qpn, false); + err = root->cmds->update_root_ft(root->dev, + new_root_ft, qpn, + false); if (err) break; } @@ -2039,6 +2057,11 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, return &steering->sniffer_tx_root_ns->ns; else return NULL; + case MLX5_FLOW_NAMESPACE_EGRESS: + if (steering->egress_root_ns) + return &steering->egress_root_ns->ns; + else + return NULL; default: return NULL; } @@ -2229,13 +2252,18 @@ static int init_root_tree(struct mlx5_flow_steering *steering, return 0; } -static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering, - enum fs_flow_table_type - table_type) +static struct mlx5_flow_root_namespace +*create_root_ns(struct mlx5_flow_steering *steering, + enum fs_flow_table_type table_type) { + const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type); struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_namespace *ns; + if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE && + (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX)) + cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type); + /* Create the root 
namespace */ root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL); if (!root_ns) @@ -2243,6 +2271,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering root_ns->dev = steering->dev; root_ns->table_type = table_type; + root_ns->cmds = cmds; INIT_LIST_HEAD(&root_ns->underlay_qpns); @@ -2401,6 +2430,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) cleanup_root_ns(steering->fdb_root_ns); cleanup_root_ns(steering->sniffer_rx_root_ns); cleanup_root_ns(steering->sniffer_tx_root_ns); + cleanup_root_ns(steering->egress_root_ns); mlx5_cleanup_fc_stats(dev); kmem_cache_destroy(steering->ftes_cache); kmem_cache_destroy(steering->fgs_cache); @@ -2546,6 +2576,20 @@ cleanup_root_ns: return err; } +static int init_egress_root_ns(struct mlx5_flow_steering *steering) +{ + struct fs_prio *prio; + + steering->egress_root_ns = create_root_ns(steering, + FS_FT_NIC_TX); + if (!steering->egress_root_ns) + return -ENOMEM; + + /* create 1 prio*/ + prio = fs_create_prio(&steering->egress_root_ns->ns, 0, 1); + return PTR_ERR_OR_ZERO(prio); +} + int mlx5_init_fs(struct mlx5_core_dev *dev) { struct mlx5_flow_steering *steering; @@ -2611,6 +2655,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } + if (MLX5_IPSEC_DEV(dev)) { + err = init_egress_root_ns(steering); + if (err) + goto err; + } + return 0; err: mlx5_cleanup_fs(dev); @@ -2634,7 +2684,8 @@ int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) goto update_ft_fail; } - err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, false); + err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn, + false); if (err) { mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n", underlay_qpn, err); @@ -2677,7 +2728,8 @@ int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) goto out; } - err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, true); + err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn, + true); if (err) mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n", underlay_qpn, err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 05262708f14b..e26d3e9d5f9f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -48,6 +48,7 @@ enum fs_node_type { enum fs_flow_table_type { FS_FT_NIC_RX = 0x0, + FS_FT_NIC_TX = 0x1, FS_FT_ESW_EGRESS_ACL = 0x2, FS_FT_ESW_INGRESS_ACL = 0x3, FS_FT_FDB = 0X4, @@ -75,6 +76,7 @@ struct mlx5_flow_steering { struct mlx5_flow_root_namespace **esw_ingress_root_ns; struct mlx5_flow_root_namespace *sniffer_tx_root_ns; struct mlx5_flow_root_namespace *sniffer_rx_root_ns; + struct mlx5_flow_root_namespace *egress_root_ns; }; struct fs_node { @@ -174,11 +176,8 @@ struct fs_fte { struct fs_node node; u32 val[MLX5_ST_SZ_DW_MATCH_PARAM]; u32 dests_size; - u32 flow_tag; u32 index; - u32 action; - u32 encap_id; - u32 modify_id; + struct mlx5_flow_act action; enum fs_fte_status status; struct mlx5_fc *counter; struct rhash_head hash; @@ -224,6 +223,7 @@ struct mlx5_flow_root_namespace { /* Should be held when chaining flow tables */ struct mutex chain_lock; struct list_head underlay_qpns; + const struct mlx5_flow_cmds *cmds; }; int mlx5_init_fc_stats(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 9d11e92fb541..afd9f4fa22f4 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -183,6 +183,9 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } + if (MLX5_CAP_GEN(dev, debug)) + mlx5_core_get_caps(dev, MLX5_CAP_DEBUG); + if (MLX5_CAP_GEN(dev, pcam_reg)) mlx5_get_pcam_reg(dev); @@ -192,6 +195,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, qcam_reg)) mlx5_get_qcam_reg(dev); + if (MLX5_CAP_GEN(dev, device_memory)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_MEM); + if (err) + return err; + } + return 0; } @@ -242,7 +251,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev) force_state = MLX5_GET(teardown_hca_out, out, force_state); if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) { - mlx5_core_err(dev, "teardown with force mode failed\n"); + mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n"); return -EIO; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 21d29f7936f6..d39b0b7011b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) trigger_cmd_completions(dev); } - mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); + mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1); mlx5_core_err(dev, "end\n"); unlock: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index f953378bd13d..af3bb2f7a504 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -56,14 +56,17 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */ - mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST); + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false); + mlx5e_set_rq_type(mdev, params); + mlx5e_init_rq_type_params(mdev, params); /* RQ size in ipoib by default is 512 */ - params->log_rq_size = is_kdump_kernel() ? + params->log_rq_mtu_frames = is_kdump_kernel() ? 
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE; params->lro_en = false; + params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; } /* Called directly after IPoIB netdevice was created to initialize SW structs */ @@ -79,10 +82,10 @@ void mlx5i_init(struct mlx5_core_dev *mdev, priv->netdev = netdev; priv->profile = profile; priv->ppriv = ppriv; - priv->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; mutex_init(&priv->state_lock); - mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); + mlx5e_build_nic_params(mdev, &priv->channels.params, + profile->max_nch(mdev), netdev->mtu); mlx5i_build_nic_params(mdev, &priv->channels.params); mlx5e_timestamp_init(priv); @@ -366,25 +369,27 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); struct mlx5e_channels new_channels = {}; - int curr_mtu; + struct mlx5e_params *params; int err = 0; mutex_lock(&priv->state_lock); - curr_mtu = netdev->mtu; - netdev->mtu = new_mtu; + params = &priv->channels.params; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + params->sw_mtu = new_mtu; + netdev->mtu = params->sw_mtu; goto out; + } - new_channels.params = priv->channels.params; + new_channels.params = *params; + new_channels.params.sw_mtu = new_mtu; err = mlx5e_open_channels(priv, &new_channels); - if (err) { - netdev->mtu = curr_mtu; + if (err) goto out; - } mlx5e_switch_priv_channels(priv, &new_channels, NULL); + netdev->mtu = new_channels.params.sw_mtu; out: mutex_unlock(&priv->state_lock); @@ -538,7 +543,7 @@ static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca, err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn); if (err) - mlx5_core_dbg(mdev, "failed dettaching QPN 0x%x, MGID %pI6\n", + mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index b69e9d847a6b..54a188f41f90 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -290,7 +290,7 @@ static void mlx5i_pkey_init(struct mlx5_core_dev *mdev, netdev->ethtool_ops = &mlx5i_pkey_ethtool_ops; /* Use dummy rqs */ - priv->channels.params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; + priv->channels.params.log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; } /* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index e159243e0fcf..857035583ccd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -34,6 +34,7 @@ #include <linux/highmem.h> #include <rdma/mlx5-abi.h> #include "en.h" +#include "clock.h" enum { MLX5_CYCLES_SHIFT = 23 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 622f02d34aae..63a8ea31601c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -58,6 +58,7 @@ #include "eswitch.h" #include "lib/mlx5.h" #include "fpga/core.h" +#include "fpga/ipsec.h" #include "accel/ipsec.h" #include "lib/clock.h" @@ -551,7 +552,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) MLX5_SET(cmd_hca_cap, 
set_hca_cap, cache_line_128byte, - cache_line_size() == 128 ? 1 : 0); + cache_line_size() >= 128 ? 1 : 0); if (MLX5_CAP_GEN_MAX(dev, dct)) MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1); @@ -942,9 +943,9 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto out; } - err = mlx5_init_cq_table(dev); + err = mlx5_cq_debugfs_init(dev); if (err) { - dev_err(&pdev->dev, "failed to initialize cq table\n"); + dev_err(&pdev->dev, "failed to initialize cq debugfs\n"); goto err_eq_cleanup; } @@ -1002,7 +1003,7 @@ err_tables_cleanup: mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); - mlx5_cleanup_cq_table(dev); + mlx5_cq_debugfs_cleanup(dev); err_eq_cleanup: mlx5_eq_cleanup(dev); @@ -1023,7 +1024,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); - mlx5_cleanup_cq_table(dev); + mlx5_cq_debugfs_cleanup(dev); mlx5_eq_cleanup(dev); } @@ -1177,6 +1178,18 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_affinity_hints; } + err = mlx5_fpga_device_start(dev); + if (err) { + dev_err(&pdev->dev, "fpga device start failed %d\n", err); + goto err_fpga_start; + } + + err = mlx5_accel_ipsec_init(dev); + if (err) { + dev_err(&pdev->dev, "IPSec device start failed %d\n", err); + goto err_ipsec_start; + } + err = mlx5_init_fs(dev); if (err) { dev_err(&pdev->dev, "Failed to init flow steering\n"); @@ -1195,17 +1208,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_sriov; } - err = mlx5_fpga_device_start(dev); - if (err) { - dev_err(&pdev->dev, "fpga device start failed %d\n", err); - goto err_fpga_start; - } - err = mlx5_accel_ipsec_init(dev); - if (err) { - dev_err(&pdev->dev, "IPSec device start failed %d\n", err); - goto err_ipsec_start; - } - if (mlx5_device_registered(dev)) { mlx5_attach_device(dev); } else { @@ -1223,17 +1225,18 @@ out: return 0; err_reg_dev: - mlx5_accel_ipsec_cleanup(dev); -err_ipsec_start: - mlx5_fpga_device_stop(dev); - -err_fpga_start: mlx5_sriov_detach(dev); err_sriov: mlx5_cleanup_fs(dev); err_fs: + mlx5_accel_ipsec_cleanup(dev); + +err_ipsec_start: + mlx5_fpga_device_stop(dev); + +err_fpga_start: mlx5_irq_clear_affinity_hints(dev); err_affinity_hints: @@ -1300,11 +1303,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, if (mlx5_device_registered(dev)) mlx5_detach_device(dev); - mlx5_accel_ipsec_cleanup(dev); - mlx5_fpga_device_stop(dev); - mlx5_sriov_detach(dev); mlx5_cleanup_fs(dev); + mlx5_accel_ipsec_cleanup(dev); + mlx5_fpga_device_stop(dev); mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); @@ -1661,6 +1663,7 @@ static int __init init(void) get_random_bytes(&sw_owner_id, sizeof(sw_owner_id)); mlx5_core_verify_params(); + mlx5_fpga_ipsec_build_fs_cmds(); mlx5_register_debugfs(); err = pci_register_driver(&mlx5_core_driver); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 394552f36fcf..7d001fe6e631 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -38,16 +38,11 @@ #include <linux/sched.h> #include <linux/if_link.h> #include <linux/firmware.h> +#include <linux/mlx5/cq.h> #define DRIVER_NAME "mlx5_core" #define DRIVER_VERSION "5.0-0" -#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev)) -#define MLX5_VPORT_MANAGER(mdev) \ 
- (MLX5_CAP_GEN(mdev, vport_group_manager) && \ - (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ - mlx5_core_is_pf(mdev)) - extern uint mlx5_core_debug_mask; #define mlx5_core_dbg(__dev, format, ...) \ @@ -55,6 +50,11 @@ extern uint mlx5_core_debug_mask; __func__, __LINE__, current->pid, \ ##__VA_ARGS__) +#define mlx5_core_dbg_once(__dev, format, ...) \ + dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + #define mlx5_core_dbg_mask(__dev, mask, format, ...) \ do { \ if ((mask) & mlx5_core_debug_mask) \ @@ -115,9 +115,29 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, u32 element_id); int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev); + +int mlx5_eq_init(struct mlx5_core_dev *dev); +void mlx5_eq_cleanup(struct mlx5_core_dev *dev); +int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, + int nent, u64 mask, const char *name, + enum mlx5_eq_type type); +int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); +int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); +int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + u32 *out, int outlen); +int mlx5_start_eqs(struct mlx5_core_dev *dev); +void mlx5_stop_eqs(struct mlx5_core_dev *dev); struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq); void mlx5_cq_tasklet_cb(unsigned long data); +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); +int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, u8 access_reg_group); @@ -186,4 +206,5 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) int mlx5_lag_allow(struct mlx5_core_dev *dev); int mlx5_lag_forbid(struct mlx5_core_dev *dev); +void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index c37d00cd472a..fa9d0760dd36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -483,6 +483,17 @@ int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt); +static int mlx5_query_pfcc_reg(struct mlx5_core_dev *dev, u32 *out, + u32 out_size) +{ + u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; + + MLX5_SET(pfcc_reg, in, local_port, 1); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + out_size, MLX5_REG_PFCC, 0, 0); +} + int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause) { u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; @@ -500,13 +511,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pause); int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 *rx_pause, u32 *tx_pause) { - u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; int err; - MLX5_SET(pfcc_reg, in, local_port, 1); - err = mlx5_core_access_reg(dev, in, sizeof(in), 
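mlx5_query_pfcc_reg() above deduplicates the PFCC register read that the pause and PFC query paths had each open-coded. A hedged caller sketch using the two exported helpers from this hunk (the wrapper function itself is illustrative):

#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>

static int demo_pause_roundtrip(struct mlx5_core_dev *mdev)
{
	u32 rx_pause, tx_pause;
	int err;

	err = mlx5_set_port_pause(mdev, 1, 1);	/* enable RX and TX pause */
	if (err)
		return err;

	/* Read back; this path now goes through mlx5_query_pfcc_reg(). */
	return mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
}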
out, - sizeof(out), MLX5_REG_PFCC, 0, 0); + err = mlx5_query_pfcc_reg(dev, out, sizeof(out)); if (err) return err; @@ -520,6 +528,49 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_query_port_pause); +int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev, + u16 stall_critical_watermark, + u16 stall_minor_watermark) +{ + u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; + u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; + + MLX5_SET(pfcc_reg, in, local_port, 1); + MLX5_SET(pfcc_reg, in, pptx_mask_n, 1); + MLX5_SET(pfcc_reg, in, pprx_mask_n, 1); + MLX5_SET(pfcc_reg, in, ppan_mask_n, 1); + MLX5_SET(pfcc_reg, in, critical_stall_mask, 1); + MLX5_SET(pfcc_reg, in, minor_stall_mask, 1); + MLX5_SET(pfcc_reg, in, device_stall_critical_watermark, + stall_critical_watermark); + MLX5_SET(pfcc_reg, in, device_stall_minor_watermark, stall_minor_watermark); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PFCC, 0, 1); +} + +int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev, + u16 *stall_critical_watermark, + u16 *stall_minor_watermark) +{ + u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; + int err; + + err = mlx5_query_pfcc_reg(dev, out, sizeof(out)); + if (err) + return err; + + if (stall_critical_watermark) + *stall_critical_watermark = MLX5_GET(pfcc_reg, out, + device_stall_critical_watermark); + + if (stall_minor_watermark) + *stall_minor_watermark = MLX5_GET(pfcc_reg, out, + device_stall_minor_watermark); + + return 0; +} + int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx) { u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; @@ -538,13 +589,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pfc); int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) { - u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; int err; - MLX5_SET(pfcc_reg, in, local_port, 1); - err = mlx5_core_access_reg(dev, in, sizeof(in), out, - sizeof(out), MLX5_REG_PFCC, 0, 0); + err = mlx5_query_pfcc_reg(dev, out, sizeof(out)); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index d3c33e9eea72..bc86dffdc43c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -107,16 +107,16 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, * If the table is full, return NULL */ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, - u32 rate) + struct mlx5_rate_limit *rl) { struct mlx5_rl_entry *ret_entry = NULL; bool empty_found = false; int i; for (i = 0; i < table->max_size; i++) { - if (table->rl_entry[i].rate == rate) + if (mlx5_rl_are_equal(&table->rl_entry[i].rl, rl)) return &table->rl_entry[i]; - if (!empty_found && !table->rl_entry[i].rate) { + if (!empty_found && !table->rl_entry[i].rl.rate) { empty_found = true; ret_entry = &table->rl_entry[i]; } @@ -126,7 +126,8 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, } static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev, - u32 rate, u16 index) + u16 index, + struct mlx5_rate_limit *rl) { u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0}; u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0}; @@ -134,7 +135,9 @@ static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev, MLX5_SET(set_pp_rate_limit_in, in, opcode, MLX5_CMD_OP_SET_PP_RATE_LIMIT); MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index); - MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate); + 
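Note how mlx5_set_port_stall_watermark() raises the PFCC mask bits (pptx_mask_n, pprx_mask_n, ppan_mask_n and the two stall masks) so the register write updates only the watermark fields without clobbering the existing pause/PFC configuration, and passes 1 as the final mlx5_core_access_reg() argument to make it a write rather than a query.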
MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rl->rate); + MLX5_SET(set_pp_rate_limit_in, in, burst_upper_bound, rl->max_burst_sz); + MLX5_SET(set_pp_rate_limit_in, in, typical_packet_size, rl->typical_pkt_sz); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } @@ -146,7 +149,17 @@ bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate) } EXPORT_SYMBOL(mlx5_rl_is_in_range); -int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index) +bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0, + struct mlx5_rate_limit *rl_1) +{ + return ((rl_0->rate == rl_1->rate) && + (rl_0->max_burst_sz == rl_1->max_burst_sz) && + (rl_0->typical_pkt_sz == rl_1->typical_pkt_sz)); +} +EXPORT_SYMBOL(mlx5_rl_are_equal); + +int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index, + struct mlx5_rate_limit *rl) { struct mlx5_rl_table *table = &dev->priv.rl_table; struct mlx5_rl_entry *entry; @@ -154,14 +167,14 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index) mutex_lock(&table->rl_lock); - if (!rate || !mlx5_rl_is_in_range(dev, rate)) { + if (!rl->rate || !mlx5_rl_is_in_range(dev, rl->rate)) { mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n", - rate, table->min_rate, table->max_rate); + rl->rate, table->min_rate, table->max_rate); err = -EINVAL; goto out; } - entry = find_rl_entry(table, rate); + entry = find_rl_entry(table, rl); if (!entry) { mlx5_core_err(dev, "Max number of %u rates reached\n", table->max_size); @@ -173,13 +186,15 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index) entry->refcount++; } else { /* new rate limit */ - err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index); + err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl); if (err) { - mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n", - rate, err); + mlx5_core_err(dev, "Failed configuring rate limit(err %d): \ + rate %u, max_burst_sz %u, typical_pkt_sz %u\n", + err, rl->rate, rl->max_burst_sz, + rl->typical_pkt_sz); goto out; } - entry->rate = rate; + entry->rl = *rl; entry->refcount = 1; } *index = entry->index; @@ -190,27 +205,30 @@ out: } EXPORT_SYMBOL(mlx5_rl_add_rate); -void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate) +void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl) { struct mlx5_rl_table *table = &dev->priv.rl_table; struct mlx5_rl_entry *entry = NULL; + struct mlx5_rate_limit reset_rl = {0}; /* 0 is a reserved value for unlimited rate */ - if (rate == 0) + if (rl->rate == 0) return; mutex_lock(&table->rl_lock); - entry = find_rl_entry(table, rate); + entry = find_rl_entry(table, rl); if (!entry || !entry->refcount) { - mlx5_core_warn(dev, "Rate %u is not configured\n", rate); + mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u \ + are not configured\n", + rl->rate, rl->max_burst_sz, rl->typical_pkt_sz); goto out; } entry->refcount--; if (!entry->refcount) { /* need to remove rate */ - mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index); - entry->rate = 0; + mlx5_set_pp_rate_limit_cmd(dev, entry->index, &reset_rl); + entry->rl = reset_rl; } out: @@ -257,13 +275,14 @@ int mlx5_init_rl_table(struct mlx5_core_dev *dev) void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev) { struct mlx5_rl_table *table = &dev->priv.rl_table; + struct mlx5_rate_limit rl = {0}; int i; /* Clear all configured rates */ for (i = 0; i < table->max_size; i++) - if (table->rl_entry[i].rate) - mlx5_set_pp_rate_limit_cmd(dev, 0, - table->rl_entry[i].index); + if (table->rl_entry[i].rl.rate) + 
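The rate-limit table now keys entries on the whole {rate, max_burst_sz, typical_pkt_sz} tuple and refcounts sharing between users. A condensed sketch of the get side of that pattern, using the struct mlx5_rate_limit and mlx5_rl_are_equal() introduced above (table layout simplified):

#include <linux/mlx5/driver.h>

struct demo_rl_entry {
	struct mlx5_rate_limit rl;
	u16 index;
	int refcount;
};

static struct demo_rl_entry *
demo_rl_get(struct demo_rl_entry *tbl, int n, struct mlx5_rate_limit *rl)
{
	struct demo_rl_entry *free_entry = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (mlx5_rl_are_equal(&tbl[i].rl, rl)) {
			tbl[i].refcount++;	/* share the programmed entry */
			return &tbl[i];
		}
		if (!free_entry && !tbl[i].rl.rate)
			free_entry = &tbl[i];	/* rate == 0 marks a free slot */
	}

	if (free_entry) {		/* NULL here means the table is full */
		free_entry->rl = *rl;	/* the real code programs HW first */
		free_entry->refcount = 1;
	}
	return free_entry;
}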
mlx5_set_pp_rate_limit_cmd(dev, table->rl_entry[i].index, + &rl); kfree(dev->priv.rl_table.rl_entry); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index 9e38343a951f..dae1c5c5d27c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -157,6 +157,31 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out) } EXPORT_SYMBOL(mlx5_core_query_sq); +int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state) +{ + void *out; + void *sqc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(query_sq_out); + out = kvzalloc(inlen, GFP_KERNEL); + if (!out) + return -ENOMEM; + + err = mlx5_core_query_sq(dev, sqn, out); + if (err) + goto out; + + sqc = MLX5_ADDR_OF(query_sq_out, out, sq_context); + *state = MLX5_GET(sqc, sqc, state); + +out: + kvfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state); + int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn) { @@ -329,27 +354,6 @@ int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn) return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out) -{ - u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0}; - void *srqc; - void *xrc_srqc; - int err; - - MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ); - MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, - MLX5_ST_SZ_BYTES(query_xrc_srq_out)); - if (!err) { - xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out, - xrc_srq_context_entry); - srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); - memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc)); - } - - return err; -} - int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm) { u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index dfe36cf6fbea..177e076b8d17 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -1070,6 +1070,32 @@ free: } EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter); +int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, + u64 *rx_discard_vport_down, + u64 *tx_discard_vport_down) +{ + u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0}; + int err; + + MLX5_SET(query_vnic_env_in, in, opcode, + MLX5_CMD_OP_QUERY_VNIC_ENV); + MLX5_SET(query_vnic_env_in, in, op_mod, 0); + MLX5_SET(query_vnic_env_in, in, vport_number, vport); + if (vport) + MLX5_SET(query_vnic_env_in, in, other_vport, 1); + + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (err) + return err; + + *rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out, + vport_env.receive_discard_vport_down); + *tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out, + vport_env.transmit_discard_vport_down); + return 0; +} + int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, int vf, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 6bcfc25350f5..ea66448ba365 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -41,7 +41,7 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) u32 mlx5_cqwq_get_size(struct mlx5_cqwq 
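mlx5_core_query_sq_state() saves callers the query_sq_out allocation boilerplate when all they need is the state field. A hedged sketch of such a caller (the wrapper is illustrative; MLX5_SQC_STATE_ERR is the existing SQ state encoding):

#include <linux/mlx5/driver.h>
#include <linux/mlx5/transobj.h>

static bool demo_sq_errored(struct mlx5_core_dev *dev, u32 sqn)
{
	u8 state;

	if (mlx5_core_query_sq_state(dev, sqn, &state))
		return false;	/* treat a failed query as "not errored" */

	return state == MLX5_SQC_STATE_ERR;
}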
*wq) { - return wq->sz_m1 + 1; + return wq->fbc.sz_m1 + 1; } u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) @@ -62,7 +62,7 @@ static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq) static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq) { - return mlx5_cqwq_get_size(wq) << wq->log_stride; + return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride; } static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq) @@ -92,7 +92,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, goto err_db_free; } - wq->buf = wq_ctrl->buf.direct.buf; + wq->buf = wq_ctrl->buf.frags->buf; wq->db = wq_ctrl->db.db; wq_ctrl->mdev = mdev; @@ -130,7 +130,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, goto err_db_free; } - wq->rq.buf = wq_ctrl->buf.direct.buf; + wq->rq.buf = wq_ctrl->buf.frags->buf; wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq); wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR]; wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR]; @@ -151,11 +151,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, { int err; - wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz); - wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size); - wq->sz_m1 = (1 << wq->log_sz) - 1; - wq->log_frag_strides = PAGE_SHIFT - wq->log_stride; - wq->frag_sz_m1 = (1 << wq->log_frag_strides) - 1; + mlx5_core_init_cq_frag_buf(&wq->fbc, cqc); err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -172,7 +168,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, goto err_db_free; } - wq->frag_buf = wq_ctrl->frag_buf; + wq->fbc.frag_buf = wq_ctrl->frag_buf; wq->db = wq_ctrl->db.db; wq_ctrl->mdev = mdev; @@ -209,7 +205,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, goto err_db_free; } - wq->buf = wq_ctrl->buf.direct.buf; + wq->buf = wq_ctrl->buf.frags->buf; wq->db = wq_ctrl->db.db; for (i = 0; i < wq->sz_m1; i++) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 718589d0cec2..fca90b94596d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -45,7 +45,7 @@ struct mlx5_wq_param { struct mlx5_wq_ctrl { struct mlx5_core_dev *mdev; - struct mlx5_buf buf; + struct mlx5_frag_buf buf; struct mlx5_db db; }; @@ -68,14 +68,9 @@ struct mlx5_wq_qp { }; struct mlx5_cqwq { - struct mlx5_frag_buf frag_buf; - __be32 *db; - u32 sz_m1; - u32 frag_sz_m1; - u32 cc; /* consumer counter */ - u8 log_sz; - u8 log_stride; - u8 log_frag_strides; + struct mlx5_frag_buf_ctrl fbc; + __be32 *db; + u32 cc; /* consumer counter */ }; struct mlx5_wq_ll { @@ -131,20 +126,17 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq) { - return wq->cc & wq->sz_m1; + return wq->cc & wq->fbc.sz_m1; } static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) { - unsigned int frag = (ix >> wq->log_frag_strides); - - return wq->frag_buf.frags[frag].buf + - ((wq->frag_sz_m1 & ix) << wq->log_stride); + return mlx5_frag_buf_get_wqe(&wq->fbc, ix); } static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq) { - return wq->cc >> wq->log_sz; + return wq->cc >> wq->fbc.log_sz; } static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq) diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index d56eea310509..f4d9c9975ac3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ 
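The mlx5_cqwq conversion folds the size/stride bookkeeping into struct mlx5_frag_buf_ctrl so the fragment-addressing math lives in one shared helper. What mlx5_frag_buf_get_wqe() computes, written out along the lines of the open-coded version removed above (field names assumed to carry over from the old mlx5_cqwq members):

#include <linux/mlx5/driver.h>

static void *demo_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
{
	/* entries per PAGE-sized fragment is 1 << log_frag_strides */
	unsigned int frag = ix >> fbc->log_frag_strides;

	/* byte offset of entry ix inside its fragment */
	return fbc->frag_buf.frags[frag].buf +
	       ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}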
b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -76,6 +76,8 @@ config MLXSW_SPECTRUM depends on PSAMPLE || PSAMPLE=n depends on BRIDGE || BRIDGE=n depends on IPV6 || IPV6=n + depends on NET_IPGRE || NET_IPGRE=n + depends on IPV6_GRE || IPV6_GRE=n select PARMAN select MLXFW default m diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 9463c3fa254f..0cadcabfe86f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -20,7 +20,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_cnt.o spectrum_fid.o \ spectrum_ipip.o spectrum_acl_flex_actions.o \ spectrum_mr.o spectrum_mr_tcam.o \ - spectrum_qdisc.o + spectrum_qdisc.o spectrum_span.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 3529b545675d..93ea56620a24 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1008,6 +1008,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, const char *device_kind = mlxsw_bus_info->device_kind; struct mlxsw_core *mlxsw_core; struct mlxsw_driver *mlxsw_driver; + struct mlxsw_res *res; size_t alloc_size; int err; @@ -1032,8 +1033,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, mlxsw_core->bus_priv = bus_priv; mlxsw_core->bus_info = mlxsw_bus_info; - err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, - &mlxsw_core->res); + res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL; + err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res); if (err) goto err_bus_init; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 5ddafd74dc00..092d39399f3c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -235,8 +235,7 @@ struct mlxsw_config_profile { used_max_pkey:1, used_ar_sec:1, used_adaptive_routing_group_cap:1, - used_kvd_split_data:1; /* indicate for the kvd's values */ - + used_kvd_sizes:1; u8 max_vepa_channels; u16 max_mid; u16 max_pgt; @@ -256,10 +255,8 @@ struct mlxsw_config_profile { u16 adaptive_routing_group_cap; u8 arn; u32 kvd_linear_size; - u16 kvd_hash_granularity; u8 kvd_hash_single_parts; u8 kvd_hash_double_parts; - u8 resource_query_enable; struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT]; }; @@ -316,6 +313,7 @@ struct mlxsw_driver { u64 *p_linear_size); u8 txhdr_len; const struct mlxsw_config_profile *profile; + bool res_query_enabled; }; int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core, @@ -326,14 +324,14 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core, bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core, enum mlxsw_res_id res_id); -#define MLXSW_CORE_RES_VALID(res, short_res_id) \ - mlxsw_core_res_valid(res, MLXSW_RES_ID_##short_res_id) +#define MLXSW_CORE_RES_VALID(mlxsw_core, short_res_id) \ + mlxsw_core_res_valid(mlxsw_core, MLXSW_RES_ID_##short_res_id) u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, enum mlxsw_res_id res_id); -#define MLXSW_CORE_RES_GET(res, short_res_id) \ - mlxsw_core_res_get(res, MLXSW_RES_ID_##short_res_id) +#define MLXSW_CORE_RES_GET(mlxsw_core, short_res_id) \ + mlxsw_core_res_get(mlxsw_core, 
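The MLXSW_CORE_RES_* macros are unchanged in behavior; renaming the parameter from res to mlxsw_core documents that they take the core object, not a resource table. The token paste builds the enum name, for example:

/* Expands to: mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_MAX_SPAN) */
u64 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);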
MLXSW_RES_ID_##short_res_id) #define MLXSW_BUS_F_TXRX BIT(0) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index b698fb481b2e..3c0d882ba183 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -1,6 +1,6 @@ /* * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved. * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> * * Redistribution and use in source and binary forms, with or without @@ -443,6 +443,17 @@ int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id) } EXPORT_SYMBOL(mlxsw_afa_block_jump); +int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block) +{ + if (block->finished) + return -EINVAL; + mlxsw_afa_set_goto_set(block->cur_set, + MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0); + block->finished = true; + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_terminate); + static struct mlxsw_afa_fwd_entry * mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port) { @@ -838,7 +849,6 @@ struct mlxsw_afa_mirror { struct mlxsw_afa_resource resource; int span_id; u8 local_in_port; - u8 local_out_port; bool ingress; }; @@ -848,7 +858,7 @@ mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block, { block->afa->ops->mirror_del(block->afa->ops_priv, mirror->local_in_port, - mirror->local_out_port, + mirror->span_id, mirror->ingress); kfree(mirror); } @@ -864,9 +874,8 @@ mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block, } static struct mlxsw_afa_mirror * -mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, - u8 local_in_port, u8 local_out_port, - bool ingress) +mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u8 local_in_port, + const struct net_device *out_dev, bool ingress) { struct mlxsw_afa_mirror *mirror; int err; @@ -876,13 +885,12 @@ mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, return ERR_PTR(-ENOMEM); err = block->afa->ops->mirror_add(block->afa->ops_priv, - local_in_port, local_out_port, + local_in_port, out_dev, ingress, &mirror->span_id); if (err) goto err_mirror_add; mirror->ingress = ingress; - mirror->local_out_port = local_out_port; mirror->local_in_port = local_in_port; mirror->resource.destructor = mlxsw_afa_mirror_destructor; mlxsw_afa_resource_add(block, &mirror->resource); @@ -909,13 +917,13 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block, } int -mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, - u8 local_in_port, u8 local_out_port, bool ingress) +mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port, + const struct net_device *out_dev, bool ingress) { struct mlxsw_afa_mirror *mirror; int err; - mirror = mlxsw_afa_mirror_create(block, local_in_port, local_out_port, + mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev, ingress); if (IS_ERR(mirror)) return PTR_ERR(mirror); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 43132293475c..3a155d104384 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -36,6 +36,7 @@ #define _MLXSW_CORE_ACL_FLEX_ACTIONS_H #include <linux/types.h> +#include <linux/netdevice.h> struct mlxsw_afa; struct mlxsw_afa_block; @@ -48,9 +49,10 
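With this change the flex-action layer no longer understands destination ports at all: mirror_add receives the destination netdev and returns an opaque span_id, which is all mirror_del needs later. A skeleton of an ops implementation under the new signatures (everything beyond the ops prototypes is illustrative):

#include <linux/netdevice.h>

static int demo_mirror_add(void *priv, u8 local_in_port,
			   const struct net_device *out_dev,
			   bool ingress, int *p_span_id)
{
	/* resolve out_dev to a SPAN agent, bind local_in_port to it,
	 * and report the agent via *p_span_id
	 */
	*p_span_id = 0;		/* placeholder */
	return 0;
}

static void demo_mirror_del(void *priv, u8 local_in_port, int span_id,
			    bool ingress)
{
	/* unbind local_in_port and release the SPAN agent behind span_id */
}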
@@ struct mlxsw_afa_ops { void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index); int (*counter_index_get)(void *priv, unsigned int *p_counter_index); void (*counter_index_put)(void *priv, unsigned int counter_index); - int (*mirror_add)(void *priv, u8 locol_in_port, u8 local_out_port, + int (*mirror_add)(void *priv, u8 local_in_port, + const struct net_device *out_dev, bool ingress, int *p_span_id); - void (*mirror_del)(void *priv, u8 locol_in_port, u8 local_out_port, + void (*mirror_del)(void *priv, u8 local_in_port, int span_id, bool ingress); }; @@ -65,12 +67,14 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block); u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); int mlxsw_afa_block_continue(struct mlxsw_afa_block *block); int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); +int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block); int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id); int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, u16 trap_id); int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, - u8 local_in_port, u8 local_out_port, + u8 local_in_port, + const struct net_device *out_dev, bool ingress); int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, u8 local_port, bool in_port); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index f6963b0b4a55..122506daa586 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), - MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8), - MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2), - MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6), - MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), - MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8), MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), + MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), + MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), + MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32), + MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8), }; -#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38 +#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40 struct mlxsw_afk_element_inst { /* element instance in actual block */ const struct mlxsw_afk_element_info *info; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index ab710e37af99..84185f8dfbae 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -218,32 +218,32 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, switch (attr_type) { case 
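The element reshuffle moves the L4 ports ahead of the IP fields and pushes the IPv6 addresses up, so the last element, DST_IP6_LO at offset 0x38 with length 8, now ends at 0x38 + 8 = 0x40, which is exactly the new MLXSW_AFK_ELEMENT_STORAGE_SIZE. A compile-time check one could add to keep the two in sync (hypothetical, not part of the patch; BUILD_BUG_ON must sit in function scope):

/* key storage must cover the highest element offset plus its size */
BUILD_BUG_ON(0x38 + 8 > MLXSW_AFK_ELEMENT_STORAGE_SIZE);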
MLXSW_HWMON_ATTR_TYPE_TEMP: mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_show; - mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), "temp%u_input", num + 1); break; case MLXSW_HWMON_ATTR_TYPE_TEMP_MAX: mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_max_show; - mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), "temp%u_highest", num + 1); break; case MLXSW_HWMON_ATTR_TYPE_TEMP_RST: mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_temp_rst_store; - mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0200; snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), "temp%u_reset_history", num + 1); break; case MLXSW_HWMON_ATTR_TYPE_FAN_RPM: mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_fan_rpm_show; - mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), "fan%u_input", num + 1); break; case MLXSW_HWMON_ATTR_TYPE_PWM: mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_pwm_show; mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_pwm_store; - mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR | S_IRUGO; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0644; snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), "pwm%u", num + 1); break; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 85faa87bf42d..3a9381977d6d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1015,16 +1015,14 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci, } static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox, - struct mlxsw_res *res, - u8 query_enabled) + struct mlxsw_res *res) { int index, i; u64 data; u16 id; int err; - /* Not all the versions support resources query */ - if (!query_enabled) + if (!res) return 0; mlxsw_cmd_mbox_zero(mbox); @@ -1164,7 +1162,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set( mbox, profile->adaptive_routing_group_cap); } - if (MLXSW_RES_VALID(res, KVD_SIZE)) { + if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) { err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res); if (err) return err; @@ -1376,8 +1374,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_boardinfo; - err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res, - profile->resource_query_enable); + err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res); if (err) goto err_query_resources; @@ -1519,8 +1516,7 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, u8 *p_status) { struct mlxsw_pci *mlxsw_pci = bus_priv; - dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr; - dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr; + dma_addr_t in_mapaddr = 0, out_mapaddr = 0; bool evreq = mlxsw_pci->cmd.nopoll; unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS); bool *p_wait_done = &mlxsw_pci->cmd.wait_done; @@ -1532,11 +1528,15 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, if (err) return err; - if (in_mbox) + if (in_mbox) { memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size); + in_mapaddr = 
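The hwmon hunks are part of the treewide S_I*-to-octal conversion: 0444 is S_IRUGO (world-readable), 0200 is S_IWUSR (owner-writable) and 0644 is S_IRUGO | S_IWUSR, with the literal form preferred because the permission bits can be read off directly.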
mlxsw_pci->cmd.in_mbox.mapaddr; + } mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr)); mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr)); + if (out_mbox) + out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr; mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr)); mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr)); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 0e08be41c8e0..6218231e379e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1,11 +1,11 @@ /* * drivers/net/ethernet/mellanox/mlxsw/reg.h - * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved. * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com> * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com> - * Copyright (c) 2017 Petr Machata <petrm@mellanox.com> + * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -2872,6 +2872,14 @@ static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port, MLXSW_REG_DEFINE(ptys, MLXSW_REG_PTYS_ID, MLXSW_REG_PTYS_LEN); +/* an_disable_admin + * Auto negotiation disable administrative configuration + * 0 - Device doesn't support AN disable. + * 1 - Device supports AN disable. + * Access: RW + */ +MLXSW_ITEM32(reg, ptys, an_disable_admin, 0x00, 30, 1); + /* reg_ptys_local_port * Local port number. * Access: Index @@ -3000,12 +3008,13 @@ MLXSW_ITEM32(reg, ptys, ib_proto_oper, 0x28, 0, 16); MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32); static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port, - u32 proto_admin) + u32 proto_admin, bool autoneg) { MLXSW_REG_ZERO(ptys, payload); mlxsw_reg_ptys_local_port_set(payload, local_port); mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH); mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin); + mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg); } static inline void mlxsw_reg_ptys_eth_unpack(char *payload, @@ -4216,6 +4225,12 @@ MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1); */ MLXSW_ITEM32(reg, ritr, ipv4_mc, 0x00, 27, 1); +/* reg_ritr_ipv6_mc + * IPv6 multicast routing enable. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv6_mc, 0x00, 26, 1); + enum mlxsw_reg_ritr_if_type { /* VLAN interface. */ MLXSW_REG_RITR_VLAN_IF, @@ -4281,6 +4296,14 @@ MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1); */ MLXSW_ITEM32(reg, ritr, ipv4_mc_fe, 0x04, 27, 1); +/* reg_ritr_ipv6_mc_fe + * IPv6 Multicast Forwarding Enable. + * When disabled, forwarding is blocked but local traffic (traps and IP to me) + * will be enabled. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv6_mc_fe, 0x04, 26, 1); + /* reg_ritr_lb_en * Loop-back filter enable for unicast packets. 
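mlxsw_reg_ptys_eth_pack() gains an autoneg flag that is stored inverted into the new an_disable_admin bit. A hedged sketch of forcing a fixed speed through it (the wrapper and its arguments are illustrative):

/* assumes the driver-local core.h and reg.h headers */
static int demo_force_speed(struct mlxsw_core *core, u8 local_port,
			    u32 ptys_proto)
{
	char ptys_pl[MLXSW_REG_PTYS_LEN];

	/* autoneg = false lands in the register as an_disable_admin = 1 */
	mlxsw_reg_ptys_eth_pack(ptys_pl, local_port, ptys_proto, false);
	return mlxsw_reg_write(core, MLXSW_REG(ptys), ptys_pl);
}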
* If the flag is set then loop-back filter for unicast packets is @@ -4504,12 +4527,14 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, mlxsw_reg_ritr_ipv4_set(payload, 1); mlxsw_reg_ritr_ipv6_set(payload, 1); mlxsw_reg_ritr_ipv4_mc_set(payload, 1); + mlxsw_reg_ritr_ipv6_mc_set(payload, 1); mlxsw_reg_ritr_type_set(payload, type); mlxsw_reg_ritr_op_set(payload, op); mlxsw_reg_ritr_rif_set(payload, rif); mlxsw_reg_ritr_ipv4_fe_set(payload, 1); mlxsw_reg_ritr_ipv6_fe_set(payload, 1); mlxsw_reg_ritr_ipv4_mc_fe_set(payload, 1); + mlxsw_reg_ritr_ipv6_mc_fe_set(payload, 1); mlxsw_reg_ritr_lb_en_set(payload, 1); mlxsw_reg_ritr_virtual_router_set(payload, vr_id); mlxsw_reg_ritr_mtu_set(payload, mtu); @@ -6293,30 +6318,34 @@ MLXSW_ITEM32(reg, rmft2, irif_mask, 0x08, 24, 1); */ MLXSW_ITEM32(reg, rmft2, irif, 0x08, 0, 16); -/* reg_rmft2_dip4 - * Destination IPv4 address +/* reg_rmft2_dip{4,6} + * Destination IPv4/6 address * Access: RW */ +MLXSW_ITEM_BUF(reg, rmft2, dip6, 0x10, 16); MLXSW_ITEM32(reg, rmft2, dip4, 0x1C, 0, 32); -/* reg_rmft2_dip4_mask +/* reg_rmft2_dip{4,6}_mask * A bit that is set directs the TCAM to compare the corresponding bit in key. A * bit that is clear directs the TCAM to ignore the corresponding bit in key. * Access: RW */ +MLXSW_ITEM_BUF(reg, rmft2, dip6_mask, 0x20, 16); MLXSW_ITEM32(reg, rmft2, dip4_mask, 0x2C, 0, 32); -/* reg_rmft2_sip4 - * Source IPv4 address +/* reg_rmft2_sip{4,6} + * Source IPv4/6 address * Access: RW */ +MLXSW_ITEM_BUF(reg, rmft2, sip6, 0x30, 16); MLXSW_ITEM32(reg, rmft2, sip4, 0x3C, 0, 32); -/* reg_rmft2_sip4_mask +/* reg_rmft2_sip{4,6}_mask * A bit that is set directs the TCAM to compare the corresponding bit in key. A * bit that is clear directs the TCAM to ignore the corresponding bit in key. 
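Note the deliberate overlay in the rmft2 layout: dip6 spans bytes 0x10-0x1F while dip4 is the dword at 0x1C, so the IPv4 address aliases the last four bytes of the IPv6 field. The same pattern repeats for dip_mask (0x20/0x2C), sip (0x30/0x3C) and sip_mask (0x40/0x4C), which is why the single type field is enough to disambiguate the two key formats.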
* Access: RW */ +MLXSW_ITEM_BUF(reg, rmft2, sip6_mask, 0x40, 16); MLXSW_ITEM32(reg, rmft2, sip4_mask, 0x4C, 0, 32); /* reg_rmft2_flexible_action_set @@ -6334,26 +6363,52 @@ MLXSW_ITEM_BUF(reg, rmft2, flexible_action_set, 0x80, MLXSW_REG_FLEX_ACTION_SET_LEN); static inline void -mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router, - enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif, - u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask, - const char *flexible_action_set) +mlxsw_reg_rmft2_common_pack(char *payload, bool v, u16 offset, + u16 virtual_router, + enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif, + const char *flex_action_set) { MLXSW_REG_ZERO(rmft2, payload); mlxsw_reg_rmft2_v_set(payload, v); - mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4); mlxsw_reg_rmft2_op_set(payload, MLXSW_REG_RMFT2_OP_READ_WRITE); mlxsw_reg_rmft2_offset_set(payload, offset); mlxsw_reg_rmft2_virtual_router_set(payload, virtual_router); mlxsw_reg_rmft2_irif_mask_set(payload, irif_mask); mlxsw_reg_rmft2_irif_set(payload, irif); + if (flex_action_set) + mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload, + flex_action_set); +} + +static inline void +mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router, + enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif, + u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask, + const char *flexible_action_set) +{ + mlxsw_reg_rmft2_common_pack(payload, v, offset, virtual_router, + irif_mask, irif, flexible_action_set); + mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4); mlxsw_reg_rmft2_dip4_set(payload, dip4); mlxsw_reg_rmft2_dip4_mask_set(payload, dip4_mask); mlxsw_reg_rmft2_sip4_set(payload, sip4); mlxsw_reg_rmft2_sip4_mask_set(payload, sip4_mask); - if (flexible_action_set) - mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload, - flexible_action_set); +} + +static inline void +mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router, + enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif, + struct in6_addr dip6, struct in6_addr dip6_mask, + struct in6_addr sip6, struct in6_addr sip6_mask, + const char *flexible_action_set) +{ + mlxsw_reg_rmft2_common_pack(payload, v, offset, virtual_router, + irif_mask, irif, flexible_action_set); + mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV6); + mlxsw_reg_rmft2_dip6_memcpy_to(payload, (void *)&dip6); + mlxsw_reg_rmft2_dip6_mask_memcpy_to(payload, (void *)&dip6_mask); + mlxsw_reg_rmft2_sip6_memcpy_to(payload, (void *)&sip6); + mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask); } /* MFCR - Management Fan Control Register @@ -6772,8 +6827,104 @@ MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1); */ MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1); +enum mlxsw_reg_mpat_span_type { + /* Local SPAN Ethernet. + * The original packet is not encapsulated. + */ + MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH = 0x0, + + /* Encapsulated Remote SPAN Ethernet L3 GRE. + * The packet is encapsulated with GRE header. + */ + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3 = 0x3, +}; + +/* reg_mpat_span_type + * SPAN type. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, span_type, 0x04, 0, 4); + +/* Remote SPAN - Ethernet VLAN + * - - - - - - - - - - - - - - + */ + +/* reg_mpat_eth_rspan_vid + * Encapsulation header VLAN ID. 
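mlxsw_reg_rmft2_common_pack() hoists everything the IPv4 and IPv6 flavors share (validity, op, offset, virtual router, irif and the flexible action set), so each per-family pack only sets its type and addresses. Passing struct in6_addr by value appears to be a deliberate trade-off: it keeps these header-only helpers free of pointer-lifetime assumptions at the cost of a 16-byte copy per argument.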
+ * Access: RW + */ +MLXSW_ITEM32(reg, mpat, eth_rspan_vid, 0x18, 0, 12); + +/* Encapsulated Remote SPAN - Ethernet L2 + * - - - - - - - - - - - - - - - - - - - + */ + +enum mlxsw_reg_mpat_eth_rspan_version { + MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER = 15, +}; + +/* reg_mpat_eth_rspan_version + * RSPAN mirror header version. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, eth_rspan_version, 0x10, 18, 4); + +/* reg_mpat_eth_rspan_mac + * Destination MAC address. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, mpat, eth_rspan_mac, 0x12, 6); + +/* reg_mpat_eth_rspan_tp + * Tag Packet. Indicates whether the mirroring header should be VLAN tagged. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, eth_rspan_tp, 0x18, 16, 1); + +/* Encapsulated Remote SPAN - Ethernet L3 + * - - - - - - - - - - - - - - - - - - - + */ + +enum mlxsw_reg_mpat_eth_rspan_protocol { + MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV4, + MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV6, +}; + +/* reg_mpat_eth_rspan_protocol + * SPAN encapsulation protocol. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, eth_rspan_protocol, 0x18, 24, 4); + +/* reg_mpat_eth_rspan_ttl + * Encapsulation header Time-to-Live/HopLimit. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, eth_rspan_ttl, 0x1C, 4, 8); + +/* reg_mpat_eth_rspan_smac + * Source MAC address + * Access: RW + */ +MLXSW_ITEM_BUF(reg, mpat, eth_rspan_smac, 0x22, 6); + +/* reg_mpat_eth_rspan_dip* + * Destination IP address. The IP version is configured by protocol. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, eth_rspan_dip4, 0x4C, 0, 32); +MLXSW_ITEM_BUF(reg, mpat, eth_rspan_dip6, 0x40, 16); + +/* reg_mpat_eth_rspan_sip* + * Source IP address. The IP version is configured by protocol. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, eth_rspan_sip4, 0x5C, 0, 32); +MLXSW_ITEM_BUF(reg, mpat, eth_rspan_sip6, 0x50, 16); + static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id, - u16 system_port, bool e) + u16 system_port, bool e, + enum mlxsw_reg_mpat_span_type span_type) { MLXSW_REG_ZERO(mpat, payload); mlxsw_reg_mpat_pa_id_set(payload, pa_id); @@ -6781,6 +6932,49 @@ static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id, mlxsw_reg_mpat_e_set(payload, e); mlxsw_reg_mpat_qos_set(payload, 1); mlxsw_reg_mpat_be_set(payload, 1); + mlxsw_reg_mpat_span_type_set(payload, span_type); +} + +static inline void mlxsw_reg_mpat_eth_rspan_pack(char *payload, u16 vid) +{ + mlxsw_reg_mpat_eth_rspan_vid_set(payload, vid); +} + +static inline void +mlxsw_reg_mpat_eth_rspan_l2_pack(char *payload, + enum mlxsw_reg_mpat_eth_rspan_version version, + const char *mac, + bool tp) +{ + mlxsw_reg_mpat_eth_rspan_version_set(payload, version); + mlxsw_reg_mpat_eth_rspan_mac_memcpy_to(payload, mac); + mlxsw_reg_mpat_eth_rspan_tp_set(payload, tp); +} + +static inline void +mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(char *payload, u8 ttl, + const char *smac, + u32 sip, u32 dip) +{ + mlxsw_reg_mpat_eth_rspan_ttl_set(payload, ttl); + mlxsw_reg_mpat_eth_rspan_smac_memcpy_to(payload, smac); + mlxsw_reg_mpat_eth_rspan_protocol_set(payload, + MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV4); + mlxsw_reg_mpat_eth_rspan_sip4_set(payload, sip); + mlxsw_reg_mpat_eth_rspan_dip4_set(payload, dip); +} + +static inline void +mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(char *payload, u8 ttl, + const char *smac, + struct in6_addr sip, struct in6_addr dip) +{ + mlxsw_reg_mpat_eth_rspan_ttl_set(payload, ttl); + mlxsw_reg_mpat_eth_rspan_smac_memcpy_to(payload, smac); + mlxsw_reg_mpat_eth_rspan_protocol_set(payload, + MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV6); + 
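The MPAT helpers are designed to be layered: the base pack selects the SPAN type, then the encapsulation-specific packs fill in their fields. A hedged sketch composing an encapsulated remote (ERSPAN-style) IPv4 agent; the port, VLAN, MAC and address values are illustrative:

static void demo_mpat_rspan_v4(char *mpat_pl, u8 pa_id, u16 system_port)
{
	u8 dmac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };	/* illustrative */
	u8 smac[6] = { 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };	/* illustrative */

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, system_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, 1);	/* encap VLAN ID */
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 dmac, false);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl, 64, smac,
					      0x0a000001,	/* sip 10.0.0.1 */
					      0x0a000002);	/* dip 10.0.0.2 */
}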
mlxsw_reg_mpat_eth_rspan_sip6_memcpy_to(payload, (void *)&sip); + mlxsw_reg_mpat_eth_rspan_dip6_memcpy_to(payload, (void *)&dip); } /* MPAR - Monitoring Port Analyzer Register diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3dcc58d61506..53fffd09d133 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1,6 +1,6 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum.c - * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved. * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> @@ -71,11 +71,12 @@ #include "spectrum_cnt.h" #include "spectrum_dpipe.h" #include "spectrum_acl_flex_actions.h" +#include "spectrum_span.h" #include "../mlxfw/mlxfw.h" #define MLXSW_FWREV_MAJOR 13 -#define MLXSW_FWREV_MINOR 1530 -#define MLXSW_FWREV_SUBMINOR 152 +#define MLXSW_FWREV_MINOR 1620 +#define MLXSW_FWREV_SUBMINOR 192 #define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) #define MLXSW_SP_FW_FILENAME \ @@ -487,327 +488,6 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) return 0; } -static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) -{ - int i; - - if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN)) - return -EIO; - - mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, - MAX_SPAN); - mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count, - sizeof(struct mlxsw_sp_span_entry), - GFP_KERNEL); - if (!mlxsw_sp->span.entries) - return -ENOMEM; - - for (i = 0; i < mlxsw_sp->span.entries_count; i++) - INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list); - - return 0; -} - -static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp) -{ - int i; - - for (i = 0; i < mlxsw_sp->span.entries_count; i++) { - struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; - - WARN_ON_ONCE(!list_empty(&curr->bound_ports_list)); - } - kfree(mlxsw_sp->span.entries); -} - -static struct mlxsw_sp_span_entry * -mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port) -{ - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; - struct mlxsw_sp_span_entry *span_entry; - char mpat_pl[MLXSW_REG_MPAT_LEN]; - u8 local_port = port->local_port; - int index; - int i; - int err; - - /* find a free entry to use */ - index = -1; - for (i = 0; i < mlxsw_sp->span.entries_count; i++) { - if (!mlxsw_sp->span.entries[i].used) { - index = i; - span_entry = &mlxsw_sp->span.entries[i]; - break; - } - } - if (index < 0) - return NULL; - - /* create a new port analayzer entry for local_port */ - mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); - if (err) - return NULL; - - span_entry->used = true; - span_entry->id = index; - span_entry->ref_count = 1; - span_entry->local_port = local_port; - return span_entry; -} - -static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_span_entry *span_entry) -{ - u8 local_port = span_entry->local_port; - char mpat_pl[MLXSW_REG_MPAT_LEN]; - int pa_id = span_entry->id; - - mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); - span_entry->used = false; -} - -struct mlxsw_sp_span_entry * -mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port) -{ - int i; - - for (i = 0; i < 
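The SPAN code removed from spectrum.c here is not deleted outright: it moves into the new spectrum_span.c (added to the Makefile above) and is generalized so a mirror destination is described by an arbitrary netdev rather than a Spectrum port, which is what the GRE-encapsulated SPAN types added to MPAT require.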
mlxsw_sp->span.entries_count; i++) { - struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; - - if (curr->used && curr->local_port == local_port) - return curr; - } - return NULL; -} - -static struct mlxsw_sp_span_entry -*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port) -{ - struct mlxsw_sp_span_entry *span_entry; - - span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp, - port->local_port); - if (span_entry) { - /* Already exists, just take a reference */ - span_entry->ref_count++; - return span_entry; - } - - return mlxsw_sp_span_entry_create(port); -} - -static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_span_entry *span_entry) -{ - WARN_ON(!span_entry->ref_count); - if (--span_entry->ref_count == 0) - mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); - return 0; -} - -static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port) -{ - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; - struct mlxsw_sp_span_inspected_port *p; - int i; - - for (i = 0; i < mlxsw_sp->span.entries_count; i++) { - struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; - - list_for_each_entry(p, &curr->bound_ports_list, list) - if (p->local_port == port->local_port && - p->type == MLXSW_SP_SPAN_EGRESS) - return true; - } - - return false; -} - -static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp, - int mtu) -{ - return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1; -} - -static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu) -{ - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; - char sbib_pl[MLXSW_REG_SBIB_LEN]; - int err; - - /* If port is egress mirrored, the shared buffer size should be - * updated according to the mtu value - */ - if (mlxsw_sp_span_is_egress_mirror(port)) { - u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu); - - mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); - if (err) { - netdev_err(port->dev, "Could not update shared buffer for mirroring\n"); - return err; - } - } - - return 0; -} - -static struct mlxsw_sp_span_inspected_port * -mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port, - struct mlxsw_sp_span_entry *span_entry) -{ - struct mlxsw_sp_span_inspected_port *p; - - list_for_each_entry(p, &span_entry->bound_ports_list, list) - if (port->local_port == p->local_port) - return p; - return NULL; -} - -static int -mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port, - struct mlxsw_sp_span_entry *span_entry, - enum mlxsw_sp_span_type type, - bool bind) -{ - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; - char mpar_pl[MLXSW_REG_MPAR_LEN]; - int pa_id = span_entry->id; - - /* bind the port to the SPAN entry */ - mlxsw_reg_mpar_pack(mpar_pl, port->local_port, - (enum mlxsw_reg_mpar_i_e) type, bind, pa_id); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); -} - -static int -mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port, - struct mlxsw_sp_span_entry *span_entry, - enum mlxsw_sp_span_type type, - bool bind) -{ - struct mlxsw_sp_span_inspected_port *inspected_port; - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; - char sbib_pl[MLXSW_REG_SBIB_LEN]; - int err; - - /* if it is an egress SPAN, bind a shared buffer to it */ - if (type == MLXSW_SP_SPAN_EGRESS) { - u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, - port->dev->mtu); - - mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); - if (err) 
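For reference, the removed mlxsw_sp_span_mtu_to_buffsize() sizes the egress-mirror shared buffer at 2.5 times the MTU plus one cell: with a 1518-byte MTU that is bytes-to-cells(1518 * 5 / 2) = bytes-to-cells(3795), plus 1.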
{ - netdev_err(port->dev, "Could not create shared buffer for mirroring\n"); - return err; - } - } - - if (bind) { - err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type, - true); - if (err) - goto err_port_bind; - } - - inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL); - if (!inspected_port) { - err = -ENOMEM; - goto err_inspected_port_alloc; - } - inspected_port->local_port = port->local_port; - inspected_port->type = type; - list_add_tail(&inspected_port->list, &span_entry->bound_ports_list); - - return 0; - -err_inspected_port_alloc: - if (bind) - mlxsw_sp_span_inspected_port_bind(port, span_entry, type, - false); -err_port_bind: - if (type == MLXSW_SP_SPAN_EGRESS) { - mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); - } - return err; -} - -static void -mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port, - struct mlxsw_sp_span_entry *span_entry, - enum mlxsw_sp_span_type type, - bool bind) -{ - struct mlxsw_sp_span_inspected_port *inspected_port; - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; - char sbib_pl[MLXSW_REG_SBIB_LEN]; - - inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry); - if (!inspected_port) - return; - - if (bind) - mlxsw_sp_span_inspected_port_bind(port, span_entry, type, - false); - /* remove the SBIB buffer if it was egress SPAN */ - if (type == MLXSW_SP_SPAN_EGRESS) { - mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); - } - - mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); - - list_del(&inspected_port->list); - kfree(inspected_port); -} - -int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, - struct mlxsw_sp_port *to, - enum mlxsw_sp_span_type type, bool bind) -{ - struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp; - struct mlxsw_sp_span_entry *span_entry; - int err; - - span_entry = mlxsw_sp_span_entry_get(to); - if (!span_entry) - return -ENOENT; - - netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n", - span_entry->id); - - err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind); - if (err) - goto err_port_bind; - - return 0; - -err_port_bind: - mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); - return err; -} - -void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port, - enum mlxsw_sp_span_type type, bool bind) -{ - struct mlxsw_sp_span_entry *span_entry; - - span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp, - destination_port); - if (!span_entry) { - netdev_err(from->dev, "no span entry found\n"); - return; - } - - netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n", - span_entry->id); - mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind); -} - static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable, u32 rate) { @@ -1360,6 +1040,16 @@ mlxsw_sp_port_get_hw_xstats(struct net_device *dev, xstats->tail_drop[i] = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, + i, ppcnt_pl); + if (err) + continue; + + xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); + xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); + } } static void update_stats_cache(struct work_struct *work) @@ -1459,6 +1149,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) } mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; + mlxsw_sp_port_vlan->ref_count = 
1; mlxsw_sp_port_vlan->vid = vid; list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); @@ -1486,8 +1177,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); - if (mlxsw_sp_port_vlan) + if (mlxsw_sp_port_vlan) { + mlxsw_sp_port_vlan->ref_count++; return mlxsw_sp_port_vlan; + } return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid); } @@ -1496,6 +1189,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) { struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; + if (--mlxsw_sp_port_vlan->ref_count != 0) + return; + if (mlxsw_sp_port_vlan->bridge_port) mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); else if (fid) @@ -1578,7 +1274,6 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress) { enum mlxsw_sp_span_type span_type; - struct mlxsw_sp_port *to_port; struct net_device *to_dev; to_dev = tcf_mirred_dev(a); @@ -1587,17 +1282,10 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, return -EINVAL; } - if (!mlxsw_sp_port_dev_check(to_dev)) { - netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port"); - return -EOPNOTSUPP; - } - to_port = netdev_priv(to_dev); - - mirror->to_local_port = to_port->local_port; mirror->ingress = ingress; span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type, - true); + return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type, + true, &mirror->span_id); } static void @@ -1608,7 +1296,7 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, span_type = mirror->ingress ? 
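mlxsw_sp_port_vlan_get()/_put() become a real reference-counted pair here: get bumps ref_count on an existing {port, vid} object instead of handing out an alias, and put only tears the object down once the count reaches zero. The contract, sketched (error-handling convention abbreviated):

static int demo_use_port_vlan(struct mlxsw_sp_port *port, u16 vid)
{
	struct mlxsw_sp_port_vlan *pv;

	pv = mlxsw_sp_port_vlan_get(port, vid);	/* ref_count++, or create */
	if (IS_ERR(pv))
		return PTR_ERR(pv);

	/* ... use pv; other holders may share it concurrently ... */

	mlxsw_sp_port_vlan_put(pv);	/* destroys only at ref_count == 0 */
	return 0;
}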
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port, + mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id, span_type, true); } @@ -2702,7 +2390,7 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, int err; autoneg = mlxsw_sp_port->link.autoneg; - mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); if (err) return err; @@ -2736,7 +2424,7 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, bool autoneg; int err; - mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); if (err) return err; @@ -2754,7 +2442,7 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, } mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, - eth_proto_new); + eth_proto_new, autoneg); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); if (err) return err; @@ -2965,7 +2653,7 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, - eth_proto_admin); + eth_proto_admin, mlxsw_sp_port->link.autoneg); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); } @@ -3692,6 +3380,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), /* Multicast Router Traps */ MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), + MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false), MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), @@ -3995,14 +3684,24 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_afa_init; } + err = mlxsw_sp_span_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); + goto err_span_init; + } + + /* Initialize router after SPAN is initialized, so that the FIB and + * neighbor event handlers can issue SPAN respin. + */ err = mlxsw_sp_router_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); goto err_router_init; } - /* Initialize netdevice notifier after router is initialized, so that - * the event handler can use router structures. + /* Initialize netdevice notifier after router and SPAN is initialized, + * so that the event handler can use router structures and call SPAN + * respin. 
*/ mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb); @@ -4011,12 +3710,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_netdev_notifier; } - err = mlxsw_sp_span_init(mlxsw_sp); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); - goto err_span_init; - } - err = mlxsw_sp_acl_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); @@ -4042,12 +3735,12 @@ err_ports_create: err_dpipe_init: mlxsw_sp_acl_fini(mlxsw_sp); err_acl_init: - mlxsw_sp_span_fini(mlxsw_sp); -err_span_init: unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); err_netdev_notifier: mlxsw_sp_router_fini(mlxsw_sp); err_router_init: + mlxsw_sp_span_fini(mlxsw_sp); +err_span_init: mlxsw_sp_afa_fini(mlxsw_sp); err_afa_init: mlxsw_sp_counter_pool_fini(mlxsw_sp); @@ -4073,9 +3766,9 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_ports_remove(mlxsw_sp); mlxsw_sp_dpipe_fini(mlxsw_sp); mlxsw_sp_acl_fini(mlxsw_sp); - mlxsw_sp_span_fini(mlxsw_sp); unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); mlxsw_sp_router_fini(mlxsw_sp); + mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_afa_fini(mlxsw_sp); mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); @@ -4087,12 +3780,8 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) } static const struct mlxsw_config_profile mlxsw_sp_config_profile = { - .used_max_vepa_channels = 1, - .max_vepa_channels = 0, .used_max_mid = 1, .max_mid = MLXSW_SP_MID_MAX, - .used_max_pgt = 1, - .max_pgt = 0, .used_flood_tables = 1, .used_flood_mode = 1, .flood_mode = 3, @@ -4104,8 +3793,7 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = { .max_ib_mc = 0, .used_max_pkey = 1, .max_pkey = 0, - .used_kvd_split_data = 1, - .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY, + .used_kvd_sizes = 1, .kvd_hash_single_parts = 59, .kvd_hash_double_parts = 41, .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, @@ -4115,73 +3803,8 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = { .type = MLXSW_PORT_SWID_TYPE_ETH, } }, - .resource_query_enable = 1, }; -static bool -mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack, - u64 size) -{ - const struct mlxsw_config_profile *profile; - - profile = &mlxsw_sp_config_profile; - if (size % profile->kvd_hash_granularity) { - NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity"); - return false; - } - return true; -} - -static int -mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size, - struct netlink_ext_ack *extack) -{ - NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed"); - return -EINVAL; -} - -static int -mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size, - struct netlink_ext_ack *extack) -{ - if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size)) - return -EINVAL; - - return 0; -} - -static int -mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size, - struct netlink_ext_ack *extack) -{ - struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - - if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size)) - return -EINVAL; - - if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) { - NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum"); - return -EINVAL; - } - return 0; -} - -static int -mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size, - struct netlink_ext_ack 
*extack) -{ - struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - - if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size)) - return -EINVAL; - - if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) { - NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum"); - return -EINVAL; - } - return 0; -} - static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); @@ -4190,30 +3813,16 @@ static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink) return mlxsw_sp_kvdl_occ_get(mlxsw_sp); } -static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = { - .size_validate = mlxsw_sp_resource_kvd_size_validate, -}; - -static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = { - .size_validate = mlxsw_sp_resource_kvd_linear_size_validate, +static const struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = { .occ_get = mlxsw_sp_resource_kvd_linear_occ_get, }; -static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = { - .size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate, -}; - -static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = { - .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate, -}; - -static struct devlink_resource_size_params mlxsw_sp_kvd_size_params; -static struct devlink_resource_size_params mlxsw_sp_linear_size_params; -static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params; -static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params; - static void -mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core) +mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, + struct devlink_resource_size_params *kvd_size_params, + struct devlink_resource_size_params *linear_size_params, + struct devlink_resource_size_params *hash_double_size_params, + struct devlink_resource_size_params *hash_single_size_params) { u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE); @@ -4222,37 +3831,35 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core) u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); u32 linear_size_min = 0; - /* KVD top resource */ - mlxsw_sp_kvd_size_params.size_min = kvd_size; - mlxsw_sp_kvd_size_params.size_max = kvd_size; - mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; - mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; - - /* Linear part init */ - mlxsw_sp_linear_size_params.size_min = linear_size_min; - mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min - - double_size_min; - mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; - mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; - - /* Hash double part init */ - mlxsw_sp_hash_double_size_params.size_min = double_size_min; - mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min - - linear_size_min; - mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; - mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; - - /* Hash single part init */ - mlxsw_sp_hash_single_size_params.size_min = single_size_min; - mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min - - linear_size_min; - mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; - mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; + 
devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); + devlink_resource_size_params_init(linear_size_params, linear_size_min, + kvd_size - single_size_min - + double_size_min, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); + devlink_resource_size_params_init(hash_double_size_params, + double_size_min, + kvd_size - single_size_min - + linear_size_min, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); + devlink_resource_size_params_init(hash_single_size_params, + single_size_min, + kvd_size - double_size_min - + linear_size_min, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); } static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) { struct devlink *devlink = priv_to_devlink(mlxsw_core); + struct devlink_resource_size_params hash_single_size_params; + struct devlink_resource_size_params hash_double_size_params; + struct devlink_resource_size_params linear_size_params; + struct devlink_resource_size_params kvd_size_params; u32 kvd_size, single_size, double_size, linear_size; const struct mlxsw_config_profile *profile; int err; @@ -4261,48 +3868,55 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) return -EIO; - mlxsw_sp_resource_size_params_prepare(mlxsw_core); + mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, + &linear_size_params, + &hash_double_size_params, + &hash_single_size_params); + kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, - true, kvd_size, - MLXSW_SP_RESOURCE_KVD, + kvd_size, MLXSW_SP_RESOURCE_KVD, DEVLINK_RESOURCE_ID_PARENT_TOP, - &mlxsw_sp_kvd_size_params, - &mlxsw_sp_resource_kvd_ops); + &kvd_size_params, + NULL); if (err) return err; linear_size = profile->kvd_linear_size; err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, - false, linear_size, + linear_size, MLXSW_SP_RESOURCE_KVD_LINEAR, MLXSW_SP_RESOURCE_KVD, - &mlxsw_sp_linear_size_params, + &linear_size_params, &mlxsw_sp_resource_kvd_linear_ops); if (err) return err; + err = mlxsw_sp_kvdl_resources_register(mlxsw_core); + if (err) + return err; + double_size = kvd_size - linear_size; double_size *= profile->kvd_hash_double_parts; double_size /= profile->kvd_hash_double_parts + profile->kvd_hash_single_parts; - double_size = rounddown(double_size, profile->kvd_hash_granularity); + double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, - false, double_size, + double_size, MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, MLXSW_SP_RESOURCE_KVD, - &mlxsw_sp_hash_double_size_params, - &mlxsw_sp_resource_kvd_hash_double_ops); + &hash_double_size_params, + NULL); if (err) return err; single_size = kvd_size - double_size - linear_size; err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, - false, single_size, + single_size, MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, MLXSW_SP_RESOURCE_KVD, - &mlxsw_sp_hash_single_size_params, - &mlxsw_sp_resource_kvd_hash_single_ops); + &hash_single_size_params, + NULL); if (err) return err; @@ -4319,8 +3933,7 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, int err; if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || - !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || - !profile->used_kvd_split_data) + !MLXSW_CORE_RES_VALID(mlxsw_core, 
KVD_DOUBLE_MIN_SIZE)) return -EIO; /* The hash part is what left of the kvd without the @@ -4346,7 +3959,7 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, double_size /= profile->kvd_hash_double_parts + profile->kvd_hash_single_parts; *p_double_size = rounddown(double_size, - profile->kvd_hash_granularity); + MLXSW_SP_KVD_GRANULARITY); } err = devlink_resource_size_get(devlink, @@ -4388,6 +4001,7 @@ static struct mlxsw_driver mlxsw_sp_driver = { .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp_config_profile, + .res_query_enabled = true, }; bool mlxsw_sp_port_dev_check(const struct net_device *dev) @@ -4556,13 +4170,11 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, u16 lag_id; if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { - NL_SET_ERR_MSG(extack, - "spectrum: Exceeded number of supported LAG devices"); + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); return false; } if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { - NL_SET_ERR_MSG(extack, - "spectrum: LAG device using unsupported Tx type"); + NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); return false; } return true; @@ -4804,8 +4416,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, !netif_is_lag_master(upper_dev) && !netif_is_bridge_master(upper_dev) && !netif_is_ovs_master(upper_dev)) { - NL_SET_ERR_MSG(extack, - "spectrum: Unknown upper device type"); + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EINVAL; } if (!info->linking) @@ -4814,8 +4425,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, (!netif_is_bridge_master(upper_dev) || !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev))) { - NL_SET_ERR_MSG(extack, - "spectrum: Enslaving a port to a device that already has an upper device is not supported"); + NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); return -EINVAL; } if (netif_is_lag_master(upper_dev) && @@ -4823,24 +4433,20 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, info->upper_info, extack)) return -EINVAL; if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { - NL_SET_ERR_MSG(extack, - "spectrum: Master device is a LAG master and this device has a VLAN"); + NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); return -EINVAL; } if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { - NL_SET_ERR_MSG(extack, - "spectrum: Can not put a VLAN on a LAG port"); + NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); return -EINVAL; } if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { - NL_SET_ERR_MSG(extack, - "spectrum: Master device is an OVS master and this device has a VLAN"); + NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); return -EINVAL; } if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { - NL_SET_ERR_MSG(extack, - "spectrum: Can not put a VLAN on an OVS port"); + NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); return -EINVAL; } break; @@ -4953,7 +4559,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; if (!netif_is_bridge_master(upper_dev)) { - NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers"); + 
NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers"); return -EINVAL; } if (!info->linking) @@ -4962,7 +4568,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, (!netif_is_bridge_master(upper_dev) || !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev))) { - NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported"); + NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); return -EINVAL; } break; @@ -5040,10 +4646,18 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct mlxsw_sp_span_entry *span_entry; struct mlxsw_sp *mlxsw_sp; int err = 0; mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); + if (event == NETDEV_UNREGISTER) { + span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); + if (span_entry) + mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); + } + mlxsw_sp_span_respin(mlxsw_sp); + if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, event, ptr); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index bdd8f94a452c..82820ba43728 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -70,16 +70,23 @@ #define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear" #define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single" #define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double" +#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES "singles" +#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS "chunks" +#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS "large_chunks" enum mlxsw_sp_resource_id { - MLXSW_SP_RESOURCE_KVD, + MLXSW_SP_RESOURCE_KVD = 1, MLXSW_SP_RESOURCE_KVD_LINEAR, MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, }; struct mlxsw_sp_port; struct mlxsw_sp_rif; +struct mlxsw_sp_span_entry; struct mlxsw_sp_upper { struct net_device *dev; @@ -111,32 +118,13 @@ struct mlxsw_sp_mid { unsigned long *ports_in_mid; /* bits array */ }; -enum mlxsw_sp_span_type { - MLXSW_SP_SPAN_EGRESS, - MLXSW_SP_SPAN_INGRESS -}; - -struct mlxsw_sp_span_inspected_port { - struct list_head list; - enum mlxsw_sp_span_type type; - u8 local_port; -}; - -struct mlxsw_sp_span_entry { - u8 local_port; - bool used; - struct list_head bound_ports_list; - int ref_count; - int id; -}; - enum mlxsw_sp_port_mall_action_type { MLXSW_SP_PORT_MALL_MIRROR, MLXSW_SP_PORT_MALL_SAMPLE, }; struct mlxsw_sp_port_mall_mirror_tc_entry { - u8 to_local_port; + int span_id; bool ingress; }; @@ -211,6 +199,7 @@ struct mlxsw_sp_port_vlan { struct list_head list; struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp_fid *fid; + unsigned int ref_count; u16 vid; struct mlxsw_sp_bridge_port *bridge_port; struct list_head bridge_vlan_node; @@ -222,6 +211,8 @@ struct mlxsw_sp_port_xstats { u64 wred_drop[TC_MAX_QUEUE]; u64 tail_drop[TC_MAX_QUEUE]; u64 backlog[TC_MAX_QUEUE]; + u64 tx_bytes[IEEE_8021QAZ_MAX_TCS]; + u64 tx_packets[IEEE_8021QAZ_MAX_TCS]; }; struct mlxsw_sp_port { @@ -259,6 +250,7 @@ struct mlxsw_sp_port { struct mlxsw_sp_port_sample *sample; struct list_head vlans_list; struct mlxsw_sp_qdisc *root_qdisc; + struct mlxsw_sp_qdisc 
*tclass_qdiscs; unsigned acl_rule_count; struct mlxsw_sp_acl_block *ing_acl_block; struct mlxsw_sp_acl_block *eg_acl_block; @@ -396,16 +388,6 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev); struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev); -int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, - struct mlxsw_sp_port *to, - enum mlxsw_sp_span_type type, - bool bind); -void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, - u8 destination_port, - enum mlxsw_sp_span_type type, - bool bind); -struct mlxsw_sp_span_entry * -mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port); /* spectrum_dcb.c */ #ifdef CONFIG_MLXSW_SPECTRUM_DCB @@ -461,6 +443,7 @@ int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, unsigned int *p_alloc_size); u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core); struct mlxsw_sp_acl_rule_info { unsigned int priority; @@ -552,6 +535,7 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei, int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, u16 group_id); +int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 0897a5435cc2..79b1fa27a9a4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -160,6 +160,13 @@ bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block) return block->disable_count; } +static bool +mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset) +{ + /* We hold a reference on ruleset ourselves */ + return ruleset->ref_count == 2; +} + static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, @@ -341,21 +348,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_ht_insert; - if (!chain_index) { - /* We only need ruleset with chain index 0, the implicit one, - * to be directly bound to device. The rest of the rulesets - * are bound by "Goto action set". 
- */ - err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block); - if (err) - goto err_ruleset_bind; - } - return ruleset; -err_ruleset_bind: - rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, - mlxsw_sp_acl_ruleset_ht_params); err_ht_insert: ops->ruleset_del(mlxsw_sp, ruleset->priv); err_ops_ruleset_add: @@ -369,12 +363,8 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset) { const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; - struct mlxsw_sp_acl_block *block = ruleset->ht_key.block; - u32 chain_index = ruleset->ht_key.chain_index; struct mlxsw_sp_acl *acl = mlxsw_sp->acl; - if (!chain_index) - mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block); rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, mlxsw_sp_acl_ruleset_ht_params); ops->ruleset_del(mlxsw_sp, ruleset->priv); @@ -528,6 +518,11 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, return mlxsw_afa_block_jump(rulei->act_block, group_id); } +int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei) +{ + return mlxsw_afa_block_terminate(rulei->act_block); +} + int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei) { return mlxsw_afa_block_append_drop(rulei->act_block); @@ -572,7 +567,6 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, struct net_device *out_dev) { struct mlxsw_sp_acl_block_binding *binding; - struct mlxsw_sp_port *out_port; struct mlxsw_sp_port *in_port; if (!list_is_singular(&block->binding_list)) @@ -581,16 +575,10 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, binding = list_first_entry(&block->binding_list, struct mlxsw_sp_acl_block_binding, list); in_port = binding->mlxsw_sp_port; - if (!mlxsw_sp_port_dev_check(out_dev)) - return -EINVAL; - - out_port = netdev_priv(out_dev); - if (out_port->mlxsw_sp != mlxsw_sp) - return -EINVAL; return mlxsw_afa_block_append_mirror(rulei->act_block, in_port->local_port, - out_port->local_port, + out_dev, binding->ingress); } @@ -695,10 +683,25 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rhashtable_insert; + if (!ruleset->ht_key.chain_index && + mlxsw_sp_acl_ruleset_is_singular(ruleset)) { + /* We only need ruleset with chain index 0, the implicit + * one, to be directly bound to device. The rest of the + * rulesets are bound by "Goto action set". 
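With this change the chain-0 binding is deferred from ruleset creation to insertion of the first rule: mlxsw_sp_acl_rule_add() binds the ruleset to the device only when mlxsw_sp_acl_ruleset_is_singular() sees exactly two references (the internal one taken at creation plus the one held by the current user), and mlxsw_sp_acl_rule_del() unbinds under the same condition when the last rule goes away. A stripped-down kernel-style sketch of that first-user/last-user refcount idiom, using hypothetical names (device_bind()/device_unbind() stand in for the real block bind/unbind, and are stubbed here):

	struct res {
		unsigned int ref_count;	/* starts at 1: creation reference */
	};

	/* Illustrative stubs for the actual bind/unbind operations. */
	static int device_bind(struct res *r) { return 0; }
	static void device_unbind(struct res *r) { }

	static bool res_is_singular(const struct res *r)
	{
		/* creation reference + the current caller's reference */
		return r->ref_count == 2;
	}

	static int res_user_add(struct res *r)
	{
		int err;

		r->ref_count++;			/* caller's reference */
		if (res_is_singular(r)) {
			/* first user: bind the resource to the device */
			err = device_bind(r);
			if (err) {
				r->ref_count--;
				return err;
			}
		}
		return 0;
	}

	static void res_user_del(struct res *r)
	{
		if (res_is_singular(r)) {
			/* last user: unbind before dropping the reference */
			device_unbind(r);
		}
		r->ref_count--;
	}

Binding lazily this way keeps empty rulesets, created but not yet populated with rules, from being offered to the device, which the removed hunk above did unconditionally for chain index 0.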
+ */ + err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, + ruleset->ht_key.block); + if (err) + goto err_ruleset_block_bind; + } + list_add_tail(&rule->list, &mlxsw_sp->acl->rules); ruleset->ht_key.block->rule_count++; return 0; +err_ruleset_block_bind: + rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, + mlxsw_sp_acl_rule_ht_params); err_rhashtable_insert: ops->rule_del(mlxsw_sp, rule->priv); return err; @@ -712,6 +715,10 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, ruleset->ht_key.block->rule_count--; list_del(&rule->list); + if (!ruleset->ht_key.chain_index && + mlxsw_sp_acl_ruleset_is_singular(ruleset)) + mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, + ruleset->ht_key.block); rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, mlxsw_sp_acl_rule_ht_params); ops->rule_del(mlxsw_sp, rule->priv); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 6ca6894125f0..510ce48d87f7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -1,6 +1,6 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved. * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com> * @@ -35,6 +35,7 @@ #include "spectrum_acl_flex_actions.h" #include "core_acl_flex_actions.h" +#include "spectrum_span.h" #define MLXSW_SP_KVDL_ACT_EXT_SIZE 1 @@ -125,40 +126,23 @@ mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index) } static int -mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, u8 local_out_port, +mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, + const struct net_device *out_dev, bool ingress, int *p_span_id) { - struct mlxsw_sp_port *in_port, *out_port; - struct mlxsw_sp_span_entry *span_entry; + struct mlxsw_sp_port *in_port; struct mlxsw_sp *mlxsw_sp = priv; enum mlxsw_sp_span_type type; - int err; type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - out_port = mlxsw_sp->ports[local_out_port]; in_port = mlxsw_sp->ports[local_in_port]; - err = mlxsw_sp_span_mirror_add(in_port, out_port, type, false); - if (err) - return err; - - span_entry = mlxsw_sp_span_entry_find(mlxsw_sp, local_out_port); - if (!span_entry) { - err = -ENOENT; - goto err_span_entry_find; - } - - *p_span_id = span_entry->id; - return 0; - -err_span_entry_find: - mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false); - return err; + return mlxsw_sp_span_mirror_add(in_port, out_dev, type, + false, p_span_id); } static void -mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port, - bool ingress) +mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress) { struct mlxsw_sp *mlxsw_sp = priv; struct mlxsw_sp_port *in_port; @@ -167,7 +151,7 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port, type = ingress ? 
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; in_port = mlxsw_sp->ports[local_in_port]; - mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false); + mlxsw_sp_span_mirror_del(in_port, span_id, type, false); } static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h index 2726192836ad..bd6d552d95b9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h @@ -33,8 +33,8 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H -#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H +#ifndef _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H +#define _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H #include "spectrum.h" diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index c6e180c2be1e..ad1b548e3cac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -228,10 +228,6 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp, if (err) return err; - err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); - if (err) - goto err_group_update; - err = rhashtable_init(&group->chunk_ht, &mlxsw_sp_acl_tcam_chunk_ht_params); if (err) @@ -240,7 +236,6 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp, return 0; err_rhashtable_init: -err_group_update: mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 93728c694e6d..0a9adc5962fb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_SB_CM(10000, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index bbd238e50f05..54262af4e98f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1, [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1, [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1, + [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, }; static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1, - [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, }; static const int *mlxsw_sp_packet_type_sfgc_types[] = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 
6ce00e28d4ea..89dbf569dff5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -65,7 +65,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, tcf_exts_to_list(exts, &actions); list_for_each_entry(a, &actions, list) { if (is_tcf_gact_ok(a)) { - err = mlxsw_sp_acl_rulei_act_continue(rulei); + err = mlxsw_sp_acl_rulei_act_terminate(rulei); if (err) return err; } else if (is_tcf_gact_shot(a)) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 7502e53447bd..98d896c14b87 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Petr Machata <petrm@mellanox.com> + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,126 +33,125 @@ */ #include <net/ip_tunnels.h> +#include <net/ip6_tunnel.h> #include "spectrum_ipip.h" struct ip_tunnel_parm -mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev) +mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev) { struct ip_tunnel *tun = netdev_priv(ol_dev); return tun->parms; } -static bool mlxsw_sp_ipip_parms_has_ikey(struct ip_tunnel_parm parms) +struct __ip6_tnl_parm +mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev) +{ + struct ip6_tnl *tun = netdev_priv(ol_dev); + + return tun->parms; +} + +static bool mlxsw_sp_ipip_parms4_has_ikey(struct ip_tunnel_parm parms) { return !!(parms.i_flags & TUNNEL_KEY); } -static bool mlxsw_sp_ipip_parms_has_okey(struct ip_tunnel_parm parms) +static bool mlxsw_sp_ipip_parms4_has_okey(struct ip_tunnel_parm parms) { return !!(parms.o_flags & TUNNEL_KEY); } -static u32 mlxsw_sp_ipip_parms_ikey(struct ip_tunnel_parm parms) +static u32 mlxsw_sp_ipip_parms4_ikey(struct ip_tunnel_parm parms) { - return mlxsw_sp_ipip_parms_has_ikey(parms) ? + return mlxsw_sp_ipip_parms4_has_ikey(parms) ? be32_to_cpu(parms.i_key) : 0; } -static u32 mlxsw_sp_ipip_parms_okey(struct ip_tunnel_parm parms) +static u32 mlxsw_sp_ipip_parms4_okey(struct ip_tunnel_parm parms) { - return mlxsw_sp_ipip_parms_has_okey(parms) ? + return mlxsw_sp_ipip_parms4_has_okey(parms) ? 
be32_to_cpu(parms.o_key) : 0; } -static __be32 mlxsw_sp_ipip_parms_saddr4(struct ip_tunnel_parm parms) +static union mlxsw_sp_l3addr +mlxsw_sp_ipip_parms4_saddr(struct ip_tunnel_parm parms) { - return parms.iph.saddr; + return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.saddr }; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms_saddr(enum mlxsw_sp_l3proto proto, - struct ip_tunnel_parm parms) +mlxsw_sp_ipip_parms6_saddr(struct __ip6_tnl_parm parms) { - switch (proto) { - case MLXSW_SP_L3_PROTO_IPV4: - return (union mlxsw_sp_l3addr) { - .addr4 = mlxsw_sp_ipip_parms_saddr4(parms), - }; - case MLXSW_SP_L3_PROTO_IPV6: - break; - } - - WARN_ON(1); - return (union mlxsw_sp_l3addr) { - .addr4 = 0, - }; + return (union mlxsw_sp_l3addr) { .addr6 = parms.laddr }; } -static __be32 mlxsw_sp_ipip_parms_daddr4(struct ip_tunnel_parm parms) +static union mlxsw_sp_l3addr +mlxsw_sp_ipip_parms4_daddr(struct ip_tunnel_parm parms) { - return parms.iph.daddr; + return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.daddr }; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms_daddr(enum mlxsw_sp_l3proto proto, - struct ip_tunnel_parm parms) +mlxsw_sp_ipip_parms6_daddr(struct __ip6_tnl_parm parms) +{ + return (union mlxsw_sp_l3addr) { .addr6 = parms.raddr }; +} + +union mlxsw_sp_l3addr +mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, + const struct net_device *ol_dev) { + struct ip_tunnel_parm parms4; + struct __ip6_tnl_parm parms6; + switch (proto) { case MLXSW_SP_L3_PROTO_IPV4: - return (union mlxsw_sp_l3addr) { - .addr4 = mlxsw_sp_ipip_parms_daddr4(parms), - }; + parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); + return mlxsw_sp_ipip_parms4_saddr(parms4); case MLXSW_SP_L3_PROTO_IPV6: - break; + parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev); + return mlxsw_sp_ipip_parms6_saddr(parms6); } WARN_ON(1); - return (union mlxsw_sp_l3addr) { - .addr4 = 0, - }; -} - -static bool mlxsw_sp_ipip_netdev_has_ikey(const struct net_device *ol_dev) -{ - return mlxsw_sp_ipip_parms_has_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev)); + return (union mlxsw_sp_l3addr) {0}; } -static bool mlxsw_sp_ipip_netdev_has_okey(const struct net_device *ol_dev) +static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) { - return mlxsw_sp_ipip_parms_has_okey(mlxsw_sp_ipip_netdev_parms(ol_dev)); -} -static u32 mlxsw_sp_ipip_netdev_ikey(const struct net_device *ol_dev) -{ - return mlxsw_sp_ipip_parms_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev)); -} + struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); -static u32 mlxsw_sp_ipip_netdev_okey(const struct net_device *ol_dev) -{ - return mlxsw_sp_ipip_parms_okey(mlxsw_sp_ipip_netdev_parms(ol_dev)); + return mlxsw_sp_ipip_parms4_daddr(parms4).addr4; } -union mlxsw_sp_l3addr -mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, +static union mlxsw_sp_l3addr +mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, const struct net_device *ol_dev) { - return mlxsw_sp_ipip_parms_saddr(proto, - mlxsw_sp_ipip_netdev_parms(ol_dev)); -} + struct ip_tunnel_parm parms4; + struct __ip6_tnl_parm parms6; -static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) -{ - return mlxsw_sp_ipip_parms_daddr4(mlxsw_sp_ipip_netdev_parms(ol_dev)); + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); + return mlxsw_sp_ipip_parms4_daddr(parms4); + case MLXSW_SP_L3_PROTO_IPV6: + parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev); + return mlxsw_sp_ipip_parms6_daddr(parms6); + } + + WARN_ON(1); + return (union mlxsw_sp_l3addr) {0}; } -static 
union mlxsw_sp_l3addr -mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, - const struct net_device *ol_dev) +bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr) { - return mlxsw_sp_ipip_parms_daddr(proto, - mlxsw_sp_ipip_netdev_parms(ol_dev)); + union mlxsw_sp_l3addr naddr = {0}; + + return !memcmp(&addr, &naddr, sizeof(naddr)); } static int @@ -176,12 +175,17 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp, u32 tunnel_index, struct mlxsw_sp_ipip_entry *ipip_entry) { - bool has_ikey = mlxsw_sp_ipip_netdev_has_ikey(ipip_entry->ol_dev); u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); - u32 ikey = mlxsw_sp_ipip_netdev_ikey(ipip_entry->ol_dev); char rtdp_pl[MLXSW_REG_RTDP_LEN]; + struct ip_tunnel_parm parms; unsigned int type_check; + bool has_ikey; u32 daddr4; + u32 ikey; + + parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev); + has_ikey = mlxsw_sp_ipip_parms4_has_ikey(parms); + ikey = mlxsw_sp_ipip_parms4_ikey(parms); mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index); @@ -243,15 +247,14 @@ static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto, { union mlxsw_sp_l3addr saddr = mlxsw_sp_ipip_netdev_saddr(proto, ol_dev); union mlxsw_sp_l3addr daddr = mlxsw_sp_ipip_netdev_daddr(proto, ol_dev); - union mlxsw_sp_l3addr naddr = {0}; /* Tunnels with unset local or remote address are valid in Linux and * used for lightweight tunnels (LWT) and Non-Broadcast Multi-Access * (NBMA) tunnels. In principle these can be offloaded, but the driver * currently doesn't support this. So punt. */ - return memcmp(&saddr, &naddr, sizeof(naddr)) && - memcmp(&daddr, &naddr, sizeof(naddr)); + return !mlxsw_sp_l3addr_is_zero(saddr) && + !mlxsw_sp_l3addr_is_zero(daddr); } static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp, @@ -273,14 +276,15 @@ static struct mlxsw_sp_rif_ipip_lb_config mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, const struct net_device *ol_dev) { + struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; - lb_ipipt = mlxsw_sp_ipip_netdev_has_okey(ol_dev) ? + lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(parms) ? 
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP : MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP; return (struct mlxsw_sp_rif_ipip_lb_config){ .lb_ipipt = lb_ipipt, - .okey = mlxsw_sp_ipip_netdev_okey(ol_dev), + .okey = mlxsw_sp_ipip_parms4_okey(parms), .ul_protocol = MLXSW_SP_L3_PROTO_IPV4, .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4, ol_dev), @@ -300,16 +304,12 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, bool update_nhs = false; int err = 0; - new_parms = mlxsw_sp_ipip_netdev_parms(ipip_entry->ol_dev); + new_parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev); - new_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4, - new_parms); - old_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4, - ipip_entry->parms); - new_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4, - new_parms); - old_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4, - ipip_entry->parms); + new_saddr = mlxsw_sp_ipip_parms4_saddr(new_parms); + old_saddr = mlxsw_sp_ipip_parms4_saddr(ipip_entry->parms4); + new_daddr = mlxsw_sp_ipip_parms4_daddr(new_parms); + old_daddr = mlxsw_sp_ipip_parms4_daddr(ipip_entry->parms4); if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) { u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev); @@ -326,14 +326,14 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, } update_tunnel = true; - } else if ((mlxsw_sp_ipip_parms_okey(ipip_entry->parms) != - mlxsw_sp_ipip_parms_okey(new_parms)) || - ipip_entry->parms.link != new_parms.link) { + } else if ((mlxsw_sp_ipip_parms4_okey(ipip_entry->parms4) != + mlxsw_sp_ipip_parms4_okey(new_parms)) || + ipip_entry->parms4.link != new_parms.link) { update_tunnel = true; } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) { update_nhs = true; - } else if (mlxsw_sp_ipip_parms_ikey(ipip_entry->parms) != - mlxsw_sp_ipip_parms_ikey(new_parms)) { + } else if (mlxsw_sp_ipip_parms4_ikey(ipip_entry->parms4) != + mlxsw_sp_ipip_parms4_ikey(new_parms)) { update_decap = true; } @@ -350,7 +350,7 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, false, false, false, extack); - ipip_entry->parms = new_parms; + ipip_entry->parms4 = new_parms; return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index 04b08d9d76e9..6909d867bb59 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Petr Machata <petrm@mellanox.com> + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -37,14 +37,19 @@ #include "spectrum_router.h" #include <net/ip_fib.h> +#include <linux/if_tunnel.h> struct ip_tunnel_parm -mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev); +mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev); +struct __ip6_tnl_parm +mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev); union mlxsw_sp_l3addr mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, const struct net_device *ol_dev); +bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr); + enum mlxsw_sp_ipip_type { MLXSW_SP_IPIP_TYPE_GRE4, MLXSW_SP_IPIP_TYPE_MAX, @@ -56,7 +61,9 @@ struct mlxsw_sp_ipip_entry { struct mlxsw_sp_rif_ipip_lb *ol_lb; struct mlxsw_sp_fib_entry *decap_fib_entry; struct list_head ipip_list_node; - struct ip_tunnel_parm parms; + union { + struct ip_tunnel_parm parms4; + }; }; struct mlxsw_sp_ipip_ops { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index 55f9d2d70f9e..8796db44dcc3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -55,24 +55,47 @@ #define MLXSW_SP_KVDL_LARGE_CHUNKS_END \ (MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1) -#define MLXSW_SP_CHUNK_MAX 32 -#define MLXSW_SP_LARGE_CHUNK_MAX 512 +#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1 +#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32 +#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512 struct mlxsw_sp_kvdl_part_info { unsigned int part_index; unsigned int start_index; unsigned int end_index; unsigned int alloc_size; + enum mlxsw_sp_resource_id resource_id; }; +enum mlxsw_sp_kvdl_part_id { + MLXSW_SP_KVDL_PART_ID_SINGLE, + MLXSW_SP_KVDL_PART_ID_CHUNKS, + MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS, +}; + +#define MLXSW_SP_KVDL_PART_INFO(id) \ +[MLXSW_SP_KVDL_PART_ID_##id] = { \ + .start_index = MLXSW_SP_KVDL_##id##_BASE, \ + .end_index = MLXSW_SP_KVDL_##id##_END, \ + .alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE, \ + .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \ +} + +static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = { + MLXSW_SP_KVDL_PART_INFO(SINGLE), + MLXSW_SP_KVDL_PART_INFO(CHUNKS), + MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS), +}; + +#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info) + struct mlxsw_sp_kvdl_part { - struct list_head list; - const struct mlxsw_sp_kvdl_part_info *info; + struct mlxsw_sp_kvdl_part_info info; unsigned long usage[0]; /* Entries */ }; struct mlxsw_sp_kvdl { - struct list_head parts_list; + struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN]; }; static struct mlxsw_sp_kvdl_part * @@ -80,11 +103,13 @@ mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl, unsigned int alloc_size) { struct mlxsw_sp_kvdl_part *part, *min_part = NULL; + int i; - list_for_each_entry(part, &kvdl->parts_list, list) { - if (alloc_size <= part->info->alloc_size && + for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { + part = kvdl->parts[i]; + if (alloc_size <= part->info.alloc_size && (!min_part || - part->info->alloc_size <= min_part->info->alloc_size)) + part->info.alloc_size <= min_part->info.alloc_size)) min_part = part; } @@ -95,10 +120,12 @@ static struct mlxsw_sp_kvdl_part * mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index) { struct 
mlxsw_sp_kvdl_part *part; + int i; - list_for_each_entry(part, &kvdl->parts_list, list) { - if (kvdl_index >= part->info->start_index && - kvdl_index <= part->info->end_index) + for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { + part = kvdl->parts[i]; + if (kvdl_index >= part->info.start_index && + kvdl_index <= part->info.end_index) return part; } @@ -122,7 +149,7 @@ mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info, static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part, u32 *p_kvdl_index) { - const struct mlxsw_sp_kvdl_part_info *info = part->info; + const struct mlxsw_sp_kvdl_part_info *info = &part->info; unsigned int entry_index, nr_entries; nr_entries = (info->end_index - info->start_index + 1) / @@ -132,8 +159,7 @@ static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part, return -ENOBUFS; __set_bit(entry_index, part->usage); - *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(part->info, - entry_index); + *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index); return 0; } @@ -141,10 +167,10 @@ static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part, static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part, u32 kvdl_index) { + const struct mlxsw_sp_kvdl_part_info *info = &part->info; unsigned int entry_index; - entry_index = mlxsw_sp_kvdl_index_entry_index(part->info, - kvdl_index); + entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index); __clear_bit(entry_index, part->usage); } @@ -183,135 +209,212 @@ int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, if (IS_ERR(part)) return PTR_ERR(part); - *p_alloc_size = part->info->alloc_size; + *p_alloc_size = part->info.alloc_size; return 0; } -static const struct mlxsw_sp_kvdl_part_info kvdl_parts_info[] = { - { - .part_index = 0, - .start_index = MLXSW_SP_KVDL_SINGLE_BASE, - .end_index = MLXSW_SP_KVDL_SINGLE_END, - .alloc_size = 1, - }, - { - .part_index = 1, - .start_index = MLXSW_SP_KVDL_CHUNKS_BASE, - .end_index = MLXSW_SP_KVDL_CHUNKS_END, - .alloc_size = MLXSW_SP_CHUNK_MAX, - }, - { - .part_index = 2, - .start_index = MLXSW_SP_KVDL_LARGE_CHUNKS_BASE, - .end_index = MLXSW_SP_KVDL_LARGE_CHUNKS_END, - .alloc_size = MLXSW_SP_LARGE_CHUNK_MAX, - }, -}; - -static struct mlxsw_sp_kvdl_part * -mlxsw_sp_kvdl_part_find(struct mlxsw_sp *mlxsw_sp, unsigned int part_index) +static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part, + struct mlxsw_sp_kvdl_part *part_prev, + unsigned int size) { - struct mlxsw_sp_kvdl_part *part; - list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list) { - if (part->info->part_index == part_index) - return part; + if (!part_prev) { + part->info.end_index = size - 1; + } else { + part->info.start_index = part_prev->info.end_index + 1; + part->info.end_index = part->info.start_index + size - 1; } - - return NULL; } -static int mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, - unsigned int part_index) +static struct mlxsw_sp_kvdl_part * +mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_kvdl_part_info *info, + struct mlxsw_sp_kvdl_part *part_prev) { - const struct mlxsw_sp_kvdl_part_info *info; + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); struct mlxsw_sp_kvdl_part *part; + bool need_update = true; unsigned int nr_entries; size_t usage_size; + u64 resource_size; + int err; - info = &kvdl_parts_info[part_index]; + err = devlink_resource_size_get(devlink, info->resource_id, + &resource_size); + if (err) { + need_update = false; + resource_size = 
info->end_index - info->start_index + 1; + } - nr_entries = (info->end_index - info->start_index + 1) / - info->alloc_size; + nr_entries = div_u64(resource_size, info->alloc_size); usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long); part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL); if (!part) - return -ENOMEM; + return ERR_PTR(-ENOMEM); - part->info = info; - list_add(&part->list, &mlxsw_sp->kvdl->parts_list); + memcpy(&part->info, info, sizeof(part->info)); - return 0; + if (need_update) + mlxsw_sp_kvdl_part_update(part, part_prev, resource_size); + return part; } -static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp *mlxsw_sp, - unsigned int part_index) +static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part) { - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp_kvdl_part_find(mlxsw_sp, part_index); - if (!part) - return; - - list_del(&part->list); kfree(part); } static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp) { + struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; + const struct mlxsw_sp_kvdl_part_info *info; + struct mlxsw_sp_kvdl_part *part_prev = NULL; int err, i; - INIT_LIST_HEAD(&mlxsw_sp->kvdl->parts_list); - - for (i = 0; i < ARRAY_SIZE(kvdl_parts_info); i++) { - err = mlxsw_sp_kvdl_part_init(mlxsw_sp, i); - if (err) + for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { + info = &mlxsw_sp_kvdl_parts_info[i]; + kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info, + part_prev); + if (IS_ERR(kvdl->parts[i])) { + err = PTR_ERR(kvdl->parts[i]); goto err_kvdl_part_init; + } + part_prev = kvdl->parts[i]; } - return 0; err_kvdl_part_init: for (i--; i >= 0; i--) - mlxsw_sp_kvdl_part_fini(mlxsw_sp, i); + mlxsw_sp_kvdl_part_fini(kvdl->parts[i]); return err; } static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp) { + struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; int i; - for (i = ARRAY_SIZE(kvdl_parts_info) - 1; i >= 0; i--) - mlxsw_sp_kvdl_part_fini(mlxsw_sp, i); + for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) + mlxsw_sp_kvdl_part_fini(kvdl->parts[i]); } static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part) { + const struct mlxsw_sp_kvdl_part_info *info = &part->info; unsigned int nr_entries; int bit = -1; u64 occ = 0; - nr_entries = (part->info->end_index - - part->info->start_index + 1) / - part->info->alloc_size; + nr_entries = (info->end_index - + info->start_index + 1) / + info->alloc_size; while ((bit = find_next_bit(part->usage, nr_entries, bit + 1)) < nr_entries) - occ += part->info->alloc_size; + occ += info->alloc_size; return occ; } u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_sp_kvdl_part *part; u64 occ = 0; + int i; - list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list) - occ += mlxsw_sp_kvdl_part_occ(part); + for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) + occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]); return occ; } +static u64 mlxsw_sp_kvdl_single_occ_get(struct devlink *devlink) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + struct mlxsw_sp_kvdl_part *part; + + part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE]; + return mlxsw_sp_kvdl_part_occ(part); +} + +static u64 mlxsw_sp_kvdl_chunks_occ_get(struct devlink *devlink) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + struct mlxsw_sp_kvdl_part *part; + + part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS]; + return 
mlxsw_sp_kvdl_part_occ(part); +} + +static u64 mlxsw_sp_kvdl_large_chunks_occ_get(struct devlink *devlink) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + struct mlxsw_sp_kvdl_part *part; + + part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS]; + return mlxsw_sp_kvdl_part_occ(part); +} + +static const struct devlink_resource_ops mlxsw_sp_kvdl_single_ops = { + .occ_get = mlxsw_sp_kvdl_single_occ_get, +}; + +static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_ops = { + .occ_get = mlxsw_sp_kvdl_chunks_occ_get, +}; + +static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_large_ops = { + .occ_get = mlxsw_sp_kvdl_large_chunks_occ_get, +}; + +int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + static struct devlink_resource_size_params size_params; + u32 kvdl_max_size; + int err; + + kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - + MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) - + MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE); + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES, + MLXSW_SP_KVDL_SINGLE_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params, + &mlxsw_sp_kvdl_single_ops); + if (err) + return err; + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS, + MLXSW_SP_KVDL_CHUNKS_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params, + &mlxsw_sp_kvdl_chunks_ops); + if (err) + return err; + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS, + MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params, + &mlxsw_sp_kvdl_chunks_large_ops); + return err; +} + int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_kvdl *kvdl; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index d20b143de3b4..a82539609d49 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -33,6 +33,7 @@ */ #include <linux/rhashtable.h> +#include <net/ipv6.h> #include "spectrum_mr.h" #include "spectrum_router.h" @@ -47,6 +48,11 @@ struct mlxsw_sp_mr { /* priv has to be always the last item */ }; +struct mlxsw_sp_mr_vif; +struct mlxsw_sp_mr_vif_ops { + bool (*is_regular)(const struct mlxsw_sp_mr_vif *vif); +}; + struct mlxsw_sp_mr_vif { struct net_device *dev; const struct mlxsw_sp_rif *rif; @@ -61,6 +67,9 @@ struct mlxsw_sp_mr_vif { * instance is used as an ingress VIF */ struct list_head route_ivif_list; + + /* Protocol specific operations for a VIF */ + const struct mlxsw_sp_mr_vif_ops *ops; }; struct mlxsw_sp_mr_route_vif_entry { @@ -70,6 +79,17 @@ struct mlxsw_sp_mr_route_vif_entry { struct mlxsw_sp_mr_route *mr_route; }; +struct mlxsw_sp_mr_table; +struct mlxsw_sp_mr_table_ops { + bool (*is_route_valid)(const struct 
mlxsw_sp_mr_table *mr_table, + const struct mr_mfc *mfc); + void (*key_create)(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route_key *key, + struct mr_mfc *mfc); + bool (*is_route_starg)(const struct mlxsw_sp_mr_table *mr_table, + const struct mlxsw_sp_mr_route *mr_route); +}; + struct mlxsw_sp_mr_table { struct list_head node; enum mlxsw_sp_l3proto proto; @@ -78,6 +98,7 @@ struct mlxsw_sp_mr_table { struct mlxsw_sp_mr_vif vifs[MAXVIFS]; struct list_head route_list; struct rhashtable route_ht; + const struct mlxsw_sp_mr_table_ops *ops; char catchall_route_priv[0]; /* catchall_route_priv has to be always the last item */ }; @@ -88,7 +109,7 @@ struct mlxsw_sp_mr_route { struct mlxsw_sp_mr_route_key key; enum mlxsw_sp_mr_route_action route_action; u16 min_mtu; - struct mfc_cache *mfc4; + struct mr_mfc *mfc; void *route_priv; const struct mlxsw_sp_mr_table *mr_table; /* A list of route_vif_entry structs that point to the egress VIFs */ @@ -104,14 +125,9 @@ static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = { .automatic_shrinking = true, }; -static bool mlxsw_sp_mr_vif_regular(const struct mlxsw_sp_mr_vif *vif) -{ - return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER)); -} - static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif) { - return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif; + return vif->ops->is_regular(vif) && vif->dev && vif->rif; } static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif) @@ -122,18 +138,9 @@ static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif) static bool mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route) { - vifi_t ivif; + vifi_t ivif = mr_route->mfc->mfc_parent; - switch (mr_route->mr_table->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - ivif = mr_route->mfc4->mfc_parent; - return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255; - case MLXSW_SP_L3_PROTO_IPV6: - /* fall through */ - default: - WARN_ON_ONCE(1); - } - return false; + return mr_route->mfc->mfc_un.res.ttls[ivif] != 255; } static int @@ -149,19 +156,6 @@ mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route) return valid_evifs; } -static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route) -{ - switch (mr_route->mr_table->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY); - case MLXSW_SP_L3_PROTO_IPV6: - /* fall through */ - default: - WARN_ON_ONCE(1); - } - return false; -} - static enum mlxsw_sp_mr_route_action mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route) { @@ -174,7 +168,8 @@ mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route) /* The kernel does not match a (*,G) route that the ingress interface is * not one of the egress interfaces, so trap these kind of routes. */ - if (mlxsw_sp_mr_route_starg(mr_route) && + if (mr_route->mr_table->ops->is_route_starg(mr_route->mr_table, + mr_route) && !mlxsw_sp_mr_route_ivif_in_evifs(mr_route)) return MLXSW_SP_MR_ROUTE_ACTION_TRAP; @@ -195,25 +190,11 @@ mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route) static enum mlxsw_sp_mr_route_prio mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route) { - return mlxsw_sp_mr_route_starg(mr_route) ? + return mr_route->mr_table->ops->is_route_starg(mr_route->mr_table, + mr_route) ? 
MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG; } -static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table, - struct mlxsw_sp_mr_route_key *key, - const struct mfc_cache *mfc) -{ - bool starg = (mfc->mfc_origin == htonl(INADDR_ANY)); - - memset(key, 0, sizeof(*key)); - key->vrid = mr_table->vr_id; - key->proto = mr_table->proto; - key->group.addr4 = mfc->mfc_mcastgrp; - key->group_mask.addr4 = htonl(0xffffffff); - key->source.addr4 = mfc->mfc_origin; - key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff); -} - static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route, struct mlxsw_sp_mr_vif *mr_vif) { @@ -343,8 +324,8 @@ static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table, } static struct mlxsw_sp_mr_route * -mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table, - struct mfc_cache *mfc) +mlxsw_sp_mr_route_create(struct mlxsw_sp_mr_table *mr_table, + struct mr_mfc *mfc) { struct mlxsw_sp_mr_route_vif_entry *rve, *tmp; struct mlxsw_sp_mr_route *mr_route; @@ -356,12 +337,13 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table, if (!mr_route) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&mr_route->evif_list); - mlxsw_sp_mr_route4_key(mr_table, &mr_route->key, mfc); /* Find min_mtu and link iVIF and eVIFs */ mr_route->min_mtu = ETH_MAX_MTU; - ipmr_cache_hold(mfc); - mr_route->mfc4 = mfc; + mr_cache_hold(mfc); + mr_route->mfc = mfc; + mr_table->ops->key_create(mr_table, &mr_route->key, mr_route->mfc); + mr_route->mr_table = mr_table; for (i = 0; i < MAXVIFS; i++) { if (mfc->mfc_un.res.ttls[i] != 255) { @@ -374,59 +356,38 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table, mr_route->min_mtu = mr_table->vifs[i].dev->mtu; } } - mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]); + mlxsw_sp_mr_route_ivif_link(mr_route, + &mr_table->vifs[mfc->mfc_parent]); mr_route->route_action = mlxsw_sp_mr_route_action(mr_route); return mr_route; err: - ipmr_cache_put(mfc); + mr_cache_put(mfc); list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node) mlxsw_sp_mr_route_evif_unlink(rve); kfree(mr_route); return ERR_PTR(err); } -static void mlxsw_sp_mr_route4_destroy(struct mlxsw_sp_mr_table *mr_table, - struct mlxsw_sp_mr_route *mr_route) +static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route *mr_route) { struct mlxsw_sp_mr_route_vif_entry *rve, *tmp; mlxsw_sp_mr_route_ivif_unlink(mr_route); - ipmr_cache_put(mr_route->mfc4); + mr_cache_put(mr_route->mfc); list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node) mlxsw_sp_mr_route_evif_unlink(rve); kfree(mr_route); } -static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table, - struct mlxsw_sp_mr_route *mr_route) -{ - switch (mr_table->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - mlxsw_sp_mr_route4_destroy(mr_table, mr_route); - break; - case MLXSW_SP_L3_PROTO_IPV6: - /* fall through */ - default: - WARN_ON_ONCE(1); - } -} - static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route, bool offload) { - switch (mr_route->mr_table->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - if (offload) - mr_route->mfc4->mfc_flags |= MFC_OFFLOAD; - else - mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD; - break; - case MLXSW_SP_L3_PROTO_IPV6: - /* fall through */ - default: - WARN_ON_ONCE(1); - } + if (offload) + mr_route->mfc->mfc_flags |= MFC_OFFLOAD; + else + mr_route->mfc->mfc_flags &= ~MFC_OFFLOAD; } static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route) @@ 
-448,25 +409,18 @@ static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table, mlxsw_sp_mr_route_destroy(mr_table, mr_route); } -int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table, - struct mfc_cache *mfc, bool replace) +int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table, + struct mr_mfc *mfc, bool replace) { struct mlxsw_sp_mr_route *mr_orig_route = NULL; struct mlxsw_sp_mr_route *mr_route; int err; - /* If the route is a (*,*) route, abort, as these kind of routes are - * used for proxy routes. - */ - if (mfc->mfc_origin == htonl(INADDR_ANY) && - mfc->mfc_mcastgrp == htonl(INADDR_ANY)) { - dev_warn(mr_table->mlxsw_sp->bus_info->dev, - "Offloading proxy routes is not supported.\n"); + if (!mr_table->ops->is_route_valid(mr_table, mfc)) return -EINVAL; - } /* Create a new route */ - mr_route = mlxsw_sp_mr_route4_create(mr_table, mfc); + mr_route = mlxsw_sp_mr_route_create(mr_table, mfc); if (IS_ERR(mr_route)) return PTR_ERR(mr_route); @@ -511,7 +465,7 @@ int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table, &mr_orig_route->ht_node, mlxsw_sp_mr_route_ht_params); list_del(&mr_orig_route->node); - mlxsw_sp_mr_route4_destroy(mr_table, mr_orig_route); + mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route); } mlxsw_sp_mr_mfc_offload_update(mr_route); @@ -524,17 +478,17 @@ err_rhashtable_insert: list_del(&mr_route->node); err_no_orig_route: err_duplicate_route: - mlxsw_sp_mr_route4_destroy(mr_table, mr_route); + mlxsw_sp_mr_route_destroy(mr_table, mr_route); return err; } -void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table, - struct mfc_cache *mfc) +void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table, + struct mr_mfc *mfc) { struct mlxsw_sp_mr_route *mr_route; struct mlxsw_sp_mr_route_key key; - mlxsw_sp_mr_route4_key(mr_table, &key, mfc); + mr_table->ops->key_create(mr_table, &key, mfc); mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key, mlxsw_sp_mr_route_ht_params); if (mr_route) @@ -839,6 +793,125 @@ void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table, } } +/* Protocol specific functions */ +static bool +mlxsw_sp_mr_route4_validate(const struct mlxsw_sp_mr_table *mr_table, + const struct mr_mfc *c) +{ + struct mfc_cache *mfc = (struct mfc_cache *) c; + + /* If the route is a (*,*) route, abort, as these kinds of routes are + * used for proxy routes. + */ + if (mfc->mfc_origin == htonl(INADDR_ANY) && + mfc->mfc_mcastgrp == htonl(INADDR_ANY)) { + dev_warn(mr_table->mlxsw_sp->bus_info->dev, + "Offloading proxy routes is not supported.\n"); + return false; + } + return true; +} + +static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route_key *key, + struct mr_mfc *c) +{ + const struct mfc_cache *mfc = (struct mfc_cache *) c; + bool starg; + + starg = (mfc->mfc_origin == htonl(INADDR_ANY)); + + memset(key, 0, sizeof(*key)); + key->vrid = mr_table->vr_id; + key->proto = MLXSW_SP_L3_PROTO_IPV4; + key->group.addr4 = mfc->mfc_mcastgrp; + key->group_mask.addr4 = htonl(0xffffffff); + key->source.addr4 = mfc->mfc_origin; + key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff); +} + +static bool mlxsw_sp_mr_route4_starg(const struct mlxsw_sp_mr_table *mr_table, + const struct mlxsw_sp_mr_route *mr_route) +{ + return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY); +} + +static bool mlxsw_sp_mr_vif4_is_regular(const struct mlxsw_sp_mr_vif *vif) +{ + return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER)); +} + +static bool +mlxsw_sp_mr_route6_validate(const struct mlxsw_sp_mr_table *mr_table, + const struct mr_mfc *c) +{ + struct mfc6_cache *mfc = (struct mfc6_cache *) c; + + /* If the route is a (*,*) route, abort, as these kinds of routes are + * used for proxy routes. + */ + if (ipv6_addr_any(&mfc->mf6c_origin) && + ipv6_addr_any(&mfc->mf6c_mcastgrp)) { + dev_warn(mr_table->mlxsw_sp->bus_info->dev, + "Offloading proxy routes is not supported.\n"); + return false; + } + return true; +} + +static void mlxsw_sp_mr_route6_key(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route_key *key, + struct mr_mfc *c) +{ + const struct mfc6_cache *mfc = (struct mfc6_cache *) c; + + memset(key, 0, sizeof(*key)); + key->vrid = mr_table->vr_id; + key->proto = MLXSW_SP_L3_PROTO_IPV6; + key->group.addr6 = mfc->mf6c_mcastgrp; + memset(&key->group_mask.addr6, 0xff, sizeof(key->group_mask.addr6)); + key->source.addr6 = mfc->mf6c_origin; + if (!ipv6_addr_any(&mfc->mf6c_origin)) + memset(&key->source_mask.addr6, 0xff, + sizeof(key->source_mask.addr6)); +} + +static bool mlxsw_sp_mr_route6_starg(const struct mlxsw_sp_mr_table *mr_table, + const struct mlxsw_sp_mr_route *mr_route) +{ + return ipv6_addr_any(&mr_route->key.source_mask.addr6); +} + +static bool mlxsw_sp_mr_vif6_is_regular(const struct mlxsw_sp_mr_vif *vif) +{ + return !(vif->vif_flags & MIFF_REGISTER); +} + +static struct +mlxsw_sp_mr_vif_ops mlxsw_sp_mr_vif_ops_arr[] = { + { + .is_regular = mlxsw_sp_mr_vif4_is_regular, + }, + { + .is_regular = mlxsw_sp_mr_vif6_is_regular, + }, +}; + +static struct +mlxsw_sp_mr_table_ops mlxsw_sp_mr_table_ops_arr[] = { + { + .is_route_valid = mlxsw_sp_mr_route4_validate, + .key_create = mlxsw_sp_mr_route4_key, + .is_route_starg = mlxsw_sp_mr_route4_starg, + }, + { + .is_route_valid = mlxsw_sp_mr_route6_validate, + .key_create = mlxsw_sp_mr_route6_key, + .is_route_starg = mlxsw_sp_mr_route6_starg, + }, + +}; + struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp, u32 vr_id, enum mlxsw_sp_l3proto proto) @@ -847,6 +920,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp, .prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL, .key = { .vrid = vr_id, + .proto = proto, }, .value = { .route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP, @@ -865,6 +939,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp, mr_table->vr_id = vr_id; mr_table->mlxsw_sp = mlxsw_sp; mr_table->proto = proto; + mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto]; INIT_LIST_HEAD(&mr_table->route_list); err = rhashtable_init(&mr_table->route_ht, @@ -875,6 +950,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < MAXVIFS; i++) { INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list); INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list); + mr_table->vifs[i].ops = &mlxsw_sp_mr_vif_ops_arr[proto]; } err = mr->mr_ops->route_create(mlxsw_sp, mr->priv, @@ -941,18 +1017,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp, mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets, &bytes); - switch (mr_route->mr_table->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - if 
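The (struct mfc_cache *) and (struct mfc6_cache *) casts in the validate and key_create callbacks above are only sound because struct mr_mfc is the common base embedded as the first member of both cache structures, so a pointer to the base and a pointer to the containing cache coincide at offset zero. A standalone sketch of that layout contract, with hypothetical stand-in types:

    #include <assert.h>
    #include <stddef.h>

    struct mr_mfc_like { int refcount; unsigned short mfc_parent; };
    struct mfc_cache_like {
            struct mr_mfc_like _c;          /* common base; must stay first */
            unsigned int mfc_origin;
    };

    int main(void)
    {
            struct mfc_cache_like c4 = { .mfc_origin = 0x0a000001 };
            struct mr_mfc_like *base = &c4._c;

            /* The cast back is valid only because _c sits at offset 0. */
            assert(offsetof(struct mfc_cache_like, _c) == 0);
            struct mfc_cache_like *c = (struct mfc_cache_like *)base;
            return c->mfc_origin == 0x0a000001 ? 0 : 1;
    }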
(mr_route->mfc4->mfc_un.res.pkt != packets) - mr_route->mfc4->mfc_un.res.lastuse = jiffies; - mr_route->mfc4->mfc_un.res.pkt = packets; - mr_route->mfc4->mfc_un.res.bytes = bytes; - break; - case MLXSW_SP_L3_PROTO_IPV6: - /* fall through */ - default: - WARN_ON_ONCE(1); - } + if (mr_route->mfc->mfc_un.res.pkt != packets) + mr_route->mfc->mfc_un.res.lastuse = jiffies; + mr_route->mfc->mfc_un.res.pkt = packets; + mr_route->mfc->mfc_un.res.bytes = bytes; } static void mlxsw_sp_mr_stats_update(struct work_struct *work) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h index 5d26a122af49..7c864a86811d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h @@ -36,6 +36,7 @@ #define _MLXSW_SPECTRUM_MCROUTER_H #include <linux/mroute.h> +#include <linux/mroute6.h> #include "spectrum_router.h" #include "spectrum.h" @@ -109,10 +110,10 @@ struct mlxsw_sp_mr_table; int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_mr_ops *mr_ops); void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp); -int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table, - struct mfc_cache *mfc, bool replace); -void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table, - struct mfc_cache *mfc); +int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table, + struct mr_mfc *mfc, bool replace); +void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table, + struct mr_mfc *mfc); int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table, struct net_device *dev, vifi_t vif_index, unsigned long vif_flags, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 4c7f32d4288d..4f4c0d311883 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -51,7 +51,7 @@ struct mlxsw_sp_mr_tcam_region { }; struct mlxsw_sp_mr_tcam { - struct mlxsw_sp_mr_tcam_region ipv4_tcam_region; + struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX]; }; /* This struct maps to one RIGR2 register entry */ @@ -316,20 +316,37 @@ static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp, mlxsw_afa_block_first_set(afa_block)); break; case MLXSW_SP_L3_PROTO_IPV6: - default: - WARN_ON_ONCE(1); + mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index, + key->vrid, + MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, + key->group.addr6, + key->group_mask.addr6, + key->source.addr6, + key->source_mask.addr6, + mlxsw_afa_block_first_set(afa_block)); } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); } static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid, + struct mlxsw_sp_mr_route_key *key, struct parman_item *parman_item) { + struct in6_addr zero_addr = IN6ADDR_ANY_INIT; char rmft2_pl[MLXSW_REG_RMFT2_LEN]; - mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, vrid, - 0, 0, 0, 0, 0, 0, NULL); + switch (key->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, + vrid, 0, 0, 0, 0, 0, 0, NULL); + break; + case MLXSW_SP_L3_PROTO_IPV6: + mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index, + vrid, 0, 0, zero_addr, zero_addr, + zero_addr, zero_addr, NULL); + break; + } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); } @@ -353,27 +370,30 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp, return 0; } +static struct 
mlxsw_sp_mr_tcam_region * +mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam, + enum mlxsw_sp_l3proto proto) +{ + return &mr_tcam->tcam_regions[proto]; +} + static int mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam, struct mlxsw_sp_mr_tcam_route *route, enum mlxsw_sp_mr_route_prio prio) { - struct parman_prio *parman_prio = NULL; + struct mlxsw_sp_mr_tcam_region *tcam_region; int err; - switch (route->key.proto) { - case MLXSW_SP_L3_PROTO_IPV4: - parman_prio = &mr_tcam->ipv4_tcam_region.parman_prios[prio]; - err = parman_item_add(mr_tcam->ipv4_tcam_region.parman, - parman_prio, &route->parman_item); - if (err) - return err; - break; - case MLXSW_SP_L3_PROTO_IPV6: - default: - WARN_ON_ONCE(1); - } - route->parman_prio = parman_prio; + tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam, + route->key.proto); + err = parman_item_add(tcam_region->parman, + &tcam_region->parman_prios[prio], + &route->parman_item); + if (err) + return err; + + route->parman_prio = &tcam_region->parman_prios[prio]; return 0; } @@ -381,15 +401,13 @@ static void mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam, struct mlxsw_sp_mr_tcam_route *route) { - switch (route->key.proto) { - case MLXSW_SP_L3_PROTO_IPV4: - parman_item_remove(mr_tcam->ipv4_tcam_region.parman, - route->parman_prio, &route->parman_item); - break; - case MLXSW_SP_L3_PROTO_IPV6: - default: - WARN_ON_ONCE(1); - } + struct mlxsw_sp_mr_tcam_region *tcam_region; + + tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam, + route->key.proto); + + parman_item_remove(tcam_region->parman, + route->parman_prio, &route->parman_item); } static int @@ -462,7 +480,7 @@ static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mr_tcam *mr_tcam = priv; mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid, - &route->parman_item); + &route->key, &route->parman_item); mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route); mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index); @@ -806,21 +824,42 @@ mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) { struct mlxsw_sp_mr_tcam *mr_tcam = priv; + struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; + u32 rtar_key; + int err; if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) || !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES)) return -EIO; - return mlxsw_sp_mr_tcam_region_init(mlxsw_sp, - &mr_tcam->ipv4_tcam_region, - MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST); + rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST; + err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp, + ®ion[MLXSW_SP_L3_PROTO_IPV4], + rtar_key); + if (err) + return err; + + rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST; + err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp, + ®ion[MLXSW_SP_L3_PROTO_IPV6], + rtar_key); + if (err) + goto err_ipv6_region_init; + + return 0; + +err_ipv6_region_init: + mlxsw_sp_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV4]); + return err; } static void mlxsw_sp_mr_tcam_fini(void *priv) { struct mlxsw_sp_mr_tcam *mr_tcam = priv; + struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; - mlxsw_sp_mr_tcam_region_fini(&mr_tcam->ipv4_tcam_region); + mlxsw_sp_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV6]); + mlxsw_sp_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV4]); } const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = { diff --git 
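The dual-region init just shown follows the kernel's usual goto-unwind convention: a failing step jumps to a label named after it, the labels tear earlier steps down in reverse order, and fini mirrors init backwards. The skeleton, reduced to stand-in helpers:

    #include <stdio.h>

    static int region_init(const char *name) { printf("init %s\n", name); return 0; }
    static void region_fini(const char *name) { printf("fini %s\n", name); }

    static int tcam_init(void)
    {
            int err;

            err = region_init("ipv4");
            if (err)
                    return err;

            err = region_init("ipv6");
            if (err)
                    goto err_ipv6_region_init;  /* label named for the failed step */

            return 0;

    err_ipv6_region_init:
            region_fini("ipv4");                /* unwind in reverse order */
            return err;
    }

    int main(void) { return tcam_init(); }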
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index 0b7670459051..91262b0573e3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -42,6 +42,8 @@ #include "reg.h" #define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1) +#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \ + MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1)) enum mlxsw_sp_qdisc_type { MLXSW_SP_QDISC_NO_QDISC, @@ -76,6 +78,7 @@ struct mlxsw_sp_qdisc_ops { struct mlxsw_sp_qdisc { u32 handle; u8 tclass_num; + u8 prio_bitmap; union { struct red_stats red; } xstats_base; @@ -99,6 +102,44 @@ mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle, mlxsw_sp_qdisc->handle == handle; } +static struct mlxsw_sp_qdisc * +mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent, + bool root_only) +{ + int tclass, child_index; + + if (parent == TC_H_ROOT) + return mlxsw_sp_port->root_qdisc; + + if (root_only || !mlxsw_sp_port->root_qdisc || + !mlxsw_sp_port->root_qdisc->ops || + TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle || + TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS) + return NULL; + + child_index = TC_H_MIN(parent); + tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index); + return &mlxsw_sp_port->tclass_qdiscs[tclass]; +} + +static struct mlxsw_sp_qdisc * +mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle) +{ + int i; + + if (mlxsw_sp_port->root_qdisc->handle == handle) + return mlxsw_sp_port->root_qdisc; + + if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC) + return NULL; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle) + return &mlxsw_sp_port->tclass_qdiscs[i]; + + return NULL; +} + static int mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) @@ -185,6 +226,23 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port, return -EOPNOTSUPP; } +static void +mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats, + u8 prio_bitmap, u64 *tx_packets, + u64 *tx_bytes) +{ + int i; + + *tx_packets = 0; + *tx_bytes = 0; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (prio_bitmap & BIT(i)) { + *tx_packets += xstats->tx_packets[i]; + *tx_bytes += xstats->tx_bytes[i]; + } + } +} + static int mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port, int tclass_num, u32 min, u32 max, @@ -230,17 +288,16 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, u8 tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_qdisc_stats *stats_base; struct mlxsw_sp_port_xstats *xstats; - struct rtnl_link_stats64 *stats; struct red_stats *red_base; xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; - stats = &mlxsw_sp_port->periodic_hw_stats.stats; stats_base = &mlxsw_sp_qdisc->stats_base; red_base = &mlxsw_sp_qdisc->xstats_base.red; - stats_base->tx_packets = stats->tx_packets; - stats_base->tx_bytes = stats->tx_bytes; - + mlxsw_sp_qdisc_bstats_per_priority_get(xstats, + mlxsw_sp_qdisc->prio_bitmap, + &stats_base->tx_packets, + &stats_base->tx_bytes); red_base->prob_mark = xstats->ecn; red_base->prob_drop = xstats->wred_drop[tclass_num]; red_base->pdrop = xstats->tail_drop[tclass_num]; @@ -255,6 +312,12 @@ static int mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { + struct mlxsw_sp_qdisc *root_qdisc = 
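mlxsw_sp_qdisc_find() above decodes the TC parent handle: the major part must match the root qdisc's handle, and the minor part is the 1-based band index, which MLXSW_SP_PRIO_CHILD_TO_TCLASS folds through the band-to-tclass inversion (band 0 gets the highest traffic class). A worked example with local stand-in macros; TC_H_MIN matches the kernel's definition and IEEE_8021QAZ_MAX_TCS is 8:

    #include <stdio.h>

    #define IEEE_8021QAZ_MAX_TCS 8
    #define TC_H_MIN(h) ((h) & 0xFFFFU)
    #define PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)
    #define PRIO_CHILD_TO_TCLASS(child) PRIO_BAND_TO_TCLASS((child) - 1)

    int main(void)
    {
            unsigned int parent = 0x80010003;   /* "8001:3", third band's child slot */

            /* minor 3 -> band 2 -> tclass 8 - 2 - 1 = 5 */
            printf("tclass %u\n", PRIO_CHILD_TO_TCLASS(TC_H_MIN(parent)));
            return 0;
    }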
mlxsw_sp_port->root_qdisc; + + if (root_qdisc != mlxsw_sp_qdisc) + root_qdisc->stats_base.backlog -= + mlxsw_sp_qdisc->stats_base.backlog; + return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, mlxsw_sp_qdisc->tclass_num); } @@ -319,6 +382,7 @@ mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port, backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc->stats_base.backlog); p->qstats->backlog -= backlog; + mlxsw_sp_qdisc->stats_base.backlog = 0; } static int @@ -357,14 +421,16 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u8 tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_qdisc_stats *stats_base; struct mlxsw_sp_port_xstats *xstats; - struct rtnl_link_stats64 *stats; xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; - stats = &mlxsw_sp_port->periodic_hw_stats.stats; stats_base = &mlxsw_sp_qdisc->stats_base; - tx_bytes = stats->tx_bytes - stats_base->tx_bytes; - tx_packets = stats->tx_packets - stats_base->tx_packets; + mlxsw_sp_qdisc_bstats_per_priority_get(xstats, + mlxsw_sp_qdisc->prio_bitmap, + &tx_packets, &tx_bytes); + tx_bytes = tx_bytes - stats_base->tx_bytes; + tx_packets = tx_packets - stats_base->tx_packets; + overlimits = xstats->wred_drop[tclass_num] + xstats->ecn - stats_base->overlimits; drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] - @@ -406,11 +472,10 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; - if (p->parent != TC_H_ROOT) + mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false); + if (!mlxsw_sp_qdisc) return -EOPNOTSUPP; - mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc; - if (p->command == TC_RED_REPLACE) return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle, mlxsw_sp_qdisc, @@ -441,9 +506,13 @@ mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port, { int i; - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, MLXSW_SP_PORT_DEFAULT_TCLASS); + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, + &mlxsw_sp_port->tclass_qdiscs[i]); + mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0; + } return 0; } @@ -467,16 +536,41 @@ mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, void *params) { struct tc_prio_qopt_offload_params *p = params; - int tclass, i; + struct mlxsw_sp_qdisc *child_qdisc; + int tclass, i, band, backlog; + u8 old_priomap; int err; - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]); - err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass); - if (err) - return err; + for (band = 0; band < p->bands; band++) { + tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band); + child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass]; + old_priomap = child_qdisc->prio_bitmap; + child_qdisc->prio_bitmap = 0; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (p->priomap[i] == band) { + child_qdisc->prio_bitmap |= BIT(i); + if (BIT(i) & old_priomap) + continue; + err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, + i, tclass); + if (err) + return err; + } + } + if (old_priomap != child_qdisc->prio_bitmap && + child_qdisc->ops && child_qdisc->ops->clean_stats) { + backlog = child_qdisc->stats_base.backlog; + child_qdisc->ops->clean_stats(mlxsw_sp_port, + child_qdisc); + child_qdisc->stats_base.backlog = backlog; + } + } + for (; band < IEEE_8021QAZ_MAX_TCS; band++) { + tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band); + child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass]; + 
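In the replace path above, each band's child qdisc is given a prio_bitmap with one bit per priority that the PRIO priomap assigns to that band; mlxsw_sp_qdisc_bstats_per_priority_get() then sums the per-priority hardware counters over exactly those bits. How a priomap turns into per-band bitmaps, with illustrative values:

    #include <stdio.h>

    #define MAX_TCS 8

    int main(void)
    {
            /* e.g. "bands 3 priomap 0 0 1 1 2 2 2 2" (illustrative values) */
            unsigned char priomap[MAX_TCS] = { 0, 0, 1, 1, 2, 2, 2, 2 };
            int band, i;

            for (band = 0; band < 3; band++) {
                    unsigned char prio_bitmap = 0;

                    for (i = 0; i < MAX_TCS; i++)
                            if (priomap[i] == band)
                                    prio_bitmap |= 1u << i;
                    printf("band %d: prio_bitmap 0x%02x\n", band, prio_bitmap);
            }
            return 0;       /* prints 0x03, 0x0c, 0xf0 */
    }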
child_qdisc->prio_bitmap = 0; + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc); } - return 0; } @@ -513,6 +607,7 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { drops += xstats->tail_drop[i]; + drops += xstats->wred_drop[i]; backlog += xstats->backlog[i]; } drops = drops - stats_base->drops; @@ -548,8 +643,10 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, stats_base->tx_bytes = stats->tx_bytes; stats_base->drops = 0; - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { stats_base->drops += xstats->tail_drop[i]; + stats_base->drops += xstats->wred_drop[i]; + } mlxsw_sp_qdisc->stats_base.backlog = 0; } @@ -564,15 +661,48 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = { .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats, }; +/* Grafting is not supported in mlxsw. It will result in un-offloading of the + * grafted qdisc as well as of the qdisc at the graft location. + * (However, if the qdisc is grafted to the location where it already is, the + * graft will be ignored completely and won't cause un-offloading). + */ +static int +mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct tc_prio_qopt_offload_graft_params *p) +{ + int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band); + struct mlxsw_sp_qdisc *old_qdisc; + + /* Check if the grafted qdisc is already in its "new" location. If so, + * nothing needs to be done. + */ + if (p->band < IEEE_8021QAZ_MAX_TCS && + mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle) + return 0; + + /* See if the grafted qdisc is already offloaded on any tclass. If so, + * unoffload it. + */ + old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, + p->child_handle); + if (old_qdisc) + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc); + + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, + &mlxsw_sp_port->tclass_qdiscs[tclass_num]); + return -EOPNOTSUPP; +} + int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_prio_qopt_offload *p) { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; - if (p->parent != TC_H_ROOT) + mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true); + if (!mlxsw_sp_qdisc) return -EOPNOTSUPP; - mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc; if (p->command == TC_PRIO_REPLACE) return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle, mlxsw_sp_qdisc, @@ -589,6 +719,9 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, case TC_PRIO_STATS: return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc, &p->stats); + case TC_PRIO_GRAFT: + return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc, + &p->graft_params); default: return -EOPNOTSUPP; } @@ -596,17 +729,36 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port) { - mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc), - GFP_KERNEL); - if (!mlxsw_sp_port->root_qdisc) - return -ENOMEM; + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; + int i; + mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL); + if (!mlxsw_sp_qdisc) + goto err_root_qdisc_init; + + mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc; + mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff; mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS; + mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc) * IEEE_8021QAZ_MAX_TCS, + GFP_KERNEL); + if (!mlxsw_sp_qdisc) + goto 
err_tclass_qdiscs_init; + + mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i; + return 0; + +err_tclass_qdiscs_init: + kfree(mlxsw_sp_port->root_qdisc); +err_root_qdisc_init: + return -ENOMEM; } void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port) { + kfree(mlxsw_sp_port->tclass_qdiscs); kfree(mlxsw_sp_port->root_qdisc); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index f0b25baba09a..1904c0323d39 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1,10 +1,10 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c - * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved. * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com> * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com> - * Copyright (c) 2017 Petr Machata <petrm@mellanox.com> + * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -70,6 +70,7 @@ #include "spectrum_mr.h" #include "spectrum_mr_tcam.h" #include "spectrum_router.h" +#include "spectrum_span.h" struct mlxsw_sp_fib; struct mlxsw_sp_vr; @@ -466,7 +467,7 @@ struct mlxsw_sp_vr { unsigned int rif_count; struct mlxsw_sp_fib *fib4; struct mlxsw_sp_fib *fib6; - struct mlxsw_sp_mr_table *mr4_table; + struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX]; }; static const struct rhashtable_params mlxsw_sp_fib_ht_params; @@ -710,7 +711,9 @@ static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp) static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr) { - return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table; + return !!vr->fib4 || !!vr->fib6 || + !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] || + !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]; } static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) @@ -788,45 +791,61 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, u32 tb_id, struct netlink_ext_ack *extack) { + struct mlxsw_sp_mr_table *mr4_table, *mr6_table; + struct mlxsw_sp_fib *fib4; + struct mlxsw_sp_fib *fib6; struct mlxsw_sp_vr *vr; int err; vr = mlxsw_sp_vr_find_unused(mlxsw_sp); if (!vr) { - NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers"); + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers"); return ERR_PTR(-EBUSY); } - vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); - if (IS_ERR(vr->fib4)) - return ERR_CAST(vr->fib4); - vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); - if (IS_ERR(vr->fib6)) { - err = PTR_ERR(vr->fib6); + fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); + if (IS_ERR(fib4)) + return ERR_CAST(fib4); + fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(fib6)) { + err = PTR_ERR(fib6); goto err_fib6_create; } - vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, - MLXSW_SP_L3_PROTO_IPV4); - if (IS_ERR(vr->mr4_table)) { - err = PTR_ERR(vr->mr4_table); - goto err_mr_table_create; + mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, + MLXSW_SP_L3_PROTO_IPV4); + if (IS_ERR(mr4_table)) { + err = PTR_ERR(mr4_table); 
+ goto err_mr4_table_create; } + mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, + MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(mr6_table)) { + err = PTR_ERR(mr6_table); + goto err_mr6_table_create; + } + + vr->fib4 = fib4; + vr->fib6 = fib6; + vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table; + vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table; vr->tb_id = tb_id; return vr; -err_mr_table_create: - mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6); - vr->fib6 = NULL; +err_mr6_table_create: + mlxsw_sp_mr_table_destroy(mr4_table); +err_mr4_table_create: + mlxsw_sp_fib_destroy(mlxsw_sp, fib6); err_fib6_create: - mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4); - vr->fib4 = NULL; + mlxsw_sp_fib_destroy(mlxsw_sp, fib4); return ERR_PTR(err); } static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr) { - mlxsw_sp_mr_table_destroy(vr->mr4_table); - vr->mr4_table = NULL; + mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]); + vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL; + mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]); + vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL; mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6); vr->fib6 = NULL; mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4); @@ -849,7 +868,8 @@ static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr) { if (!vr->rif_count && list_empty(&vr->fib4->node_list) && list_empty(&vr->fib6->node_list) && - mlxsw_sp_mr_table_empty(vr->mr4_table)) + mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) && + mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6])) mlxsw_sp_vr_destroy(mlxsw_sp, vr); } @@ -1020,9 +1040,11 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_ipip_type ipipt, struct net_device *ol_dev) { + const struct mlxsw_sp_ipip_ops *ipip_ops; struct mlxsw_sp_ipip_entry *ipip_entry; struct mlxsw_sp_ipip_entry *ret = NULL; + ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt]; ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL); if (!ipip_entry) return ERR_PTR(-ENOMEM); @@ -1036,7 +1058,15 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp, ipip_entry->ipipt = ipipt; ipip_entry->ol_dev = ol_dev; - ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev); + + switch (ipip_ops->ul_proto) { + case MLXSW_SP_L3_PROTO_IPV4: + ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); + break; + case MLXSW_SP_L3_PROTO_IPV6: + WARN_ON(1); + break; + } return ipip_entry; @@ -1376,6 +1406,55 @@ mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp, decap_fib_entry); } +static int +mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, + struct mlxsw_sp_vr *ul_vr, bool enable) +{ + struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config; + struct mlxsw_sp_rif *rif = &lb_rif->common; + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; + char ritr_pl[MLXSW_REG_RITR_LEN]; + u32 saddr4; + + switch (lb_cf.ul_protocol) { + case MLXSW_SP_L3_PROTO_IPV4: + saddr4 = be32_to_cpu(lb_cf.saddr.addr4); + mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF, + rif->rif_index, rif->vr_id, rif->dev->mtu); + mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt, + MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET, + ul_vr->id, saddr4, lb_cf.okey); + break; + + case MLXSW_SP_L3_PROTO_IPV6: + return -EAFNOSUPPORT; + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev) +{ + struct mlxsw_sp_ipip_entry *ipip_entry; + struct mlxsw_sp_rif_ipip_lb 
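Note the shape of the reworked mlxsw_sp_vr_create() above: all four sub-objects are created into locals, and the vr fields are assigned only once every step has succeeded, so no caller can ever observe a half-constructed virtual router. The same commit-at-the-end pattern in miniature, using plain malloc as a stand-in:

    #include <stdlib.h>

    struct vr { void *fib4, *fib6; };

    static int vr_create(struct vr *vr)
    {
            void *fib4, *fib6;

            fib4 = malloc(16);
            if (!fib4)
                    return -1;
            fib6 = malloc(16);
            if (!fib6)
                    goto err_fib6_create;

            vr->fib4 = fib4;        /* commit only after everything succeeded */
            vr->fib6 = fib6;
            return 0;

    err_fib6_create:
            free(fib4);
            return -1;
    }

    int main(void)
    {
            struct vr vr = { 0, 0 };
            int err = vr_create(&vr);

            free(vr.fib4);
            free(vr.fib6);
            return err ? 1 : 0;
    }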
*lb_rif; + struct mlxsw_sp_vr *ul_vr; + int err = 0; + + ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); + if (ipip_entry) { + lb_rif = ipip_entry->ol_lb; + ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id]; + err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true); + if (err) + goto out; + lb_rif->common.mtu = ol_dev->mtu; + } + +out: + return err; +} + static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp, struct net_device *ol_dev) { @@ -1656,6 +1735,8 @@ int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, extack = info->extack; return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp, ol_dev, extack); + case NETDEV_CHANGEMTU: + return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev); } return 0; } @@ -2316,6 +2397,8 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work) read_unlock_bh(&n->lock); rtnl_lock(); + mlxsw_sp_span_respin(mlxsw_sp); + entry_connected = nud_state & NUD_VALID && !dead; neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); if (!entry_connected && !neigh_entry) @@ -2413,7 +2496,8 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, mlxsw_core_schedule_work(&net_work->work); mlxsw_sp_port_dev_put(mlxsw_sp_port); break; - case NETEVENT_MULTIPATH_HASH_UPDATE: + case NETEVENT_IPV4_MPATH_HASH_UPDATE: + case NETEVENT_IPV6_MPATH_HASH_UPDATE: net = ptr; if (!net_eq(net, &init_net)) @@ -3790,6 +3874,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; int i; + if (!list_is_singular(&nh_grp->fib_list)) + return; + for (i = 0; i < nh_grp->count; i++) { struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; @@ -5357,10 +5444,20 @@ static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp, return 0; } +static struct mlxsw_sp_mr_table * +mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family) +{ + if (family == RTNL_FAMILY_IPMR) + return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]; + else + return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]; +} + static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp, struct mfc_entry_notifier_info *men_info, bool replace) { + struct mlxsw_sp_mr_table *mrt; struct mlxsw_sp_vr *vr; if (mlxsw_sp->router->aborted) @@ -5370,12 +5467,14 @@ static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp, if (IS_ERR(vr)) return PTR_ERR(vr); - return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace); + mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family); + return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace); } static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp, struct mfc_entry_notifier_info *men_info) { + struct mlxsw_sp_mr_table *mrt; struct mlxsw_sp_vr *vr; if (mlxsw_sp->router->aborted) @@ -5385,7 +5484,8 @@ static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp, if (WARN_ON(!vr)) return; - mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc); + mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family); + mlxsw_sp_mr_route_del(mrt, men_info->mfc); mlxsw_sp_vr_put(mlxsw_sp, vr); } @@ -5393,6 +5493,7 @@ static int mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp, struct vif_entry_notifier_info *ven_info) { + struct mlxsw_sp_mr_table *mrt; struct mlxsw_sp_rif *rif; struct mlxsw_sp_vr *vr; @@ -5403,8 +5504,9 @@ mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp, if (IS_ERR(vr)) return PTR_ERR(vr); + mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family); rif = 
mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev); - return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev, + return mlxsw_sp_mr_vif_add(mrt, ven_info->dev, ven_info->vif_index, ven_info->vif_flags, rif); } @@ -5413,6 +5515,7 @@ static void mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp, struct vif_entry_notifier_info *ven_info) { + struct mlxsw_sp_mr_table *mrt; struct mlxsw_sp_vr *vr; if (mlxsw_sp->router->aborted) @@ -5422,7 +5525,8 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp, if (WARN_ON(!vr)) return; - mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index); + mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family); + mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index); mlxsw_sp_vr_put(mlxsw_sp, vr); } @@ -5514,7 +5618,7 @@ static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) { - int i; + int i, j; for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i]; @@ -5522,7 +5626,8 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) if (!mlxsw_sp_vr_is_used(vr)) continue; - mlxsw_sp_mr_table_flush(vr->mr4_table); + for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++) + mlxsw_sp_mr_table_flush(vr->mr_table[j]); mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); /* If virtual router was only used for IPv4, then it's no @@ -5572,6 +5677,8 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work) /* Protect internal structures from changes */ rtnl_lock(); + mlxsw_sp_span_respin(mlxsw_sp); + switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_APPEND: /* fall through */ @@ -5614,6 +5721,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) int err; rtnl_lock(); + mlxsw_sp_span_respin(mlxsw_sp); + switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_ADD: @@ -5657,11 +5766,11 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work) replace); if (err) mlxsw_sp_router_fib_abort(mlxsw_sp); - ipmr_cache_put(fib_work->men_info.mfc); + mr_cache_put(fib_work->men_info.mfc); break; case FIB_EVENT_ENTRY_DEL: mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info); - ipmr_cache_put(fib_work->men_info.mfc); + mr_cache_put(fib_work->men_info.mfc); break; case FIB_EVENT_VIF_ADD: err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp, @@ -5741,7 +5850,7 @@ mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work, case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info)); - ipmr_cache_hold(fib_work->men_info.mfc); + mr_cache_hold(fib_work->men_info.mfc); break; case FIB_EVENT_VIF_ADD: /* fall through */ case FIB_EVENT_VIF_DEL: @@ -5783,10 +5892,14 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event, if (!ipmr_rule_default(rule) && !rule->l3mdev) err = -1; break; + case RTNL_FAMILY_IP6MR: + if (!ip6mr_rule_default(rule) && !rule->l3mdev) + err = -1; + break; } if (err < 0) - NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload"); + NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported. 
Aborting offload"); return err; } @@ -5802,7 +5915,8 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, if (!net_eq(info->net, &init_net) || (info->family != AF_INET && info->family != AF_INET6 && - info->family != RTNL_FAMILY_IPMR)) + info->family != RTNL_FAMILY_IPMR && + info->family != RTNL_FAMILY_IP6MR)) return NOTIFY_DONE; router = container_of(nb, struct mlxsw_sp_router, fib_nb); @@ -5832,6 +5946,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work); mlxsw_sp_router_fib6_event(fib_work, info); break; + case RTNL_FAMILY_IP6MR: case RTNL_FAMILY_IPMR: INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work); mlxsw_sp_router_fibmr_event(fib_work, info); @@ -6013,7 +6128,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif; struct mlxsw_sp_vr *vr; u16 rif_index; - int err; + int i, err; type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev); ops = mlxsw_sp->router->rif_ops_arr[type]; @@ -6025,7 +6140,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index); if (err) { - NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces"); + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces"); goto err_rif_index_alloc; } @@ -6053,9 +6168,11 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_configure; - err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif); - if (err) - goto err_mr_rif_add; + for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) { + err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif); + if (err) + goto err_mr_rif_add; + } mlxsw_sp_rif_counters_alloc(rif); mlxsw_sp->router->rifs[rif_index] = rif; @@ -6063,6 +6180,8 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, return rif; err_mr_rif_add: + for (i--; i >= 0; i--) + mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); ops->deconfigure(rif); err_configure: if (fid) @@ -6082,13 +6201,15 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; struct mlxsw_sp_fid *fid = rif->fid; struct mlxsw_sp_vr *vr; + int i; mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); vr = &mlxsw_sp->router->vrs[rif->vr_id]; mlxsw_sp->router->rifs[rif->rif_index] = NULL; mlxsw_sp_rif_counters_free(rif); - mlxsw_sp_mr_rif_del(vr->mr4_table, rif); + for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) + mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); ops->deconfigure(rif); if (fid) /* Loopback RIFs are not associated with a FID. */ @@ -6495,13 +6616,16 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev) if (rif->mtu != dev->mtu) { struct mlxsw_sp_vr *vr; + int i; /* The RIF is relevant only to its mr_table instance, as unlike * unicast routing, in multicast routing a RIF cannot be shared * between several multicast routing tables. 
*/ vr = &mlxsw_sp->router->vrs[rif->vr_id]; - mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu); + for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) + mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i], + rif, dev->mtu); } ether_addr_copy(rif->addr, dev->dev_addr); @@ -6837,33 +6961,6 @@ mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif, } static int -mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, - struct mlxsw_sp_vr *ul_vr, bool enable) -{ - struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config; - struct mlxsw_sp_rif *rif = &lb_rif->common; - struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; - char ritr_pl[MLXSW_REG_RITR_LEN]; - u32 saddr4; - - switch (lb_cf.ul_protocol) { - case MLXSW_SP_L3_PROTO_IPV4: - saddr4 = be32_to_cpu(lb_cf.saddr.addr4); - mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF, - rif->rif_index, rif->vr_id, rif->dev->mtu); - mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt, - MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET, - ul_vr->id, saddr4, lb_cf.okey); - break; - - case MLXSW_SP_L3_PROTO_IPV6: - return -EAFNOSUPPORT; - } - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); -} - -static int mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) { struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); @@ -7006,13 +7103,25 @@ static void mlxsw_sp_mp4_hash_init(char *recr2_pl) static void mlxsw_sp_mp6_hash_init(char *recr2_pl) { + bool only_l3 = !ip6_multipath_hash_policy(&init_net); + mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP); mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP); mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl); mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl); - mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL); mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER); + if (only_l3) { + mlxsw_sp_mp_hash_field_set(recr2_pl, + MLXSW_REG_RECR2_IPV6_FLOW_LABEL); + } else { + mlxsw_sp_mp_hash_header_set(recr2_pl, + MLXSW_REG_RECR2_TCP_UDP_EN_IPV6); + mlxsw_sp_mp_hash_field_set(recr2_pl, + MLXSW_REG_RECR2_TCP_UDP_SPORT); + mlxsw_sp_mp_hash_field_set(recr2_pl, + MLXSW_REG_RECR2_TCP_UDP_DPORT); + } } static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 1fb82246ce96..a01edcf56797 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -41,6 +41,7 @@ enum mlxsw_sp_l3proto { MLXSW_SP_L3_PROTO_IPV4, MLXSW_SP_L3_PROTO_IPV6, +#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1) }; union mlxsw_sp_l3addr { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c new file mode 100644 index 000000000000..65a77708ff61 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -0,0 +1,824 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Petr Machata <petrm@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/list.h> +#include <net/arp.h> +#include <net/gre.h> +#include <net/ndisc.h> +#include <net/ip6_tunnel.h> + +#include "spectrum.h" +#include "spectrum_span.h" +#include "spectrum_ipip.h" + +int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN)) + return -EIO; + + mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, + MAX_SPAN); + mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count, + sizeof(struct mlxsw_sp_span_entry), + GFP_KERNEL); + if (!mlxsw_sp->span.entries) + return -ENOMEM; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + INIT_LIST_HEAD(&curr->bound_ports_list); + curr->id = i; + } + + return 0; +} + +void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + WARN_ON_ONCE(!list_empty(&curr->bound_ports_list)); + } + kfree(mlxsw_sp->span.entries); +} + +static int +mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp) +{ + sparmsp->dest_port = netdev_priv(to_dev); + return 0; +} + +static int +mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + struct mlxsw_sp_port *dest_port = sparms.dest_port; + struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; + u8 local_port = dest_port->local_port; + char mpat_pl[MLXSW_REG_MPAT_LEN]; + int pa_id = span_entry->id; + + /* Create a new port analyzer entry for local_port. 
*/ + mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, + MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); +} + +static void +mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_reg_mpat_span_type span_type) +{ + struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port; + struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; + u8 local_port = dest_port->local_port; + char mpat_pl[MLXSW_REG_MPAT_LEN]; + int pa_id = span_entry->id; + + mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); +} + +static void +mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ + mlxsw_sp_span_entry_deconfigure_common(span_entry, + MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH); +} + +static const +struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = { + .can_handle = mlxsw_sp_port_dev_check, + .parms = mlxsw_sp_span_entry_phys_parms, + .configure = mlxsw_sp_span_entry_phys_configure, + .deconfigure = mlxsw_sp_span_entry_phys_deconfigure, +}; + +static int mlxsw_sp_span_dmac(struct neigh_table *tbl, + const void *pkey, + struct net_device *l3edev, + unsigned char dmac[ETH_ALEN]) +{ + struct neighbour *neigh = neigh_lookup(tbl, pkey, l3edev); + int err = 0; + + if (!neigh) { + neigh = neigh_create(tbl, pkey, l3edev); + if (IS_ERR(neigh)) + return PTR_ERR(neigh); + } + + neigh_event_send(neigh, NULL); + + read_lock_bh(&neigh->lock); + if ((neigh->nud_state & NUD_VALID) && !neigh->dead) + memcpy(dmac, neigh->ha, ETH_ALEN); + else + err = -ENOENT; + read_unlock_bh(&neigh->lock); + + neigh_release(neigh); + return err; +} + +static int +mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp) +{ + sparmsp->dest_port = NULL; + return 0; +} + +static __maybe_unused int +mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev, + union mlxsw_sp_l3addr saddr, + union mlxsw_sp_l3addr daddr, + union mlxsw_sp_l3addr gw, + __u8 ttl, + struct neigh_table *tbl, + struct mlxsw_sp_span_parms *sparmsp) +{ + unsigned char dmac[ETH_ALEN]; + + if (mlxsw_sp_l3addr_is_zero(gw)) + gw = daddr; + + if (!l3edev || !mlxsw_sp_port_dev_check(l3edev) || + mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac)) + return mlxsw_sp_span_entry_unoffloadable(sparmsp); + + sparmsp->dest_port = netdev_priv(l3edev); + sparmsp->ttl = ttl; + memcpy(sparmsp->dmac, dmac, ETH_ALEN); + memcpy(sparmsp->smac, l3edev->dev_addr, ETH_ALEN); + sparmsp->saddr = saddr; + sparmsp->daddr = daddr; + return 0; +} + +#if IS_ENABLED(CONFIG_NET_IPGRE) +static struct net_device * +mlxsw_sp_span_gretap4_route(const struct net_device *to_dev, + __be32 *saddrp, __be32 *daddrp) +{ + struct ip_tunnel *tun = netdev_priv(to_dev); + struct net_device *dev = NULL; + struct ip_tunnel_parm parms; + struct rtable *rt = NULL; + struct flowi4 fl4; + + /* We assume "dev" stays valid after rt is put. 
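mlxsw_sp_span_entry_tunnel_parms_common() above encodes two conventions: an un-mirrorable target is not an error (the parms callback returns 0 with dest_port left NULL, and the entry is simply not offloaded), and the neighbour resolved for the encapsulated frames is the route's gateway, unless the route is directly connected, in which case the tunnel's remote address itself is resolved. The gateway fallback in miniature, with stand-in types:

    #include <stdio.h>

    union l3addr { unsigned int addr4; };

    static int l3addr_is_zero(union l3addr a) { return a.addr4 == 0; }

    int main(void)
    {
            union l3addr daddr = { .addr4 = 0x0a000001 };   /* tunnel remote */
            union l3addr gw = { .addr4 = 0 };               /* no next hop found */

            if (l3addr_is_zero(gw))
                    gw = daddr;     /* directly connected: resolve the remote itself */
            printf("resolve neighbour for %#x\n", gw.addr4);
            return 0;
    }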
*/ + ASSERT_RTNL(); + + parms = mlxsw_sp_ipip_netdev_parms4(to_dev); + ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp, + 0, 0, parms.link, tun->fwmark); + + rt = ip_route_output_key(tun->net, &fl4); + if (IS_ERR(rt)) + return NULL; + + if (rt->rt_type != RTN_UNICAST) + goto out; + + dev = rt->dst.dev; + *saddrp = fl4.saddr; + *daddrp = rt->rt_gateway; + +out: + ip_rt_put(rt); + return dev; +} + +static int +mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp) +{ + struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev); + union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr }; + union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr }; + bool inherit_tos = tparm.iph.tos & 0x1; + bool inherit_ttl = !tparm.iph.ttl; + union mlxsw_sp_l3addr gw = daddr; + struct net_device *l3edev; + + if (!(to_dev->flags & IFF_UP) || + /* Reject tunnels with GRE keys, checksums, etc. */ + tparm.i_flags || tparm.o_flags || + /* Require a fixed TTL and a TOS copied from the mirrored packet. */ + inherit_ttl || !inherit_tos || + /* A destination address may not be "any". */ + mlxsw_sp_l3addr_is_zero(daddr)) + return mlxsw_sp_span_entry_unoffloadable(sparmsp); + + l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4); + return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw, + tparm.iph.ttl, + &arp_tbl, sparmsp); +} + +static int +mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + struct mlxsw_sp_port *dest_port = sparms.dest_port; + struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; + u8 local_port = dest_port->local_port; + char mpat_pl[MLXSW_REG_MPAT_LEN]; + int pa_id = span_entry->id; + + /* Create a new port analayzer entry for local_port. */ + mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); + mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, + MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, + sparms.dmac, false); + mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl, + sparms.ttl, sparms.smac, + be32_to_cpu(sparms.saddr.addr4), + be32_to_cpu(sparms.daddr.addr4)); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); +} + +static void +mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ + mlxsw_sp_span_entry_deconfigure_common(span_entry, + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); +} + +static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = { + .can_handle = is_gretap_dev, + .parms = mlxsw_sp_span_entry_gretap4_parms, + .configure = mlxsw_sp_span_entry_gretap4_configure, + .deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure, +}; +#endif + +#if IS_ENABLED(CONFIG_IPV6_GRE) +static struct net_device * +mlxsw_sp_span_gretap6_route(const struct net_device *to_dev, + struct in6_addr *saddrp, + struct in6_addr *daddrp) +{ + struct ip6_tnl *t = netdev_priv(to_dev); + struct flowi6 fl6 = t->fl.u.ip6; + struct net_device *dev = NULL; + struct dst_entry *dst; + struct rt6_info *rt6; + + /* We assume "dev" stays valid after dst is released. 
*/
+	ASSERT_RTNL();
+
+	fl6.flowi6_mark = t->parms.fwmark;
+	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
+		return NULL;
+
+	dst = ip6_route_output(t->net, NULL, &fl6);
+	if (!dst || dst->error)
+		goto out;
+
+	rt6 = container_of(dst, struct rt6_info, dst);
+
+	dev = dst->dev;
+	*saddrp = fl6.saddr;
+	*daddrp = rt6->rt6i_gateway;
+
+out:
+	dst_release(dst);
+	return dev;
+}
+
+static int
+mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
+				  struct mlxsw_sp_span_parms *sparmsp)
+{
+	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
+	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
+	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
+	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
+	bool inherit_ttl = !tparm.hop_limit;
+	union mlxsw_sp_l3addr gw = daddr;
+	struct net_device *l3edev;
+
+	if (!(to_dev->flags & IFF_UP) ||
+	    /* Reject tunnels with GRE keys, checksums, etc. */
+	    tparm.i_flags || tparm.o_flags ||
+	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
+	    inherit_ttl || !inherit_tos ||
+	    /* A destination address may not be "any". */
+	    mlxsw_sp_l3addr_is_zero(daddr))
+		return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
+	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
+						       tparm.hop_limit,
+						       &nd_tbl, sparmsp);
+}
+
+static int
+mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
+				      struct mlxsw_sp_span_parms sparms)
+{
+	struct mlxsw_sp_port *dest_port = sparms.dest_port;
+	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+	u8 local_port = dest_port->local_port;
+	char mpat_pl[MLXSW_REG_MPAT_LEN];
+	int pa_id = span_entry->id;
+
+	/* Create a new port analyzer entry for local_port.
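+	 * The entry encapsulates mirrored packets in an L2 header built
+	 * from the resolved dmac/smac, followed by IPv6/GRE headers taken
+	 * from the tunnel parameters collected in
+	 * mlxsw_sp_span_entry_gretap6_parms() above.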
*/ + mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); + mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, + MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, + sparms.dmac, false); + mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac, + sparms.saddr.addr6, + sparms.daddr.addr6); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); +} + +static void +mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ + mlxsw_sp_span_entry_deconfigure_common(span_entry, + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); +} + +static const +struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = { + .can_handle = is_ip6gretap_dev, + .parms = mlxsw_sp_span_entry_gretap6_parms, + .configure = mlxsw_sp_span_entry_gretap6_configure, + .deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure, +}; +#endif + +static const +struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = { + &mlxsw_sp_span_entry_ops_phys, +#if IS_ENABLED(CONFIG_NET_IPGRE) + &mlxsw_sp_span_entry_ops_gretap4, +#endif +#if IS_ENABLED(CONFIG_IPV6_GRE) + &mlxsw_sp_span_entry_ops_gretap6, +#endif +}; + +static int +mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp) +{ + return mlxsw_sp_span_entry_unoffloadable(sparmsp); +} + +static int +mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + return 0; +} + +static void +mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ +} + +static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = { + .parms = mlxsw_sp_span_entry_nop_parms, + .configure = mlxsw_sp_span_entry_nop_configure, + .deconfigure = mlxsw_sp_span_entry_nop_deconfigure, +}; + +static void +mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + if (sparms.dest_port) { + if (sparms.dest_port->mlxsw_sp != mlxsw_sp) { + netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance", + sparms.dest_port->dev->name); + sparms.dest_port = NULL; + } else if (span_entry->ops->configure(span_entry, sparms)) { + netdev_err(span_entry->to_dev, "Failed to offload mirror to %s", + sparms.dest_port->dev->name); + sparms.dest_port = NULL; + } + } + + span_entry->parms = sparms; +} + +static void +mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ + if (span_entry->parms.dest_port) + span_entry->ops->deconfigure(span_entry); +} + +static struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, + const struct mlxsw_sp_span_entry_ops *ops, + struct mlxsw_sp_span_parms sparms) +{ + struct mlxsw_sp_span_entry *span_entry = NULL; + int i; + + /* find a free entry to use */ + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + if (!mlxsw_sp->span.entries[i].ref_count) { + span_entry = &mlxsw_sp->span.entries[i]; + break; + } + } + if (!span_entry) + return NULL; + + span_entry->ops = ops; + span_entry->ref_count = 1; + span_entry->to_dev = to_dev; + mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms); + + return span_entry; +} + +static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry) +{ + mlxsw_sp_span_entry_deconfigure(span_entry); +} + +struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev) 
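+/* The SPAN entry table is a small fixed-size array, so this (and the
+ * find_by_id variant below) is a linear scan over span.entries_count
+ * slots, where only slots with a non-zero ref_count are live. A minimal
+ * usage sketch, mirroring what mlxsw_sp_span_entry_get() does further
+ * down:
+ *
+ *	entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
+ *	if (entry)
+ *		entry->ref_count++;	(reuse the existing mirror)
+ *	else
+ *		entry = mlxsw_sp_span_entry_create(mlxsw_sp, to_dev,
+ *						   ops, sparms);
+ */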
+{ + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + if (curr->ref_count && curr->to_dev == to_dev) + return curr; + } + return NULL; +} + +void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_span_entry *span_entry) +{ + mlxsw_sp_span_entry_deconfigure(span_entry); + span_entry->ops = &mlxsw_sp_span_entry_ops_nop; +} + +static struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id) +{ + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + if (curr->ref_count && curr->id == span_id) + return curr; + } + return NULL; +} + +static struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, + const struct mlxsw_sp_span_entry_ops *ops, + struct mlxsw_sp_span_parms sparms) +{ + struct mlxsw_sp_span_entry *span_entry; + + span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev); + if (span_entry) { + /* Already exists, just take a reference */ + span_entry->ref_count++; + return span_entry; + } + + return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms); +} + +static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_span_entry *span_entry) +{ + WARN_ON(!span_entry->ref_count); + if (--span_entry->ref_count == 0) + mlxsw_sp_span_entry_destroy(span_entry); + return 0; +} + +static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + struct mlxsw_sp_span_inspected_port *p; + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + list_for_each_entry(p, &curr->bound_ports_list, list) + if (p->local_port == port->local_port && + p->type == MLXSW_SP_SPAN_EGRESS) + return true; + } + + return false; +} + +static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp, + int mtu) +{ + return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1; +} + +int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + int err; + + /* If port is egress mirrored, the shared buffer size should be + * updated according to the mtu value + */ + if (mlxsw_sp_span_is_egress_mirror(port)) { + u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu); + + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + if (err) { + netdev_err(port->dev, "Could not update shared buffer for mirroring\n"); + return err; + } + } + + return 0; +} + +static struct mlxsw_sp_span_inspected_port * +mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type, + struct mlxsw_sp_port *port, + bool bind) +{ + struct mlxsw_sp_span_inspected_port *p; + + list_for_each_entry(p, &span_entry->bound_ports_list, list) + if (type == p->type && + port->local_port == p->local_port && + bind == p->bound) + return p; + return NULL; +} + +static int +mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type, + bool bind) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char mpar_pl[MLXSW_REG_MPAR_LEN]; + int pa_id = span_entry->id; + + /* bind the port to the SPAN entry */ + 
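/* MPAR associates the inspected port and mirroring direction (the
+	 * i_e field, cast from the SPAN type) with the analyzer entry
+	 * selected by pa_id; packed with bind == false, the same register
+	 * dissolves the binding, which is why one helper serves both bind
+	 * and unbind.
+	 */
+	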
mlxsw_reg_mpar_pack(mpar_pl, port->local_port, + (enum mlxsw_reg_mpar_i_e)type, bind, pa_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); +} + +static int +mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type, + bool bind) +{ + struct mlxsw_sp_span_inspected_port *inspected_port; + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + int i; + int err; + + /* A given (source port, direction) can only be bound to one analyzer, + * so if a binding is requested, check for conflicts. + */ + if (bind) + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = + &mlxsw_sp->span.entries[i]; + + if (mlxsw_sp_span_entry_bound_port_find(curr, type, + port, bind)) + return -EEXIST; + } + + /* if it is an egress SPAN, bind a shared buffer to it */ + if (type == MLXSW_SP_SPAN_EGRESS) { + u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, + port->dev->mtu); + + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + if (err) { + netdev_err(port->dev, "Could not create shared buffer for mirroring\n"); + return err; + } + } + + if (bind) { + err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type, + true); + if (err) + goto err_port_bind; + } + + inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL); + if (!inspected_port) { + err = -ENOMEM; + goto err_inspected_port_alloc; + } + inspected_port->local_port = port->local_port; + inspected_port->type = type; + inspected_port->bound = bind; + list_add_tail(&inspected_port->list, &span_entry->bound_ports_list); + + return 0; + +err_inspected_port_alloc: + if (bind) + mlxsw_sp_span_inspected_port_bind(port, span_entry, type, + false); +err_port_bind: + if (type == MLXSW_SP_SPAN_EGRESS) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + } + return err; +} + +static void +mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type, + bool bind) +{ + struct mlxsw_sp_span_inspected_port *inspected_port; + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + + inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type, + port, bind); + if (!inspected_port) + return; + + if (bind) + mlxsw_sp_span_inspected_port_bind(port, span_entry, type, + false); + /* remove the SBIB buffer if it was egress SPAN */ + if (type == MLXSW_SP_SPAN_EGRESS) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + } + + mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); + + list_del(&inspected_port->list); + kfree(inspected_port); +} + +static const struct mlxsw_sp_span_entry_ops * +mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i) + if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev)) + return mlxsw_sp_span_entry_types[i]; + + return NULL; +} + +int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, + const struct net_device *to_dev, + enum mlxsw_sp_span_type type, bool bind, + int *p_span_id) +{ + struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp; + const struct mlxsw_sp_span_entry_ops *ops; + struct mlxsw_sp_span_parms sparms = {NULL}; + struct mlxsw_sp_span_entry *span_entry; + int err; + + 
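/* The add sequence: pick the first ops whose ->can_handle() accepts
+	 * to_dev; let ->parms() compute the destination parameters (an
+	 * unoffloadable target yields a NULL dest_port rather than an
+	 * error); take or create a refcounted SPAN entry; finally bind the
+	 * mirroring port to it. Failures past the get are unwound through
+	 * mlxsw_sp_span_entry_put().
+	 */
+	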
ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev); + if (!ops) { + netdev_err(to_dev, "Cannot mirror to %s", to_dev->name); + return -EOPNOTSUPP; + } + + err = ops->parms(to_dev, &sparms); + if (err) + return err; + + span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms); + if (!span_entry) + return -ENOENT; + + netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n", + span_entry->id); + + err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind); + if (err) + goto err_port_bind; + + *p_span_id = span_entry->id; + return 0; + +err_port_bind: + mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); + return err; +} + +void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id, + enum mlxsw_sp_span_type type, bool bind) +{ + struct mlxsw_sp_span_entry *span_entry; + + span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id); + if (!span_entry) { + netdev_err(from->dev, "no span entry found\n"); + return; + } + + netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n", + span_entry->id); + mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind); +} + +void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp) +{ + int i; + int err; + + ASSERT_RTNL(); + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + struct mlxsw_sp_span_parms sparms = {NULL}; + + if (!curr->ref_count) + continue; + + err = curr->ops->parms(curr->to_dev, &sparms); + if (err) + continue; + + if (memcmp(&sparms, &curr->parms, sizeof(sparms))) { + mlxsw_sp_span_entry_deconfigure(curr); + mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms); + } + } +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h new file mode 100644 index 000000000000..4b87ec20e658 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -0,0 +1,107 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/mlxsw_span.h + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_SPECTRUM_SPAN_H +#define _MLXSW_SPECTRUM_SPAN_H + +#include <linux/types.h> +#include <linux/if_ether.h> + +#include "spectrum_router.h" + +struct mlxsw_sp; +struct mlxsw_sp_port; + +enum mlxsw_sp_span_type { + MLXSW_SP_SPAN_EGRESS, + MLXSW_SP_SPAN_INGRESS +}; + +struct mlxsw_sp_span_inspected_port { + struct list_head list; + enum mlxsw_sp_span_type type; + u8 local_port; + + /* Whether this is a directly bound mirror (port-to-port) or an ACL. */ + bool bound; +}; + +struct mlxsw_sp_span_parms { + struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. */ + unsigned int ttl; + unsigned char dmac[ETH_ALEN]; + unsigned char smac[ETH_ALEN]; + union mlxsw_sp_l3addr daddr; + union mlxsw_sp_l3addr saddr; +}; + +struct mlxsw_sp_span_entry_ops; + +struct mlxsw_sp_span_entry { + const struct net_device *to_dev; + const struct mlxsw_sp_span_entry_ops *ops; + struct mlxsw_sp_span_parms parms; + struct list_head bound_ports_list; + int ref_count; + int id; +}; + +struct mlxsw_sp_span_entry_ops { + bool (*can_handle)(const struct net_device *to_dev); + int (*parms)(const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp); + int (*configure)(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms); + void (*deconfigure)(struct mlxsw_sp_span_entry *span_entry); +}; + +int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp); + +int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, + const struct net_device *to_dev, + enum mlxsw_sp_span_type type, + bool bind, int *p_span_id); +void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id, + enum mlxsw_sp_span_type type, bool bind); +struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev); + +void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_span_entry *span_entry); + +int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 593ad31be749..c11c9a635866 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool dynamic) { char *sfd_pl; + u8 num_rec; int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, action, local_port); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); - kfree(sfd_pl); + if (err) + goto out; + + if (num_rec 
!= mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; +out: + kfree(sfd_pl); return err; } @@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, bool adding, bool dynamic) { char *sfd_pl; + u8 num_rec; int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, lag_vid, lag_id); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); - kfree(sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; +out: + kfree(sfd_pl); return err; } @@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, u16 fid, u16 mid_idx, bool adding) { char *sfd_pl; + u8 num_rec; int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; + +out: kfree(sfd_pl); return err; } @@ -1819,7 +1844,7 @@ mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; if (is_vlan_dev(bridge_port->dev)) { - NL_SET_ERR_MSG(extack, "spectrum: Can not enslave a VLAN device to a VLAN-aware bridge"); + NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge"); return -EINVAL; } @@ -1882,20 +1907,16 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device, struct netlink_ext_ack *extack) { struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + struct net_device *dev = bridge_port->dev; u16 vid; - if (!is_vlan_dev(bridge_port->dev)) { - NL_SET_ERR_MSG(extack, "spectrum: Only VLAN devices can be enslaved to a VLAN-unaware bridge"); - return -EINVAL; - } - vid = vlan_dev_vlan_id(bridge_port->dev); - + vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); if (WARN_ON(!mlxsw_sp_port_vlan)) return -EINVAL; if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) { - NL_SET_ERR_MSG(extack, "spectrum: Can not bridge VLAN uppers of the same port"); + NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port"); return -EINVAL; } @@ -1912,8 +1933,10 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_port *mlxsw_sp_port) { struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - u16 vid = vlan_dev_vlan_id(bridge_port->dev); + struct net_device *dev = bridge_port->dev; + u16 vid; + vid = is_vlan_dev(dev) ? 
vlan_dev_vlan_id(dev) : 1; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); if (WARN_ON(!mlxsw_sp_port_vlan)) return; diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c index ab7a29846bfa..c698ec4fd9d4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c @@ -510,7 +510,6 @@ static const struct mlxsw_config_profile mlxsw_sib_config_profile = { .type = MLXSW_PORT_SWID_TYPE_IB, } }, - .resource_query_enable = 0, }; static struct mlxsw_driver mlxsw_sib_driver = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index f3c29bbf07e2..a655c5850aa6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -789,7 +789,7 @@ mlxsw_sx_port_get_link_ksettings(struct net_device *dev, u32 supported, advertising, lp_advertising; int err; - mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false); err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); if (err) { netdev_err(dev, "Failed to get proto"); @@ -879,7 +879,7 @@ mlxsw_sx_port_set_link_ksettings(struct net_device *dev, mlxsw_sx_to_ptys_advert_link(advertising) : mlxsw_sx_to_ptys_speed(speed); - mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false); err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); if (err) { netdev_err(dev, "Failed to get proto"); @@ -897,7 +897,7 @@ mlxsw_sx_port_set_link_ksettings(struct net_device *dev, return 0; mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, - eth_proto_new); + eth_proto_new, true); err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); if (err) { netdev_err(dev, "Failed to set proto admin"); @@ -1029,7 +1029,7 @@ mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width) eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed); mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, - eth_proto_admin); + eth_proto_admin, true); return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); } @@ -1706,7 +1706,6 @@ static const struct mlxsw_config_profile mlxsw_sx_config_profile = { .type = MLXSW_PORT_SWID_TYPE_IB, } }, - .resource_query_enable = 0, }; static struct mlxsw_driver mlxsw_sx_driver = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index ec6cef8267ae..399e9d6993f7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -77,6 +77,7 @@ enum { MLXSW_TRAP_ID_IPV6_DHCP = 0x69, MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F, MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, + MLXSW_TRAP_ID_IPV6_PIM = 0x79, MLXSW_TRAP_ID_IPV4_BGP = 0x88, MLXSW_TRAP_ID_IPV6_BGP = 0x89, MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A, diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index 36a09d94b368..71dca8bd51ac 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -42,4 +42,14 @@ config ENCX24J600 To compile this driver as a module, choose M here. The module will be called encx24j600. 
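+# LAN743X below is a PCIe device, unlike the SPI-attached ENC28J60 and
+# ENCX24J600 entries above; its "select PHYLIB" pulls in the PHY library
+# that the driver's MDIO and PHY handling relies on.
+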
+config LAN743X + tristate "LAN743x support" + depends on PCI + select PHYLIB + ---help--- + Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip + + To compile this driver as a module, choose M here. The module will be + called lan743x. + endif # NET_VENDOR_MICROCHIP diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile index ff78f621b59a..2e982cc249fb 100644 --- a/drivers/net/ethernet/microchip/Makefile +++ b/drivers/net/ethernet/microchip/Makefile @@ -4,3 +4,6 @@ obj-$(CONFIG_ENC28J60) += enc28j60.o obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o +obj-$(CONFIG_LAN743X) += lan743x.o + +lan743x-objs := lan743x_main.o diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c new file mode 100644 index 000000000000..dd947e4dd3ce --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -0,0 +1,2771 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc. */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/crc32.h> +#include <linux/microchipphy.h> +#include <linux/net_tstamp.h> +#include <linux/phy.h> +#include <linux/rtnetlink.h> +#include <linux/iopoll.h> +#include "lan743x_main.h" + +static void lan743x_pci_cleanup(struct lan743x_adapter *adapter) +{ + pci_release_selected_regions(adapter->pdev, + pci_select_bars(adapter->pdev, + IORESOURCE_MEM)); + pci_disable_device(adapter->pdev); +} + +static int lan743x_pci_init(struct lan743x_adapter *adapter, + struct pci_dev *pdev) +{ + unsigned long bars = 0; + int ret; + + adapter->pdev = pdev; + ret = pci_enable_device_mem(pdev); + if (ret) + goto return_error; + + netif_info(adapter, probe, adapter->netdev, + "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n", + pdev->vendor, pdev->device); + bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (!test_bit(0, &bars)) + goto disable_device; + + ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME); + if (ret) + goto disable_device; + + pci_set_master(pdev); + return 0; + +disable_device: + pci_disable_device(adapter->pdev); + +return_error: + return ret; +} + +static u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset) +{ + return ioread32(&adapter->csr.csr_address[offset]); +} + +static void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, + u32 data) +{ + iowrite32(data, &adapter->csr.csr_address[offset]); +} + +#define LAN743X_CSR_READ_OP(offset) lan743x_csr_read(adapter, offset) + +static int lan743x_csr_light_reset(struct lan743x_adapter *adapter) +{ + u32 data; + + data = lan743x_csr_read(adapter, HW_CFG); + data |= HW_CFG_LRST_; + lan743x_csr_write(adapter, HW_CFG, data); + + return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data, + !(data & HW_CFG_LRST_), 100000, 10000000); +} + +static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter, + int offset, u32 bit_mask, + int target_value, int usleep_min, + int usleep_max, int count) +{ + u32 data; + + return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data, + target_value == ((data & bit_mask) ? 
1 : 0), + usleep_max, usleep_min * count); +} + +static int lan743x_csr_init(struct lan743x_adapter *adapter) +{ + struct lan743x_csr *csr = &adapter->csr; + resource_size_t bar_start, bar_length; + int result; + + bar_start = pci_resource_start(adapter->pdev, 0); + bar_length = pci_resource_len(adapter->pdev, 0); + csr->csr_address = devm_ioremap(&adapter->pdev->dev, + bar_start, bar_length); + if (!csr->csr_address) { + result = -ENOMEM; + goto clean_up; + } + + csr->id_rev = lan743x_csr_read(adapter, ID_REV); + csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV); + netif_info(adapter, probe, adapter->netdev, + "ID_REV = 0x%08X, FPGA_REV = %d.%d\n", + csr->id_rev, FPGA_REV_GET_MAJOR_(csr->fpga_rev), + FPGA_REV_GET_MINOR_(csr->fpga_rev)); + if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev)) { + result = -ENODEV; + goto clean_up; + } + + csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR; + switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) { + case ID_REV_CHIP_REV_A0_: + csr->flags |= LAN743X_CSR_FLAG_IS_A0; + csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR; + break; + case ID_REV_CHIP_REV_B0_: + csr->flags |= LAN743X_CSR_FLAG_IS_B0; + break; + } + + result = lan743x_csr_light_reset(adapter); + if (result) + goto clean_up; + return 0; +clean_up: + return result; +} + +static void lan743x_intr_software_isr(void *context) +{ + struct lan743x_adapter *adapter = context; + struct lan743x_intr *intr = &adapter->intr; + u32 int_sts; + + int_sts = lan743x_csr_read(adapter, INT_STS); + if (int_sts & INT_BIT_SW_GP_) { + lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_); + intr->software_isr_flag = 1; + } +} + +static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags) +{ + struct lan743x_tx *tx = context; + struct lan743x_adapter *adapter = tx->adapter; + bool enable_flag = true; + u32 int_en = 0; + + int_en = lan743x_csr_read(adapter, INT_EN_SET); + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) { + lan743x_csr_write(adapter, INT_EN_CLR, + INT_BIT_DMA_TX_(tx->channel_number)); + } + + if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) { + u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number); + u32 dmac_int_sts; + u32 dmac_int_en; + + if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) + dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS); + else + dmac_int_sts = ioc_bit; + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) + dmac_int_en = lan743x_csr_read(adapter, + DMAC_INT_EN_SET); + else + dmac_int_en = ioc_bit; + + dmac_int_en &= ioc_bit; + dmac_int_sts &= dmac_int_en; + if (dmac_int_sts & ioc_bit) { + napi_schedule(&tx->napi); + enable_flag = false;/* poll func will enable later */ + } + } + + if (enable_flag) + /* enable isr */ + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_TX_(tx->channel_number)); +} + +static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags) +{ + struct lan743x_rx *rx = context; + struct lan743x_adapter *adapter = rx->adapter; + bool enable_flag = true; + + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) { + lan743x_csr_write(adapter, INT_EN_CLR, + INT_BIT_DMA_RX_(rx->channel_number)); + } + + if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) { + u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number); + u32 dmac_int_sts; + u32 dmac_int_en; + + if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) + dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS); + else + dmac_int_sts = rx_frame_bit; + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) + dmac_int_en = lan743x_csr_read(adapter, + DMAC_INT_EN_SET); + else + 
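/* Without SOURCE_ENABLE_CHECK the enable state cannot be read
+			 * back, so the frame bit itself serves as the implied
+			 * enable mask (mirroring the TX path above).
+			 */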
+			dmac_int_en = rx_frame_bit;
+
+		dmac_int_en &= rx_frame_bit;
+		dmac_int_sts &= dmac_int_en;
+		if (dmac_int_sts & rx_frame_bit) {
+			napi_schedule(&rx->napi);
+			enable_flag = false;/* poll func will enable later */
+		}
+	}
+
+	if (enable_flag) {
+		/* enable isr */
+		lan743x_csr_write(adapter, INT_EN_SET,
+				  INT_BIT_DMA_RX_(rx->channel_number));
+	}
+}
+
+static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
+{
+	struct lan743x_adapter *adapter = context;
+	unsigned int channel;
+
+	if (int_sts & INT_BIT_ALL_RX_) {
+		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
+			channel++) {
+			u32 int_bit = INT_BIT_DMA_RX_(channel);
+
+			if (int_sts & int_bit) {
+				lan743x_rx_isr(&adapter->rx[channel],
+					       int_bit, flags);
+				int_sts &= ~int_bit;
+			}
+		}
+	}
+	if (int_sts & INT_BIT_ALL_TX_) {
+		for (channel = 0; channel < LAN743X_USED_TX_CHANNELS;
+			channel++) {
+			u32 int_bit = INT_BIT_DMA_TX_(channel);
+
+			if (int_sts & int_bit) {
+				lan743x_tx_isr(&adapter->tx[channel],
+					       int_bit, flags);
+				int_sts &= ~int_bit;
+			}
+		}
+	}
+	if (int_sts & INT_BIT_ALL_OTHER_) {
+		if (int_sts & INT_BIT_SW_GP_) {
+			lan743x_intr_software_isr(adapter);
+			int_sts &= ~INT_BIT_SW_GP_;
+		}
+	}
+	if (int_sts)
+		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
+}
+
+static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
+{
+	struct lan743x_vector *vector = ptr;
+	struct lan743x_adapter *adapter = vector->adapter;
+	irqreturn_t result = IRQ_NONE;
+	u32 int_enables;
+	u32 int_sts;
+
+	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
+		int_sts = lan743x_csr_read(adapter, INT_STS);
+	} else if (vector->flags &
+		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
+		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
+		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
+	} else {
+		/* use mask as implied status */
+		int_sts = vector->int_mask | INT_BIT_MAS_;
+	}
+
+	if (!(int_sts & INT_BIT_MAS_))
+		goto irq_done;
+
+	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
+		/* disable vector interrupt */
+		lan743x_csr_write(adapter,
+				  INT_VEC_EN_CLR,
+				  INT_VEC_EN_(vector->vector_index));
+
+	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
+		/* disable master interrupt */
+		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
+
+	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
+		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
+	} else {
+		/* use vector mask as implied enable mask */
+		int_enables = vector->int_mask;
+	}
+
+	int_sts &= int_enables;
+	int_sts &= vector->int_mask;
+	if (int_sts) {
+		if (vector->handler) {
+			vector->handler(vector->context,
+					int_sts, vector->flags);
+		} else {
+			/* disable interrupts on this vector */
+			lan743x_csr_write(adapter, INT_EN_CLR,
+					  vector->int_mask);
+		}
+		result = IRQ_HANDLED;
+	}
+
+	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
+		/* enable master interrupt */
+		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
+
+	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
+		/* enable vector interrupt */
+		lan743x_csr_write(adapter,
+				  INT_VEC_EN_SET,
+				  INT_VEC_EN_(vector->vector_index));
+irq_done:
+	return result;
+}
+
+static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
+{
+	struct lan743x_intr *intr = &adapter->intr;
+	int result = -ENODEV;
+	int timeout = 10;
+
+	intr->software_isr_flag = 0;
+
+	/* enable interrupt */
+	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
+
+	/* activate interrupt here */
+	lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);
+	while ((timeout > 0) &&
(!(intr->software_isr_flag))) { + usleep_range(1000, 20000); + timeout--; + } + + if (intr->software_isr_flag) + result = 0; + + /* disable interrupts */ + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_); + return result; +} + +static int lan743x_intr_register_isr(struct lan743x_adapter *adapter, + int vector_index, u32 flags, + u32 int_mask, + lan743x_vector_handler handler, + void *context) +{ + struct lan743x_vector *vector = &adapter->intr.vector_list + [vector_index]; + int ret; + + vector->adapter = adapter; + vector->flags = flags; + vector->vector_index = vector_index; + vector->int_mask = int_mask; + vector->handler = handler; + vector->context = context; + + ret = request_irq(vector->irq, + lan743x_intr_entry_isr, + (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ? + IRQF_SHARED : 0, DRIVER_NAME, vector); + if (ret) { + vector->handler = NULL; + vector->context = NULL; + vector->int_mask = 0; + vector->flags = 0; + } + return ret; +} + +static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter, + int vector_index) +{ + struct lan743x_vector *vector = &adapter->intr.vector_list + [vector_index]; + + free_irq(vector->irq, vector); + vector->handler = NULL; + vector->context = NULL; + vector->int_mask = 0; + vector->flags = 0; +} + +static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter, + u32 int_mask) +{ + int index; + + for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) { + if (adapter->intr.vector_list[index].int_mask & int_mask) + return adapter->intr.vector_list[index].flags; + } + return 0; +} + +static void lan743x_intr_close(struct lan743x_adapter *adapter) +{ + struct lan743x_intr *intr = &adapter->intr; + int index = 0; + + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_); + lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF); + + for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) { + if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) { + lan743x_intr_unregister_isr(adapter, index); + intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index); + } + } + + if (intr->flags & INTR_FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + intr->flags &= ~INTR_FLAG_MSI_ENABLED; + } + + if (intr->flags & INTR_FLAG_MSIX_ENABLED) { + pci_disable_msix(adapter->pdev); + intr->flags &= ~INTR_FLAG_MSIX_ENABLED; + } +} + +static int lan743x_intr_open(struct lan743x_adapter *adapter) +{ + struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT]; + struct lan743x_intr *intr = &adapter->intr; + u32 int_vec_en_auto_clr = 0; + u32 int_vec_map0 = 0; + u32 int_vec_map1 = 0; + int ret = -ENODEV; + int index = 0; + u32 flags = 0; + + intr->number_of_vectors = 0; + + /* Try to set up MSIX interrupts */ + memset(&msix_entries[0], 0, + sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT); + for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) + msix_entries[index].entry = index; + ret = pci_enable_msix_range(adapter->pdev, + msix_entries, 1, + 1 + LAN743X_USED_TX_CHANNELS + + LAN743X_USED_RX_CHANNELS); + + if (ret > 0) { + intr->flags |= INTR_FLAG_MSIX_ENABLED; + intr->number_of_vectors = ret; + intr->using_vectors = true; + for (index = 0; index < intr->number_of_vectors; index++) + intr->vector_list[index].irq = msix_entries + [index].vector; + netif_info(adapter, ifup, adapter->netdev, + "using MSIX interrupts, number of vectors = %d\n", + intr->number_of_vectors); + } + + /* If MSIX failed try to setup using MSI interrupts */ + if (!intr->number_of_vectors) { + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { + if 
(!pci_enable_msi(adapter->pdev)) { + intr->flags |= INTR_FLAG_MSI_ENABLED; + intr->number_of_vectors = 1; + intr->using_vectors = true; + intr->vector_list[0].irq = + adapter->pdev->irq; + netif_info(adapter, ifup, adapter->netdev, + "using MSI interrupts, number of vectors = %d\n", + intr->number_of_vectors); + } + } + } + + /* If MSIX, and MSI failed, setup using legacy interrupt */ + if (!intr->number_of_vectors) { + intr->number_of_vectors = 1; + intr->using_vectors = false; + intr->vector_list[0].irq = intr->irq; + netif_info(adapter, ifup, adapter->netdev, + "using legacy interrupts\n"); + } + + /* At this point we must have at least one irq */ + lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF); + + /* map all interrupts to vector 0 */ + lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000); + lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000); + lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000); + flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR; + + if (intr->using_vectors) { + flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET; + } else { + flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR | + LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET | + LAN743X_VECTOR_FLAG_IRQ_SHARED; + } + + if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { + flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ; + flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C; + flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR; + flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK; + flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C; + flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C; + } + + ret = lan743x_intr_register_isr(adapter, 0, flags, + INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ | + INT_BIT_ALL_OTHER_, + lan743x_intr_shared_isr, adapter); + if (ret) + goto clean_up; + intr->flags |= INTR_FLAG_IRQ_REQUESTED(0); + + if (intr->using_vectors) + lan743x_csr_write(adapter, INT_VEC_EN_SET, + INT_VEC_EN_(0)); + + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { + lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432); + lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001); + lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF); + } + + /* enable interrupts */ + lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_); + ret = lan743x_intr_test_isr(adapter); + if (ret) + goto clean_up; + + if (intr->number_of_vectors > 1) { + int number_of_tx_vectors = intr->number_of_vectors - 1; + + if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS) + number_of_tx_vectors = LAN743X_USED_TX_CHANNELS; + flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET; + + if (adapter->csr.flags & + LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { + flags = 
LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR; + } + + for (index = 0; index < number_of_tx_vectors; index++) { + u32 int_bit = INT_BIT_DMA_TX_(index); + int vector = index + 1; + + /* map TX interrupt to vector */ + int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector); + lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1); + if (flags & + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) { + int_vec_en_auto_clr |= INT_VEC_EN_(vector); + lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR, + int_vec_en_auto_clr); + } + + /* Remove TX interrupt from shared mask */ + intr->vector_list[0].int_mask &= ~int_bit; + ret = lan743x_intr_register_isr(adapter, vector, flags, + int_bit, lan743x_tx_isr, + &adapter->tx[index]); + if (ret) + goto clean_up; + intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector); + if (!(flags & + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)) + lan743x_csr_write(adapter, INT_VEC_EN_SET, + INT_VEC_EN_(vector)); + } + } + if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) { + int number_of_rx_vectors = intr->number_of_vectors - + LAN743X_USED_TX_CHANNELS - 1; + + if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS) + number_of_rx_vectors = LAN743X_USED_RX_CHANNELS; + + flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET; + + if (adapter->csr.flags & + LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { + flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR; + } + for (index = 0; index < number_of_rx_vectors; index++) { + int vector = index + 1 + LAN743X_USED_TX_CHANNELS; + u32 int_bit = INT_BIT_DMA_RX_(index); + + /* map RX interrupt to vector */ + int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector); + lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0); + if (flags & + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) { + int_vec_en_auto_clr |= INT_VEC_EN_(vector); + lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR, + int_vec_en_auto_clr); + } + + /* Remove RX interrupt from shared mask */ + intr->vector_list[0].int_mask &= ~int_bit; + ret = lan743x_intr_register_isr(adapter, vector, flags, + int_bit, lan743x_rx_isr, + &adapter->rx[index]); + if (ret) + goto clean_up; + intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector); + + lan743x_csr_write(adapter, INT_VEC_EN_SET, + INT_VEC_EN_(vector)); + } + } + return 0; + +clean_up: + lan743x_intr_close(adapter); + return ret; +} + +static int lan743x_dp_write(struct lan743x_adapter *adapter, + u32 select, u32 addr, u32 length, u32 *buf) +{ + int ret = -EIO; + u32 dp_sel; + int i; + + mutex_lock(&adapter->dp_lock); + if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_, + 1, 40, 100, 100)) + goto unlock; + dp_sel = lan743x_csr_read(adapter, DP_SEL); + dp_sel &= ~DP_SEL_MASK_; + dp_sel |= select; + lan743x_csr_write(adapter, DP_SEL, dp_sel); + + for (i = 0; i < length; i++) { + lan743x_csr_write(adapter, DP_ADDR, addr + i); + lan743x_csr_write(adapter, DP_DATA_0, buf[i]); + lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_); + if 
(lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
+					     1, 40, 100, 100))
+			goto unlock;
+	}
+	ret = 0;
+
+unlock:
+	mutex_unlock(&adapter->dp_lock);
+	return ret;
+}
+
+static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
+{
+	u32 ret;
+
+	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
+	      MAC_MII_ACC_PHY_ADDR_MASK_;
+	ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
+	       MAC_MII_ACC_MIIRINDA_MASK_;
+
+	if (read)
+		ret |= MAC_MII_ACC_MII_READ_;
+	else
+		ret |= MAC_MII_ACC_MII_WRITE_;
+	ret |= MAC_MII_ACC_MII_BUSY_;
+
+	return ret;
+}
+
+static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
+{
+	u32 data;
+
+	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
+				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
+}
+
+static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
+{
+	struct lan743x_adapter *adapter = bus->priv;
+	u32 val, mii_access;
+	int ret;
+
+	/* confirm MII not busy */
+	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+	if (ret < 0)
+		return ret;
+
+	/* set the address, index & direction (read from PHY) */
+	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
+	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
+	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+	if (ret < 0)
+		return ret;
+
+	val = lan743x_csr_read(adapter, MAC_MII_DATA);
+	return (int)(val & 0xFFFF);
+}
+
+static int lan743x_mdiobus_write(struct mii_bus *bus,
+				 int phy_id, int index, u16 regval)
+{
+	struct lan743x_adapter *adapter = bus->priv;
+	u32 val, mii_access;
+	int ret;
+
+	/* confirm MII not busy */
+	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+	if (ret < 0)
+		return ret;
+	val = (u32)regval;
+	lan743x_csr_write(adapter, MAC_MII_DATA, val);
+
+	/* set the address, index & direction (write to PHY) */
+	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
+	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
+	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+	return ret;
+}
+
+static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
+				    u8 *addr)
+{
+	u32 addr_lo, addr_hi;
+
+	addr_lo = addr[0] |
+		  addr[1] << 8 |
+		  addr[2] << 16 |
+		  addr[3] << 24;
+	addr_hi = addr[4] |
+		  addr[5] << 8;
+	lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
+	lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);
+
+	ether_addr_copy(adapter->mac_address, addr);
+	netif_info(adapter, drv, adapter->netdev,
+		   "MAC address set to %pM\n", addr);
+}
+
+static int lan743x_mac_init(struct lan743x_adapter *adapter)
+{
+	bool mac_address_valid = true;
+	struct net_device *netdev;
+	u32 mac_addr_hi = 0;
+	u32 mac_addr_lo = 0;
+	u32 data;
+	int ret;
+
+	netdev = adapter->netdev;
+	lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_);
+	ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_,
+				       0, 1000, 20000, 100);
+	if (ret)
+		return ret;
+
+	/* set up auto duplex and speed detection */
+	data = lan743x_csr_read(adapter, MAC_CR);
+	data |= MAC_CR_ADD_ | MAC_CR_ASD_;
+	data |= MAC_CR_CNTR_RST_;
+	lan743x_csr_write(adapter, MAC_CR, data);
+
+	mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
+	mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
+	adapter->mac_address[0] = mac_addr_lo & 0xFF;
+	adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
+	adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
+	adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
+	adapter->mac_address[4] = mac_addr_hi & 0xFF;
+	adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;
+
+	if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
+	    mac_addr_lo == 0xFFFFFFFF) {
+		mac_address_valid = false;
+	} else if (!is_valid_ether_addr(adapter->mac_address)) {
+		mac_address_valid = false;
+	}
+
+	if (!mac_address_valid)
+		random_ether_addr(adapter->mac_address);
+	lan743x_mac_set_address(adapter, adapter->mac_address);
+	ether_addr_copy(netdev->dev_addr, adapter->mac_address);
+	return 0;
+}
+
+static int lan743x_mac_open(struct lan743x_adapter *adapter)
+{
+	int ret = 0;
+	u32 temp;
+
+	temp = lan743x_csr_read(adapter, MAC_RX);
+	lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
+	temp = lan743x_csr_read(adapter, MAC_TX);
+	lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
+	return ret;
+}
+
+static void lan743x_mac_close(struct lan743x_adapter *adapter)
+{
+	u32 temp;
+
+	temp = lan743x_csr_read(adapter, MAC_TX);
+	temp &= ~MAC_TX_TXEN_;
+	lan743x_csr_write(adapter, MAC_TX, temp);
+	lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
+				 1, 1000, 20000, 100);
+
+	temp = lan743x_csr_read(adapter, MAC_RX);
+	temp &= ~MAC_RX_RXEN_;
+	lan743x_csr_write(adapter, MAC_RX, temp);
+	lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
+				 1, 1000, 20000, 100);
+}
+
+static void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
+					      bool tx_enable, bool rx_enable)
+{
+	u32 flow_setting = 0;
+
+	/* set maximum pause time because when fifo space frees
+	 * up a zero value pause frame will be sent to release the pause
+	 */
+	flow_setting = MAC_FLOW_CR_FCPT_MASK_;
+	if (tx_enable)
+		flow_setting |= MAC_FLOW_CR_TX_FCEN_;
+	if (rx_enable)
+		flow_setting |= MAC_FLOW_CR_RX_FCEN_;
+	lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
+}
+
+static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
+{
+	int enabled = 0;
+	u32 mac_rx = 0;
+
+	mac_rx = lan743x_csr_read(adapter, MAC_RX);
+	if (mac_rx & MAC_RX_RXEN_) {
+		enabled = 1;
+		if (mac_rx & MAC_RX_RXD_) {
+			lan743x_csr_write(adapter, MAC_RX, mac_rx);
+			mac_rx &= ~MAC_RX_RXD_;
+		}
+		mac_rx &= ~MAC_RX_RXEN_;
+		lan743x_csr_write(adapter, MAC_RX, mac_rx);
+		lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
+					 1, 1000, 20000, 100);
+		lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
+	}
+
+	mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
+	mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
+		   MAC_RX_MAX_SIZE_MASK_);
+	lan743x_csr_write(adapter, MAC_RX, mac_rx);
+
+	if (enabled) {
+		mac_rx |= MAC_RX_RXEN_;
+		lan743x_csr_write(adapter, MAC_RX, mac_rx);
+	}
+	return 0;
+}
+
+/* PHY */
+static int lan743x_phy_reset(struct lan743x_adapter *adapter)
+{
+	u32 data;
+
+	/* Only called within probe, and before mdiobus_register */
+
+	data = lan743x_csr_read(adapter, PMT_CTL);
+	data |= PMT_CTL_ETH_PHY_RST_;
+	lan743x_csr_write(adapter, PMT_CTL, data);
+
+	return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
+				  (!(data & PMT_CTL_ETH_PHY_RST_) &&
+				  (data & PMT_CTL_READY_)),
+				  50000, 1000000);
+}
+
+static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
+					   u8 duplex, u16 local_adv,
+					   u16 remote_adv)
+{
+	struct lan743x_phy *phy = &adapter->phy;
+	u8 cap;
+
+	if (phy->fc_autoneg)
+		cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
+	else
+		cap = phy->fc_request_control;
+
+	lan743x_mac_flow_ctrl_set_enables(adapter,
+					  cap & FLOW_CTRL_TX,
+					  cap & FLOW_CTRL_RX);
+}
+
+static int lan743x_phy_init(struct lan743x_adapter *adapter)
+{
+	return lan743x_phy_reset(adapter);
+}
+
+static void lan743x_phy_link_status_change(struct net_device *netdev)
+{
+	struct lan743x_adapter *adapter = netdev_priv(netdev);
+	struct phy_device
*phydev = netdev->phydev; + + phy_print_status(phydev); + if (phydev->state == PHY_RUNNING) { + struct ethtool_link_ksettings ksettings; + int remote_advertisement = 0; + int local_advertisement = 0; + + memset(&ksettings, 0, sizeof(ksettings)); + phy_ethtool_get_link_ksettings(netdev, &ksettings); + local_advertisement = phy_read(phydev, MII_ADVERTISE); + if (local_advertisement < 0) + return; + + remote_advertisement = phy_read(phydev, MII_LPA); + if (remote_advertisement < 0) + return; + + lan743x_phy_update_flowcontrol(adapter, + ksettings.base.duplex, + local_advertisement, + remote_advertisement); + } +} + +static void lan743x_phy_close(struct lan743x_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + phy_stop(netdev->phydev); + phy_disconnect(netdev->phydev); + netdev->phydev = NULL; +} + +static int lan743x_phy_open(struct lan743x_adapter *adapter) +{ + struct lan743x_phy *phy = &adapter->phy; + struct phy_device *phydev; + struct net_device *netdev; + int ret = -EIO; + u32 mii_adv; + + netdev = adapter->netdev; + phydev = phy_find_first(adapter->mdiobus); + if (!phydev) + goto return_error; + + ret = phy_connect_direct(netdev, phydev, + lan743x_phy_link_status_change, + PHY_INTERFACE_MODE_GMII); + if (ret) + goto return_error; + + /* MAC doesn't support 1000T Half */ + phydev->supported &= ~SUPPORTED_1000baseT_Half; + + /* support both flow controls */ + phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); + phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + mii_adv = (u32)mii_advertise_flowctrl(phy->fc_request_control); + phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv); + phy->fc_autoneg = phydev->autoneg; + + phy_start(phydev); + phy_start_aneg(phydev); + return 0; + +return_error: + return ret; +} + +static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter) +{ + u8 *mac_addr; + u32 mac_addr_hi = 0; + u32 mac_addr_lo = 0; + + /* Add mac address to perfect Filter */ + mac_addr = adapter->mac_address; + mac_addr_lo = ((((u32)(mac_addr[0])) << 0) | + (((u32)(mac_addr[1])) << 8) | + (((u32)(mac_addr[2])) << 16) | + (((u32)(mac_addr[3])) << 24)); + mac_addr_hi = ((((u32)(mac_addr[4])) << 0) | + (((u32)(mac_addr[5])) << 8)); + + lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo); + lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0), + mac_addr_hi | RFE_ADDR_FILT_HI_VALID_); +} + +static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 hash_table[DP_SEL_VHF_HASH_LEN]; + u32 rfctl; + u32 data; + + rfctl = lan743x_csr_read(adapter, RFE_CTL); + rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ | + RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_); + rfctl |= RFE_CTL_AB_; + if (netdev->flags & IFF_PROMISC) { + rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_; + } else { + if (netdev->flags & IFF_ALLMULTI) + rfctl |= RFE_CTL_AM_; + } + + memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32)); + if (netdev_mc_count(netdev)) { + struct netdev_hw_addr *ha; + int i; + + rfctl |= RFE_CTL_DA_PERFECT_; + i = 1; + netdev_for_each_mc_addr(ha, netdev) { + /* set first 32 into Perfect Filter */ + if (i < 33) { + lan743x_csr_write(adapter, + RFE_ADDR_FILT_HI(i), 0); + data = ha->addr[3]; + data = ha->addr[2] | (data << 8); + data = ha->addr[1] | (data << 8); + data = ha->addr[0] | (data << 8); + lan743x_csr_write(adapter, + RFE_ADDR_FILT_LO(i), data); + data = ha->addr[5]; + data = ha->addr[4] | (data << 8); + data |= RFE_ADDR_FILT_HI_VALID_; + lan743x_csr_write(adapter, + 
RFE_ADDR_FILT_HI(i), data); + } else { + u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >> + 23) & 0x1FF; + hash_table[bitnum / 32] |= (1 << (bitnum % 32)); + rfctl |= RFE_CTL_MCAST_HASH_; + } + i++; + } + } + + lan743x_dp_write(adapter, DP_SEL_RFE_RAM, + DP_SEL_VHF_VLAN_LEN, + DP_SEL_VHF_HASH_LEN, hash_table); + lan743x_csr_write(adapter, RFE_CTL, rfctl); +} + +static int lan743x_dmac_init(struct lan743x_adapter *adapter) +{ + u32 data = 0; + + lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_); + lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_, + 0, 1000, 20000, 100); + switch (DEFAULT_DMA_DESCRIPTOR_SPACING) { + case DMA_DESCRIPTOR_SPACING_16: + data = DMAC_CFG_MAX_DSPACE_16_; + break; + case DMA_DESCRIPTOR_SPACING_32: + data = DMAC_CFG_MAX_DSPACE_32_; + break; + case DMA_DESCRIPTOR_SPACING_64: + data = DMAC_CFG_MAX_DSPACE_64_; + break; + case DMA_DESCRIPTOR_SPACING_128: + data = DMAC_CFG_MAX_DSPACE_128_; + break; + default: + return -EPERM; + } + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) + data |= DMAC_CFG_COAL_EN_; + data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_; + data |= DMAC_CFG_MAX_READ_REQ_SET_(6); + lan743x_csr_write(adapter, DMAC_CFG, data); + data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1); + data |= DMAC_COAL_CFG_TIMER_TX_START_; + data |= DMAC_COAL_CFG_FLUSH_INTS_; + data |= DMAC_COAL_CFG_INT_EXIT_COAL_; + data |= DMAC_COAL_CFG_CSR_EXIT_COAL_; + data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A); + data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C); + lan743x_csr_write(adapter, DMAC_COAL_CFG, data); + data = DMAC_OBFF_TX_THRES_SET_(0x08); + data |= DMAC_OBFF_RX_THRES_SET_(0x0A); + lan743x_csr_write(adapter, DMAC_OBFF_CFG, data); + return 0; +} + +static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter, + int tx_channel) +{ + u32 dmac_cmd = 0; + + dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD); + return DMAC_CHANNEL_STATE_SET((dmac_cmd & + DMAC_CMD_START_T_(tx_channel)), + (dmac_cmd & + DMAC_CMD_STOP_T_(tx_channel))); +} + +static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter, + int tx_channel) +{ + int timeout = 100; + int result = 0; + + while (timeout && + ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) == + DMAC_CHANNEL_STATE_STOP_PENDING)) { + usleep_range(1000, 20000); + timeout--; + } + if (result == DMAC_CHANNEL_STATE_STOP_PENDING) + result = -ENODEV; + return result; +} + +static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter, + int rx_channel) +{ + u32 dmac_cmd = 0; + + dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD); + return DMAC_CHANNEL_STATE_SET((dmac_cmd & + DMAC_CMD_START_R_(rx_channel)), + (dmac_cmd & + DMAC_CMD_STOP_R_(rx_channel))); +} + +static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter, + int rx_channel) +{ + int timeout = 100; + int result = 0; + + while (timeout && + ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) == + DMAC_CHANNEL_STATE_STOP_PENDING)) { + usleep_range(1000, 20000); + timeout--; + } + if (result == DMAC_CHANNEL_STATE_STOP_PENDING) + result = -ENODEV; + return result; +} + +static void lan743x_tx_release_desc(struct lan743x_tx *tx, + int descriptor_index, bool cleanup) +{ + struct lan743x_tx_buffer_info *buffer_info = NULL; + struct lan743x_tx_descriptor *descriptor = NULL; + u32 descriptor_type = 0; + + descriptor = &tx->ring_cpu_ptr[descriptor_index]; + buffer_info = &tx->buffer_info[descriptor_index]; + if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE)) + goto done; + + descriptor_type = (descriptor->data0) & + TX_DESC_DATA0_DTYPE_MASK_; + if 
(descriptor_type == TX_DESC_DATA0_DTYPE_DATA_) + goto clean_up_data_descriptor; + else + goto clear_active; + +clean_up_data_descriptor: + if (buffer_info->dma_ptr) { + if (buffer_info->flags & + TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) { + dma_unmap_page(&tx->adapter->pdev->dev, + buffer_info->dma_ptr, + buffer_info->buffer_length, + DMA_TO_DEVICE); + } else { + dma_unmap_single(&tx->adapter->pdev->dev, + buffer_info->dma_ptr, + buffer_info->buffer_length, + DMA_TO_DEVICE); + } + buffer_info->dma_ptr = 0; + buffer_info->buffer_length = 0; + } + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + +clear_active: + buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE; + +done: + memset(buffer_info, 0, sizeof(*buffer_info)); + memset(descriptor, 0, sizeof(*descriptor)); +} + +static int lan743x_tx_next_index(struct lan743x_tx *tx, int index) +{ + return ((++index) % tx->ring_size); +} + +static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx) +{ + while ((*tx->head_cpu_ptr) != (tx->last_head)) { + lan743x_tx_release_desc(tx, tx->last_head, false); + tx->last_head = lan743x_tx_next_index(tx, tx->last_head); + } +} + +static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx) +{ + u32 original_head = 0; + + original_head = tx->last_head; + do { + lan743x_tx_release_desc(tx, tx->last_head, true); + tx->last_head = lan743x_tx_next_index(tx, tx->last_head); + } while (tx->last_head != original_head); + memset(tx->ring_cpu_ptr, 0, + sizeof(*tx->ring_cpu_ptr) * (tx->ring_size)); + memset(tx->buffer_info, 0, + sizeof(*tx->buffer_info) * (tx->ring_size)); +} + +static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx, + struct sk_buff *skb) +{ + int result = 1; /* 1 for the main skb buffer */ + int nr_frags = 0; + + if (skb_is_gso(skb)) + result++; /* requires an extension descriptor */ + nr_frags = skb_shinfo(skb)->nr_frags; + result += nr_frags; /* 1 for each fragment buffer */ + return result; +} + +static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx) +{ + int last_head = tx->last_head; + int last_tail = tx->last_tail; + + if (last_tail >= last_head) + return tx->ring_size - last_tail + last_head - 1; + else + return last_head - last_tail - 1; +} + +static int lan743x_tx_frame_start(struct lan743x_tx *tx, + unsigned char *first_buffer, + unsigned int first_buffer_length, + unsigned int frame_length, + bool check_sum) +{ + /* called only from within lan743x_tx_xmit_frame. + * assuming tx->ring_lock has already been acquired.
+ */ + struct lan743x_tx_descriptor *tx_descriptor = NULL; + struct lan743x_tx_buffer_info *buffer_info = NULL; + struct lan743x_adapter *adapter = tx->adapter; + struct device *dev = &adapter->pdev->dev; + dma_addr_t dma_ptr; + + tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS; + tx->frame_first = tx->last_tail; + tx->frame_tail = tx->frame_first; + + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + buffer_info = &tx->buffer_info[tx->frame_tail]; + dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_ptr)) + return -ENOMEM; + + tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr); + tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr); + tx_descriptor->data3 = (frame_length << 16) & + TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_; + + buffer_info->skb = NULL; + buffer_info->dma_ptr = dma_ptr; + buffer_info->buffer_length = first_buffer_length; + buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; + + tx->frame_data0 = (first_buffer_length & + TX_DESC_DATA0_BUF_LENGTH_MASK_) | + TX_DESC_DATA0_DTYPE_DATA_ | + TX_DESC_DATA0_FS_ | + TX_DESC_DATA0_FCS_; + + if (check_sum) + tx->frame_data0 |= TX_DESC_DATA0_ICE_ | + TX_DESC_DATA0_IPE_ | + TX_DESC_DATA0_TPE_; + + /* data0 will be programmed in one of other frame assembler functions */ + return 0; +} + +static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, + unsigned int frame_length) +{ + /* called only from within lan743x_tx_xmit_frame. + * assuming tx->ring_lock has already been acquired. + */ + struct lan743x_tx_descriptor *tx_descriptor = NULL; + struct lan743x_tx_buffer_info *buffer_info = NULL; + + /* wrap up previous descriptor */ + tx->frame_data0 |= TX_DESC_DATA0_EXT_; + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + tx_descriptor->data0 = tx->frame_data0; + + /* move to next descriptor */ + tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + buffer_info = &tx->buffer_info[tx->frame_tail]; + + /* add extension descriptor */ + tx_descriptor->data1 = 0; + tx_descriptor->data2 = 0; + tx_descriptor->data3 = 0; + + buffer_info->skb = NULL; + buffer_info->dma_ptr = 0; + buffer_info->buffer_length = 0; + buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; + + tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) | + TX_DESC_DATA0_DTYPE_EXT_ | + TX_DESC_DATA0_EXT_LSO_; + + /* data0 will be programmed in one of other frame assembler functions */ +} + +static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx, + const struct skb_frag_struct *fragment, + unsigned int frame_length) +{ + /* called only from within lan743x_tx_xmit_frame + * assuming tx->ring_lock has already been acquired + */ + struct lan743x_tx_descriptor *tx_descriptor = NULL; + struct lan743x_tx_buffer_info *buffer_info = NULL; + struct lan743x_adapter *adapter = tx->adapter; + struct device *dev = &adapter->pdev->dev; + unsigned int fragment_length = 0; + dma_addr_t dma_ptr; + + fragment_length = skb_frag_size(fragment); + if (!fragment_length) + return 0; + + /* wrap up previous descriptor */ + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + tx_descriptor->data0 = tx->frame_data0; + + /* move to next descriptor */ + tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + buffer_info = &tx->buffer_info[tx->frame_tail]; + dma_ptr = skb_frag_dma_map(dev, fragment, + 0, fragment_length, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_ptr)) { + int desc_index; + + /* cleanup all 
previously setup descriptors */ + desc_index = tx->frame_first; + while (desc_index != tx->frame_tail) { + lan743x_tx_release_desc(tx, desc_index, true); + desc_index = lan743x_tx_next_index(tx, desc_index); + } + dma_wmb(); + tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS; + tx->frame_first = 0; + tx->frame_data0 = 0; + tx->frame_tail = 0; + return -ENOMEM; + } + + tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr); + tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr); + tx_descriptor->data3 = (frame_length << 16) & + TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_; + + buffer_info->skb = NULL; + buffer_info->dma_ptr = dma_ptr; + buffer_info->buffer_length = fragment_length; + buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; + buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT; + + tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) | + TX_DESC_DATA0_DTYPE_DATA_ | + TX_DESC_DATA0_FCS_; + + /* data0 will be programmed in one of other frame assembler functions */ + return 0; +} + +static void lan743x_tx_frame_end(struct lan743x_tx *tx, + struct sk_buff *skb, + bool ignore_sync) +{ + /* called only from within lan743x_tx_xmit_frame + * assuming tx->ring_lock has already been acquired + */ + struct lan743x_tx_descriptor *tx_descriptor = NULL; + struct lan743x_tx_buffer_info *buffer_info = NULL; + struct lan743x_adapter *adapter = tx->adapter; + u32 tx_tail_flags = 0; + + /* wrap up previous descriptor */ + tx->frame_data0 |= TX_DESC_DATA0_LS_; + tx->frame_data0 |= TX_DESC_DATA0_IOC_; + + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + buffer_info = &tx->buffer_info[tx->frame_tail]; + buffer_info->skb = skb; + if (ignore_sync) + buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC; + + tx_descriptor->data0 = tx->frame_data0; + tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); + tx->last_tail = tx->frame_tail; + + dma_wmb(); + + if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET) + tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) + tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ | + TX_TAIL_SET_TOP_INT_EN_; + + lan743x_csr_write(adapter, TX_TAIL(tx->channel_number), + tx_tail_flags | tx->frame_tail); + tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS; +} + +static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, + struct sk_buff *skb) +{ + int required_number_of_descriptors = 0; + unsigned int start_frame_length = 0; + unsigned int frame_length = 0; + unsigned int head_length = 0; + unsigned long irq_flags = 0; + bool ignore_sync = false; + int nr_frags = 0; + bool gso = false; + int j; + + required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb); + + spin_lock_irqsave(&tx->ring_lock, irq_flags); + if (required_number_of_descriptors > + lan743x_tx_get_avail_desc(tx)) { + if (required_number_of_descriptors > (tx->ring_size - 1)) { + dev_kfree_skb(skb); + } else { + /* save to overflow buffer */ + tx->overflow_skb = skb; + netif_stop_queue(tx->adapter->netdev); + } + goto unlock; + } + + /* space available, transmit skb */ + head_length = skb_headlen(skb); + frame_length = skb_pagelen(skb); + nr_frags = skb_shinfo(skb)->nr_frags; + start_frame_length = frame_length; + gso = skb_is_gso(skb); + if (gso) { + start_frame_length = max(skb_shinfo(skb)->gso_size, + (unsigned short)8); + } + + if (lan743x_tx_frame_start(tx, + skb->data, head_length, + start_frame_length, + skb->ip_summed == CHECKSUM_PARTIAL)) { + dev_kfree_skb(skb); + goto unlock; + } + + if (gso) + lan743x_tx_frame_add_lso(tx, 
frame_length); + + if (nr_frags <= 0) + goto finish; + + for (j = 0; j < nr_frags; j++) { + const struct skb_frag_struct *frag; + + frag = &(skb_shinfo(skb)->frags[j]); + if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) { + /* upon error no need to call + * lan743x_tx_frame_end + * frame assembler clean up was performed inside + * lan743x_tx_frame_add_fragment + */ + dev_kfree_skb(skb); + goto unlock; + } + } + +finish: + lan743x_tx_frame_end(tx, skb, ignore_sync); + +unlock: + spin_unlock_irqrestore(&tx->ring_lock, irq_flags); + return NETDEV_TX_OK; +} + +static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight) +{ + struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi); + struct lan743x_adapter *adapter = tx->adapter; + bool start_transmitter = false; + unsigned long irq_flags = 0; + u32 ioc_bit = 0; + u32 int_sts = 0; + + ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number); + int_sts = lan743x_csr_read(adapter, DMAC_INT_STS); + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) + lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit); + spin_lock_irqsave(&tx->ring_lock, irq_flags); + + /* clean up tx ring */ + lan743x_tx_release_completed_descriptors(tx); + if (netif_queue_stopped(adapter->netdev)) { + if (tx->overflow_skb) { + if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <= + lan743x_tx_get_avail_desc(tx)) + start_transmitter = true; + } else { + netif_wake_queue(adapter->netdev); + } + } + spin_unlock_irqrestore(&tx->ring_lock, irq_flags); + + if (start_transmitter) { + /* space is now available, transmit overflow skb */ + lan743x_tx_xmit_frame(tx, tx->overflow_skb); + tx->overflow_skb = NULL; + netif_wake_queue(adapter->netdev); + } + + if (!napi_complete_done(napi, weight)) + goto done; + + /* enable isr */ + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_TX_(tx->channel_number)); + lan743x_csr_read(adapter, INT_STS); + +done: + return weight; +} + +static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx) +{ + if (tx->head_cpu_ptr) { + pci_free_consistent(tx->adapter->pdev, + sizeof(*tx->head_cpu_ptr), + (void *)(tx->head_cpu_ptr), + tx->head_dma_ptr); + tx->head_cpu_ptr = NULL; + tx->head_dma_ptr = 0; + } + kfree(tx->buffer_info); + tx->buffer_info = NULL; + + if (tx->ring_cpu_ptr) { + pci_free_consistent(tx->adapter->pdev, + tx->ring_allocation_size, + tx->ring_cpu_ptr, + tx->ring_dma_ptr); + tx->ring_allocation_size = 0; + tx->ring_cpu_ptr = NULL; + tx->ring_dma_ptr = 0; + } + tx->ring_size = 0; +} + +static int lan743x_tx_ring_init(struct lan743x_tx *tx) +{ + size_t ring_allocation_size = 0; + void *cpu_ptr = NULL; + dma_addr_t dma_ptr; + int ret = -ENOMEM; + + tx->ring_size = LAN743X_TX_RING_SIZE; + if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) { + ret = -EINVAL; + goto cleanup; + } + ring_allocation_size = ALIGN(tx->ring_size * + sizeof(struct lan743x_tx_descriptor), + PAGE_SIZE); + dma_ptr = 0; + cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev, + ring_allocation_size, &dma_ptr); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + + tx->ring_allocation_size = ring_allocation_size; + tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr; + tx->ring_dma_ptr = dma_ptr; + + cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr; + dma_ptr = 0; + cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev, + sizeof(*tx->head_cpu_ptr), &dma_ptr); + if (!cpu_ptr) { + ret = -ENOMEM; + goto 
cleanup; + } + + tx->head_cpu_ptr = cpu_ptr; + tx->head_dma_ptr = dma_ptr; + if (tx->head_dma_ptr & 0x3) { + ret = -ENOMEM; + goto cleanup; + } + + return 0; + +cleanup: + lan743x_tx_ring_cleanup(tx); + return ret; +} + +static void lan743x_tx_close(struct lan743x_tx *tx) +{ + struct lan743x_adapter *adapter = tx->adapter; + + lan743x_csr_write(adapter, + DMAC_CMD, + DMAC_CMD_STOP_T_(tx->channel_number)); + lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number); + + lan743x_csr_write(adapter, + DMAC_INT_EN_CLR, + DMAC_INT_BIT_TX_IOC_(tx->channel_number)); + lan743x_csr_write(adapter, INT_EN_CLR, + INT_BIT_DMA_TX_(tx->channel_number)); + napi_disable(&tx->napi); + netif_napi_del(&tx->napi); + + lan743x_csr_write(adapter, FCT_TX_CTL, + FCT_TX_CTL_DIS_(tx->channel_number)); + lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL, + FCT_TX_CTL_EN_(tx->channel_number), + 0, 1000, 20000, 100); + + lan743x_tx_release_all_descriptors(tx); + + if (tx->overflow_skb) { + dev_kfree_skb(tx->overflow_skb); + tx->overflow_skb = NULL; + } + + lan743x_tx_ring_cleanup(tx); +} + +static int lan743x_tx_open(struct lan743x_tx *tx) +{ + struct lan743x_adapter *adapter = NULL; + u32 data = 0; + int ret; + + adapter = tx->adapter; + ret = lan743x_tx_ring_init(tx); + if (ret) + return ret; + + /* initialize fifo */ + lan743x_csr_write(adapter, FCT_TX_CTL, + FCT_TX_CTL_RESET_(tx->channel_number)); + lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL, + FCT_TX_CTL_RESET_(tx->channel_number), + 0, 1000, 20000, 100); + + /* enable fifo */ + lan743x_csr_write(adapter, FCT_TX_CTL, + FCT_TX_CTL_EN_(tx->channel_number)); + + /* reset tx channel */ + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_TX_SWR_(tx->channel_number)); + lan743x_csr_wait_for_bit(adapter, DMAC_CMD, + DMAC_CMD_TX_SWR_(tx->channel_number), + 0, 1000, 20000, 100); + + /* Write TX_BASE_ADDR */ + lan743x_csr_write(adapter, + TX_BASE_ADDRH(tx->channel_number), + DMA_ADDR_HIGH32(tx->ring_dma_ptr)); + lan743x_csr_write(adapter, + TX_BASE_ADDRL(tx->channel_number), + DMA_ADDR_LOW32(tx->ring_dma_ptr)); + + /* Write TX_CFG_B */ + data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number)); + data &= ~TX_CFG_B_TX_RING_LEN_MASK_; + data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_); + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) + data |= TX_CFG_B_TDMABL_512_; + lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data); + + /* Write TX_CFG_A */ + data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_; + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { + data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_; + data |= TX_CFG_A_TX_PF_THRES_SET_(0x10); + data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04); + data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07); + } + lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data); + + /* Write TX_HEAD_WRITEBACK_ADDR */ + lan743x_csr_write(adapter, + TX_HEAD_WRITEBACK_ADDRH(tx->channel_number), + DMA_ADDR_HIGH32(tx->head_dma_ptr)); + lan743x_csr_write(adapter, + TX_HEAD_WRITEBACK_ADDRL(tx->channel_number), + DMA_ADDR_LOW32(tx->head_dma_ptr)); + + /* set last head */ + tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number)); + + /* write TX_TAIL */ + tx->last_tail = 0; + lan743x_csr_write(adapter, TX_TAIL(tx->channel_number), + (u32)(tx->last_tail)); + tx->vector_flags = lan743x_intr_get_vector_flags(adapter, + INT_BIT_DMA_TX_ + (tx->channel_number)); + netif_napi_add(adapter->netdev, + &tx->napi, lan743x_tx_napi_poll, + tx->ring_size - 1); + napi_enable(&tx->napi); + + data = 0; + if (tx->vector_flags & 
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR) + data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR) + data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C) + data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C) + data |= TX_CFG_C_TX_INT_EN_R2C_; + lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data); + + if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)) + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_TX_(tx->channel_number)); + lan743x_csr_write(adapter, DMAC_INT_EN_SET, + DMAC_INT_BIT_TX_IOC_(tx->channel_number)); + + /* start dmac channel */ + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_START_T_(tx->channel_number)); + return 0; +} + +static int lan743x_rx_next_index(struct lan743x_rx *rx, int index) +{ + return ((++index) % rx->ring_size); +} + +static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index) +{ + struct lan743x_rx_buffer_info *buffer_info; + struct lan743x_rx_descriptor *descriptor; + int length = 0; + + length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING); + descriptor = &rx->ring_cpu_ptr[index]; + buffer_info = &rx->buffer_info[index]; + buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev, + length, + GFP_ATOMIC | GFP_DMA); + if (!(buffer_info->skb)) + return -ENOMEM; + buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev, + buffer_info->skb->data, + length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&rx->adapter->pdev->dev, + buffer_info->dma_ptr)) { + buffer_info->dma_ptr = 0; + return -ENOMEM; + } + + buffer_info->buffer_length = length; + descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr); + descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr); + descriptor->data3 = 0; + descriptor->data0 = (RX_DESC_DATA0_OWN_ | + (length & RX_DESC_DATA0_BUF_LENGTH_MASK_)); + skb_reserve(buffer_info->skb, RX_HEAD_PADDING); + + return 0; +} + +static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index) +{ + struct lan743x_rx_buffer_info *buffer_info; + struct lan743x_rx_descriptor *descriptor; + + descriptor = &rx->ring_cpu_ptr[index]; + buffer_info = &rx->buffer_info[index]; + + descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr); + descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr); + descriptor->data3 = 0; + descriptor->data0 = (RX_DESC_DATA0_OWN_ | + ((buffer_info->buffer_length) & + RX_DESC_DATA0_BUF_LENGTH_MASK_)); +} + +static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index) +{ + struct lan743x_rx_buffer_info *buffer_info; + struct lan743x_rx_descriptor *descriptor; + + descriptor = &rx->ring_cpu_ptr[index]; + buffer_info = &rx->buffer_info[index]; + + memset(descriptor, 0, sizeof(*descriptor)); + + if (buffer_info->dma_ptr) { + dma_unmap_single(&rx->adapter->pdev->dev, + buffer_info->dma_ptr, + buffer_info->buffer_length, + DMA_FROM_DEVICE); + buffer_info->dma_ptr = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + + memset(buffer_info, 0, sizeof(*buffer_info)); +} + +static int lan743x_rx_process_packet(struct lan743x_rx *rx) +{ + struct skb_shared_hwtstamps *hwtstamps = NULL; + int result = RX_PROCESS_RESULT_NOTHING_TO_DO; + struct lan743x_rx_buffer_info *buffer_info; + struct lan743x_rx_descriptor *descriptor; + int current_head_index = -1; + int extension_index = -1; + int first_index = -1; + int last_index = 
-1; + + current_head_index = *rx->head_cpu_ptr; + if (current_head_index < 0 || current_head_index >= rx->ring_size) + goto done; + + if (rx->last_head < 0 || rx->last_head >= rx->ring_size) + goto done; + + if (rx->last_head != current_head_index) { + descriptor = &rx->ring_cpu_ptr[rx->last_head]; + if (descriptor->data0 & RX_DESC_DATA0_OWN_) + goto done; + + if (!(descriptor->data0 & RX_DESC_DATA0_FS_)) + goto done; + + first_index = rx->last_head; + if (descriptor->data0 & RX_DESC_DATA0_LS_) { + last_index = rx->last_head; + } else { + int index; + + index = lan743x_rx_next_index(rx, first_index); + while (index != current_head_index) { + descriptor = &rx->ring_cpu_ptr[index]; + if (descriptor->data0 & RX_DESC_DATA0_OWN_) + goto done; + + if (descriptor->data0 & RX_DESC_DATA0_LS_) { + last_index = index; + break; + } + index = lan743x_rx_next_index(rx, index); + } + } + if (last_index >= 0) { + descriptor = &rx->ring_cpu_ptr[last_index]; + if (descriptor->data0 & RX_DESC_DATA0_EXT_) { + /* extension is expected to follow */ + int index = lan743x_rx_next_index(rx, + last_index); + if (index != current_head_index) { + descriptor = &rx->ring_cpu_ptr[index]; + if (descriptor->data0 & + RX_DESC_DATA0_OWN_) { + goto done; + } + if (descriptor->data0 & + RX_DESC_DATA0_EXT_) { + extension_index = index; + } else { + goto done; + } + } else { + /* extension is not yet available */ + /* prevent processing of this packet */ + first_index = -1; + last_index = -1; + } + } + } + } + if (first_index >= 0 && last_index >= 0) { + int real_last_index = last_index; + struct sk_buff *skb = NULL; + u32 ts_sec = 0; + u32 ts_nsec = 0; + + /* packet is available */ + if (first_index == last_index) { + /* single buffer packet */ + int packet_length; + + buffer_info = &rx->buffer_info[first_index]; + skb = buffer_info->skb; + descriptor = &rx->ring_cpu_ptr[first_index]; + + /* unmap from dma */ + if (buffer_info->dma_ptr) { + dma_unmap_single(&rx->adapter->pdev->dev, + buffer_info->dma_ptr, + buffer_info->buffer_length, + DMA_FROM_DEVICE); + buffer_info->dma_ptr = 0; + buffer_info->buffer_length = 0; + } + buffer_info->skb = NULL; + packet_length = RX_DESC_DATA0_FRAME_LENGTH_GET_ + (descriptor->data0); + skb_put(skb, packet_length - 4); + skb->protocol = eth_type_trans(skb, + rx->adapter->netdev); + lan743x_rx_allocate_ring_element(rx, first_index); + } else { + int index = first_index; + + /* multi buffer packet not supported */ + /* this should not happen since + * buffers are allocated to be at least jumbo size + */ + + /* clean up buffers */ + if (first_index <= last_index) { + while ((index >= first_index) && + (index <= last_index)) { + lan743x_rx_release_ring_element(rx, + index); + lan743x_rx_allocate_ring_element(rx, + index); + index = lan743x_rx_next_index(rx, + index); + } + } else { + while ((index >= first_index) || + (index <= last_index)) { + lan743x_rx_release_ring_element(rx, + index); + lan743x_rx_allocate_ring_element(rx, + index); + index = lan743x_rx_next_index(rx, + index); + } + } + } + + if (extension_index >= 0) { + descriptor = &rx->ring_cpu_ptr[extension_index]; + buffer_info = &rx->buffer_info[extension_index]; + + ts_sec = descriptor->data1; + ts_nsec = (descriptor->data2 & + RX_DESC_DATA2_TS_NS_MASK_); + lan743x_rx_reuse_ring_element(rx, extension_index); + real_last_index = extension_index; + } + + if (!skb) { + result = RX_PROCESS_RESULT_PACKET_DROPPED; + goto move_forward; + } + + if (extension_index < 0) + goto pass_packet_to_os; + hwtstamps = skb_hwtstamps(skb); + if 
(hwtstamps) + hwtstamps->hwtstamp = ktime_set(ts_sec, ts_nsec); + +pass_packet_to_os: + /* pass packet to OS */ + napi_gro_receive(&rx->napi, skb); + result = RX_PROCESS_RESULT_PACKET_RECEIVED; + +move_forward: + /* push tail and head forward */ + rx->last_tail = real_last_index; + rx->last_head = lan743x_rx_next_index(rx, real_last_index); + } +done: + return result; +} + +static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight) +{ + struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi); + struct lan743x_adapter *adapter = rx->adapter; + u32 rx_tail_flags = 0; + int count; + + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) { + /* clear int status bit before reading packet */ + lan743x_csr_write(adapter, DMAC_INT_STS, + DMAC_INT_BIT_RXFRM_(rx->channel_number)); + } + count = 0; + while (count < weight) { + int rx_process_result = -1; + + rx_process_result = lan743x_rx_process_packet(rx); + if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) { + count++; + } else if (rx_process_result == + RX_PROCESS_RESULT_NOTHING_TO_DO) { + break; + } else if (rx_process_result == + RX_PROCESS_RESULT_PACKET_DROPPED) { + continue; + } + } + rx->frame_count += count; + if (count == weight) + goto done; + + if (!napi_complete_done(napi, count)) + goto done; + + if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET) + rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) { + rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_; + } else { + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_RX_(rx->channel_number)); + } + + /* update RX_TAIL */ + lan743x_csr_write(adapter, RX_TAIL(rx->channel_number), + rx_tail_flags | rx->last_tail); +done: + return count; +} + +static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx) +{ + if (rx->buffer_info && rx->ring_cpu_ptr) { + int index; + + for (index = 0; index < rx->ring_size; index++) + lan743x_rx_release_ring_element(rx, index); + } + + if (rx->head_cpu_ptr) { + pci_free_consistent(rx->adapter->pdev, + sizeof(*rx->head_cpu_ptr), + rx->head_cpu_ptr, + rx->head_dma_ptr); + rx->head_cpu_ptr = NULL; + rx->head_dma_ptr = 0; + } + + kfree(rx->buffer_info); + rx->buffer_info = NULL; + + if (rx->ring_cpu_ptr) { + pci_free_consistent(rx->adapter->pdev, + rx->ring_allocation_size, + rx->ring_cpu_ptr, + rx->ring_dma_ptr); + rx->ring_allocation_size = 0; + rx->ring_cpu_ptr = NULL; + rx->ring_dma_ptr = 0; + } + + rx->ring_size = 0; + rx->last_head = 0; +} + +static int lan743x_rx_ring_init(struct lan743x_rx *rx) +{ + size_t ring_allocation_size = 0; + dma_addr_t dma_ptr = 0; + void *cpu_ptr = NULL; + int ret = -ENOMEM; + int index = 0; + + rx->ring_size = LAN743X_RX_RING_SIZE; + if (rx->ring_size <= 1) { + ret = -EINVAL; + goto cleanup; + } + if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) { + ret = -EINVAL; + goto cleanup; + } + ring_allocation_size = ALIGN(rx->ring_size * + sizeof(struct lan743x_rx_descriptor), + PAGE_SIZE); + dma_ptr = 0; + cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev, + ring_allocation_size, &dma_ptr); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + rx->ring_allocation_size = ring_allocation_size; + rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr; + rx->ring_dma_ptr = dma_ptr; + + cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info), + GFP_KERNEL); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr; + dma_ptr = 0; + cpu_ptr = 
pci_zalloc_consistent(rx->adapter->pdev, + sizeof(*rx->head_cpu_ptr), &dma_ptr); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + + rx->head_cpu_ptr = cpu_ptr; + rx->head_dma_ptr = dma_ptr; + if (rx->head_dma_ptr & 0x3) { + ret = -ENOMEM; + goto cleanup; + } + + rx->last_head = 0; + for (index = 0; index < rx->ring_size; index++) { + ret = lan743x_rx_allocate_ring_element(rx, index); + if (ret) + goto cleanup; + } + return 0; + +cleanup: + lan743x_rx_ring_cleanup(rx); + return ret; +} + +static void lan743x_rx_close(struct lan743x_rx *rx) +{ + struct lan743x_adapter *adapter = rx->adapter; + + lan743x_csr_write(adapter, FCT_RX_CTL, + FCT_RX_CTL_DIS_(rx->channel_number)); + lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL, + FCT_RX_CTL_EN_(rx->channel_number), + 0, 1000, 20000, 100); + + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_STOP_R_(rx->channel_number)); + lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number); + + lan743x_csr_write(adapter, DMAC_INT_EN_CLR, + DMAC_INT_BIT_RXFRM_(rx->channel_number)); + lan743x_csr_write(adapter, INT_EN_CLR, + INT_BIT_DMA_RX_(rx->channel_number)); + napi_disable(&rx->napi); + + netif_napi_del(&rx->napi); + + lan743x_rx_ring_cleanup(rx); +} + +static int lan743x_rx_open(struct lan743x_rx *rx) +{ + struct lan743x_adapter *adapter = rx->adapter; + u32 data = 0; + int ret; + + rx->frame_count = 0; + ret = lan743x_rx_ring_init(rx); + if (ret) + goto return_error; + + netif_napi_add(adapter->netdev, + &rx->napi, lan743x_rx_napi_poll, + rx->ring_size - 1); + + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_RX_SWR_(rx->channel_number)); + lan743x_csr_wait_for_bit(adapter, DMAC_CMD, + DMAC_CMD_RX_SWR_(rx->channel_number), + 0, 1000, 20000, 100); + + /* set ring base address */ + lan743x_csr_write(adapter, + RX_BASE_ADDRH(rx->channel_number), + DMA_ADDR_HIGH32(rx->ring_dma_ptr)); + lan743x_csr_write(adapter, + RX_BASE_ADDRL(rx->channel_number), + DMA_ADDR_LOW32(rx->ring_dma_ptr)); + + /* set rx write back address */ + lan743x_csr_write(adapter, + RX_HEAD_WRITEBACK_ADDRH(rx->channel_number), + DMA_ADDR_HIGH32(rx->head_dma_ptr)); + lan743x_csr_write(adapter, + RX_HEAD_WRITEBACK_ADDRL(rx->channel_number), + DMA_ADDR_LOW32(rx->head_dma_ptr)); + data = RX_CFG_A_RX_HP_WB_EN_; + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { + data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ | + RX_CFG_A_RX_WB_THRES_SET_(0x7) | + RX_CFG_A_RX_PF_THRES_SET_(16) | + RX_CFG_A_RX_PF_PRI_THRES_SET_(4)); + } + + /* set RX_CFG_A */ + lan743x_csr_write(adapter, + RX_CFG_A(rx->channel_number), data); + + /* set RX_CFG_B */ + data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number)); + data &= ~RX_CFG_B_RX_PAD_MASK_; + if (!RX_HEAD_PADDING) + data |= RX_CFG_B_RX_PAD_0_; + else + data |= RX_CFG_B_RX_PAD_2_; + data &= ~RX_CFG_B_RX_RING_LEN_MASK_; + data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_); + data |= RX_CFG_B_TS_ALL_RX_; + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) + data |= RX_CFG_B_RDMABL_512_; + + lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data); + rx->vector_flags = lan743x_intr_get_vector_flags(adapter, + INT_BIT_DMA_RX_ + (rx->channel_number)); + + /* set RX_CFG_C */ + data = 0; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR) + data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR) + data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C) + data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_; + if (rx->vector_flags & 
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C) + data |= RX_CFG_C_RX_INT_EN_R2C_; + lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data); + + rx->last_tail = ((u32)(rx->ring_size - 1)); + lan743x_csr_write(adapter, RX_TAIL(rx->channel_number), + rx->last_tail); + rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number)); + if (rx->last_head) { + ret = -EIO; + goto napi_delete; + } + + napi_enable(&rx->napi); + + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_RX_(rx->channel_number)); + lan743x_csr_write(adapter, DMAC_INT_STS, + DMAC_INT_BIT_RXFRM_(rx->channel_number)); + lan743x_csr_write(adapter, DMAC_INT_EN_SET, + DMAC_INT_BIT_RXFRM_(rx->channel_number)); + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_START_R_(rx->channel_number)); + + /* initialize fifo */ + lan743x_csr_write(adapter, FCT_RX_CTL, + FCT_RX_CTL_RESET_(rx->channel_number)); + lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL, + FCT_RX_CTL_RESET_(rx->channel_number), + 0, 1000, 20000, 100); + lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number), + FCT_FLOW_CTL_REQ_EN_ | + FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) | + FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA)); + + /* enable fifo */ + lan743x_csr_write(adapter, FCT_RX_CTL, + FCT_RX_CTL_EN_(rx->channel_number)); + return 0; + +napi_delete: + netif_napi_del(&rx->napi); + lan743x_rx_ring_cleanup(rx); + +return_error: + return ret; +} + +static int lan743x_netdev_close(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int index; + + lan743x_tx_close(&adapter->tx[0]); + + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) + lan743x_rx_close(&adapter->rx[index]); + + lan743x_phy_close(adapter); + + lan743x_mac_close(adapter); + + lan743x_intr_close(adapter); + + return 0; +} + +static int lan743x_netdev_open(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int index; + int ret; + + ret = lan743x_intr_open(adapter); + if (ret) + goto return_error; + + ret = lan743x_mac_open(adapter); + if (ret) + goto close_intr; + + ret = lan743x_phy_open(adapter); + if (ret) + goto close_mac; + + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { + ret = lan743x_rx_open(&adapter->rx[index]); + if (ret) + goto close_rx; + } + + ret = lan743x_tx_open(&adapter->tx[0]); + if (ret) + goto close_rx; + + return 0; + +close_rx: + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { + if (adapter->rx[index].ring_cpu_ptr) + lan743x_rx_close(&adapter->rx[index]); + } + lan743x_phy_close(adapter); + +close_mac: + lan743x_mac_close(adapter); + +close_intr: + lan743x_intr_close(adapter); + +return_error: + netif_warn(adapter, ifup, adapter->netdev, + "Error opening LAN743x\n"); + return ret; +} + +static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + return lan743x_tx_xmit_frame(&adapter->tx[0], skb); +} + +static int lan743x_netdev_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + if (!netif_running(netdev)) + return -EINVAL; + return phy_mii_ioctl(netdev->phydev, ifr, cmd); +} + +static void lan743x_netdev_set_multicast(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + lan743x_rfe_set_multicast(adapter); +} + +static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + ret = lan743x_mac_set_mtu(adapter, new_mtu); + if (!ret) + netdev->mtu 
= new_mtu; + return ret; +} + +static void lan743x_netdev_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES); + stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES); + stats->rx_bytes = lan743x_csr_read(adapter, + STAT_RX_UNICAST_BYTE_COUNT) + + lan743x_csr_read(adapter, + STAT_RX_BROADCAST_BYTE_COUNT) + + lan743x_csr_read(adapter, + STAT_RX_MULTICAST_BYTE_COUNT); + stats->tx_bytes = lan743x_csr_read(adapter, + STAT_TX_UNICAST_BYTE_COUNT) + + lan743x_csr_read(adapter, + STAT_TX_BROADCAST_BYTE_COUNT) + + lan743x_csr_read(adapter, + STAT_TX_MULTICAST_BYTE_COUNT); + stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) + + lan743x_csr_read(adapter, + STAT_RX_ALIGNMENT_ERRORS) + + lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) + + lan743x_csr_read(adapter, + STAT_RX_UNDERSIZE_FRAME_ERRORS) + + lan743x_csr_read(adapter, + STAT_RX_OVERSIZE_FRAME_ERRORS); + stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) + + lan743x_csr_read(adapter, + STAT_TX_EXCESS_DEFERRAL_ERRORS) + + lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS); + stats->rx_dropped = lan743x_csr_read(adapter, + STAT_RX_DROPPED_FRAMES); + stats->tx_dropped = lan743x_csr_read(adapter, + STAT_TX_EXCESSIVE_COLLISION); + stats->multicast = lan743x_csr_read(adapter, + STAT_RX_MULTICAST_FRAMES) + + lan743x_csr_read(adapter, + STAT_TX_MULTICAST_FRAMES); + stats->collisions = lan743x_csr_read(adapter, + STAT_TX_SINGLE_COLLISIONS) + + lan743x_csr_read(adapter, + STAT_TX_MULTIPLE_COLLISIONS) + + lan743x_csr_read(adapter, + STAT_TX_LATE_COLLISIONS); +} + +static int lan743x_netdev_set_mac_address(struct net_device *netdev, + void *addr) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + struct sockaddr *sock_addr = addr; + int ret; + + ret = eth_prepare_mac_addr_change(netdev, sock_addr); + if (ret) + return ret; + ether_addr_copy(netdev->dev_addr, sock_addr->sa_data); + lan743x_mac_set_address(adapter, sock_addr->sa_data); + lan743x_rfe_update_mac_address(adapter); + return 0; +} + +static const struct net_device_ops lan743x_netdev_ops = { + .ndo_open = lan743x_netdev_open, + .ndo_stop = lan743x_netdev_close, + .ndo_start_xmit = lan743x_netdev_xmit_frame, + .ndo_do_ioctl = lan743x_netdev_ioctl, + .ndo_set_rx_mode = lan743x_netdev_set_multicast, + .ndo_change_mtu = lan743x_netdev_change_mtu, + .ndo_get_stats64 = lan743x_netdev_get_stats64, + .ndo_set_mac_address = lan743x_netdev_set_mac_address, +}; + +static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter) +{ + lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF); +} + +static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter) +{ + mdiobus_unregister(adapter->mdiobus); +} + +static void lan743x_full_cleanup(struct lan743x_adapter *adapter) +{ + unregister_netdev(adapter->netdev); + + lan743x_mdiobus_cleanup(adapter); + lan743x_hardware_cleanup(adapter); + lan743x_pci_cleanup(adapter); +} + +static int lan743x_hardware_init(struct lan743x_adapter *adapter, + struct pci_dev *pdev) +{ + struct lan743x_tx *tx; + int index; + int ret; + + adapter->intr.irq = adapter->pdev->irq; + lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF); + mutex_init(&adapter->dp_lock); + ret = lan743x_mac_init(adapter); + if (ret) + return ret; + + ret = lan743x_phy_init(adapter); + if (ret) + return ret; + + lan743x_rfe_update_mac_address(adapter); + + ret = lan743x_dmac_init(adapter); + if 
(ret) + return ret; + + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { + adapter->rx[index].adapter = adapter; + adapter->rx[index].channel_number = index; + } + + tx = &adapter->tx[0]; + tx->adapter = adapter; + tx->channel_number = 0; + spin_lock_init(&tx->ring_lock); + return 0; +} + +static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) +{ + int ret; + + adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); + if (!(adapter->mdiobus)) { + ret = -ENOMEM; + goto return_error; + } + + adapter->mdiobus->priv = (void *)adapter; + adapter->mdiobus->read = lan743x_mdiobus_read; + adapter->mdiobus->write = lan743x_mdiobus_write; + adapter->mdiobus->name = "lan743x-mdiobus"; + snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, + "pci-%s", pci_name(adapter->pdev)); + + /* set to internal PHY id */ + adapter->mdiobus->phy_mask = ~(u32)BIT(1); + + /* register mdiobus */ + ret = mdiobus_register(adapter->mdiobus); + if (ret < 0) + goto return_error; + return 0; + +return_error: + return ret; +} + +/** + * lan743x_pcidev_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @id: entry in lan743x_pci_tbl + * + * Returns 0 on success, negative on failure + * + * initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int lan743x_pcidev_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct lan743x_adapter *adapter = NULL; + struct net_device *netdev = NULL; + int ret = -ENODEV; + + netdev = devm_alloc_etherdev(&pdev->dev, + sizeof(struct lan743x_adapter)); + if (!netdev) + goto return_error; + + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED; + netdev->max_mtu = LAN743X_MAX_FRAME_SIZE; + + ret = lan743x_pci_init(adapter, pdev); + if (ret) + goto return_error; + + ret = lan743x_csr_init(adapter); + if (ret) + goto cleanup_pci; + + ret = lan743x_hardware_init(adapter, pdev); + if (ret) + goto cleanup_pci; + + ret = lan743x_mdiobus_init(adapter); + if (ret) + goto cleanup_hardware; + + adapter->netdev->netdev_ops = &lan743x_netdev_ops; + adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM; + adapter->netdev->hw_features = adapter->netdev->features; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + ret = register_netdev(adapter->netdev); + if (ret < 0) + goto cleanup_mdiobus; + return 0; + +cleanup_mdiobus: + lan743x_mdiobus_cleanup(adapter); + +cleanup_hardware: + lan743x_hardware_cleanup(adapter); + +cleanup_pci: + lan743x_pci_cleanup(adapter); + +return_error: + pr_warn("Initialization failed\n"); + return ret; +} + +/** + * lan743x_pcidev_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * this is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory.
+ **/ +static void lan743x_pcidev_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + + lan743x_full_cleanup(adapter); +} + +static void lan743x_pcidev_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + + rtnl_lock(); + netif_device_detach(netdev); + + /* close netdev when netdev is in the running state. + * For instance, this is true when the system goes to sleep via pm-suspend; + * however, it is false when the system goes to sleep via the suspend GUI menu. + */ + if (netif_running(netdev)) + lan743x_netdev_close(netdev); + rtnl_unlock(); + + /* clean up lan743x portion */ + lan743x_hardware_cleanup(adapter); +} + +static const struct pci_device_id lan743x_pcidev_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, + { 0, } +}; + +static struct pci_driver lan743x_pcidev_driver = { + .name = DRIVER_NAME, + .id_table = lan743x_pcidev_tbl, + .probe = lan743x_pcidev_probe, + .remove = lan743x_pcidev_remove, + .shutdown = lan743x_pcidev_shutdown, +}; + +module_pci_driver(lan743x_pcidev_driver); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h new file mode 100644 index 000000000000..73b463a9df61 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -0,0 +1,597 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc. */ + +#ifndef _LAN743X_H +#define _LAN743X_H + +#define DRIVER_AUTHOR "Bryan Whitehead <Bryan.Whitehead@microchip.com>" +#define DRIVER_DESC "LAN743x PCIe Gigabit Ethernet Driver" +#define DRIVER_NAME "lan743x" + +/* Register Definitions */ +#define ID_REV (0x00) +#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \ + (((id_rev) & 0xFFF00000) == 0x74300000) +#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF) +#define ID_REV_CHIP_REV_A0_ (0x00000000) +#define ID_REV_CHIP_REV_B0_ (0x00000010) + +#define FPGA_REV (0x04) +#define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF) +#define FPGA_REV_GET_MAJOR_(fpga_rev) ((fpga_rev) & 0x000000FF) + +#define HW_CFG (0x010) +#define HW_CFG_LRST_ BIT(1) + +#define PMT_CTL (0x014) +#define PMT_CTL_READY_ BIT(7) +#define PMT_CTL_ETH_PHY_RST_ BIT(4) + +#define DP_SEL (0x024) +#define DP_SEL_DPRDY_ BIT(31) +#define DP_SEL_MASK_ (0x0000001F) +#define DP_SEL_RFE_RAM (0x00000001) + +#define DP_SEL_VHF_HASH_LEN (16) +#define DP_SEL_VHF_VLAN_LEN (128) + +#define DP_CMD (0x028) +#define DP_CMD_WRITE_ (0x00000001) + +#define DP_ADDR (0x02C) + +#define DP_DATA_0 (0x030) + +#define FCT_RX_CTL (0xAC) +#define FCT_RX_CTL_EN_(channel) BIT(28 + (channel)) +#define FCT_RX_CTL_DIS_(channel) BIT(24 + (channel)) +#define FCT_RX_CTL_RESET_(channel) BIT(20 + (channel)) + +#define FCT_TX_CTL (0xC4) +#define FCT_TX_CTL_EN_(channel) BIT(28 + (channel)) +#define FCT_TX_CTL_DIS_(channel) BIT(24 + (channel)) +#define FCT_TX_CTL_RESET_(channel) BIT(20 + (channel)) + +#define FCT_FLOW(rx_channel) (0xE0 + ((rx_channel) << 2)) +#define FCT_FLOW_CTL_OFF_THRESHOLD_ (0x00007F00) +#define FCT_FLOW_CTL_OFF_THRESHOLD_SET_(value) \ + ((value << 8) & FCT_FLOW_CTL_OFF_THRESHOLD_) +#define FCT_FLOW_CTL_REQ_EN_ BIT(7) +#define FCT_FLOW_CTL_ON_THRESHOLD_ (0x0000007F) +#define FCT_FLOW_CTL_ON_THRESHOLD_SET_(value) \ + ((value << 0) & FCT_FLOW_CTL_ON_THRESHOLD_) + +#define MAC_CR (0x100) +#define MAC_CR_ADD_
BIT(12) +#define MAC_CR_ASD_ BIT(11) +#define MAC_CR_CNTR_RST_ BIT(5) +#define MAC_CR_RST_ BIT(0) + +#define MAC_RX (0x104) +#define MAC_RX_MAX_SIZE_SHIFT_ (16) +#define MAC_RX_MAX_SIZE_MASK_ (0x3FFF0000) +#define MAC_RX_RXD_ BIT(1) +#define MAC_RX_RXEN_ BIT(0) + +#define MAC_TX (0x108) +#define MAC_TX_TXD_ BIT(1) +#define MAC_TX_TXEN_ BIT(0) + +#define MAC_FLOW (0x10C) +#define MAC_FLOW_CR_TX_FCEN_ BIT(30) +#define MAC_FLOW_CR_RX_FCEN_ BIT(29) +#define MAC_FLOW_CR_FCPT_MASK_ (0x0000FFFF) + +#define MAC_RX_ADDRH (0x118) + +#define MAC_RX_ADDRL (0x11C) + +#define MAC_MII_ACC (0x120) +#define MAC_MII_ACC_PHY_ADDR_SHIFT_ (11) +#define MAC_MII_ACC_PHY_ADDR_MASK_ (0x0000F800) +#define MAC_MII_ACC_MIIRINDA_SHIFT_ (6) +#define MAC_MII_ACC_MIIRINDA_MASK_ (0x000007C0) +#define MAC_MII_ACC_MII_READ_ (0x00000000) +#define MAC_MII_ACC_MII_WRITE_ (0x00000002) +#define MAC_MII_ACC_MII_BUSY_ BIT(0) + +#define MAC_MII_DATA (0x124) + +/* offset 0x400 - 0x500, x may range from 0 to 32, for a total of 33 entries */ +#define RFE_ADDR_FILT_HI(x) (0x400 + (8 * (x))) +#define RFE_ADDR_FILT_HI_VALID_ BIT(31) + +/* offset 0x404 - 0x504, x may range from 0 to 32, for a total of 33 entries */ +#define RFE_ADDR_FILT_LO(x) (0x404 + (8 * (x))) + +#define RFE_CTL (0x508) +#define RFE_CTL_AB_ BIT(10) +#define RFE_CTL_AM_ BIT(9) +#define RFE_CTL_AU_ BIT(8) +#define RFE_CTL_MCAST_HASH_ BIT(3) +#define RFE_CTL_DA_PERFECT_ BIT(1) + +#define INT_STS (0x780) +#define INT_BIT_DMA_RX_(channel) BIT(24 + (channel)) +#define INT_BIT_ALL_RX_ (0x0F000000) +#define INT_BIT_DMA_TX_(channel) BIT(16 + (channel)) +#define INT_BIT_ALL_TX_ (0x000F0000) +#define INT_BIT_SW_GP_ BIT(9) +#define INT_BIT_ALL_OTHER_ (0x00000280) +#define INT_BIT_MAS_ BIT(0) + +#define INT_SET (0x784) + +#define INT_EN_SET (0x788) + +#define INT_EN_CLR (0x78C) + +#define INT_STS_R2C (0x790) + +#define INT_VEC_EN_SET (0x794) +#define INT_VEC_EN_CLR (0x798) +#define INT_VEC_EN_AUTO_CLR (0x79C) +#define INT_VEC_EN_(vector_index) BIT(0 + vector_index) + +#define INT_VEC_MAP0 (0x7A0) +#define INT_VEC_MAP0_RX_VEC_(channel, vector) \ + (((u32)(vector)) << ((channel) << 2)) + +#define INT_VEC_MAP1 (0x7A4) +#define INT_VEC_MAP1_TX_VEC_(channel, vector) \ + (((u32)(vector)) << ((channel) << 2)) + +#define INT_VEC_MAP2 (0x7A8) + +#define INT_MOD_MAP0 (0x7B0) + +#define INT_MOD_MAP1 (0x7B4) + +#define INT_MOD_MAP2 (0x7B8) + +#define INT_MOD_CFG0 (0x7C0) +#define INT_MOD_CFG1 (0x7C4) +#define INT_MOD_CFG2 (0x7C8) +#define INT_MOD_CFG3 (0x7CC) +#define INT_MOD_CFG4 (0x7D0) +#define INT_MOD_CFG5 (0x7D4) +#define INT_MOD_CFG6 (0x7D8) +#define INT_MOD_CFG7 (0x7DC) + +#define DMAC_CFG (0xC00) +#define DMAC_CFG_COAL_EN_ BIT(16) +#define DMAC_CFG_CH_ARB_SEL_RX_HIGH_ (0x00000000) +#define DMAC_CFG_MAX_READ_REQ_MASK_ (0x00000070) +#define DMAC_CFG_MAX_READ_REQ_SET_(val) \ + ((((u32)(val)) << 4) & DMAC_CFG_MAX_READ_REQ_MASK_) +#define DMAC_CFG_MAX_DSPACE_16_ (0x00000000) +#define DMAC_CFG_MAX_DSPACE_32_ (0x00000001) +#define DMAC_CFG_MAX_DSPACE_64_ BIT(1) +#define DMAC_CFG_MAX_DSPACE_128_ (0x00000003) + +#define DMAC_COAL_CFG (0xC04) +#define DMAC_COAL_CFG_TIMER_LIMIT_MASK_ (0xFFF00000) +#define DMAC_COAL_CFG_TIMER_LIMIT_SET_(val) \ + ((((u32)(val)) << 20) & DMAC_COAL_CFG_TIMER_LIMIT_MASK_) +#define DMAC_COAL_CFG_TIMER_TX_START_ BIT(19) +#define DMAC_COAL_CFG_FLUSH_INTS_ BIT(18) +#define DMAC_COAL_CFG_INT_EXIT_COAL_ BIT(17) +#define DMAC_COAL_CFG_CSR_EXIT_COAL_ BIT(16) +#define DMAC_COAL_CFG_TX_THRES_MASK_ (0x0000FF00) +#define DMAC_COAL_CFG_TX_THRES_SET_(val) \ + ((((u32)(val)) << 8) & 
DMAC_COAL_CFG_TX_THRES_MASK_) +#define DMAC_COAL_CFG_RX_THRES_MASK_ (0x000000FF) +#define DMAC_COAL_CFG_RX_THRES_SET_(val) \ + (((u32)(val)) & DMAC_COAL_CFG_RX_THRES_MASK_) + +#define DMAC_OBFF_CFG (0xC08) +#define DMAC_OBFF_TX_THRES_MASK_ (0x0000FF00) +#define DMAC_OBFF_TX_THRES_SET_(val) \ + ((((u32)(val)) << 8) & DMAC_OBFF_TX_THRES_MASK_) +#define DMAC_OBFF_RX_THRES_MASK_ (0x000000FF) +#define DMAC_OBFF_RX_THRES_SET_(val) \ + (((u32)(val)) & DMAC_OBFF_RX_THRES_MASK_) + +#define DMAC_CMD (0xC0C) +#define DMAC_CMD_SWR_ BIT(31) +#define DMAC_CMD_TX_SWR_(channel) BIT(24 + (channel)) +#define DMAC_CMD_START_T_(channel) BIT(20 + (channel)) +#define DMAC_CMD_STOP_T_(channel) BIT(16 + (channel)) +#define DMAC_CMD_RX_SWR_(channel) BIT(8 + (channel)) +#define DMAC_CMD_START_R_(channel) BIT(4 + (channel)) +#define DMAC_CMD_STOP_R_(channel) BIT(0 + (channel)) + +#define DMAC_INT_STS (0xC10) +#define DMAC_INT_EN_SET (0xC14) +#define DMAC_INT_EN_CLR (0xC18) +#define DMAC_INT_BIT_RXFRM_(channel) BIT(16 + (channel)) +#define DMAC_INT_BIT_TX_IOC_(channel) BIT(0 + (channel)) + +#define RX_CFG_A(channel) (0xC40 + ((channel) << 6)) +#define RX_CFG_A_RX_WB_ON_INT_TMR_ BIT(30) +#define RX_CFG_A_RX_WB_THRES_MASK_ (0x1F000000) +#define RX_CFG_A_RX_WB_THRES_SET_(val) \ + ((((u32)(val)) << 24) & RX_CFG_A_RX_WB_THRES_MASK_) +#define RX_CFG_A_RX_PF_THRES_MASK_ (0x001F0000) +#define RX_CFG_A_RX_PF_THRES_SET_(val) \ + ((((u32)(val)) << 16) & RX_CFG_A_RX_PF_THRES_MASK_) +#define RX_CFG_A_RX_PF_PRI_THRES_MASK_ (0x00001F00) +#define RX_CFG_A_RX_PF_PRI_THRES_SET_(val) \ + ((((u32)(val)) << 8) & RX_CFG_A_RX_PF_PRI_THRES_MASK_) +#define RX_CFG_A_RX_HP_WB_EN_ BIT(5) + +#define RX_CFG_B(channel) (0xC44 + ((channel) << 6)) +#define RX_CFG_B_TS_ALL_RX_ BIT(29) +#define RX_CFG_B_RX_PAD_MASK_ (0x03000000) +#define RX_CFG_B_RX_PAD_0_ (0x00000000) +#define RX_CFG_B_RX_PAD_2_ (0x02000000) +#define RX_CFG_B_RDMABL_512_ (0x00040000) +#define RX_CFG_B_RX_RING_LEN_MASK_ (0x0000FFFF) + +#define RX_BASE_ADDRH(channel) (0xC48 + ((channel) << 6)) + +#define RX_BASE_ADDRL(channel) (0xC4C + ((channel) << 6)) + +#define RX_HEAD_WRITEBACK_ADDRH(channel) (0xC50 + ((channel) << 6)) + +#define RX_HEAD_WRITEBACK_ADDRL(channel) (0xC54 + ((channel) << 6)) + +#define RX_HEAD(channel) (0xC58 + ((channel) << 6)) + +#define RX_TAIL(channel) (0xC5C + ((channel) << 6)) +#define RX_TAIL_SET_TOP_INT_EN_ BIT(30) +#define RX_TAIL_SET_TOP_INT_VEC_EN_ BIT(29) + +#define RX_CFG_C(channel) (0xC64 + ((channel) << 6)) +#define RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_ BIT(6) +#define RX_CFG_C_RX_INT_EN_R2C_ BIT(4) +#define RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_ BIT(3) +#define RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_ (0x00000007) + +#define TX_CFG_A(channel) (0xD40 + ((channel) << 6)) +#define TX_CFG_A_TX_HP_WB_ON_INT_TMR_ BIT(30) +#define TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ (0x10000000) +#define TX_CFG_A_TX_PF_THRES_MASK_ (0x001F0000) +#define TX_CFG_A_TX_PF_THRES_SET_(value) \ + ((((u32)(value)) << 16) & TX_CFG_A_TX_PF_THRES_MASK_) +#define TX_CFG_A_TX_PF_PRI_THRES_MASK_ (0x00001F00) +#define TX_CFG_A_TX_PF_PRI_THRES_SET_(value) \ + ((((u32)(value)) << 8) & TX_CFG_A_TX_PF_PRI_THRES_MASK_) +#define TX_CFG_A_TX_HP_WB_EN_ BIT(5) +#define TX_CFG_A_TX_HP_WB_THRES_MASK_ (0x0000000F) +#define TX_CFG_A_TX_HP_WB_THRES_SET_(value) \ + (((u32)(value)) & TX_CFG_A_TX_HP_WB_THRES_MASK_) + +#define TX_CFG_B(channel) (0xD44 + ((channel) << 6)) +#define TX_CFG_B_TDMABL_512_ (0x00040000) +#define TX_CFG_B_TX_RING_LEN_MASK_ (0x0000FFFF) + +#define TX_BASE_ADDRH(channel) (0xD48 + ((channel) << 6)) + 
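[Aside: the *_MASK_/*_SET_() pairs used throughout this header follow one shift-then-mask pattern: shift the field value into position, then mask off anything that would spill into neighboring fields. The standalone sketch below is illustrative only and not part of the patch; it re-states three of the DMAC_COAL_CFG field macros verbatim (with uint32_t standing in for the kernel's u32) and composes a subset of the fields that lan743x_dmac_init() programs.]

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32; /* stand-in for the kernel type */

/* field macros copied from this header */
#define DMAC_COAL_CFG_TIMER_LIMIT_MASK_ (0xFFF00000)
#define DMAC_COAL_CFG_TIMER_LIMIT_SET_(val) \
	((((u32)(val)) << 20) & DMAC_COAL_CFG_TIMER_LIMIT_MASK_)
#define DMAC_COAL_CFG_TX_THRES_MASK_ (0x0000FF00)
#define DMAC_COAL_CFG_TX_THRES_SET_(val) \
	((((u32)(val)) << 8) & DMAC_COAL_CFG_TX_THRES_MASK_)
#define DMAC_COAL_CFG_RX_THRES_MASK_ (0x000000FF)
#define DMAC_COAL_CFG_RX_THRES_SET_(val) \
	(((u32)(val)) & DMAC_COAL_CFG_RX_THRES_MASK_)

int main(void)
{
	/* same field values lan743x_dmac_init() uses for these fields */
	u32 data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1) |
		   DMAC_COAL_CFG_TX_THRES_SET_(0x0A) |
		   DMAC_COAL_CFG_RX_THRES_SET_(0x0C);

	/* prints DMAC_COAL_CFG = 0x00100a0c */
	printf("DMAC_COAL_CFG = 0x%08x\n", (unsigned int)data);
	return 0;
}

[Because each SET_() helper masks its own result, an out-of-range value silently truncates rather than corrupting an adjacent field; the register write in lan743x_dmac_init() is simply the OR of the independently masked fields.]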
+#define TX_BASE_ADDRL(channel) (0xD4C + ((channel) << 6)) + +#define TX_HEAD_WRITEBACK_ADDRH(channel) (0xD50 + ((channel) << 6)) + +#define TX_HEAD_WRITEBACK_ADDRL(channel) (0xD54 + ((channel) << 6)) + +#define TX_HEAD(channel) (0xD58 + ((channel) << 6)) + +#define TX_TAIL(channel) (0xD5C + ((channel) << 6)) +#define TX_TAIL_SET_DMAC_INT_EN_ BIT(31) +#define TX_TAIL_SET_TOP_INT_EN_ BIT(30) +#define TX_TAIL_SET_TOP_INT_VEC_EN_ BIT(29) + +#define TX_CFG_C(channel) (0xD64 + ((channel) << 6)) +#define TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_ BIT(6) +#define TX_CFG_C_TX_DMA_INT_EN_AUTO_CLR_ BIT(5) +#define TX_CFG_C_TX_INT_EN_R2C_ BIT(4) +#define TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_ BIT(3) +#define TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_ (0x00000007) + +/* MAC statistics registers */ +#define STAT_RX_FCS_ERRORS (0x1200) +#define STAT_RX_ALIGNMENT_ERRORS (0x1204) +#define STAT_RX_JABBER_ERRORS (0x120C) +#define STAT_RX_UNDERSIZE_FRAME_ERRORS (0x1210) +#define STAT_RX_OVERSIZE_FRAME_ERRORS (0x1214) +#define STAT_RX_DROPPED_FRAMES (0x1218) +#define STAT_RX_UNICAST_BYTE_COUNT (0x121C) +#define STAT_RX_BROADCAST_BYTE_COUNT (0x1220) +#define STAT_RX_MULTICAST_BYTE_COUNT (0x1224) +#define STAT_RX_MULTICAST_FRAMES (0x1230) +#define STAT_RX_TOTAL_FRAMES (0x1254) + +#define STAT_TX_FCS_ERRORS (0x1280) +#define STAT_TX_EXCESS_DEFERRAL_ERRORS (0x1284) +#define STAT_TX_CARRIER_ERRORS (0x1288) +#define STAT_TX_SINGLE_COLLISIONS (0x1290) +#define STAT_TX_MULTIPLE_COLLISIONS (0x1294) +#define STAT_TX_EXCESSIVE_COLLISION (0x1298) +#define STAT_TX_LATE_COLLISIONS (0x129C) +#define STAT_TX_UNICAST_BYTE_COUNT (0x12A0) +#define STAT_TX_BROADCAST_BYTE_COUNT (0x12A4) +#define STAT_TX_MULTICAST_BYTE_COUNT (0x12A8) +#define STAT_TX_MULTICAST_FRAMES (0x12B4) +#define STAT_TX_TOTAL_FRAMES (0x12D8) + +/* End of Register definitions */ + +#define LAN743X_MAX_RX_CHANNELS (4) +#define LAN743X_MAX_TX_CHANNELS (1) +struct lan743x_adapter; + +#define LAN743X_USED_RX_CHANNELS (4) +#define LAN743X_USED_TX_CHANNELS (1) +#define LAN743X_INT_MOD (400) + +#if (LAN743X_USED_RX_CHANNELS > LAN743X_MAX_RX_CHANNELS) +#error Invalid LAN743X_USED_RX_CHANNELS +#endif +#if (LAN743X_USED_TX_CHANNELS > LAN743X_MAX_TX_CHANNELS) +#error Invalid LAN743X_USED_TX_CHANNELS +#endif + +/* PCI */ +/* SMSC acquired EFAR late 1990's, MCHP acquired SMSC 2012 */ +#define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR +#define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430) + +#define PCI_CONFIG_LENGTH (0x1000) + +/* CSR */ +#define CSR_LENGTH (0x2000) + +#define LAN743X_CSR_FLAG_IS_A0 BIT(0) +#define LAN743X_CSR_FLAG_IS_B0 BIT(1) +#define LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR BIT(8) + +struct lan743x_csr { + u32 flags; + u8 __iomem *csr_address; + u32 id_rev; + u32 fpga_rev; +}; + +/* INTERRUPTS */ +typedef void(*lan743x_vector_handler)(void *context, u32 int_sts, u32 flags); + +#define LAN743X_VECTOR_FLAG_IRQ_SHARED BIT(0) +#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ BIT(1) +#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C BIT(2) +#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C BIT(3) +#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK BIT(4) +#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR BIT(5) +#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C BIT(6) +#define LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR BIT(7) +#define LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET BIT(8) +#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR BIT(9) +#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET BIT(10) +#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR BIT(11) +#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET 
BIT(12) +#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR BIT(13) +#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET BIT(14) +#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR BIT(15) + +struct lan743x_vector { + int irq; + u32 flags; + struct lan743x_adapter *adapter; + int vector_index; + u32 int_mask; + lan743x_vector_handler handler; + void *context; +}; + +#define LAN743X_MAX_VECTOR_COUNT (8) + +struct lan743x_intr { + int flags; + + unsigned int irq; + + struct lan743x_vector vector_list[LAN743X_MAX_VECTOR_COUNT]; + int number_of_vectors; + bool using_vectors; + + int software_isr_flag; +}; + +#define LAN743X_MAX_FRAME_SIZE (9 * 1024) + +/* PHY */ +struct lan743x_phy { + bool fc_autoneg; + u8 fc_request_control; +}; + +/* TX */ +struct lan743x_tx_descriptor; +struct lan743x_tx_buffer_info; + +#define GPIO_QUEUE_STARTED (0) +#define GPIO_TX_FUNCTION (1) +#define GPIO_TX_COMPLETION (2) +#define GPIO_TX_FRAGMENT (3) + +#define TX_FRAME_FLAG_IN_PROGRESS BIT(0) + +struct lan743x_tx { + struct lan743x_adapter *adapter; + u32 vector_flags; + int channel_number; + + int ring_size; + size_t ring_allocation_size; + struct lan743x_tx_descriptor *ring_cpu_ptr; + dma_addr_t ring_dma_ptr; + /* ring_lock: used to prevent concurrent access to tx ring */ + spinlock_t ring_lock; + u32 frame_flags; + u32 frame_first; + u32 frame_data0; + u32 frame_tail; + + struct lan743x_tx_buffer_info *buffer_info; + + u32 *head_cpu_ptr; + dma_addr_t head_dma_ptr; + int last_head; + int last_tail; + + struct napi_struct napi; + + struct sk_buff *overflow_skb; +}; + +/* RX */ +struct lan743x_rx_descriptor; +struct lan743x_rx_buffer_info; + +struct lan743x_rx { + struct lan743x_adapter *adapter; + u32 vector_flags; + int channel_number; + + int ring_size; + size_t ring_allocation_size; + struct lan743x_rx_descriptor *ring_cpu_ptr; + dma_addr_t ring_dma_ptr; + + struct lan743x_rx_buffer_info *buffer_info; + + u32 *head_cpu_ptr; + dma_addr_t head_dma_ptr; + u32 last_head; + u32 last_tail; + + struct napi_struct napi; + + u32 frame_count; +}; + +struct lan743x_adapter { + struct net_device *netdev; + struct mii_bus *mdiobus; + int msg_enable; + struct pci_dev *pdev; + struct lan743x_csr csr; + struct lan743x_intr intr; + + /* lock, used to prevent concurrent access to data port */ + struct mutex dp_lock; + + u8 mac_address[ETH_ALEN]; + + struct lan743x_phy phy; + struct lan743x_tx tx[LAN743X_MAX_TX_CHANNELS]; + struct lan743x_rx rx[LAN743X_MAX_RX_CHANNELS]; +}; + +#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel)) + +#define INTR_FLAG_IRQ_REQUESTED(vector_index) BIT(0 + vector_index) +#define INTR_FLAG_MSI_ENABLED BIT(8) +#define INTR_FLAG_MSIX_ENABLED BIT(9) + +#define MAC_MII_READ 1 +#define MAC_MII_WRITE 0 + +#define PHY_FLAG_OPENED BIT(0) +#define PHY_FLAG_ATTACHED BIT(1) + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#define DMA_ADDR_HIGH32(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF)) +#else +#define DMA_ADDR_HIGH32(dma_addr) ((u32)(0)) +#endif +#define DMA_ADDR_LOW32(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF)) +#define DMA_DESCRIPTOR_SPACING_16 (16) +#define DMA_DESCRIPTOR_SPACING_32 (32) +#define DMA_DESCRIPTOR_SPACING_64 (64) +#define DMA_DESCRIPTOR_SPACING_128 (128) +#define DEFAULT_DMA_DESCRIPTOR_SPACING (L1_CACHE_BYTES) + +#define DMAC_CHANNEL_STATE_SET(start_bit, stop_bit) \ + (((start_bit) ? 2 : 0) | ((stop_bit) ? 
1 : 0)) +#define DMAC_CHANNEL_STATE_INITIAL DMAC_CHANNEL_STATE_SET(0, 0) +#define DMAC_CHANNEL_STATE_STARTED DMAC_CHANNEL_STATE_SET(1, 0) +#define DMAC_CHANNEL_STATE_STOP_PENDING DMAC_CHANNEL_STATE_SET(1, 1) +#define DMAC_CHANNEL_STATE_STOPPED DMAC_CHANNEL_STATE_SET(0, 1) + +/* TX Descriptor bits */ +#define TX_DESC_DATA0_DTYPE_MASK_ (0xC0000000) +#define TX_DESC_DATA0_DTYPE_DATA_ (0x00000000) +#define TX_DESC_DATA0_DTYPE_EXT_ (0x40000000) +#define TX_DESC_DATA0_FS_ (0x20000000) +#define TX_DESC_DATA0_LS_ (0x10000000) +#define TX_DESC_DATA0_EXT_ (0x08000000) +#define TX_DESC_DATA0_IOC_ (0x04000000) +#define TX_DESC_DATA0_ICE_ (0x00400000) +#define TX_DESC_DATA0_IPE_ (0x00200000) +#define TX_DESC_DATA0_TPE_ (0x00100000) +#define TX_DESC_DATA0_FCS_ (0x00020000) +#define TX_DESC_DATA0_BUF_LENGTH_MASK_ (0x0000FFFF) +#define TX_DESC_DATA0_EXT_LSO_ (0x00200000) +#define TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_ (0x000FFFFF) +#define TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_ (0x3FFF0000) + +struct lan743x_tx_descriptor { + u32 data0; + u32 data1; + u32 data2; + u32 data3; +} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING); + +#define TX_BUFFER_INFO_FLAG_ACTIVE BIT(0) +#define TX_BUFFER_INFO_FLAG_IGNORE_SYNC BIT(2) +#define TX_BUFFER_INFO_FLAG_SKB_FRAGMENT BIT(3) +struct lan743x_tx_buffer_info { + int flags; + struct sk_buff *skb; + dma_addr_t dma_ptr; + unsigned int buffer_length; +}; + +#define LAN743X_TX_RING_SIZE (50) + +/* OWN bit is set. ie, Descs are owned by RX DMAC */ +#define RX_DESC_DATA0_OWN_ (0x00008000) +/* OWN bit is clear. ie, Descs are owned by host */ +#define RX_DESC_DATA0_FS_ (0x80000000) +#define RX_DESC_DATA0_LS_ (0x40000000) +#define RX_DESC_DATA0_FRAME_LENGTH_MASK_ (0x3FFF0000) +#define RX_DESC_DATA0_FRAME_LENGTH_GET_(data0) \ + (((data0) & RX_DESC_DATA0_FRAME_LENGTH_MASK_) >> 16) +#define RX_DESC_DATA0_EXT_ (0x00004000) +#define RX_DESC_DATA0_BUF_LENGTH_MASK_ (0x00003FFF) +#define RX_DESC_DATA2_TS_NS_MASK_ (0x3FFFFFFF) + +#if ((NET_IP_ALIGN != 0) && (NET_IP_ALIGN != 2)) +#error NET_IP_ALIGN must be 0 or 2 +#endif + +#define RX_HEAD_PADDING NET_IP_ALIGN + +struct lan743x_rx_descriptor { + u32 data0; + u32 data1; + u32 data2; + u32 data3; +} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING); + +#define RX_BUFFER_INFO_FLAG_ACTIVE BIT(0) +struct lan743x_rx_buffer_info { + int flags; + struct sk_buff *skb; + + dma_addr_t dma_ptr; + unsigned int buffer_length; +}; + +#define LAN743X_RX_RING_SIZE (65) + +#define RX_PROCESS_RESULT_NOTHING_TO_DO (0) +#define RX_PROCESS_RESULT_PACKET_RECEIVED (1) +#define RX_PROCESS_RESULT_PACKET_DROPPED (2) + +#endif /* _LAN743X_H */ diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 2521c8c40015..b2d2ec8c11e2 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -266,7 +266,7 @@ MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat"); /* Careful: must be accessed under kernel_param_lock() */ static char *myri10ge_fw_name = NULL; -module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); +module_param(myri10ge_fw_name, charp, 0644); MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name"); #define MYRI10GE_MAX_BOARDS 8 @@ -277,49 +277,49 @@ module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL, MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board"); static int myri10ge_ecrc_enable = 1; -module_param(myri10ge_ecrc_enable, int, S_IRUGO); +module_param(myri10ge_ecrc_enable, int, 0444); 
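These S_IRUGO/S_IWUSR-to-octal conversions are mechanical; checkpatch favors literal octal because a mode like 0644 is easier to audit at a glance than a chain of S_* flags. A small userspace sketch of the identities behind the substitution (S_IRUGO is defined locally here, since it is kernel-internal and not in userspace headers):

#include <assert.h>
#include <sys/stat.h>

/* Kernel definition from include/linux/stat.h. */
#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)

int main(void)
{
	assert(S_IRUGO == 0444);              /* world-readable */
	assert((S_IRUGO | S_IWUSR) == 0644);  /* plus owner-writable */
	return 0;
}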
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E"); static int myri10ge_small_bytes = -1; /* -1 == auto */ -module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR); +module_param(myri10ge_small_bytes, int, 0644); MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets"); static int myri10ge_msi = 1; /* enable msi by default */ -module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR); +module_param(myri10ge_msi, int, 0644); MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts"); static int myri10ge_intr_coal_delay = 75; -module_param(myri10ge_intr_coal_delay, int, S_IRUGO); +module_param(myri10ge_intr_coal_delay, int, 0444); MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay"); static int myri10ge_flow_control = 1; -module_param(myri10ge_flow_control, int, S_IRUGO); +module_param(myri10ge_flow_control, int, 0444); MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter"); static int myri10ge_deassert_wait = 1; -module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR); +module_param(myri10ge_deassert_wait, int, 0644); MODULE_PARM_DESC(myri10ge_deassert_wait, "Wait when deasserting legacy interrupts"); static int myri10ge_force_firmware = 0; -module_param(myri10ge_force_firmware, int, S_IRUGO); +module_param(myri10ge_force_firmware, int, 0444); MODULE_PARM_DESC(myri10ge_force_firmware, "Force firmware to assume aligned completions"); static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; -module_param(myri10ge_initial_mtu, int, S_IRUGO); +module_param(myri10ge_initial_mtu, int, 0444); MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU"); static int myri10ge_napi_weight = 64; -module_param(myri10ge_napi_weight, int, S_IRUGO); +module_param(myri10ge_napi_weight, int, 0444); MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight"); static int myri10ge_watchdog_timeout = 1; -module_param(myri10ge_watchdog_timeout, int, S_IRUGO); +module_param(myri10ge_watchdog_timeout, int, 0444); MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout"); static int myri10ge_max_irq_loops = 1048576; -module_param(myri10ge_max_irq_loops, int, S_IRUGO); +module_param(myri10ge_max_irq_loops, int, 0444); MODULE_PARM_DESC(myri10ge_max_irq_loops, "Set stuck legacy IRQ detection threshold"); @@ -330,21 +330,21 @@ module_param(myri10ge_debug, int, 0); MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)"); static int myri10ge_fill_thresh = 256; -module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR); +module_param(myri10ge_fill_thresh, int, 0644); MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed"); static int myri10ge_reset_recover = 1; static int myri10ge_max_slices = 1; -module_param(myri10ge_max_slices, int, S_IRUGO); +module_param(myri10ge_max_slices, int, 0444); MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues"); static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT; -module_param(myri10ge_rss_hash, int, S_IRUGO); +module_param(myri10ge_rss_hash, int, 0444); MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do"); static int myri10ge_dca = 1; -module_param(myri10ge_dca, int, S_IRUGO); +module_param(myri10ge_dca, int, 0444); MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible"); #define MYRI10GE_FW_OFFSET 1024*1024 diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig index a10ef50e4f12..017fb2322589 100644 --- a/drivers/net/ethernet/natsemi/Kconfig +++ b/drivers/net/ethernet/natsemi/Kconfig @@ -1,16 +1,16 @@ # -# 
National Semi-conductor device configuration +# National Semiconductor device configuration # config NET_VENDOR_NATSEMI - bool "National Semi-conductor devices" + bool "National Semiconductor devices" default y ---help--- If you have a network (Ethernet) card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all - the questions about National Semi-conductor devices. If you say Y, + the questions about National Semiconductor devices. If you say Y, you will be asked for your specific card in the following questions. if NET_VENDOR_NATSEMI diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile index cc664977596e..a759aa09ef59 100644 --- a/drivers/net/ethernet/natsemi/Makefile +++ b/drivers/net/ethernet/natsemi/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # -# Makefile for the National Semi-conductor Sonic devices. +# Makefile for the National Semiconductor Sonic devices. # obj-$(CONFIG_MACSONIC) += macsonic.o diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index d5b28884e21e..51fa82b429a3 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -60,14 +60,6 @@ do { \ *((volatile unsigned int *)dev->base_addr+(reg)) = (val); \ } while (0) - -/* use 0 for production, 1 for verification, >1 for debug */ -#ifdef SONIC_DEBUG -static unsigned int sonic_debug = SONIC_DEBUG; -#else -static unsigned int sonic_debug = 1; -#endif - /* * We cannot use station (ethernet) address prefixes to detect the * sonic controller since these are board manufacturer depended. @@ -117,7 +109,6 @@ static const struct net_device_ops sonic_netdev_ops = { static int sonic_probe1(struct net_device *dev) { - static unsigned version_printed; unsigned int silicon_revision; unsigned int val; struct sonic_local *lp = netdev_priv(dev); @@ -133,26 +124,17 @@ static int sonic_probe1(struct net_device *dev) * the expected location. */ silicon_revision = SONIC_READ(SONIC_SR); - if (sonic_debug > 1) - printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision); - i = 0; while (known_revisions[i] != 0xffff && known_revisions[i] != silicon_revision) i++; if (known_revisions[i] == 0xffff) { - printk("SONIC ethernet controller not found (0x%4x)\n", - silicon_revision); + pr_info("SONIC ethernet controller not found (0x%4x)\n", + silicon_revision); goto out; } - if (sonic_debug && version_printed++ == 0) - printk(version); - - printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ", - dev_name(lp->device), dev->base_addr); - /* * Put the sonic into software reset, then * retrieve and print the ethernet address. 
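The removal of the driver-local sonic_debug level (0 for production, up to 4 for debug) is paired, later in this series, with the standard msg_enable bitmap: sonic_msg_init() feeds the module parameter through netif_msg_init(), which turns a legacy level-style value into the low N bits of the bitmap. A sketch of that mapping, mirroring the helper in <linux/netdevice.h> (reproduced here as an illustration, not authoritative):

#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;

/* Level-style parameter -> msg_enable bitmap, as netif_msg_init() does. */
static u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	if (debug_value < 0 || debug_value >= (int)(sizeof(u32) * 8))
		return default_msg_enable_bits;  /* use the driver default */
	if (debug_value == 0)
		return 0;                        /* silence */
	return (1U << debug_value) - 1;          /* enable the low N classes */
}

int main(void)
{
	assert(netif_msg_init(-1, 0) == 0);   /* sonic's new default */
	assert(netif_msg_init(3, 0) == 0x7);  /* NETIF_MSG_{DRV,PROBE,LINK} */
	return 0;
}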
@@ -245,12 +227,16 @@ static int jazz_sonic_probe(struct platform_device *pdev) err = sonic_probe1(dev); if (err) goto out; + + pr_info("SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", + dev->base_addr, dev->dev_addr, dev->irq); + + sonic_msg_init(dev); + err = register_netdev(dev); if (err) goto out1; - printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq); - return 0; out1: @@ -262,8 +248,6 @@ out: } MODULE_DESCRIPTION("Jazz SONIC ethernet driver"); -module_param(sonic_debug, int, 0); -MODULE_PARM_DESC(sonic_debug, "jazzsonic debug level (1-4)"); MODULE_ALIAS("platform:jazzsonic"); #include "sonic.c" diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c index b922ab5cedea..0937fc2a928e 100644 --- a/drivers/net/ethernet/natsemi/macsonic.c +++ b/drivers/net/ethernet/natsemi/macsonic.c @@ -60,8 +60,6 @@ #include <asm/macints.h> #include <asm/mac_via.h> -static char mac_sonic_string[] = "macsonic"; - #include "sonic.h" /* These should basically be bus-size and endian independent (since @@ -72,15 +70,6 @@ static char mac_sonic_string[] = "macsonic"; #define SONIC_WRITE(reg,val) (nubus_writew(val, dev->base_addr + (reg * 4) \ + lp->reg_offset)) -/* use 0 for production, 1 for verification, >1 for debug */ -#ifdef SONIC_DEBUG -static unsigned int sonic_debug = SONIC_DEBUG; -#else -static unsigned int sonic_debug = 1; -#endif - -static int sonic_version_printed; - /* For onboard SONIC */ #define ONBOARD_SONIC_REGISTERS 0x50F0A000 #define ONBOARD_SONIC_PROM_BASE 0x50f08000 @@ -313,11 +302,6 @@ static int mac_onboard_sonic_probe(struct net_device *dev) int sr; bool commslot = macintosh_config->expansion_type == MAC_EXP_PDS_COMM; - if (!MACH_IS_MAC) - return -ENODEV; - - printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. "); - /* Bogus probing, on the models which may or may not have Ethernet (BTW, the Ethernet *is* always at the same address, and nothing else lives there, at least if Apple's @@ -327,13 +311,11 @@ static int mac_onboard_sonic_probe(struct net_device *dev) card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS); if (!card_present) { - printk("none.\n"); + pr_info("Onboard/comm-slot SONIC not found\n"); return -ENODEV; } } - printk("yes\n"); - /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */ dev->base_addr = ONBOARD_SONIC_REGISTERS; @@ -342,18 +324,10 @@ static int mac_onboard_sonic_probe(struct net_device *dev) else dev->irq = IRQ_NUBUS_9; - if (!sonic_version_printed) { - printk(KERN_INFO "%s", version); - sonic_version_printed = 1; - } - printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n", - dev_name(lp->device), dev->base_addr); - /* The PowerBook's SONIC is 16 bit always. */ if (macintosh_config->ident == MAC_MODEL_PB520) { lp->reg_offset = 0; lp->dma_bitmode = SONIC_BITMODE16; - sr = SONIC_READ(SONIC_SR); } else if (commslot) { /* Some of the comm-slot cards are 16 bit. But some of them are not. The 32-bit cards use offset 2 and @@ -370,22 +344,21 @@ static int mac_onboard_sonic_probe(struct net_device *dev) else { lp->dma_bitmode = SONIC_BITMODE16; lp->reg_offset = 0; - sr = SONIC_READ(SONIC_SR); } } else { /* All onboard cards are at offset 2 with 32 bit DMA. 
*/ lp->reg_offset = 2; lp->dma_bitmode = SONIC_BITMODE32; - sr = SONIC_READ(SONIC_SR); } - printk(KERN_INFO - "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", - dev_name(lp->device), sr, lp->dma_bitmode?32:16, lp->reg_offset); -#if 0 /* This is sometimes useful to find out how MacOS configured the card. */ - printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device), - SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); -#endif + pr_info("Onboard/comm-slot SONIC, revision 0x%04x, %d bit DMA, register offset %d\n", + SONIC_READ(SONIC_SR), lp->dma_bitmode ? 32 : 16, + lp->reg_offset); + + /* This is sometimes useful to find out how MacOS configured the card */ + pr_debug("%s: DCR=0x%04x, DCR2=0x%04x\n", __func__, + SONIC_READ(SONIC_DCR) & 0xffff, + SONIC_READ(SONIC_DCR2) & 0xffff); /* Software reset, then initialize control registers. */ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); @@ -406,11 +379,14 @@ static int mac_onboard_sonic_probe(struct net_device *dev) /* Now look for the MAC address. */ mac_onboard_sonic_ethernet_addr(dev); + pr_info("SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", + dev->base_addr, dev->dev_addr, dev->irq); + /* Shared init code */ return macsonic_init(dev); } -static int mac_nubus_sonic_ethernet_addr(struct net_device *dev, +static int mac_sonic_nubus_ethernet_addr(struct net_device *dev, unsigned long prom_addr, int id) { int i; @@ -449,70 +425,49 @@ static int macsonic_ident(struct nubus_rsrc *fres) return -1; } -static int mac_nubus_sonic_probe(struct net_device *dev) +static int mac_sonic_nubus_probe_board(struct nubus_board *board, int id, + struct net_device *dev) { - static int slots; - struct nubus_rsrc *ndev = NULL; struct sonic_local* lp = netdev_priv(dev); unsigned long base_addr, prom_addr; u16 sonic_dcr; - int id = -1; int reg_offset, dma_bitmode; - /* Find the first SONIC that hasn't been initialized already */ - for_each_func_rsrc(ndev) { - if (ndev->category != NUBUS_CAT_NETWORK || - ndev->type != NUBUS_TYPE_ETHERNET) - continue; - - /* Have we seen it already? */ - if (slots & (1<<ndev->board->slot)) - continue; - slots |= 1<<ndev->board->slot; - - /* Is it one of ours? 
*/ - if ((id = macsonic_ident(ndev)) != -1) - break; - } - - if (ndev == NULL) - return -ENODEV; - switch (id) { case MACSONIC_DUODOCK: - base_addr = ndev->board->slot_addr + DUODOCK_SONIC_REGISTERS; - prom_addr = ndev->board->slot_addr + DUODOCK_SONIC_PROM_BASE; + base_addr = board->slot_addr + DUODOCK_SONIC_REGISTERS; + prom_addr = board->slot_addr + DUODOCK_SONIC_PROM_BASE; sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1 | SONIC_DCR_TFT0; reg_offset = 2; dma_bitmode = SONIC_BITMODE32; break; case MACSONIC_APPLE: - base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; - prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE; + base_addr = board->slot_addr + APPLE_SONIC_REGISTERS; + prom_addr = board->slot_addr + APPLE_SONIC_PROM_BASE; sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0; reg_offset = 0; dma_bitmode = SONIC_BITMODE32; break; case MACSONIC_APPLE16: - base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; - prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE; + base_addr = board->slot_addr + APPLE_SONIC_REGISTERS; + prom_addr = board->slot_addr + APPLE_SONIC_PROM_BASE; sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1 | SONIC_DCR_BMS; reg_offset = 0; dma_bitmode = SONIC_BITMODE16; break; case MACSONIC_DAYNALINK: - base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; - prom_addr = ndev->board->slot_addr + DAYNALINK_PROM_BASE; + base_addr = board->slot_addr + APPLE_SONIC_REGISTERS; + prom_addr = board->slot_addr + DAYNALINK_PROM_BASE; sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1 | SONIC_DCR_BMS; reg_offset = 0; dma_bitmode = SONIC_BITMODE16; break; case MACSONIC_DAYNA: - base_addr = ndev->board->slot_addr + DAYNA_SONIC_REGISTERS; - prom_addr = ndev->board->slot_addr + DAYNA_SONIC_MAC_ADDR; + base_addr = board->slot_addr + DAYNA_SONIC_REGISTERS; + prom_addr = board->slot_addr + DAYNA_SONIC_MAC_ADDR; sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1; reg_offset = 0; @@ -528,21 +483,16 @@ static int mac_nubus_sonic_probe(struct net_device *dev) dev->base_addr = base_addr; lp->reg_offset = reg_offset; lp->dma_bitmode = dma_bitmode; - dev->irq = SLOT2IRQ(ndev->board->slot); + dev->irq = SLOT2IRQ(board->slot); - if (!sonic_version_printed) { - printk(KERN_INFO "%s", version); - sonic_version_printed = 1; - } - printk(KERN_INFO "%s: %s in slot %X\n", - dev_name(lp->device), ndev->board->name, ndev->board->slot); - printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", - dev_name(lp->device), SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset); + dev_info(&board->dev, "%s, revision 0x%04x, %d bit DMA, register offset %d\n", + board->name, SONIC_READ(SONIC_SR), + lp->dma_bitmode ? 32 : 16, lp->reg_offset); -#if 0 /* This is sometimes useful to find out how MacOS configured the card. */ - printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device), - SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); -#endif + /* This is sometimes useful to find out how MacOS configured the card */ + dev_dbg(&board->dev, "%s: DCR=0x%04x, DCR2=0x%04x\n", __func__, + SONIC_READ(SONIC_DCR) & 0xffff, + SONIC_READ(SONIC_DCR2) & 0xffff); /* Software reset, then initialize control registers. */ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); @@ -557,14 +507,17 @@ static int mac_nubus_sonic_probe(struct net_device *dev) SONIC_WRITE(SONIC_ISR, 0x7fff); /* Now look for the MAC address. 
*/ - if (mac_nubus_sonic_ethernet_addr(dev, prom_addr, id) != 0) + if (mac_sonic_nubus_ethernet_addr(dev, prom_addr, id) != 0) return -ENODEV; + dev_info(&board->dev, "SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", + dev->base_addr, dev->dev_addr, dev->irq); + /* Shared init code */ return macsonic_init(dev); } -static int mac_sonic_probe(struct platform_device *pdev) +static int mac_sonic_platform_probe(struct platform_device *pdev) { struct net_device *dev; struct sonic_local *lp; @@ -579,22 +532,16 @@ static int mac_sonic_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); platform_set_drvdata(pdev, dev); - /* This will catch fatal stuff like -ENOMEM as well as success */ err = mac_onboard_sonic_probe(dev); - if (err == 0) - goto found; - if (err != -ENODEV) - goto out; - err = mac_nubus_sonic_probe(dev); if (err) goto out; -found: + + sonic_msg_init(dev); + err = register_netdev(dev); if (err) goto out; - printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq); - return 0; out: @@ -604,13 +551,11 @@ out: } MODULE_DESCRIPTION("Macintosh SONIC ethernet driver"); -module_param(sonic_debug, int, 0); -MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)"); MODULE_ALIAS("platform:macsonic"); #include "sonic.c" -static int mac_sonic_device_remove(struct platform_device *pdev) +static int mac_sonic_platform_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sonic_local* lp = netdev_priv(dev); @@ -623,12 +568,105 @@ static int mac_sonic_device_remove(struct platform_device *pdev) return 0; } -static struct platform_driver mac_sonic_driver = { - .probe = mac_sonic_probe, - .remove = mac_sonic_device_remove, - .driver = { - .name = mac_sonic_string, +static struct platform_driver mac_sonic_platform_driver = { + .probe = mac_sonic_platform_probe, + .remove = mac_sonic_platform_remove, + .driver = { + .name = "macsonic", + }, +}; + +static int mac_sonic_nubus_probe(struct nubus_board *board) +{ + struct net_device *ndev; + struct sonic_local *lp; + struct nubus_rsrc *fres; + int id = -1; + int err; + + /* The platform driver will handle a PDS or Comm Slot card (even if + * it has a pseudoslot declaration ROM). 
+ */ + if (macintosh_config->expansion_type == MAC_EXP_PDS_COMM) + return -ENODEV; + + for_each_board_func_rsrc(board, fres) { + if (fres->category != NUBUS_CAT_NETWORK || + fres->type != NUBUS_TYPE_ETHERNET) + continue; + + id = macsonic_ident(fres); + if (id != -1) + break; + } + if (!fres) + return -ENODEV; + + ndev = alloc_etherdev(sizeof(struct sonic_local)); + if (!ndev) + return -ENOMEM; + + lp = netdev_priv(ndev); + lp->device = &board->dev; + SET_NETDEV_DEV(ndev, &board->dev); + + err = mac_sonic_nubus_probe_board(board, id, ndev); + if (err) + goto out; + + sonic_msg_init(ndev); + + err = register_netdev(ndev); + if (err) + goto out; + + nubus_set_drvdata(board, ndev); + + return 0; + +out: + free_netdev(ndev); + return err; +} + +static int mac_sonic_nubus_remove(struct nubus_board *board) +{ + struct net_device *ndev = nubus_get_drvdata(board); + struct sonic_local *lp = netdev_priv(ndev); + + unregister_netdev(ndev); + dma_free_coherent(lp->device, + SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), + lp->descriptors, lp->descriptors_laddr); + free_netdev(ndev); + + return 0; +} + +static struct nubus_driver mac_sonic_nubus_driver = { + .probe = mac_sonic_nubus_probe, + .remove = mac_sonic_nubus_remove, + .driver = { + .name = "macsonic-nubus", + .owner = THIS_MODULE, }, }; -module_platform_driver(mac_sonic_driver); +static int perr, nerr; + +static int __init mac_sonic_init(void) +{ + perr = platform_driver_register(&mac_sonic_platform_driver); + nerr = nubus_driver_register(&mac_sonic_nubus_driver); + return 0; +} +module_init(mac_sonic_init); + +static void __exit mac_sonic_exit(void) +{ + if (!perr) + platform_driver_unregister(&mac_sonic_platform_driver); + if (!nerr) + nubus_driver_unregister(&mac_sonic_nubus_driver); +} +module_exit(mac_sonic_exit); diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index 612c7a44b26c..7ed08486ae23 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -33,7 +33,21 @@ * the NetBSD file "sys/arch/mac68k/dev/if_sn.c". */ +static unsigned int version_printed; +static int sonic_debug = -1; +module_param(sonic_debug, int, 0); +MODULE_PARM_DESC(sonic_debug, "debug message level"); + +static void sonic_msg_init(struct net_device *dev) +{ + struct sonic_local *lp = netdev_priv(dev); + + lp->msg_enable = netif_msg_init(sonic_debug, 0); + + if (version_printed++ == 0) + netif_dbg(lp, drv, dev, "%s", version); +} /* * Open/initialize the SONIC controller. 
@@ -47,8 +61,7 @@ static int sonic_open(struct net_device *dev) struct sonic_local *lp = netdev_priv(dev); int i; - if (sonic_debug > 2) - printk("sonic_open: initializing sonic driver.\n"); + netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__); for (i = 0; i < SONIC_NUM_RRS; i++) { struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); @@ -95,8 +108,7 @@ static int sonic_open(struct net_device *dev) netif_start_queue(dev); - if (sonic_debug > 2) - printk("sonic_open: Initialization done.\n"); + netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__); return 0; } @@ -110,8 +122,7 @@ static int sonic_close(struct net_device *dev) struct sonic_local *lp = netdev_priv(dev); int i; - if (sonic_debug > 2) - printk("sonic_close\n"); + netif_dbg(lp, ifdown, dev, "%s\n", __func__); netif_stop_queue(dev); @@ -205,8 +216,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) int length; int entry = lp->next_tx; - if (sonic_debug > 2) - printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev); + netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb); length = skb->len; if (length < ETH_ZLEN) { @@ -252,14 +262,12 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) lp->next_tx = (entry + 1) & SONIC_TDS_MASK; if (lp->tx_skb[lp->next_tx] != NULL) { /* The ring is full, the ISR has yet to process the next TD. */ - if (sonic_debug > 3) - printk("%s: stopping queue\n", dev->name); + netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__); netif_stop_queue(dev); /* after this packet, wait for ISR to free up some TDAs */ } else netif_start_queue(dev); - if (sonic_debug > 2) - printk("sonic_send_packet: issuing Tx command\n"); + netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__); SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); @@ -281,8 +289,7 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) do { if (status & SONIC_INT_PKTRX) { - if (sonic_debug > 2) - printk("%s: packet rx\n", dev->name); + netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__); sonic_rx(dev); /* got packet(s) */ SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */ } @@ -299,8 +306,7 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear) */ - if (sonic_debug > 2) - printk("%s: tx done\n", dev->name); + netif_dbg(lp, intr, dev, "%s: tx done\n", __func__); while (lp->tx_skb[entry] != NULL) { if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0) @@ -346,20 +352,20 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) * check error conditions */ if (status & SONIC_INT_RFO) { - if (sonic_debug > 1) - printk("%s: rx fifo overrun\n", dev->name); + netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n", + __func__); lp->stats.rx_fifo_errors++; SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */ } if (status & SONIC_INT_RDE) { - if (sonic_debug > 1) - printk("%s: rx descriptors exhausted\n", dev->name); + netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n", + __func__); lp->stats.rx_dropped++; SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */ } if (status & SONIC_INT_RBAE) { - if (sonic_debug > 1) - printk("%s: rx buffer area exceeded\n", dev->name); + netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n", + __func__); lp->stats.rx_dropped++; SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */ } @@ -380,8 +386,9 @@ static irqreturn_t sonic_interrupt(int irq, void 
*dev_id) /* transmit error */ if (status & SONIC_INT_TXER) { - if ((SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) && (sonic_debug > 2)) - printk(KERN_ERR "%s: tx fifo underrun\n", dev->name); + if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) + netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n", + __func__); SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */ } @@ -475,8 +482,8 @@ static void sonic_rx(struct net_device *dev) if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff; SONIC_WRITE(SONIC_RWP, lp->cur_rwp); if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) { - if (sonic_debug > 2) - printk("%s: rx buffer exhausted\n", dev->name); + netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n", + __func__); SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */ } } else @@ -542,9 +549,8 @@ static void sonic_multicast_list(struct net_device *dev) (netdev_mc_count(dev) > 15)) { rcr |= SONIC_RCR_AMC; } else { - if (sonic_debug > 2) - printk("sonic_multicast_list: mc_count %d\n", - netdev_mc_count(dev)); + netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__, + netdev_mc_count(dev)); sonic_set_cam_enable(dev, 1); /* always enable our own address */ i = 1; netdev_for_each_mc_addr(ha, dev) { @@ -562,8 +568,7 @@ static void sonic_multicast_list(struct net_device *dev) } } - if (sonic_debug > 2) - printk("sonic_multicast_list: setting RCR=%x\n", rcr); + netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr); SONIC_WRITE(SONIC_RCR, rcr); } @@ -596,8 +601,8 @@ static int sonic_init(struct net_device *dev) /* * initialize the receive resource area */ - if (sonic_debug > 2) - printk("sonic_init: initialize receive resource area\n"); + netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n", + __func__); for (i = 0; i < SONIC_NUM_RRS; i++) { u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff; @@ -622,8 +627,7 @@ static int sonic_init(struct net_device *dev) SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1)); /* load the resource pointers */ - if (sonic_debug > 3) - printk("sonic_init: issuing RRRA command\n"); + netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__); SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA); i = 0; @@ -632,16 +636,17 @@ static int sonic_init(struct net_device *dev) break; } - if (sonic_debug > 2) - printk("sonic_init: status=%x i=%d\n", SONIC_READ(SONIC_CMD), i); + netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__, + SONIC_READ(SONIC_CMD), i); /* * Initialize the receive descriptors so that they * become a circular linked list, ie. let the last * descriptor point to the first again. 
*/ - if (sonic_debug > 2) - printk("sonic_init: initialize receive descriptors\n"); + netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n", + __func__); + for (i=0; i<SONIC_NUM_RDS; i++) { sonic_rda_put(dev, i, SONIC_RD_STATUS, 0); sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0); @@ -664,8 +669,9 @@ static int sonic_init(struct net_device *dev) /* * initialize transmit descriptors */ - if (sonic_debug > 2) - printk("sonic_init: initialize transmit descriptors\n"); + netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n", + __func__); + for (i = 0; i < SONIC_NUM_TDS; i++) { sonic_tda_put(dev, i, SONIC_TD_STATUS, 0); sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0); @@ -712,10 +718,8 @@ static int sonic_init(struct net_device *dev) if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD) break; } - if (sonic_debug > 2) { - printk("sonic_init: CMD=%x, ISR=%x\n, i=%d", - SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i); - } + netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__, + SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i); /* * enable receiver, disable loopback @@ -731,9 +735,8 @@ static int sonic_init(struct net_device *dev) if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0) printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd); - if (sonic_debug > 2) - printk("sonic_init: new status=%x\n", - SONIC_READ(SONIC_CMD)); + netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__, + SONIC_READ(SONIC_CMD)); return 0; } diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h index 421b1a283fed..2b27f7049acb 100644 --- a/drivers/net/ethernet/natsemi/sonic.h +++ b/drivers/net/ethernet/natsemi/sonic.h @@ -319,6 +319,7 @@ struct sonic_local { unsigned int eol_rx; unsigned int eol_tx; /* last unacked transmit packet */ unsigned int next_tx; /* next free TD */ + int msg_enable; struct device *device; /* generic device */ struct net_device_stats stats; }; @@ -336,6 +337,7 @@ static struct net_device_stats *sonic_get_stats(struct net_device *dev); static void sonic_multicast_list(struct net_device *dev); static int sonic_init(struct net_device *dev); static void sonic_tx_timeout(struct net_device *dev); +static void sonic_msg_init(struct net_device *dev); /* Internal inlines for reading/writing DMA buffers. Note that bus size and endianness matter here, whereas they don't for registers, diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index 1817deea98a4..e1b886e87a76 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -73,14 +73,6 @@ extern void xtboard_get_ether_addr(unsigned char *buf); #define SONIC_WRITE(reg,val) \ *((volatile unsigned int *)dev->base_addr+reg) = val - -/* Use 0 for production, 1 for verification, and >2 for debug */ -#ifdef SONIC_DEBUG -static unsigned int sonic_debug = SONIC_DEBUG; -#else -static unsigned int sonic_debug = 1; -#endif - /* * We cannot use station (ethernet) address prefixes to detect the * sonic controller since these are board manufacturer depended. @@ -130,7 +122,6 @@ static const struct net_device_ops xtsonic_netdev_ops = { static int __init sonic_probe1(struct net_device *dev) { - static unsigned version_printed = 0; unsigned int silicon_revision; struct sonic_local *lp = netdev_priv(dev); unsigned int base_addr = dev->base_addr; @@ -146,23 +137,17 @@ static int __init sonic_probe1(struct net_device *dev) * the expected location. 
*/ silicon_revision = SONIC_READ(SONIC_SR); - if (sonic_debug > 1) - printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision); - i = 0; while ((known_revisions[i] != 0xffff) && (known_revisions[i] != silicon_revision)) i++; if (known_revisions[i] == 0xffff) { - printk("SONIC ethernet controller not found (0x%4x)\n", - silicon_revision); + pr_info("SONIC ethernet controller not found (0x%4x)\n", + silicon_revision); return -ENODEV; } - if (sonic_debug && version_printed++ == 0) - printk(version); - /* * Put the sonic into software reset, then retrieve ethernet address. * Note: we are assuming that the boot-loader has initialized the cam. @@ -273,12 +258,15 @@ int xtsonic_probe(struct platform_device *pdev) if ((err = sonic_probe1(dev))) goto out; + + pr_info("SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", + dev->base_addr, dev->dev_addr, dev->irq); + + sonic_msg_init(dev); + if ((err = register_netdev(dev))) goto out1; - printk("%s: SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", dev->name, - dev->base_addr, dev->dev_addr, dev->irq); - return 0; out1: @@ -290,8 +278,6 @@ out: } MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver"); -module_param(sonic_debug, int, 0); -MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)"); #include "sonic.c" diff --git a/drivers/net/ethernet/netronome/nfp/bpf/Makefile b/drivers/net/ethernet/netronome/nfp/bpf/Makefile new file mode 100644 index 000000000000..805fa28f391a --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/bpf/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 80d3aa0fc9d3..7e298148ca26 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -218,17 +218,17 @@ nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb, return skb; hdr = (struct cmsg_hdr *)skb->data; - /* 0 reply_size means caller will do the validation */ - if (reply_size && skb->len != reply_size) { - cmsg_warn(bpf, "cmsg drop - wrong size %d != %d!\n", - skb->len, reply_size); - goto err_free; - } if (hdr->type != __CMSG_REPLY(type)) { cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n", hdr->type, __CMSG_REPLY(type)); goto err_free; } + /* 0 reply_size means caller will do the validation */ + if (reply_size && skb->len != reply_size) { + cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n", + type, skb->len, reply_size); + goto err_free; + } return skb; err_free: diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h index cfcc7bcb2c67..39639ac28b01 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h @@ -41,6 +41,7 @@ enum bpf_cap_tlv_type { NFP_BPF_CAP_TYPE_FUNC = 1, NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2, NFP_BPF_CAP_TYPE_MAPS = 3, + NFP_BPF_CAP_TYPE_RANDOM = 4, }; struct nfp_bpf_cap_tlv_func { diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 56451edf01c2..29b4e5f8c102 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -74,7 +74,9 @@ nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn) { - if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) { + if (nfp_prog->__prog_alloc_len / sizeof(u64) 
== nfp_prog->prog_len) { + pr_warn("instruction limit reached (%u NFP instructions)\n", + nfp_prog->prog_len); nfp_prog->error = -ENOSPC; return; } @@ -103,23 +105,18 @@ nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off) /* --- Emitters --- */ static void __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, - u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync, bool indir) + u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx, + bool indir) { - enum cmd_ctx_swap ctx; u64 insn; - if (sync) - ctx = CMD_CTX_SWAP; - else - ctx = CMD_CTX_NO_SWAP; - insn = FIELD_PREP(OP_CMD_A_SRC, areg) | FIELD_PREP(OP_CMD_CTX, ctx) | FIELD_PREP(OP_CMD_B_SRC, breg) | FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) | FIELD_PREP(OP_CMD_XFER, xfer) | FIELD_PREP(OP_CMD_CNT, size) | - FIELD_PREP(OP_CMD_SIG, sync) | + FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) | FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) | FIELD_PREP(OP_CMD_INDIR, indir) | FIELD_PREP(OP_CMD_MODE, mode); @@ -129,7 +126,7 @@ __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, static void emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, - swreg lreg, swreg rreg, u8 size, bool sync, bool indir) + swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir) { struct nfp_insn_re_regs reg; int err; @@ -150,22 +147,22 @@ emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, return; } - __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync, + __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx, indir); } static void emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, - swreg lreg, swreg rreg, u8 size, bool sync) + swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx) { - emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, false); + emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false); } static void emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, - swreg lreg, swreg rreg, u8 size, bool sync) + swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx) { - emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, true); + emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true); } static void @@ -410,7 +407,7 @@ __emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr, FIELD_PREP(OP_LCSR_A_SRC, areg) | FIELD_PREP(OP_LCSR_B_SRC, breg) | FIELD_PREP(OP_LCSR_WRITE, wr) | - FIELD_PREP(OP_LCSR_ADDR, addr) | + FIELD_PREP(OP_LCSR_ADDR, addr / 4) | FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) | FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn); @@ -438,10 +435,16 @@ static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr) return; } - __emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr / 4, + __emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr, false, reg.src_lmextn); } +/* CSR value is read in following immed[gpr, 0] */ +static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr) +{ + __emit_lcsr(nfp_prog, 0, 0, false, addr, false, false); +} + static void emit_nop(struct nfp_prog *nfp_prog) { __emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0); @@ -553,6 +556,19 @@ wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len, emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true); } +/* wrp_reg_or_subpart() - load @field_len bytes from low end of @src, or the + * result to @dst from offset, there is no change on the other bits of @dst. 
+ */ +static void +wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, + u8 field_len, u8 offset) +{ + enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE; + u8 mask = ((1 << field_len) - 1) << offset; + + emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8); +} + static void addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, swreg *rega, swreg *regb) @@ -597,7 +613,7 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) /* Memory read from source addr into transfer-in registers. */ emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0, - src_base, off, xfer_num - 1, true, len > 32); + src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32); /* Move from transfer-in to transfer-out. */ for (i = 0; i < xfer_num; i++) @@ -609,39 +625,39 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) /* Use single direct_ref write8. */ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, reg_a(meta->paired_st->dst_reg * 2), off, len - 1, - true); + CMD_CTX_SWAP); } else if (len <= 32 && IS_ALIGNED(len, 4)) { /* Use single direct_ref write32. */ emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1, - true); + CMD_CTX_SWAP); } else if (len <= 32) { /* Use single indirect_ref write8. */ wrp_immed(nfp_prog, reg_none(), CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1)); emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, reg_a(meta->paired_st->dst_reg * 2), off, - len - 1, true); + len - 1, CMD_CTX_SWAP); } else if (IS_ALIGNED(len, 4)) { /* Use single indirect_ref write32. */ wrp_immed(nfp_prog, reg_none(), CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, reg_a(meta->paired_st->dst_reg * 2), off, - xfer_num - 1, true); + xfer_num - 1, CMD_CTX_SWAP); } else if (len <= 40) { /* Use one direct_ref write32 to write the first 32-bytes, then * another direct_ref write8 to write the remaining bytes. */ emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, reg_a(meta->paired_st->dst_reg * 2), off, 7, - true); + CMD_CTX_SWAP); off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32, imm_b(nfp_prog)); emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8, reg_a(meta->paired_st->dst_reg * 2), off, len - 33, - true); + CMD_CTX_SWAP); } else { /* Use one indirect_ref write32 to write 4-bytes aligned length, * then another direct_ref write8 to write the remaining bytes. @@ -652,12 +668,12 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2)); emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0, reg_a(meta->paired_st->dst_reg * 2), off, - xfer_num - 2, true); + xfer_num - 2, CMD_CTX_SWAP); new_off = meta->paired_st->off + (xfer_num - 1) * 4; off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog)); emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off, - (len & 0x3) - 1, true); + (len & 0x3) - 1, CMD_CTX_SWAP); } /* TODO: The following extra load is to make sure data flow be identical @@ -718,7 +734,7 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size) shift = size < 4 ? 
4 - size : 0; emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0, - pptr_reg(nfp_prog), offset, sz - 1, true); + pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP); i = 0; if (shift) @@ -748,7 +764,7 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, mask = size < 4 ? GENMASK(size - 1, 0) : 0; emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0, - lreg, rreg, sz / 4 - 1, true); + lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP); i = 0; if (mask) @@ -828,7 +844,7 @@ data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i)); emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, - reg_a(dst_gpr), offset, size - 1, true); + reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP); return 0; } @@ -842,7 +858,7 @@ data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, wrp_immed(nfp_prog, reg_xfer(1), imm >> 32); emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, - reg_a(dst_gpr), offset, size - 1, true); + reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP); return 0; } @@ -1339,7 +1355,7 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) } static int -map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { struct bpf_offloaded_map *offmap; struct nfp_bpf_map *nfp_map; @@ -1353,19 +1369,21 @@ map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) /* We only have to reload LM0 if the key is not at start of stack */ lm_off = nfp_prog->stack_depth; - lm_off += meta->arg2.var_off.value + meta->arg2.off; - load_lm_ptr = meta->arg2_var_off || lm_off; + lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off; + load_lm_ptr = meta->arg2.var_off || lm_off; /* Set LM0 to start of key */ if (load_lm_ptr) emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0); + if (meta->func_id == BPF_FUNC_map_update_elem) + emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2); /* Load map ID into a register, it should actually fit as an immediate * but in case it doesn't deal with it here, not in the delay slots. 
*/ tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog)); - emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem, + emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id, 2, RELO_BR_HELPER); ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; @@ -1388,6 +1406,18 @@ map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int +nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + __emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM); + /* CSR value is read in following immed[gpr, 0] */ + emit_immed(nfp_prog, reg_both(0), 0, + IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); + emit_immed(nfp_prog, reg_both(1), 0, + IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B); + return 0; +} + /* --- Callbacks --- */ static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { @@ -1838,6 +1868,128 @@ mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, tmp_reg, meta->insn.dst_reg * 2, size); } +static void +mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog, + struct nfp_insn_meta *meta) +{ + s16 range_start = meta->pkt_cache.range_start; + s16 range_end = meta->pkt_cache.range_end; + swreg src_base, off; + u8 xfer_num, len; + bool indir; + + off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog)); + src_base = reg_a(meta->insn.src_reg * 2); + len = range_end - range_start; + xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH; + + indir = len > 8 * REG_WIDTH; + /* Setup PREV_ALU for indirect mode. */ + if (indir) + wrp_immed(nfp_prog, reg_none(), + CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1)); + + /* Cache memory into transfer-in registers. */ + emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base, + off, xfer_num - 1, CMD_CTX_SWAP, indir); +} + +static int +mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog, + struct nfp_insn_meta *meta, + unsigned int size) +{ + s16 range_start = meta->pkt_cache.range_start; + s16 insn_off = meta->insn.off - range_start; + swreg dst_lo, dst_hi, src_lo, src_mid; + u8 dst_gpr = meta->insn.dst_reg * 2; + u8 len_lo = size, len_mid = 0; + u8 idx = insn_off / REG_WIDTH; + u8 off = insn_off % REG_WIDTH; + + dst_hi = reg_both(dst_gpr + 1); + dst_lo = reg_both(dst_gpr); + src_lo = reg_xfer(idx); + + /* The read length could involve as many as three registers. */ + if (size > REG_WIDTH - off) { + /* Calculate the part in the second register. */ + len_lo = REG_WIDTH - off; + len_mid = size - len_lo; + + /* Calculate the part in the third register. 
*/ + if (size > 2 * REG_WIDTH - off) + len_mid = REG_WIDTH; + } + + wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off); + + if (!len_mid) { + wrp_immed(nfp_prog, dst_hi, 0); + return 0; + } + + src_mid = reg_xfer(idx + 1); + + if (size <= REG_WIDTH) { + wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo); + wrp_immed(nfp_prog, dst_hi, 0); + } else { + swreg src_hi = reg_xfer(idx + 2); + + wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, + REG_WIDTH - len_lo, len_lo); + wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo, + REG_WIDTH - len_lo); + wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo, + len_lo); + } + + return 0; +} + +static int +mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog, + struct nfp_insn_meta *meta, + unsigned int size) +{ + swreg dst_lo, dst_hi, src_lo; + u8 dst_gpr, idx; + + idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH; + dst_gpr = meta->insn.dst_reg * 2; + dst_hi = reg_both(dst_gpr + 1); + dst_lo = reg_both(dst_gpr); + src_lo = reg_xfer(idx); + + if (size < REG_WIDTH) { + wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0); + wrp_immed(nfp_prog, dst_hi, 0); + } else if (size == REG_WIDTH) { + wrp_mov(nfp_prog, dst_lo, src_lo); + wrp_immed(nfp_prog, dst_hi, 0); + } else { + swreg src_hi = reg_xfer(idx + 1); + + wrp_mov(nfp_prog, dst_lo, src_lo); + wrp_mov(nfp_prog, dst_hi, src_hi); + } + + return 0; +} + +static int +mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog, + struct nfp_insn_meta *meta, unsigned int size) +{ + u8 off = meta->insn.off - meta->pkt_cache.range_start; + + if (IS_ALIGNED(off, REG_WIDTH)) + return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size); + + return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size); +} + static int mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int size) @@ -1852,8 +2004,16 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, return mem_ldx_skb(nfp_prog, meta, size); } - if (meta->ptr.type == PTR_TO_PACKET) - return mem_ldx_data(nfp_prog, meta, size); + if (meta->ptr.type == PTR_TO_PACKET) { + if (meta->pkt_cache.range_end) { + if (meta->pkt_cache.do_init) + mem_ldx_data_init_pktcache(nfp_prog, meta); + + return mem_ldx_data_from_pktcache(nfp_prog, meta, size); + } else { + return mem_ldx_data(nfp_prog, meta, size); + } + } if (meta->ptr.type == PTR_TO_STACK) return mem_ldx_stack(nfp_prog, meta, size, @@ -1982,6 +2142,111 @@ static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return mem_stx(nfp_prog, meta, 8); } +static int +mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64) +{ + u8 dst_gpr = meta->insn.dst_reg * 2; + u8 src_gpr = meta->insn.src_reg * 2; + unsigned int full_add, out; + swreg addra, addrb, off; + + off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); + + /* We can fit 16 bits into command immediate, if we know the immediate + * is guaranteed to either always or never fit into 16 bit we only + * generate code to handle that particular case, otherwise generate + * code for both. 
+ */ + out = nfp_prog_current_offset(nfp_prog); + full_add = nfp_prog_current_offset(nfp_prog); + + if (meta->insn.off) { + out += 2; + full_add += 2; + } + if (meta->xadd_maybe_16bit) { + out += 3; + full_add += 3; + } + if (meta->xadd_over_16bit) + out += 2 + is64; + if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { + out += 5; + full_add += 5; + } + + /* Generate the branch for choosing add_imm vs add */ + if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) { + swreg max_imm = imm_a(nfp_prog); + + wrp_immed(nfp_prog, max_imm, 0xffff); + emit_alu(nfp_prog, reg_none(), + max_imm, ALU_OP_SUB, reg_b(src_gpr)); + emit_alu(nfp_prog, reg_none(), + reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1)); + emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0); + /* defer for add */ + } + + /* If insn has an offset add to the address */ + if (!meta->insn.off) { + addra = reg_a(dst_gpr); + addrb = reg_b(dst_gpr + 1); + } else { + emit_alu(nfp_prog, imma_a(nfp_prog), + reg_a(dst_gpr), ALU_OP_ADD, off); + emit_alu(nfp_prog, imma_b(nfp_prog), + reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0)); + addra = imma_a(nfp_prog); + addrb = imma_b(nfp_prog); + } + + /* Generate the add_imm if 16 bits are possible */ + if (meta->xadd_maybe_16bit) { + swreg prev_alu = imm_a(nfp_prog); + + wrp_immed(nfp_prog, prev_alu, + FIELD_PREP(CMD_OVE_DATA, 2) | + CMD_OVE_LEN | + FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2)); + wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2); + emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0, + addra, addrb, 0, CMD_CTX_NO_SWAP); + + if (meta->xadd_over_16bit) + emit_br(nfp_prog, BR_UNC, out, 0); + } + + if (!nfp_prog_confirm_current_offset(nfp_prog, full_add)) + return -EINVAL; + + /* Generate the add if 16 bits are not guaranteed */ + if (meta->xadd_over_16bit) { + emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0, + addra, addrb, is64 << 2, + is64 ? 
CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1); + + wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr)); + if (is64) + wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1)); + } + + if (!nfp_prog_confirm_current_offset(nfp_prog, out)) + return -EINVAL; + + return 0; +} + +static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return mem_xadd(nfp_prog, meta, false); +} + +static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return mem_xadd(nfp_prog, meta, true); +} + static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); @@ -2183,7 +2448,11 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) case BPF_FUNC_xdp_adjust_head: return adjust_head(nfp_prog, meta); case BPF_FUNC_map_lookup_elem: - return map_lookup_stack(nfp_prog, meta); + case BPF_FUNC_map_update_elem: + case BPF_FUNC_map_delete_elem: + return map_call_stack_common(nfp_prog, meta); + case BPF_FUNC_get_prandom_u32: + return nfp_get_prandom_u32(nfp_prog, meta); default: WARN_ONCE(1, "verifier allowed unsupported function\n"); return -EOPNOTSUPP; @@ -2243,6 +2512,8 @@ static const instr_cb_t instr_cb[256] = { [BPF_STX | BPF_MEM | BPF_H] = mem_stx2, [BPF_STX | BPF_MEM | BPF_W] = mem_stx4, [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8, + [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4, + [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8, [BPF_ST | BPF_MEM | BPF_B] = mem_st1, [BPF_ST | BPF_MEM | BPF_H] = mem_st2, [BPF_ST | BPF_MEM | BPF_W] = mem_st4, @@ -2463,6 +2734,8 @@ static int nfp_translate(struct nfp_prog *nfp_prog) err = cb(nfp_prog, meta); if (err) return err; + if (nfp_prog->error) + return nfp_prog->error; nfp_prog->n_translated++; } @@ -2821,6 +3094,120 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog) } } +static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog) +{ + struct nfp_insn_meta *meta, *range_node = NULL; + s16 range_start = 0, range_end = 0; + bool cache_avail = false; + struct bpf_insn *insn; + s32 range_ptr_off = 0; + u32 range_ptr_id = 0; + + list_for_each_entry(meta, &nfp_prog->insns, l) { + if (meta->flags & FLAG_INSN_IS_JUMP_DST) + cache_avail = false; + + if (meta->skip) + continue; + + insn = &meta->insn; + + if (is_mbpf_store_pkt(meta) || + insn->code == (BPF_JMP | BPF_CALL) || + is_mbpf_classic_store_pkt(meta) || + is_mbpf_classic_load(meta)) { + cache_avail = false; + continue; + } + + if (!is_mbpf_load(meta)) + continue; + + if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) { + cache_avail = false; + continue; + } + + if (!cache_avail) { + cache_avail = true; + if (range_node) + goto end_current_then_start_new; + goto start_new; + } + + /* Check ID to make sure two reads share the same + * variable offset against PTR_TO_PACKET, and check OFF + * to make sure they also share the same constant + * offset. + * + * OFFs don't really need to be the same, because they + * are the constant offsets against PTR_TO_PACKET, so + * for different OFFs, we could canonicalize them to + * offsets against original packet pointer. We don't + * support this. 
+ */ + if (meta->ptr.id == range_ptr_id && + meta->ptr.off == range_ptr_off) { + s16 new_start = range_start; + s16 end, off = insn->off; + s16 new_end = range_end; + bool changed = false; + + if (off < range_start) { + new_start = off; + changed = true; + } + + end = off + BPF_LDST_BYTES(insn); + if (end > range_end) { + new_end = end; + changed = true; + } + + if (!changed) + continue; + + if (new_end - new_start <= 64) { + /* Install new range. */ + range_start = new_start; + range_end = new_end; + continue; + } + } + +end_current_then_start_new: + range_node->pkt_cache.range_start = range_start; + range_node->pkt_cache.range_end = range_end; +start_new: + range_node = meta; + range_node->pkt_cache.do_init = true; + range_ptr_id = range_node->ptr.id; + range_ptr_off = range_node->ptr.off; + range_start = insn->off; + range_end = insn->off + BPF_LDST_BYTES(insn); + } + + if (range_node) { + range_node->pkt_cache.range_start = range_start; + range_node->pkt_cache.range_end = range_end; + } + + list_for_each_entry(meta, &nfp_prog->insns, l) { + if (meta->skip) + continue; + + if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) { + if (meta->pkt_cache.do_init) { + range_start = meta->pkt_cache.range_start; + range_end = meta->pkt_cache.range_end; + } else { + meta->pkt_cache.range_start = range_start; + meta->pkt_cache.range_end = range_end; + } + } + } +} + static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) { nfp_bpf_opt_reg_init(nfp_prog); @@ -2828,6 +3215,7 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) nfp_bpf_opt_ld_mask(nfp_prog); nfp_bpf_opt_ld_shift(nfp_prog); nfp_bpf_opt_ldst_gather(nfp_prog); + nfp_bpf_opt_pkt_cache(nfp_prog); return 0; } @@ -2952,6 +3340,12 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) case BPF_FUNC_map_lookup_elem: val = nfp_prog->bpf->helpers.map_lookup; break; + case BPF_FUNC_map_update_elem: + val = nfp_prog->bpf->helpers.map_update; + break; + case BPF_FUNC_map_delete_elem: + val = nfp_prog->bpf->helpers.map_delete; + break; default: pr_err("relocation of unknown helper %d\n", val); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 34e98aa6b956..1dc424685f4e 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -221,7 +221,7 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, } static int -nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) +nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) { struct nfp_net *nn = netdev_priv(netdev); unsigned int max_mtu; @@ -284,6 +284,12 @@ nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length) case BPF_FUNC_map_lookup_elem: bpf->helpers.map_lookup = readl(&cap->func_addr); break; + case BPF_FUNC_map_update_elem: + bpf->helpers.map_update = readl(&cap->func_addr); + break; + case BPF_FUNC_map_delete_elem: + bpf->helpers.map_delete = readl(&cap->func_addr); + break; } return 0; @@ -309,6 +315,14 @@ nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length) return 0; } +static int +nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value, + u32 length) +{ + bpf->pseudo_random = true; + return 0; +} + static int nfp_bpf_parse_capabilities(struct nfp_app *app) { struct nfp_cpp *cpp = app->pf->cpp; @@ -347,6 +361,10 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app) if 
(nfp_bpf_parse_cap_maps(app->priv, value, length)) goto err_release_free; break; + case NFP_BPF_CAP_TYPE_RANDOM: + if (nfp_bpf_parse_cap_random(app->priv, value, length)) + goto err_release_free; + break; default: nfp_dbg(cpp, "unknown BPF capability: %d\n", type); break; @@ -413,7 +431,7 @@ const struct nfp_app_type app_bpf = { .init = nfp_bpf_init, .clean = nfp_bpf_clean, - .change_mtu = nfp_bpf_change_mtu, + .check_mtu = nfp_bpf_check_mtu, .extra_cap = nfp_bpf_extra_cap, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 054df3dc0698..4981c8944ca3 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -72,6 +72,7 @@ enum nfp_relo_type { #define BR_OFF_RELO 15000 enum static_regs { + STATIC_REG_IMMA = 20, /* Bank AB */ STATIC_REG_IMM = 21, /* Bank AB */ STATIC_REG_STACK = 22, /* Bank A */ STATIC_REG_PKT_LEN = 22, /* Bank B */ @@ -91,6 +92,8 @@ enum pkt_vec { #define pptr_reg(np) pv_ctm_ptr(np) #define imm_a(np) reg_a(STATIC_REG_IMM) #define imm_b(np) reg_b(STATIC_REG_IMM) +#define imma_a(np) reg_a(STATIC_REG_IMMA) +#define imma_b(np) reg_b(STATIC_REG_IMMA) #define imm_both(np) reg_both(STATIC_REG_IMM) #define NFP_BPF_ABI_FLAGS reg_imm(0) @@ -128,6 +131,10 @@ enum pkt_vec { * * @helpers: helper addressess for various calls * @helpers.map_lookup: map lookup helper address + * @helpers.map_update: map update helper address + * @helpers.map_delete: map delete helper address + * + * @pseudo_random: FW initialized the pseudo-random machinery (CSRs) */ struct nfp_app_bpf { struct nfp_app *app; @@ -162,7 +169,18 @@ struct nfp_app_bpf { struct { u32 map_lookup; + u32 map_update; + u32 map_delete; } helpers; + + bool pseudo_random; +}; + +enum nfp_bpf_map_use { + NFP_MAP_UNUSED = 0, + NFP_MAP_USE_READ, + NFP_MAP_USE_WRITE, + NFP_MAP_USE_ATOMIC_CNT, }; /** @@ -171,12 +189,14 @@ struct nfp_app_bpf { * @bpf: back pointer to bpf app private structure * @tid: table id identifying map on datapath * @l: link on the nfp_app_bpf->map_list list + * @use_map: map of how the value is used (in 4B chunks) */ struct nfp_bpf_map { struct bpf_offloaded_map *offmap; struct nfp_app_bpf *bpf; u32 tid; struct list_head l; + enum nfp_bpf_map_use use_map[]; }; struct nfp_prog; @@ -190,6 +210,16 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *); #define nfp_meta_next(meta) list_next_entry(meta, l) #define nfp_meta_prev(meta) list_prev_entry(meta, l) +/** + * struct nfp_bpf_reg_state - register state for calls + * @reg: BPF register state from latest path + * @var_off: for stack arg - changes stack offset on different paths + */ +struct nfp_bpf_reg_state { + struct bpf_reg_state reg; + bool var_off; +}; + #define FLAG_INSN_IS_JUMP_DST BIT(0) /** @@ -199,11 +229,16 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *); * @ldst_gather_len: memcpy length gathered from load/store sequence * @paired_st: the paired store insn at the head of the sequence * @ptr_not_const: pointer is not always constant + * @pkt_cache: packet data cache information + * @pkt_cache.range_start: start offset for associated packet data cache + * @pkt_cache.range_end: end offset for associated packet data cache + * @pkt_cache.do_init: this read needs to initialize packet data cache + * @xadd_over_16bit: 16bit immediate is not guaranteed + * @xadd_maybe_16bit: 16bit immediate is possible * @jmp_dst: destination info for jump instructions * @func_id: function id for call instructions * @arg1: 
arg1 for call instructions * @arg2: arg2 for call instructions - * @arg2_var_off: arg2 changes stack offset on different paths * @off: index of first generated machine instruction (in nfp_prog.prog) * @n: eBPF instruction number * @flags: eBPF instruction extra optimization flags @@ -214,18 +249,27 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *); struct nfp_insn_meta { struct bpf_insn insn; union { + /* pointer ops (ld/st/xadd) */ struct { struct bpf_reg_state ptr; struct bpf_insn *paired_st; s16 ldst_gather_len; bool ptr_not_const; + struct { + s16 range_start; + s16 range_end; + bool do_init; + } pkt_cache; + bool xadd_over_16bit; + bool xadd_maybe_16bit; }; + /* jump */ struct nfp_insn_meta *jmp_dst; + /* function calls */ struct { u32 func_id; struct bpf_reg_state arg1; - struct bpf_reg_state arg2; - bool arg2_var_off; + struct nfp_bpf_reg_state arg2; }; }; unsigned int off; @@ -269,6 +313,41 @@ static inline bool is_mbpf_store(const struct nfp_insn_meta *meta) return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM); } +static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta) +{ + return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET; +} + +static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta) +{ + return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET; +} + +static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta) +{ + u8 code = meta->insn.code; + + return BPF_CLASS(code) == BPF_LD && + (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND); +} + +static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta) +{ + u8 code = meta->insn.code; + + return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM; +} + +static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta) +{ + return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET; +} + +static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta) +{ + return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD); +} + /** * struct nfp_prog - nfp BPF program * @bpf: backpointer to the bpf app priv structure diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 0a7732385469..42d98792bd25 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -164,6 +164,41 @@ static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog) return 0; } +/* Atomic engine requires values to be in big endian, we need to byte swap + * the value words used with xadd. 
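+ *
+ * Only words marked NFP_MAP_USE_ATOMIC_CNT are swapped; plain
+ * read/write words keep host byte order. On a little-endian host
+ * a counter word 0x00000001, for example, crosses the bus as
+ * 0x01000000, which is what the big-endian add engine expects,
+ * and the lookup path applies the same swap to undo it.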
+ */ +static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value) +{ + u32 *word = value; + unsigned int i; + + for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++) + if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT) + word[i] = (__force u32)cpu_to_be32(word[i]); +} + +static int +nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap, + void *key, void *value) +{ + int err; + + err = nfp_bpf_ctrl_lookup_entry(offmap, key, value); + if (err) + return err; + + nfp_map_bpf_byte_swap(offmap->dev_priv, value); + return 0; +} + +static int +nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap, + void *key, void *value, u64 flags) +{ + nfp_map_bpf_byte_swap(offmap->dev_priv, value); + return nfp_bpf_ctrl_update_entry(offmap, key, value, flags); +} + static int nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap, void *key, void *next_key) @@ -183,8 +218,8 @@ nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key) static const struct bpf_map_dev_ops nfp_bpf_map_ops = { .map_get_next_key = nfp_bpf_map_get_next_key, - .map_lookup_elem = nfp_bpf_ctrl_lookup_entry, - .map_update_elem = nfp_bpf_ctrl_update_entry, + .map_lookup_elem = nfp_bpf_map_lookup_entry, + .map_update_elem = nfp_bpf_map_update_entry, .map_delete_elem = nfp_bpf_map_delete_elem, }; @@ -192,6 +227,7 @@ static int nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap) { struct nfp_bpf_map *nfp_map; + unsigned int use_map_size; long long int res; if (!bpf->maps.types) @@ -226,7 +262,10 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap) return -ENOMEM; } - nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER); + use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) * + FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]); + + nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER); if (!nfp_map) return -ENOMEM; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 479f602887e9..06ad53ce4ad9 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -97,7 +97,7 @@ nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, if (nfp_prog->adjust_head_location != meta->n) goto exit_set_location; - if (meta->arg2.var_off.value != imm) + if (meta->arg2.reg.var_off.value != imm) goto exit_set_location; } @@ -107,14 +107,69 @@ exit_set_location: } static int +nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, + struct nfp_bpf_reg_state *old_arg) +{ + s64 off, old_off; + + if (reg->type != PTR_TO_STACK) { + pr_vlog(env, "%s: unsupported ptr type %d\n", + fname, reg->type); + return false; + } + if (!tnum_is_const(reg->var_off)) { + pr_vlog(env, "%s: variable pointer\n", fname); + return false; + } + + off = reg->var_off.value + reg->off; + if (-off % 4) { + pr_vlog(env, "%s: unaligned stack pointer %lld\n", fname, -off); + return false; + } + + /* Rest of the checks is only if we re-parse the same insn */ + if (!old_arg) + return true; + + old_off = old_arg->reg.var_off.value + old_arg->reg.off; + old_arg->var_off |= off != old_off; + + return true; +} + +static bool +nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env, + struct nfp_insn_meta *meta, + u32 helper_tgt, const struct bpf_reg_state *reg1) +{ + if (!helper_tgt) { + pr_vlog(env, "%s: not supported by FW\n", fname); + return false; + } + + /* Rest of the checks is only if 
we re-parse the same insn */ + if (!meta->func_id) + return true; + + if (meta->arg1.map_ptr != reg1->map_ptr) { + pr_vlog(env, "%s: called for different map\n", fname); + return false; + } + + return true; +} + +static int nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env, struct nfp_insn_meta *meta) { const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1; const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2; + const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3; struct nfp_app_bpf *bpf = nfp_prog->bpf; u32 func_id = meta->insn.imm; - s64 off, old_off; switch (func_id) { case BPF_FUNC_xdp_adjust_head: @@ -131,41 +186,36 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env, break; case BPF_FUNC_map_lookup_elem: - if (!bpf->helpers.map_lookup) { - pr_vlog(env, "map_lookup: not supported by FW\n"); + if (!nfp_bpf_map_call_ok("map_lookup", env, meta, + bpf->helpers.map_lookup, reg1) || + !nfp_bpf_stack_arg_ok("map_lookup", env, reg2, + meta->func_id ? &meta->arg2 : NULL)) return -EOPNOTSUPP; - } - if (reg2->type != PTR_TO_STACK) { - pr_vlog(env, - "map_lookup: unsupported key ptr type %d\n", - reg2->type); - return -EOPNOTSUPP; - } - if (!tnum_is_const(reg2->var_off)) { - pr_vlog(env, "map_lookup: variable key pointer\n"); + break; + + case BPF_FUNC_map_update_elem: + if (!nfp_bpf_map_call_ok("map_update", env, meta, + bpf->helpers.map_update, reg1) || + !nfp_bpf_stack_arg_ok("map_update", env, reg2, + meta->func_id ? &meta->arg2 : NULL) || + !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL)) return -EOPNOTSUPP; - } + break; - off = reg2->var_off.value + reg2->off; - if (-off % 4) { - pr_vlog(env, - "map_lookup: unaligned stack pointer %lld\n", - -off); + case BPF_FUNC_map_delete_elem: + if (!nfp_bpf_map_call_ok("map_delete", env, meta, + bpf->helpers.map_delete, reg1) || + !nfp_bpf_stack_arg_ok("map_delete", env, reg2, + meta->func_id ? 
&meta->arg2 : NULL)) return -EOPNOTSUPP; - } + break; - /* Rest of the checks is only if we re-parse the same insn */ - if (!meta->func_id) + case BPF_FUNC_get_prandom_u32: + if (bpf->pseudo_random) break; + pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n"); + return -EOPNOTSUPP; - old_off = meta->arg2.var_off.value + meta->arg2.off; - meta->arg2_var_off |= off != old_off; - - if (meta->arg1.map_ptr != reg1->map_ptr) { - pr_vlog(env, "map_lookup: called for different map\n"); - return -EOPNOTSUPP; - } - break; default: pr_vlog(env, "unsupported function id: %d\n", func_id); return -EOPNOTSUPP; @@ -173,7 +223,7 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env, meta->func_id = func_id; meta->arg1 = *reg1; - meta->arg2 = *reg2; + meta->arg2.reg = *reg2; return 0; } @@ -242,6 +292,72 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog, return -EINVAL; } +static const char *nfp_bpf_map_use_name(enum nfp_bpf_map_use use) +{ + static const char * const names[] = { + [NFP_MAP_UNUSED] = "unused", + [NFP_MAP_USE_READ] = "read", + [NFP_MAP_USE_WRITE] = "write", + [NFP_MAP_USE_ATOMIC_CNT] = "atomic", + }; + + if (use >= ARRAY_SIZE(names) || !names[use]) + return "unknown"; + return names[use]; +} + +static int +nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env, + struct nfp_bpf_map *nfp_map, + unsigned int off, enum nfp_bpf_map_use use) +{ + if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED && + nfp_map->use_map[off / 4] != use) { + pr_vlog(env, "map value use type conflict %s vs %s off: %u\n", + nfp_bpf_map_use_name(nfp_map->use_map[off / 4]), + nfp_bpf_map_use_name(use), off); + return -EOPNOTSUPP; + } + + nfp_map->use_map[off / 4] = use; + + return 0; +} + +static int +nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta, + const struct bpf_reg_state *reg, + enum nfp_bpf_map_use use) +{ + struct bpf_offloaded_map *offmap; + struct nfp_bpf_map *nfp_map; + unsigned int size, off; + int i, err; + + if (!tnum_is_const(reg->var_off)) { + pr_vlog(env, "map value offset is variable\n"); + return -EOPNOTSUPP; + } + + off = reg->var_off.value + meta->insn.off + reg->off; + size = BPF_LDST_BYTES(&meta->insn); + offmap = map_to_offmap(reg->map_ptr); + nfp_map = offmap->dev_priv; + + if (off + size > offmap->map.value_size) { + pr_vlog(env, "map value access out-of-bounds\n"); + return -EINVAL; + } + + for (i = 0; i < size; i += 4 - (off + i) % 4) { + err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use); + if (err) + return err; + } + + return 0; +} + static int nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, struct bpf_verifier_env *env, u8 reg_no) @@ -264,10 +380,22 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, } if (reg->type == PTR_TO_MAP_VALUE) { + if (is_mbpf_load(meta)) { + err = nfp_bpf_map_mark_used(env, meta, reg, + NFP_MAP_USE_READ); + if (err) + return err; + } if (is_mbpf_store(meta)) { pr_vlog(env, "map writes not supported\n"); return -EOPNOTSUPP; } + if (is_mbpf_xadd(meta)) { + err = nfp_bpf_map_mark_used(env, meta, reg, + NFP_MAP_USE_ATOMIC_CNT); + if (err) + return err; + } } if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) { @@ -282,6 +410,31 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, } static int +nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + struct bpf_verifier_env *env) +{ + const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg; + const 
struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg; + + if (dreg->type != PTR_TO_MAP_VALUE) { + pr_vlog(env, "atomic add not to a map value pointer: %d\n", + dreg->type); + return -EOPNOTSUPP; + } + if (sreg->type != SCALAR_VALUE) { + pr_vlog(env, "atomic add not of a scalar: %d\n", sreg->type); + return -EOPNOTSUPP; + } + + meta->xadd_over_16bit |= + sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff; + meta->xadd_maybe_16bit |= + (sreg->var_off.value & ~sreg->var_off.mask) <= 0xffff; + + return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg); +} + +static int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv; @@ -313,6 +466,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) if (is_mbpf_store(meta)) return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg); + if (is_mbpf_xadd(meta)) + return nfp_bpf_check_xadd(nfp_prog, meta, env); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/Makefile b/drivers/net/ethernet/netronome/nfp/flower/Makefile new file mode 100644 index 000000000000..805fa28f391a --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/flower/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index baaea6f1a9d8..3735c09d2112 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -104,7 +104,8 @@ nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx, msg->ports[idx].phys_port = phys_port; } -int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok) +int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok, + unsigned int mtu, bool mtu_only) { struct nfp_flower_cmsg_portmod *msg; struct sk_buff *skb; @@ -118,7 +119,11 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok) msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id); msg->reserved = 0; msg->info = carrier_ok; - msg->mtu = cpu_to_be16(repr->netdev->mtu); + + if (mtu_only) + msg->info |= NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY; + + msg->mtu = cpu_to_be16(mtu); nfp_ctrl_tx(repr->app->ctrl, skb); @@ -146,6 +151,34 @@ int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists) return 0; } +static bool +nfp_flower_process_mtu_ack(struct nfp_app *app, struct sk_buff *skb) +{ + struct nfp_flower_priv *app_priv = app->priv; + struct nfp_flower_cmsg_portmod *msg; + + msg = nfp_flower_cmsg_get_data(skb); + + if (!(msg->info & NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY)) + return false; + + spin_lock_bh(&app_priv->mtu_conf.lock); + if (!app_priv->mtu_conf.requested_val || + app_priv->mtu_conf.portnum != be32_to_cpu(msg->portnum) || + be16_to_cpu(msg->mtu) != app_priv->mtu_conf.requested_val) { + /* Not an ack for requested MTU change. 
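+ * Either no change is pending, the port number differs, or the
+ * MTU echoed by firmware does not match the requested value;
+ * leave the message to the regular portmod handler.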
*/ + spin_unlock_bh(&app_priv->mtu_conf.lock); + return false; + } + + app_priv->mtu_conf.ack = true; + app_priv->mtu_conf.requested_val = 0; + wake_up(&app_priv->mtu_conf.wait_q); + spin_unlock_bh(&app_priv->mtu_conf.lock); + + return true; +} + static void nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb) { @@ -269,6 +302,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) /* We need to deal with stats updates from HW asap */ nfp_flower_rx_flow_stats(app, skb); dev_consume_skb_any(skb); + } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_MOD && + nfp_flower_process_mtu_ack(app, skb)) { + /* Handle MTU acks outside wq to prevent RTNL conflict. */ + dev_consume_skb_any(skb); } else { skb_queue_tail(&priv->cmsg_skbs, skb); schedule_work(&priv->cmsg_work); diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index adfe474c2cf0..96bc0e33980c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -61,6 +61,16 @@ #define NFP_FLOWER_MASK_MPLS_BOS BIT(8) #define NFP_FLOWER_MASK_MPLS_Q BIT(0) +#define NFP_FL_IP_FRAG_FIRST BIT(7) +#define NFP_FL_IP_FRAGMENTED BIT(6) + +/* Compressed HW representation of TCP Flags */ +#define NFP_FL_TCP_FLAG_URG BIT(4) +#define NFP_FL_TCP_FLAG_PSH BIT(3) +#define NFP_FL_TCP_FLAG_RST BIT(2) +#define NFP_FL_TCP_FLAG_SYN BIT(1) +#define NFP_FL_TCP_FLAG_FIN BIT(0) + #define NFP_FL_SC_ACT_DROP 0x80000000 #define NFP_FL_SC_ACT_USER 0x7D000000 #define NFP_FL_SC_ACT_POPV 0x6A000000 @@ -253,11 +263,18 @@ struct nfp_flower_tp_ports { __be16 port_dst; }; +struct nfp_flower_ip_ext { + u8 tos; + u8 proto; + u8 ttl; + u8 flags; +}; + /* L3 IPv4 details (3W/12B) * 3 2 1 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | DSCP |ECN| protocol | reserved | + * | DSCP |ECN| protocol | ttl | flags | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ipv4_addr_src | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -265,10 +282,7 @@ struct nfp_flower_tp_ports { * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct nfp_flower_ipv4 { - u8 tos; - u8 proto; - u8 ttl; - u8 reserved; + struct nfp_flower_ip_ext ip_ext; __be32 ipv4_src; __be32 ipv4_dst; }; @@ -277,7 +291,7 @@ struct nfp_flower_ipv4 { * 3 2 1 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | DSCP |ECN| protocol | reserved | + * | DSCP |ECN| protocol | ttl | flags | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ipv6_exthdr | res | ipv6_flow_label | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -299,10 +313,7 @@ struct nfp_flower_ipv4 { * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct nfp_flower_ipv6 { - u8 tos; - u8 proto; - u8 ttl; - u8 reserved; + struct nfp_flower_ip_ext ip_ext; __be32 ipv6_flow_label_exthdr; struct in6_addr ipv6_src; struct in6_addr ipv6_dst; @@ -386,6 +397,7 @@ struct nfp_flower_cmsg_portmod { }; #define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK BIT(0) +#define NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY BIT(1) /* NFP_FLOWER_CMSG_TYPE_PORT_REIFY */ struct nfp_flower_cmsg_portreify { @@ -453,7 +465,8 @@ void nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx, unsigned int nbi, unsigned int nbi_port, unsigned int 
phys_port); -int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok); +int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok, + unsigned int mtu, bool mtu_only); int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists); void nfp_flower_cmsg_process_rx(struct work_struct *work); void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb); diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 742d6f1575b5..6357e0720f43 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -52,6 +52,8 @@ #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL +#define NFP_FLOWER_FRAME_HEADROOM 158 + static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn) { return "FLOWER"; @@ -157,7 +159,7 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr) { int err; - err = nfp_flower_cmsg_portmod(repr, true); + err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false); if (err) return err; @@ -171,7 +173,7 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr) { netif_tx_disable(repr->netdev); - return nfp_flower_cmsg_portmod(repr, false); + return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false); } static int @@ -521,6 +523,9 @@ static int nfp_flower_init(struct nfp_app *app) INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); init_waitqueue_head(&app_priv->reify_wait_queue); + init_waitqueue_head(&app_priv->mtu_conf.wait_q); + spin_lock_init(&app_priv->mtu_conf.lock); + err = nfp_flower_metadata_init(app); if (err) goto err_free_app_priv; @@ -552,6 +557,81 @@ static void nfp_flower_clean(struct nfp_app *app) app->priv = NULL; } +static int +nfp_flower_check_mtu(struct nfp_app *app, struct net_device *netdev, + int new_mtu) +{ + /* The flower fw reserves NFP_FLOWER_FRAME_HEADROOM bytes of the + * supported max MTU to allow for appending tunnel headers. To prevent + * unexpected behaviour this needs to be accounted for. + */ + if (new_mtu > netdev->max_mtu - NFP_FLOWER_FRAME_HEADROOM) { + nfp_err(app->cpp, "New MTU (%d) is not valid\n", new_mtu); + return -EINVAL; + } + + return 0; +} + +static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv) +{ + bool ret; + + spin_lock_bh(&app_priv->mtu_conf.lock); + ret = app_priv->mtu_conf.ack; + spin_unlock_bh(&app_priv->mtu_conf.lock); + + return ret; +} + +static int +nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev, + int new_mtu) +{ + struct nfp_flower_priv *app_priv = app->priv; + struct nfp_repr *repr = netdev_priv(netdev); + int err, ack; + + /* Only need to config FW for physical port MTU change. */ + if (repr->port->type != NFP_PORT_PHYS_PORT) + return 0; + + if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) { + nfp_err(app->cpp, "Physical port MTU setting not supported\n"); + return -EINVAL; + } + + spin_lock_bh(&app_priv->mtu_conf.lock); + app_priv->mtu_conf.ack = false; + app_priv->mtu_conf.requested_val = new_mtu; + app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id; + spin_unlock_bh(&app_priv->mtu_conf.lock); + + err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu, + true); + if (err) { + spin_lock_bh(&app_priv->mtu_conf.lock); + app_priv->mtu_conf.requested_val = 0; + spin_unlock_bh(&app_priv->mtu_conf.lock); + return err; + } + + /* Wait for fw to ack the change. 
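+ * The ack flag is set from the cmsg RX path, outside the RTNL
+ * lock, so sleep on the wait queue rather than poll; the 10 ms
+ * timeout below bounds the wait if firmware never replies.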
*/ + ack = wait_event_timeout(app_priv->mtu_conf.wait_q, + nfp_flower_check_ack(app_priv), + msecs_to_jiffies(10)); + + if (!ack) { + spin_lock_bh(&app_priv->mtu_conf.lock); + app_priv->mtu_conf.requested_val = 0; + spin_unlock_bh(&app_priv->mtu_conf.lock); + nfp_warn(app->cpp, "MTU change not verified with fw\n"); + return -EIO; + } + + return 0; +} + static int nfp_flower_start(struct nfp_app *app) { return nfp_tunnel_config_start(app); @@ -574,6 +654,9 @@ const struct nfp_app_type app_flower = { .init = nfp_flower_init, .clean = nfp_flower_clean, + .check_mtu = nfp_flower_check_mtu, + .repr_change_mtu = nfp_flower_repr_change_mtu, + .vnic_alloc = nfp_flower_vnic_alloc, .vnic_init = nfp_flower_vnic_init, .vnic_clean = nfp_flower_vnic_clean, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 332ff0fdc038..e030b3ce4510 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -41,6 +41,7 @@ #include <linux/time64.h> #include <linux/types.h> #include <net/pkt_cls.h> +#include <net/tcp.h> #include <linux/workqueue.h> struct net_device; @@ -64,6 +65,7 @@ struct nfp_app; /* Extra features bitmap. */ #define NFP_FL_FEATS_GENEVE BIT(0) +#define NFP_FL_NBI_MTU_SETTING BIT(1) struct nfp_fl_mask_id { struct circ_buf mask_id_free_list; @@ -78,6 +80,22 @@ struct nfp_fl_stats_id { }; /** + * struct nfp_mtu_conf - manage MTU setting + * @portnum: NFP port number of repr with requested MTU change + * @requested_val: MTU value requested for repr + * @ack: Received ack that MTU has been correctly set + * @wait_q: Wait queue for MTU acknowledgements + * @lock: Lock for setting/reading MTU variables + */ +struct nfp_mtu_conf { + u32 portnum; + unsigned int requested_val; + bool ack; + wait_queue_head_t wait_q; + spinlock_t lock; +}; + +/** * struct nfp_flower_priv - Flower APP per-vNIC priv data * @app: Back pointer to app * @nn: Pointer to vNIC @@ -105,6 +123,7 @@ struct nfp_fl_stats_id { * @reify_replies: atomically stores the number of replies received * from firmware for repr reify * @reify_wait_queue: wait queue for repr reify response counting + * @mtu_conf: Configuration of repr MTU value */ struct nfp_flower_priv { struct nfp_app *app; @@ -132,6 +151,7 @@ struct nfp_flower_priv { struct notifier_block nfp_tun_neigh_nb; atomic_t reify_replies; wait_queue_head_t reify_wait_queue; + struct nfp_mtu_conf mtu_conf; }; struct nfp_fl_key_ls { diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 37c2ecae2a7a..91935405f586 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -146,26 +146,15 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame, } static void -nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame, - struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *frame, + struct tc_cls_flower_offload *flow, + bool mask_version) { struct fl_flow_key *target = mask_version ? 
flow->mask : flow->key; - struct flow_dissector_key_ipv4_addrs *addr; - struct flow_dissector_key_basic *basic; - - memset(frame, 0, sizeof(struct nfp_flower_ipv4)); - - if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { - addr = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - target); - frame->ipv4_src = addr->src; - frame->ipv4_dst = addr->dst; - } if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *basic; + basic = skb_flow_dissector_target(flow->dissector, FLOW_DISSECTOR_KEY_BASIC, target); @@ -181,6 +170,60 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame, frame->tos = flow_ip->tos; frame->ttl = flow_ip->ttl; } + + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) { + struct flow_dissector_key_tcp *tcp; + u32 tcp_flags; + + tcp = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_TCP, target); + tcp_flags = be16_to_cpu(tcp->flags); + + if (tcp_flags & TCPHDR_FIN) + frame->flags |= NFP_FL_TCP_FLAG_FIN; + if (tcp_flags & TCPHDR_SYN) + frame->flags |= NFP_FL_TCP_FLAG_SYN; + if (tcp_flags & TCPHDR_RST) + frame->flags |= NFP_FL_TCP_FLAG_RST; + if (tcp_flags & TCPHDR_PSH) + frame->flags |= NFP_FL_TCP_FLAG_PSH; + if (tcp_flags & TCPHDR_URG) + frame->flags |= NFP_FL_TCP_FLAG_URG; + } + + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key; + + key = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + target); + if (key->flags & FLOW_DIS_IS_FRAGMENT) + frame->flags |= NFP_FL_IP_FRAGMENTED; + if (key->flags & FLOW_DIS_FIRST_FRAG) + frame->flags |= NFP_FL_IP_FRAG_FIRST; + } +} + +static void +nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame, + struct tc_cls_flower_offload *flow, + bool mask_version) +{ + struct fl_flow_key *target = mask_version ? flow->mask : flow->key; + struct flow_dissector_key_ipv4_addrs *addr; + + memset(frame, 0, sizeof(struct nfp_flower_ipv4)); + + if (dissector_uses_key(flow->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + addr = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + target); + frame->ipv4_src = addr->src; + frame->ipv4_dst = addr->dst; + } + + nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version); } static void @@ -190,7 +233,6 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame, { struct fl_flow_key *target = mask_version ? 
flow->mask : flow->key; struct flow_dissector_key_ipv6_addrs *addr; - struct flow_dissector_key_basic *basic; memset(frame, 0, sizeof(struct nfp_flower_ipv6)); @@ -203,22 +245,7 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame, frame->ipv6_dst = addr->dst; } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - target); - frame->proto = basic->ip_proto; - } - - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) { - struct flow_dissector_key_ip *flow_ip; - - flow_ip = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_IP, - target); - frame->tos = flow_ip->tos; - frame->ttl = flow_ip->ttl; - } + nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version); } static void diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index eb5c13dea8f5..114d2ab02a38 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -44,11 +44,20 @@ #include "../nfp_net.h" #include "../nfp_port.h" +#define NFP_FLOWER_SUPPORTED_TCPFLAGS \ + (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \ + TCPHDR_PSH | TCPHDR_URG) + +#define NFP_FLOWER_SUPPORTED_CTLFLAGS \ + (FLOW_DIS_IS_FRAGMENT | \ + FLOW_DIS_FIRST_FRAG) + #define NFP_FLOWER_WHITELIST_DISSECTOR \ (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \ BIT(FLOW_DISSECTOR_KEY_BASIC) | \ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_TCP) | \ BIT(FLOW_DISSECTOR_KEY_PORTS) | \ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \ BIT(FLOW_DISSECTOR_KEY_VLAN) | \ @@ -288,6 +297,46 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } } + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) { + struct flow_dissector_key_tcp *tcp; + u32 tcp_flags; + + tcp = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_TCP, + flow->key); + tcp_flags = be16_to_cpu(tcp->flags); + + if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) + return -EOPNOTSUPP; + + /* We only support PSH and URG flags when either + * FIN, SYN or RST is present as well. + */ + if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) && + !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) + return -EOPNOTSUPP; + + /* We need to store TCP flags in the IPv4 key space, thus + * we need to ensure we include a IPv4 key layer if we have + * not done so already. 
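+ * The flags byte lives in struct nfp_flower_ip_ext, at the head
+ * of the IPv4 match layer, so a TCP-flags match still has to
+ * carry that layer.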
+ */ + if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) { + key_layer |= NFP_FLOWER_LAYER_IPV4; + key_size += sizeof(struct nfp_flower_ipv4); + } + } + + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key_ctl; + + key_ctl = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + flow->key); + + if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) + return -EOPNOTSUPP; + } + ret_key_ls->key_layer = key_layer; ret_key_ls->key_layer_two = key_layer_two; ret_key_ls->key_size = key_size; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 20546ae67909..2d9cb2528fc7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -86,8 +86,8 @@ extern const struct nfp_app_type app_flower; * @repr_clean: representor about to be unregistered * @repr_open: representor netdev open callback * @repr_stop: representor netdev stop callback - * @change_mtu: MTU change on a netdev has been requested (veto-only, change - * is not guaranteed to be committed) + * @check_mtu: MTU change request on a netdev (verify it is valid) + * @repr_change_mtu: MTU change request on repr (make and verify change) * @start: start application logic * @stop: stop application logic * @ctrl_msg_rx: control message handler @@ -124,8 +124,10 @@ struct nfp_app_type { int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr); int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr); - int (*change_mtu)(struct nfp_app *app, struct net_device *netdev, - int new_mtu); + int (*check_mtu)(struct nfp_app *app, struct net_device *netdev, + int new_mtu); + int (*repr_change_mtu)(struct nfp_app *app, struct net_device *netdev, + int new_mtu); int (*start)(struct nfp_app *app); void (*stop)(struct nfp_app *app); @@ -247,11 +249,20 @@ nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev) } static inline int -nfp_app_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) +nfp_app_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) { - if (!app || !app->type->change_mtu) + if (!app || !app->type->check_mtu) return 0; - return app->type->change_mtu(app, netdev, new_mtu); + return app->type->check_mtu(app, netdev, new_mtu); +} + +static inline int +nfp_app_repr_change_mtu(struct nfp_app *app, struct net_device *netdev, + int new_mtu) +{ + if (!app || !app->type->repr_change_mtu) + return 0; + return app->type->repr_change_mtu(app, netdev, new_mtu); } static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index 1e597600c693..cc6ace2be8a9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -48,6 +48,8 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = { [CMD_TGT_READ32_SWAP] = { 0x02, 0x5c }, [CMD_TGT_READ_LE] = { 0x01, 0x40 }, [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 }, + [CMD_TGT_ADD] = { 0x00, 0x47 }, + [CMD_TGT_ADD_IMM] = { 0x02, 0x47 }, }; static bool unreg_is_imm(u16 reg) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index 5f9291db98e0..5f2b2f24f4fa 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -39,6 +39,7 @@ #include <linux/types.h> #define REG_NONE 0 +#define REG_WIDTH 4 #define RE_REG_NO_DST 0x020 
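The CMD_TGT_ADD_IMM vs CMD_TGT_ADD split added to nfp_asm.c above is driven by two predicates that nfp_bpf_check_xadd() accumulates from the verifier's tnum tracking of the xadd source register. Below is a minimal user-space sketch of those predicates; struct tnum here is a simplified stand-in for the kernel's, and the function and variable names are illustrative only:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative tnum: bits set in mask are unknown, bits set in
     * value are known ones (value & mask == 0 by construction). */
    struct tnum { uint64_t value; uint64_t mask; };

    /* Some feasible runtime value needs more than 16 bits. */
    static bool xadd_over_16bit(struct tnum t)
    {
        return t.value > 0xffff || t.mask > 0xffff;
    }

    /* The known-one bits fit in 16 bits, so a 16-bit value is
     * feasible on at least one path. */
    static bool xadd_maybe_16bit(struct tnum t)
    {
        return (t.value & ~t.mask) <= 0xffff;
    }

    int main(void)
    {
        struct tnum known_small = { 0x10, 0 };    /* constant 16 */
        struct tnum unknown20 = { 0, 0xfffff };   /* 20 unknown bits */

        /* prints 0 1: only the 16-bit add_imm sequence is needed */
        printf("%d %d\n", xadd_over_16bit(known_small),
               xadd_maybe_16bit(known_small));
        /* prints 1 1: both sequences plus a runtime branch are needed */
        printf("%d %d\n", xadd_over_16bit(unknown20),
               xadd_maybe_16bit(unknown20));
        return 0;
    }

When both predicates hold, mem_xadd() emits the add_imm and add sequences back to back behind a runtime compare against 0xffff (the max_imm branch earlier in this patch), which is exactly what the out/full_add offset bookkeeping at the top of that function accounts for.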
#define RE_REG_IMM 0x020 @@ -237,6 +238,8 @@ enum cmd_tgt_map { CMD_TGT_READ32_SWAP, CMD_TGT_READ_LE, CMD_TGT_READ_SWAP_LE, + CMD_TGT_ADD, + CMD_TGT_ADD_IMM, __CMD_TGT_MAP_SIZE, }; @@ -250,9 +253,12 @@ enum cmd_mode { enum cmd_ctx_swap { CMD_CTX_SWAP = 0, + CMD_CTX_SWAP_DEFER1 = 1, + CMD_CTX_SWAP_DEFER2 = 2, CMD_CTX_NO_SWAP = 3, }; +#define CMD_OVE_DATA GENMASK(5, 3) #define CMD_OVE_LEN BIT(7) #define CMD_OV_LEN GENMASK(12, 8) @@ -278,6 +284,7 @@ enum lcsr_wr_src { #define NFP_CSR_ACT_LM_ADDR1 0x6c #define NFP_CSR_ACT_LM_ADDR2 0x94 #define NFP_CSR_ACT_LM_ADDR3 0x9c +#define NFP_CSR_PSEUDO_RND_NUM 0x148 /* Software register representation, independent of operand type */ #define NN_REG_TYPE GENMASK(31, 24) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index ab301d56430b..c4b1f344b4da 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -645,6 +645,7 @@ MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_4x10_1x40.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw"); MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 787df47ec430..bd7d8ae31e17 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -391,6 +391,7 @@ struct nfp_net_rx_ring { * @rx_drops: Number of packets dropped on RX due to lack of resources * @hw_csum_rx_ok: Counter of packets where the HW checksum was OK * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK + * @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported * @hw_csum_rx_error: Counter of packets with bad checksums * @tx_sync: Seqlock for atomic updates of TX stats * @tx_pkts: Number of Transmitted packets @@ -434,7 +435,7 @@ struct nfp_net_r_vector { u64 rx_drops; u64 hw_csum_rx_ok; u64 hw_csum_rx_inner_ok; - u64 hw_csum_rx_error; + u64 hw_csum_rx_complete; struct nfp_net_tx_ring *xdp_ring; @@ -446,6 +447,7 @@ struct nfp_net_r_vector { u64 tx_gather; u64 tx_lso; + u64 hw_csum_rx_error; u64 rx_replace_buf_alloc_fail; u64 tx_errors; u64 tx_busy; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index a05be0ab2713..1eb6549f2a54 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1406,7 +1406,7 @@ static void nfp_net_rx_csum(struct nfp_net_dp *dp, skb->ip_summed = meta->csum_type; skb->csum = meta->csum; u64_stats_update_begin(&r_vec->rx_sync); - r_vec->hw_csum_rx_ok++; + r_vec->hw_csum_rx_complete++; u64_stats_update_end(&r_vec->rx_sync); return; } @@ -3066,7 +3066,7 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) struct nfp_net_dp *dp; int err; - err = nfp_app_change_mtu(nn->app, netdev, new_mtu); + err = nfp_app_check_mtu(nn->app, netdev, new_mtu); if (err) return err; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 4499a7333078..bb63c115537d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -1,5 +1,5 @@ /* - * 
Copyright (C) 2015-2017 Netronome Systems, Inc. + * Copyright (C) 2015-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -51,12 +51,12 @@ * The configuration BAR is 8K in size, but due to * THB-350, 32k needs to be reserved. */ -#define NFP_NET_CFG_BAR_SZ (32 * 1024) +#define NFP_NET_CFG_BAR_SZ (32 * 1024) /** * Offset in Freelist buffer where packet starts on RX */ -#define NFP_NET_RX_OFFSET 32 +#define NFP_NET_RX_OFFSET 32 /** * LSO parameters @@ -75,65 +75,65 @@ #define NFP_NET_META_PORTID 5 #define NFP_NET_META_CSUM 6 /* checksum complete type */ -#define NFP_META_PORT_ID_CTRL ~0U +#define NFP_META_PORT_ID_CTRL ~0U /** * Hash type pre-pended when a RSS hash was computed */ -#define NFP_NET_RSS_NONE 0 -#define NFP_NET_RSS_IPV4 1 -#define NFP_NET_RSS_IPV6 2 -#define NFP_NET_RSS_IPV6_EX 3 -#define NFP_NET_RSS_IPV4_TCP 4 -#define NFP_NET_RSS_IPV6_TCP 5 -#define NFP_NET_RSS_IPV6_EX_TCP 6 -#define NFP_NET_RSS_IPV4_UDP 7 -#define NFP_NET_RSS_IPV6_UDP 8 -#define NFP_NET_RSS_IPV6_EX_UDP 9 +#define NFP_NET_RSS_NONE 0 +#define NFP_NET_RSS_IPV4 1 +#define NFP_NET_RSS_IPV6 2 +#define NFP_NET_RSS_IPV6_EX 3 +#define NFP_NET_RSS_IPV4_TCP 4 +#define NFP_NET_RSS_IPV6_TCP 5 +#define NFP_NET_RSS_IPV6_EX_TCP 6 +#define NFP_NET_RSS_IPV4_UDP 7 +#define NFP_NET_RSS_IPV6_UDP 8 +#define NFP_NET_RSS_IPV6_EX_UDP 9 /** * Ring counts - * %NFP_NET_TXR_MAX: Maximum number of TX rings - * %NFP_NET_RXR_MAX: Maximum number of RX rings + * %NFP_NET_TXR_MAX: Maximum number of TX rings + * %NFP_NET_RXR_MAX: Maximum number of RX rings */ -#define NFP_NET_TXR_MAX 64 -#define NFP_NET_RXR_MAX 64 +#define NFP_NET_TXR_MAX 64 +#define NFP_NET_RXR_MAX 64 /** * Read/Write config words (0x0000 - 0x002c) - * %NFP_NET_CFG_CTRL: Global control + * %NFP_NET_CFG_CTRL: Global control * %NFP_NET_CFG_UPDATE: Indicate which fields are updated * %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings * %NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings - * %NFP_NET_CFG_MTU: Set MTU size + * %NFP_NET_CFG_MTU: Set MTU size * %NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU) - * %NFP_NET_CFG_EXN: MSI-X table entry for exceptions - * %NFP_NET_CFG_LSC: MSI-X table entry for link state changes + * %NFP_NET_CFG_EXN: MSI-X table entry for exceptions + * %NFP_NET_CFG_LSC: MSI-X table entry for link state changes * %NFP_NET_CFG_MACADDR: MAC address * * TODO: * - define Error details in UPDATE */ -#define NFP_NET_CFG_CTRL 0x0000 -#define NFP_NET_CFG_CTRL_ENABLE (0x1 << 0) /* Global enable */ -#define NFP_NET_CFG_CTRL_PROMISC (0x1 << 1) /* Enable Promisc mode */ -#define NFP_NET_CFG_CTRL_L2BC (0x1 << 2) /* Allow L2 Broadcast */ -#define NFP_NET_CFG_CTRL_L2MC (0x1 << 3) /* Allow L2 Multicast */ -#define NFP_NET_CFG_CTRL_RXCSUM (0x1 << 4) /* Enable RX Checksum */ -#define NFP_NET_CFG_CTRL_TXCSUM (0x1 << 5) /* Enable TX Checksum */ -#define NFP_NET_CFG_CTRL_RXVLAN (0x1 << 6) /* Enable VLAN strip */ -#define NFP_NET_CFG_CTRL_TXVLAN (0x1 << 7) /* Enable VLAN insert */ -#define NFP_NET_CFG_CTRL_SCATTER (0x1 << 8) /* Scatter DMA */ -#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */ -#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */ +#define NFP_NET_CFG_CTRL 0x0000 +#define NFP_NET_CFG_CTRL_ENABLE (0x1 << 0) /* Global enable */ +#define NFP_NET_CFG_CTRL_PROMISC (0x1 << 1) /* Enable Promisc mode */ +#define NFP_NET_CFG_CTRL_L2BC (0x1 << 2) /* Allow L2 Broadcast */ +#define 
NFP_NET_CFG_CTRL_L2MC (0x1 << 3) /* Allow L2 Multicast */ +#define NFP_NET_CFG_CTRL_RXCSUM (0x1 << 4) /* Enable RX Checksum */ +#define NFP_NET_CFG_CTRL_TXCSUM (0x1 << 5) /* Enable TX Checksum */ +#define NFP_NET_CFG_CTRL_RXVLAN (0x1 << 6) /* Enable VLAN strip */ +#define NFP_NET_CFG_CTRL_TXVLAN (0x1 << 7) /* Enable VLAN insert */ +#define NFP_NET_CFG_CTRL_SCATTER (0x1 << 8) /* Scatter DMA */ +#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */ +#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */ #define NFP_NET_CFG_CTRL_CTAG_FILTER (0x1 << 11) /* VLAN CTAG filtering */ -#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */ +#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */ #define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */ -#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */ -#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */ -#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */ -#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/ -#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */ +#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */ +#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */ +#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */ +#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/ +#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */ #define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */ #define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */ #define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */ @@ -152,35 +152,35 @@ #define NFP_NET_CFG_CTRL_CHAIN_META (NFP_NET_CFG_CTRL_RSS2 | \ NFP_NET_CFG_CTRL_CSUM_COMPLETE) -#define NFP_NET_CFG_UPDATE 0x0004 -#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */ -#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */ -#define NFP_NET_CFG_UPDATE_RSS (0x1 << 2) /* RSS config change */ -#define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */ -#define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */ -#define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */ -#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */ -#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */ -#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */ +#define NFP_NET_CFG_UPDATE 0x0004 +#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */ +#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */ +#define NFP_NET_CFG_UPDATE_RSS (0x1 << 2) /* RSS config change */ +#define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */ +#define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */ +#define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */ +#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */ +#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */ +#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */ #define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */ #define NFP_NET_CFG_UPDATE_BPF (0x1 << 10) /* BPF program load */ #define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */ #define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */ #define NFP_NET_CFG_UPDATE_VF (0x1 << 13) /* VF settings change */ -#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */ -#define 
NFP_NET_CFG_TXRS_ENABLE 0x0008 -#define NFP_NET_CFG_RXRS_ENABLE 0x0010 -#define NFP_NET_CFG_MTU 0x0018 -#define NFP_NET_CFG_FLBUFSZ 0x001c -#define NFP_NET_CFG_EXN 0x001f -#define NFP_NET_CFG_LSC 0x0020 -#define NFP_NET_CFG_MACADDR 0x0024 +#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */ +#define NFP_NET_CFG_TXRS_ENABLE 0x0008 +#define NFP_NET_CFG_RXRS_ENABLE 0x0010 +#define NFP_NET_CFG_MTU 0x0018 +#define NFP_NET_CFG_FLBUFSZ 0x001c +#define NFP_NET_CFG_EXN 0x001f +#define NFP_NET_CFG_LSC 0x0020 +#define NFP_NET_CFG_MACADDR 0x0024 /** * Read-only words (0x0030 - 0x0050): * %NFP_NET_CFG_VERSION: Firmware version number - * %NFP_NET_CFG_STS: Status - * %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL) + * %NFP_NET_CFG_STS: Status + * %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL) * %NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings * %NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings * %NFP_NET_CFG_MAX_MTU: Maximum support MTU @@ -190,37 +190,37 @@ * TODO: * - define more STS bits */ -#define NFP_NET_CFG_VERSION 0x0030 +#define NFP_NET_CFG_VERSION 0x0030 #define NFP_NET_CFG_VERSION_RESERVED_MASK (0xff << 24) #define NFP_NET_CFG_VERSION_CLASS_MASK (0xff << 16) -#define NFP_NET_CFG_VERSION_CLASS(x) (((x) & 0xff) << 16) +#define NFP_NET_CFG_VERSION_CLASS(x) (((x) & 0xff) << 16) #define NFP_NET_CFG_VERSION_CLASS_GENERIC 0 #define NFP_NET_CFG_VERSION_MAJOR_MASK (0xff << 8) -#define NFP_NET_CFG_VERSION_MAJOR(x) (((x) & 0xff) << 8) +#define NFP_NET_CFG_VERSION_MAJOR(x) (((x) & 0xff) << 8) #define NFP_NET_CFG_VERSION_MINOR_MASK (0xff << 0) -#define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0) -#define NFP_NET_CFG_STS 0x0034 -#define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */ +#define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0) +#define NFP_NET_CFG_STS 0x0034 +#define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */ /* Link rate */ #define NFP_NET_CFG_STS_LINK_RATE_SHIFT 1 #define NFP_NET_CFG_STS_LINK_RATE_MASK 0xF -#define NFP_NET_CFG_STS_LINK_RATE \ +#define NFP_NET_CFG_STS_LINK_RATE \ (NFP_NET_CFG_STS_LINK_RATE_MASK << NFP_NET_CFG_STS_LINK_RATE_SHIFT) #define NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED 0 -#define NFP_NET_CFG_STS_LINK_RATE_UNKNOWN 1 -#define NFP_NET_CFG_STS_LINK_RATE_1G 2 -#define NFP_NET_CFG_STS_LINK_RATE_10G 3 -#define NFP_NET_CFG_STS_LINK_RATE_25G 4 -#define NFP_NET_CFG_STS_LINK_RATE_40G 5 -#define NFP_NET_CFG_STS_LINK_RATE_50G 6 -#define NFP_NET_CFG_STS_LINK_RATE_100G 7 -#define NFP_NET_CFG_CAP 0x0038 -#define NFP_NET_CFG_MAX_TXRINGS 0x003c -#define NFP_NET_CFG_MAX_RXRINGS 0x0040 -#define NFP_NET_CFG_MAX_MTU 0x0044 +#define NFP_NET_CFG_STS_LINK_RATE_UNKNOWN 1 +#define NFP_NET_CFG_STS_LINK_RATE_1G 2 +#define NFP_NET_CFG_STS_LINK_RATE_10G 3 +#define NFP_NET_CFG_STS_LINK_RATE_25G 4 +#define NFP_NET_CFG_STS_LINK_RATE_40G 5 +#define NFP_NET_CFG_STS_LINK_RATE_50G 6 +#define NFP_NET_CFG_STS_LINK_RATE_100G 7 +#define NFP_NET_CFG_CAP 0x0038 +#define NFP_NET_CFG_MAX_TXRINGS 0x003c +#define NFP_NET_CFG_MAX_RXRINGS 0x0040 +#define NFP_NET_CFG_MAX_MTU 0x0044 /* Next two words are being used by VFs for solving THB350 issue */ -#define NFP_NET_CFG_START_TXQ 0x0048 -#define NFP_NET_CFG_START_RXQ 0x004c +#define NFP_NET_CFG_START_TXQ 0x0048 +#define NFP_NET_CFG_START_RXQ 0x004c /** * Prepend configuration @@ -280,8 +280,8 @@ /** * 40B reserved for future use (0x0098 - 0x00c0) */ -#define NFP_NET_CFG_RESERVED 0x0098 -#define NFP_NET_CFG_RESERVED_SZ 0x0028 +#define NFP_NET_CFG_RESERVED 0x0098 +#define NFP_NET_CFG_RESERVED_SZ 
0x0028 /** * RSS configuration (0x0100 - 0x01ac): @@ -290,26 +290,26 @@ * %NFP_NET_CFG_RSS_KEY: RSS "secret" key * %NFP_NET_CFG_RSS_ITBL: RSS indirection table */ -#define NFP_NET_CFG_RSS_BASE 0x0100 -#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE -#define NFP_NET_CFG_RSS_MASK (0x7f) -#define NFP_NET_CFG_RSS_MASK_of(_x) ((_x) & 0x7f) -#define NFP_NET_CFG_RSS_IPV4 (1 << 8) /* RSS for IPv4 */ -#define NFP_NET_CFG_RSS_IPV6 (1 << 9) /* RSS for IPv6 */ -#define NFP_NET_CFG_RSS_IPV4_TCP (1 << 10) /* RSS for IPv4/TCP */ -#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */ -#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */ -#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */ +#define NFP_NET_CFG_RSS_BASE 0x0100 +#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE +#define NFP_NET_CFG_RSS_MASK (0x7f) +#define NFP_NET_CFG_RSS_MASK_of(_x) ((_x) & 0x7f) +#define NFP_NET_CFG_RSS_IPV4 (1 << 8) /* RSS for IPv4 */ +#define NFP_NET_CFG_RSS_IPV6 (1 << 9) /* RSS for IPv6 */ +#define NFP_NET_CFG_RSS_IPV4_TCP (1 << 10) /* RSS for IPv4/TCP */ +#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */ +#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */ +#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */ #define NFP_NET_CFG_RSS_HFUNC 0xff000000 -#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */ +#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */ #define NFP_NET_CFG_RSS_XOR (1 << 25) /* Use XOR as hash */ #define NFP_NET_CFG_RSS_CRC32 (1 << 26) /* Use CRC32 as hash */ #define NFP_NET_CFG_RSS_HFUNCS 3 -#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4) -#define NFP_NET_CFG_RSS_KEY_SZ 0x28 -#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \ +#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4) +#define NFP_NET_CFG_RSS_KEY_SZ 0x28 +#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \ NFP_NET_CFG_RSS_KEY_SZ) -#define NFP_NET_CFG_RSS_ITBL_SZ 0x80 +#define NFP_NET_CFG_RSS_ITBL_SZ 0x80 /** * TX ring configuration (0x200 - 0x800) @@ -321,13 +321,13 @@ * %NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries) * %NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet */ -#define NFP_NET_CFG_TXR_BASE 0x0200 -#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8)) -#define NFP_NET_CFG_TXR_WB_ADDR(_x) (NFP_NET_CFG_TXR_BASE + 0x200 + \ +#define NFP_NET_CFG_TXR_BASE 0x0200 +#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8)) +#define NFP_NET_CFG_TXR_WB_ADDR(_x) (NFP_NET_CFG_TXR_BASE + 0x200 + \ ((_x) * 0x8)) -#define NFP_NET_CFG_TXR_SZ(_x) (NFP_NET_CFG_TXR_BASE + 0x400 + (_x)) -#define NFP_NET_CFG_TXR_VEC(_x) (NFP_NET_CFG_TXR_BASE + 0x440 + (_x)) -#define NFP_NET_CFG_TXR_PRIO(_x) (NFP_NET_CFG_TXR_BASE + 0x480 + (_x)) +#define NFP_NET_CFG_TXR_SZ(_x) (NFP_NET_CFG_TXR_BASE + 0x400 + (_x)) +#define NFP_NET_CFG_TXR_VEC(_x) (NFP_NET_CFG_TXR_BASE + 0x440 + (_x)) +#define NFP_NET_CFG_TXR_PRIO(_x) (NFP_NET_CFG_TXR_BASE + 0x480 + (_x)) #define NFP_NET_CFG_TXR_IRQ_MOD(_x) (NFP_NET_CFG_TXR_BASE + 0x500 + \ ((_x) * 0x4)) @@ -340,11 +340,11 @@ * %NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries) * %NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries) */ -#define NFP_NET_CFG_RXR_BASE 0x0800 -#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8)) -#define NFP_NET_CFG_RXR_SZ(_x) (NFP_NET_CFG_RXR_BASE + 0x200 + (_x)) -#define NFP_NET_CFG_RXR_VEC(_x) (NFP_NET_CFG_RXR_BASE + 0x240 + (_x)) -#define NFP_NET_CFG_RXR_PRIO(_x) 
(NFP_NET_CFG_RXR_BASE + 0x280 + (_x)) +#define NFP_NET_CFG_RXR_BASE 0x0800 +#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8)) +#define NFP_NET_CFG_RXR_SZ(_x) (NFP_NET_CFG_RXR_BASE + 0x200 + (_x)) +#define NFP_NET_CFG_RXR_VEC(_x) (NFP_NET_CFG_RXR_BASE + 0x240 + (_x)) +#define NFP_NET_CFG_RXR_PRIO(_x) (NFP_NET_CFG_RXR_BASE + 0x280 + (_x)) #define NFP_NET_CFG_RXR_IRQ_MOD(_x) (NFP_NET_CFG_RXR_BASE + 0x300 + \ ((_x) * 0x4)) @@ -358,36 +358,36 @@ * the MSI-X entry and the host driver must clear the register to * re-enable the interrupt. */ -#define NFP_NET_CFG_ICR_BASE 0x0c00 -#define NFP_NET_CFG_ICR(_x) (NFP_NET_CFG_ICR_BASE + (_x)) -#define NFP_NET_CFG_ICR_UNMASKED 0x0 -#define NFP_NET_CFG_ICR_RXTX 0x1 -#define NFP_NET_CFG_ICR_LSC 0x2 +#define NFP_NET_CFG_ICR_BASE 0x0c00 +#define NFP_NET_CFG_ICR(_x) (NFP_NET_CFG_ICR_BASE + (_x)) +#define NFP_NET_CFG_ICR_UNMASKED 0x0 +#define NFP_NET_CFG_ICR_RXTX 0x1 +#define NFP_NET_CFG_ICR_LSC 0x2 /** * General device stats (0x0d00 - 0x0d90) * all counters are 64bit. */ -#define NFP_NET_CFG_STATS_BASE 0x0d00 -#define NFP_NET_CFG_STATS_RX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x00) -#define NFP_NET_CFG_STATS_RX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x08) -#define NFP_NET_CFG_STATS_RX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x10) -#define NFP_NET_CFG_STATS_RX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x18) -#define NFP_NET_CFG_STATS_RX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x20) -#define NFP_NET_CFG_STATS_RX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x28) -#define NFP_NET_CFG_STATS_RX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x30) -#define NFP_NET_CFG_STATS_RX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x38) -#define NFP_NET_CFG_STATS_RX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x40) - -#define NFP_NET_CFG_STATS_TX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x48) -#define NFP_NET_CFG_STATS_TX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x50) -#define NFP_NET_CFG_STATS_TX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x58) -#define NFP_NET_CFG_STATS_TX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x60) -#define NFP_NET_CFG_STATS_TX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x68) -#define NFP_NET_CFG_STATS_TX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x70) -#define NFP_NET_CFG_STATS_TX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x78) -#define NFP_NET_CFG_STATS_TX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x80) -#define NFP_NET_CFG_STATS_TX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x88) +#define NFP_NET_CFG_STATS_BASE 0x0d00 +#define NFP_NET_CFG_STATS_RX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x00) +#define NFP_NET_CFG_STATS_RX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x08) +#define NFP_NET_CFG_STATS_RX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x10) +#define NFP_NET_CFG_STATS_RX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x18) +#define NFP_NET_CFG_STATS_RX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x20) +#define NFP_NET_CFG_STATS_RX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x28) +#define NFP_NET_CFG_STATS_RX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x30) +#define NFP_NET_CFG_STATS_RX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x38) +#define NFP_NET_CFG_STATS_RX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x40) + +#define NFP_NET_CFG_STATS_TX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x48) +#define NFP_NET_CFG_STATS_TX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x50) +#define NFP_NET_CFG_STATS_TX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x58) +#define NFP_NET_CFG_STATS_TX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x60) +#define NFP_NET_CFG_STATS_TX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x68) +#define NFP_NET_CFG_STATS_TX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x70) +#define NFP_NET_CFG_STATS_TX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x78) +#define 
NFP_NET_CFG_STATS_TX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x80)
+#define NFP_NET_CFG_STATS_TX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x88)
#define NFP_NET_CFG_STATS_APP0_FRAMES (NFP_NET_CFG_STATS_BASE + 0x90)
#define NFP_NET_CFG_STATS_APP0_BYTES (NFP_NET_CFG_STATS_BASE + 0x98)
@@ -404,11 +404,11 @@
 * %NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
 * %NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
 */
-#define NFP_NET_CFG_TXR_STATS_BASE 0x1000
-#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \
+#define NFP_NET_CFG_TXR_STATS_BASE 0x1000
+#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \
((_x) * 0x10))
-#define NFP_NET_CFG_RXR_STATS_BASE 0x1400
-#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \
+#define NFP_NET_CFG_RXR_STATS_BASE 0x1400
+#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \
((_x) * 0x10))
/**
@@ -444,7 +444,7 @@
 * %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
 * %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
 * %NFP_NET_CFG_TLV_LENGTH: Offset of length within the TLV
- * %NFP_NET_CFG_TLV_LENGTH_INC: TLV length increments
+ * %NFP_NET_CFG_TLV_LENGTH_INC: TLV length increments
 * %NFP_NET_CFG_TLV_VALUE: Offset of value within the TLV
 *
 * List of simple TLV structures, first one starts at %NFP_NET_CFG_TLV_BASE.
@@ -457,12 +457,12 @@
 * Note that the 4 byte TLV header is not counted in %NFP_NET_CFG_TLV_LENGTH.
 */
#define NFP_NET_CFG_TLV_TYPE 0x00
-#define NFP_NET_CFG_TLV_TYPE_REQUIRED 0x8000
+#define NFP_NET_CFG_TLV_TYPE_REQUIRED 0x8000
#define NFP_NET_CFG_TLV_LENGTH 0x02
#define NFP_NET_CFG_TLV_LENGTH_INC 4
#define NFP_NET_CFG_TLV_VALUE 0x04
-#define NFP_NET_CFG_TLV_HEADER_REQUIRED 0x80000000
+#define NFP_NET_CFG_TLV_HEADER_REQUIRED 0x80000000
#define NFP_NET_CFG_TLV_HEADER_TYPE 0x7fff0000
#define NFP_NET_CFG_TLV_HEADER_LENGTH 0x0000ffff
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index cf81cf95d1d8..67cdd8330c59 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -231,15 +231,15 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
 for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) {
 sprintf(name, "%d", i);
- debugfs_create_file(name, S_IRUSR, rx,
+ debugfs_create_file(name, 0400, rx,
 &nn->r_vecs[i], &nfp_rx_q_fops);
- debugfs_create_file(name, S_IRUSR, xdp,
+ debugfs_create_file(name, 0400, xdp,
 &nn->r_vecs[i], &nfp_xdp_q_fops);
 }
 for (i = 0; i < min(nn->max_tx_rings, nn->max_r_vecs); i++) {
 sprintf(name, "%d", i);
- debugfs_create_file(name, S_IRUSR, tx,
+ debugfs_create_file(name, 0400, tx,
 &nn->r_vecs[i], &nfp_tx_q_fops);
 }
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index e1dae0616f52..c9016419bfa0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -179,7 +179,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
-#define NN_RVEC_GATHER_STATS 8
+#define NN_RVEC_GATHER_STATS 9
#define NN_RVEC_PER_Q_STATS 3
 static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
@@ -468,6 +468,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
 data = nfp_pr_et(data, "hw_rx_csum_ok");
 data =
nfp_pr_et(data, "hw_rx_csum_inner_ok"); + data = nfp_pr_et(data, "hw_rx_csum_complete"); data = nfp_pr_et(data, "hw_rx_csum_err"); data = nfp_pr_et(data, "rx_replace_buf_alloc_fail"); data = nfp_pr_et(data, "hw_tx_csum"); @@ -493,18 +494,19 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) data[0] = nn->r_vecs[i].rx_pkts; tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; - tmp[2] = nn->r_vecs[i].hw_csum_rx_error; - tmp[3] = nn->r_vecs[i].rx_replace_buf_alloc_fail; + tmp[2] = nn->r_vecs[i].hw_csum_rx_complete; + tmp[3] = nn->r_vecs[i].hw_csum_rx_error; + tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail; } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start)); do { start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); data[1] = nn->r_vecs[i].tx_pkts; data[2] = nn->r_vecs[i].tx_busy; - tmp[4] = nn->r_vecs[i].hw_csum_tx; - tmp[5] = nn->r_vecs[i].hw_csum_tx_inner; - tmp[6] = nn->r_vecs[i].tx_gather; - tmp[7] = nn->r_vecs[i].tx_lso; + tmp[5] = nn->r_vecs[i].hw_csum_tx; + tmp[6] = nn->r_vecs[i].hw_csum_tx_inner; + tmp[7] = nn->r_vecs[i].tx_gather; + tmp[8] = nn->r_vecs[i].tx_lso; } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); data += NN_RVEC_PER_Q_STATS; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 619570524d2a..0cd077addb26 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -196,8 +196,19 @@ nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev, static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu) { struct nfp_repr *repr = netdev_priv(netdev); + int err; - return nfp_app_change_mtu(repr->app, netdev, new_mtu); + err = nfp_app_check_mtu(repr->app, netdev, new_mtu); + if (err) + return err; + + err = nfp_app_repr_change_mtu(repr->app, netdev, new_mtu); + if (err) + return err; + + netdev->mtu = new_mtu; + + return 0; } static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile b/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile new file mode 100644 index 000000000000..805fa28f391a --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile new file mode 100644 index 000000000000..805fa28f391a --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +# kbuild requires Makefile in a directory to build individual objects diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 39abac678b71..99bb679a9801 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -71,10 +71,11 @@ /* CPP address to retrieve the data from */ #define NSP_BUFFER 0x10 #define NSP_BUFFER_CPP GENMASK_ULL(63, 40) -#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38) -#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0) +#define NSP_BUFFER_ADDRESS GENMASK_ULL(39, 0) #define NSP_DFLT_BUFFER 0x18 +#define NSP_DFLT_BUFFER_CPP GENMASK_ULL(63, 40) +#define NSP_DFLT_BUFFER_ADDRESS GENMASK_ULL(39, 0) #define NSP_DFLT_BUFFER_CONFIG 
0x20
#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0)
@@ -427,8 +428,8 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
 if (err < 0)
 return err;
- cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8;
- cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg);
+ cpp_id = FIELD_GET(NSP_DFLT_BUFFER_CPP, reg) << 8;
+ cpp_buf = FIELD_GET(NSP_DFLT_BUFFER_ADDRESS, reg);
 if (in_buf && in_size) {
 err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size);
diff --git a/drivers/net/ethernet/netronome/nfp/nic/Makefile b/drivers/net/ethernet/netronome/nfp/nic/Makefile
new file mode 100644
index 000000000000..805fa28f391a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nic/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+# kbuild requires Makefile in a directory to build individual objects
diff --git a/drivers/net/ethernet/ni/Kconfig b/drivers/net/ethernet/ni/Kconfig
new file mode 100644
index 000000000000..aa41e5f6e437
--- /dev/null
+++ b/drivers/net/ethernet/ni/Kconfig
@@ -0,0 +1,27 @@
+#
+# National Instruments network device configuration
+#
+
+config NET_VENDOR_NI
+ bool "National Instruments Devices"
+ default y
+ help
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about National Instruments devices.
+ If you say Y, you will be asked for your specific device in the
+ following questions.
+
+if NET_VENDOR_NI
+
+config NI_XGE_MANAGEMENT_ENET
+ tristate "National Instruments XGE management enet support"
+ depends on ARCH_ZYNQ
+ select PHYLIB
+ help
+ Simple LAN device for debug or management purposes. Can
+ support either 10G or 1G PHYs via SFP+ ports.
+
+endif
diff --git a/drivers/net/ethernet/ni/Makefile b/drivers/net/ethernet/ni/Makefile
new file mode 100644
index 000000000000..99c664651c51
--- /dev/null
+++ b/drivers/net/ethernet/ni/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NI_XGE_MANAGEMENT_ENET) += nixge.o
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
new file mode 100644
index 000000000000..27364b7572fc
--- /dev/null
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -0,0 +1,1310 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2016-2017, National Instruments Corp.
+ * + * Author: Moritz Fischer <mdf@kernel.org> + */ + +#include <linux/etherdevice.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of_address.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> +#include <linux/skbuff.h> +#include <linux/phy.h> +#include <linux/mii.h> +#include <linux/nvmem-consumer.h> +#include <linux/ethtool.h> +#include <linux/iopoll.h> + +#define TX_BD_NUM 64 +#define RX_BD_NUM 128 + +/* Axi DMA Register definitions */ +#define XAXIDMA_TX_CR_OFFSET 0x00 /* Channel control */ +#define XAXIDMA_TX_SR_OFFSET 0x04 /* Status */ +#define XAXIDMA_TX_CDESC_OFFSET 0x08 /* Current descriptor pointer */ +#define XAXIDMA_TX_TDESC_OFFSET 0x10 /* Tail descriptor pointer */ + +#define XAXIDMA_RX_CR_OFFSET 0x30 /* Channel control */ +#define XAXIDMA_RX_SR_OFFSET 0x34 /* Status */ +#define XAXIDMA_RX_CDESC_OFFSET 0x38 /* Current descriptor pointer */ +#define XAXIDMA_RX_TDESC_OFFSET 0x40 /* Tail descriptor pointer */ + +#define XAXIDMA_CR_RUNSTOP_MASK 0x1 /* Start/stop DMA channel */ +#define XAXIDMA_CR_RESET_MASK 0x4 /* Reset DMA engine */ + +#define XAXIDMA_BD_CTRL_LENGTH_MASK 0x007FFFFF /* Requested len */ +#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */ +#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */ +#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */ + +#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */ +#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */ + +#define XAXIDMA_DELAY_SHIFT 24 +#define XAXIDMA_COALESCE_SHIFT 16 + +#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */ +#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */ +#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */ +#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */ + +/* Default TX/RX Threshold and waitbound values for SGDMA mode */ +#define XAXIDMA_DFT_TX_THRESHOLD 24 +#define XAXIDMA_DFT_TX_WAITBOUND 254 +#define XAXIDMA_DFT_RX_THRESHOLD 24 +#define XAXIDMA_DFT_RX_WAITBOUND 254 + +#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF /* Actual len */ +#define XAXIDMA_BD_STS_COMPLETE_MASK 0x80000000 /* Completed */ +#define XAXIDMA_BD_STS_DEC_ERR_MASK 0x40000000 /* Decode error */ +#define XAXIDMA_BD_STS_SLV_ERR_MASK 0x20000000 /* Slave error */ +#define XAXIDMA_BD_STS_INT_ERR_MASK 0x10000000 /* Internal err */ +#define XAXIDMA_BD_STS_ALL_ERR_MASK 0x70000000 /* All errors */ +#define XAXIDMA_BD_STS_RXSOF_MASK 0x08000000 /* First rx pkt */ +#define XAXIDMA_BD_STS_RXEOF_MASK 0x04000000 /* Last rx pkt */ +#define XAXIDMA_BD_STS_ALL_MASK 0xFC000000 /* All status bits */ + +#define NIXGE_REG_CTRL_OFFSET 0x4000 +#define NIXGE_REG_INFO 0x00 +#define NIXGE_REG_MAC_CTL 0x04 +#define NIXGE_REG_PHY_CTL 0x08 +#define NIXGE_REG_LED_CTL 0x0c +#define NIXGE_REG_MDIO_DATA 0x10 +#define NIXGE_REG_MDIO_ADDR 0x14 +#define NIXGE_REG_MDIO_OP 0x18 +#define NIXGE_REG_MDIO_CTRL 0x1c + +#define NIXGE_ID_LED_CTL_EN BIT(0) +#define NIXGE_ID_LED_CTL_VAL BIT(1) + +#define NIXGE_MDIO_CLAUSE45 BIT(12) +#define NIXGE_MDIO_CLAUSE22 0 +#define NIXGE_MDIO_OP(n) (((n) & 0x3) << 10) +#define NIXGE_MDIO_OP_ADDRESS 0 +#define NIXGE_MDIO_C45_WRITE BIT(0) +#define NIXGE_MDIO_C45_READ (BIT(1) | BIT(0)) +#define NIXGE_MDIO_C22_WRITE BIT(0) +#define NIXGE_MDIO_C22_READ BIT(1) +#define NIXGE_MDIO_ADDR(n) (((n) & 0x1f) << 5) +#define NIXGE_MDIO_MMD(n) (((n) & 0x1f) << 0) + +#define NIXGE_REG_MAC_LSB 0x1000 +#define NIXGE_REG_MAC_MSB 0x1004 + +/* 
Packet size info */ +#define NIXGE_HDR_SIZE 14 /* Size of Ethernet header */ +#define NIXGE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */ +#define NIXGE_MTU 1500 /* Max MTU of an Ethernet frame */ +#define NIXGE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */ + +#define NIXGE_MAX_FRAME_SIZE (NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) +#define NIXGE_MAX_JUMBO_FRAME_SIZE \ + (NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) + +struct nixge_hw_dma_bd { + u32 next; + u32 reserved1; + u32 phys; + u32 reserved2; + u32 reserved3; + u32 reserved4; + u32 cntrl; + u32 status; + u32 app0; + u32 app1; + u32 app2; + u32 app3; + u32 app4; + u32 sw_id_offset; + u32 reserved5; + u32 reserved6; +}; + +struct nixge_tx_skb { + struct sk_buff *skb; + dma_addr_t mapping; + size_t size; + bool mapped_as_page; +}; + +struct nixge_priv { + struct net_device *ndev; + struct napi_struct napi; + struct device *dev; + + /* Connection to PHY device */ + struct device_node *phy_node; + phy_interface_t phy_mode; + + int link; + unsigned int speed; + unsigned int duplex; + + /* MDIO bus data */ + struct mii_bus *mii_bus; /* MII bus reference */ + + /* IO registers, dma functions and IRQs */ + void __iomem *ctrl_regs; + void __iomem *dma_regs; + + struct tasklet_struct dma_err_tasklet; + + int tx_irq; + int rx_irq; + u32 last_link; + + /* Buffer descriptors */ + struct nixge_hw_dma_bd *tx_bd_v; + struct nixge_tx_skb *tx_skb; + dma_addr_t tx_bd_p; + + struct nixge_hw_dma_bd *rx_bd_v; + dma_addr_t rx_bd_p; + u32 tx_bd_ci; + u32 tx_bd_tail; + u32 rx_bd_ci; + + u32 coalesce_count_rx; + u32 coalesce_count_tx; +}; + +static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val) +{ + writel(val, priv->dma_regs + offset); +} + +static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset) +{ + return readl(priv->dma_regs + offset); +} + +static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val) +{ + writel(val, priv->ctrl_regs + offset); +} + +static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset) +{ + return readl(priv->ctrl_regs + offset); +} + +#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \ + readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \ + (sleep_us), (timeout_us)) + +#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \ + readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \ + (sleep_us), (timeout_us)) + +static void nixge_hw_dma_bd_release(struct net_device *ndev) +{ + struct nixge_priv *priv = netdev_priv(ndev); + int i; + + for (i = 0; i < RX_BD_NUM; i++) { + dma_unmap_single(ndev->dev.parent, priv->rx_bd_v[i].phys, + NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); + dev_kfree_skb((struct sk_buff *) + (priv->rx_bd_v[i].sw_id_offset)); + } + + if (priv->rx_bd_v) + dma_free_coherent(ndev->dev.parent, + sizeof(*priv->rx_bd_v) * RX_BD_NUM, + priv->rx_bd_v, + priv->rx_bd_p); + + if (priv->tx_skb) + devm_kfree(ndev->dev.parent, priv->tx_skb); + + if (priv->tx_bd_v) + dma_free_coherent(ndev->dev.parent, + sizeof(*priv->tx_bd_v) * TX_BD_NUM, + priv->tx_bd_v, + priv->tx_bd_p); +} + +static int nixge_hw_dma_bd_init(struct net_device *ndev) +{ + struct nixge_priv *priv = netdev_priv(ndev); + struct sk_buff *skb; + u32 cr; + int i; + + /* Reset the indexes which are used for accessing the BDs */ + priv->tx_bd_ci = 0; + priv->tx_bd_tail = 0; + priv->rx_bd_ci = 0; + + /* Allocate the Tx and Rx buffer descriptors. 
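+ * Each ring is a single coherent DMA allocation (dma_zalloc_coherent);
+ * the loops below then link the descriptors into a circular list
+ * through their ->next fields.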
*/ + priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, + sizeof(*priv->tx_bd_v) * TX_BD_NUM, + &priv->tx_bd_p, GFP_KERNEL); + if (!priv->tx_bd_v) + goto out; + + priv->tx_skb = devm_kzalloc(ndev->dev.parent, + sizeof(*priv->tx_skb) * + TX_BD_NUM, + GFP_KERNEL); + if (!priv->tx_skb) + goto out; + + priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, + sizeof(*priv->rx_bd_v) * RX_BD_NUM, + &priv->rx_bd_p, GFP_KERNEL); + if (!priv->rx_bd_v) + goto out; + + for (i = 0; i < TX_BD_NUM; i++) { + priv->tx_bd_v[i].next = priv->tx_bd_p + + sizeof(*priv->tx_bd_v) * + ((i + 1) % TX_BD_NUM); + } + + for (i = 0; i < RX_BD_NUM; i++) { + priv->rx_bd_v[i].next = priv->rx_bd_p + + sizeof(*priv->rx_bd_v) * + ((i + 1) % RX_BD_NUM); + + skb = netdev_alloc_skb_ip_align(ndev, + NIXGE_MAX_JUMBO_FRAME_SIZE); + if (!skb) + goto out; + + priv->rx_bd_v[i].sw_id_offset = (u32)skb; + priv->rx_bd_v[i].phys = + dma_map_single(ndev->dev.parent, + skb->data, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE; + } + + /* Start updating the Rx channel control register */ + cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); + /* Update the interrupt coalesce count */ + cr = ((cr & ~XAXIDMA_COALESCE_MASK) | + ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT)); + /* Update the delay timer count */ + cr = ((cr & ~XAXIDMA_DELAY_MASK) | + (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); + /* Enable coalesce, delay timer and error interrupts */ + cr |= XAXIDMA_IRQ_ALL_MASK; + /* Write to the Rx channel control register */ + nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); + + /* Start updating the Tx channel control register */ + cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); + /* Update the interrupt coalesce count */ + cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | + ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT)); + /* Update the delay timer count */ + cr = (((cr & ~XAXIDMA_DELAY_MASK)) | + (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); + /* Enable coalesce, delay timer and error interrupts */ + cr |= XAXIDMA_IRQ_ALL_MASK; + /* Write to the Tx channel control register */ + nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr); + + /* Populate the tail pointer and bring the Rx Axi DMA engine out of + * halted state. This will make the Rx side ready for reception. + */ + nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p); + cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); + nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, + cr | XAXIDMA_CR_RUNSTOP_MASK); + nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p + + (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1))); + + /* Write to the RS (Run-stop) bit in the Tx channel control register. + * Tx channel is now ready to run. But only after we write to the + * tail pointer register that the Tx channel will start transmitting. + */ + nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p); + cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); + nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, + cr | XAXIDMA_CR_RUNSTOP_MASK); + + return 0; +out: + nixge_hw_dma_bd_release(ndev); + return -ENOMEM; +} + +static void __nixge_device_reset(struct nixge_priv *priv, off_t offset) +{ + u32 status; + int err; + + /* Reset Axi DMA. This would reset NIXGE Ethernet core as well. + * The reset process of Axi DMA takes a while to complete as all + * pending commands/transfers will be flushed or completed during + * this reset process. 
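+ * The reset bit is self-clearing; the poll below waits for
+ * XAXIDMA_CR_RESET_MASK to read back as zero before proceeding.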
+ */ + nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK); + err = nixge_dma_poll_timeout(priv, offset, status, + !(status & XAXIDMA_CR_RESET_MASK), 10, + 1000); + if (err) + netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__); +} + +static void nixge_device_reset(struct net_device *ndev) +{ + struct nixge_priv *priv = netdev_priv(ndev); + + __nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET); + __nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET); + + if (nixge_hw_dma_bd_init(ndev)) + netdev_err(ndev, "%s: descriptor allocation failed\n", + __func__); + + netif_trans_update(ndev); +} + +static void nixge_handle_link_change(struct net_device *ndev) +{ + struct nixge_priv *priv = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + + if (phydev->link != priv->link || phydev->speed != priv->speed || + phydev->duplex != priv->duplex) { + priv->link = phydev->link; + priv->speed = phydev->speed; + priv->duplex = phydev->duplex; + phy_print_status(phydev); + } +} + +static void nixge_tx_skb_unmap(struct nixge_priv *priv, + struct nixge_tx_skb *tx_skb) +{ + if (tx_skb->mapping) { + if (tx_skb->mapped_as_page) + dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping, + tx_skb->size, DMA_TO_DEVICE); + else + dma_unmap_single(priv->ndev->dev.parent, + tx_skb->mapping, + tx_skb->size, DMA_TO_DEVICE); + tx_skb->mapping = 0; + } + + if (tx_skb->skb) { + dev_kfree_skb_any(tx_skb->skb); + tx_skb->skb = NULL; + } +} + +static void nixge_start_xmit_done(struct net_device *ndev) +{ + struct nixge_priv *priv = netdev_priv(ndev); + struct nixge_hw_dma_bd *cur_p; + struct nixge_tx_skb *tx_skb; + unsigned int status = 0; + u32 packets = 0; + u32 size = 0; + + cur_p = &priv->tx_bd_v[priv->tx_bd_ci]; + tx_skb = &priv->tx_skb[priv->tx_bd_ci]; + + status = cur_p->status; + + while (status & XAXIDMA_BD_STS_COMPLETE_MASK) { + nixge_tx_skb_unmap(priv, tx_skb); + cur_p->status = 0; + + size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; + packets++; + + ++priv->tx_bd_ci; + priv->tx_bd_ci %= TX_BD_NUM; + cur_p = &priv->tx_bd_v[priv->tx_bd_ci]; + tx_skb = &priv->tx_skb[priv->tx_bd_ci]; + status = cur_p->status; + } + + ndev->stats.tx_packets += packets; + ndev->stats.tx_bytes += size; + + if (packets) + netif_wake_queue(ndev); +} + +static int nixge_check_tx_bd_space(struct nixge_priv *priv, + int num_frag) +{ + struct nixge_hw_dma_bd *cur_p; + + cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM]; + if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK) + return NETDEV_TX_BUSY; + return 0; +} + +static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct nixge_priv *priv = netdev_priv(ndev); + struct nixge_hw_dma_bd *cur_p; + struct nixge_tx_skb *tx_skb; + dma_addr_t tail_p; + skb_frag_t *frag; + u32 num_frag; + u32 ii; + + num_frag = skb_shinfo(skb)->nr_frags; + cur_p = &priv->tx_bd_v[priv->tx_bd_tail]; + tx_skb = &priv->tx_skb[priv->tx_bd_tail]; + + if (nixge_check_tx_bd_space(priv, num_frag)) { + if (!netif_queue_stopped(ndev)) + netif_stop_queue(ndev); + return NETDEV_TX_OK; + } + + cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) + goto drop; + + cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; + + tx_skb->skb = NULL; + tx_skb->mapping = cur_p->phys; + tx_skb->size = skb_headlen(skb); + tx_skb->mapped_as_page = false; + + for (ii = 0; ii < num_frag; ii++) { + ++priv->tx_bd_tail; + priv->tx_bd_tail %= TX_BD_NUM; + cur_p = &priv->tx_bd_v[priv->tx_bd_tail]; + 
tx_skb = &priv->tx_skb[priv->tx_bd_tail]; + frag = &skb_shinfo(skb)->frags[ii]; + + cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) + goto frag_err; + + cur_p->cntrl = skb_frag_size(frag); + + tx_skb->skb = NULL; + tx_skb->mapping = cur_p->phys; + tx_skb->size = skb_frag_size(frag); + tx_skb->mapped_as_page = true; + } + + /* last buffer of the frame */ + tx_skb->skb = skb; + + cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; + cur_p->app4 = (unsigned long)skb; + + tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail; + /* Start the transfer */ + nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p); + ++priv->tx_bd_tail; + priv->tx_bd_tail %= TX_BD_NUM; + + return NETDEV_TX_OK; +frag_err: + for (; ii > 0; ii--) { + if (priv->tx_bd_tail) + priv->tx_bd_tail--; + else + priv->tx_bd_tail = TX_BD_NUM - 1; + + tx_skb = &priv->tx_skb[priv->tx_bd_tail]; + nixge_tx_skb_unmap(priv, tx_skb); + + cur_p = &priv->tx_bd_v[priv->tx_bd_tail]; + cur_p->status = 0; + } + dma_unmap_single(priv->ndev->dev.parent, + tx_skb->mapping, + tx_skb->size, DMA_TO_DEVICE); +drop: + ndev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +static int nixge_recv(struct net_device *ndev, int budget) +{ + struct nixge_priv *priv = netdev_priv(ndev); + struct sk_buff *skb, *new_skb; + struct nixge_hw_dma_bd *cur_p; + dma_addr_t tail_p = 0; + u32 packets = 0; + u32 length = 0; + u32 size = 0; + + cur_p = &priv->rx_bd_v[priv->rx_bd_ci]; + + while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK && + budget > packets)) { + tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) * + priv->rx_bd_ci; + + skb = (struct sk_buff *)(cur_p->sw_id_offset); + + length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; + if (length > NIXGE_MAX_JUMBO_FRAME_SIZE) + length = NIXGE_MAX_JUMBO_FRAME_SIZE; + + dma_unmap_single(ndev->dev.parent, cur_p->phys, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + + skb_put(skb, length); + + skb->protocol = eth_type_trans(skb, ndev); + skb_checksum_none_assert(skb); + + /* For now mark them as CHECKSUM_NONE since + * we don't have offload capabilities + */ + skb->ip_summed = CHECKSUM_NONE; + + napi_gro_receive(&priv->napi, skb); + + size += length; + packets++; + + new_skb = netdev_alloc_skb_ip_align(ndev, + NIXGE_MAX_JUMBO_FRAME_SIZE); + if (!new_skb) + return packets; + + cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) { + /* FIXME: bail out and clean up */ + netdev_err(ndev, "Failed to map ...\n"); + } + cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE; + cur_p->status = 0; + cur_p->sw_id_offset = (u32)new_skb; + + ++priv->rx_bd_ci; + priv->rx_bd_ci %= RX_BD_NUM; + cur_p = &priv->rx_bd_v[priv->rx_bd_ci]; + } + + ndev->stats.rx_packets += packets; + ndev->stats.rx_bytes += size; + + if (tail_p) + nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p); + + return packets; +} + +static int nixge_poll(struct napi_struct *napi, int budget) +{ + struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi); + int work_done; + u32 status, cr; + + work_done = 0; + + work_done = nixge_recv(priv->ndev, budget); + if (work_done < budget) { + napi_complete_done(napi, work_done); + status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET); + + if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { + /* If there's more, reschedule, but clear */ + nixge_dma_write_reg(priv, 
XAXIDMA_RX_SR_OFFSET, status);
+ napi_reschedule(napi);
+ } else {
+ /* if not, turn on RX IRQs again ... */
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
+ }
+ }
+
+ return work_done;
+}
+
+static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
+{
+ struct nixge_priv *priv = netdev_priv(_ndev);
+ struct net_device *ndev = _ndev;
+ unsigned int status;
+ u32 cr;
+
+ status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
+ nixge_start_xmit_done(priv->ndev);
+ goto out;
+ }
+ if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
+ netdev_err(ndev, "No interrupts asserted in Tx path\n");
+ return IRQ_NONE;
+ }
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ netdev_err(ndev, "DMA Tx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x\n",
+ (priv->tx_bd_v[priv->tx_bd_ci]).phys);
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Tx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Rx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&priv->dma_err_tasklet);
+ nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
+{
+ struct nixge_priv *priv = netdev_priv(_ndev);
+ struct net_device *ndev = _ndev;
+ unsigned int status;
+ u32 cr;
+
+ status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ /* Turn off IRQs because NAPI takes over from here */
+ nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
+
+ if (napi_schedule_prep(&priv->napi))
+ __napi_schedule(&priv->napi);
+ goto out;
+ }
+ if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
+ netdev_err(ndev, "No interrupts asserted in Rx path\n");
+ return IRQ_NONE;
+ }
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ netdev_err(ndev, "DMA Rx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x\n",
+ (priv->rx_bd_v[priv->rx_bd_ci]).phys);
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Finally write to the Tx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Rx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&priv->dma_err_tasklet);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static void nixge_dma_err_handler(unsigned long data)
+{
+ struct nixge_priv *lp = (struct nixge_priv *)data;
+ struct nixge_hw_dma_bd *cur_p;
+ struct nixge_tx_skb *tx_skb;
+ u32 cr, i;
+
+ __nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
+ __nixge_device_reset(lp,
XAXIDMA_RX_CR_OFFSET); + + for (i = 0; i < TX_BD_NUM; i++) { + cur_p = &lp->tx_bd_v[i]; + tx_skb = &lp->tx_skb[i]; + nixge_tx_skb_unmap(lp, tx_skb); + + cur_p->phys = 0; + cur_p->cntrl = 0; + cur_p->status = 0; + cur_p->app0 = 0; + cur_p->app1 = 0; + cur_p->app2 = 0; + cur_p->app3 = 0; + cur_p->app4 = 0; + cur_p->sw_id_offset = 0; + } + + for (i = 0; i < RX_BD_NUM; i++) { + cur_p = &lp->rx_bd_v[i]; + cur_p->status = 0; + cur_p->app0 = 0; + cur_p->app1 = 0; + cur_p->app2 = 0; + cur_p->app3 = 0; + cur_p->app4 = 0; + } + + lp->tx_bd_ci = 0; + lp->tx_bd_tail = 0; + lp->rx_bd_ci = 0; + + /* Start updating the Rx channel control register */ + cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET); + /* Update the interrupt coalesce count */ + cr = ((cr & ~XAXIDMA_COALESCE_MASK) | + (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); + /* Update the delay timer count */ + cr = ((cr & ~XAXIDMA_DELAY_MASK) | + (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); + /* Enable coalesce, delay timer and error interrupts */ + cr |= XAXIDMA_IRQ_ALL_MASK; + /* Finally write to the Rx channel control register */ + nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr); + + /* Start updating the Tx channel control register */ + cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET); + /* Update the interrupt coalesce count */ + cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | + (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); + /* Update the delay timer count */ + cr = (((cr & ~XAXIDMA_DELAY_MASK)) | + (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); + /* Enable coalesce, delay timer and error interrupts */ + cr |= XAXIDMA_IRQ_ALL_MASK; + /* Finally write to the Tx channel control register */ + nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr); + + /* Populate the tail pointer and bring the Rx Axi DMA engine out of + * halted state. This will make the Rx side ready for reception. + */ + nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); + cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET); + nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, + cr | XAXIDMA_CR_RUNSTOP_MASK); + nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); + + /* Write to the RS (Run-stop) bit in the Tx channel control register. + * Tx channel is now ready to run. 
But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
+ nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+}
+
+static int nixge_open(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ struct phy_device *phy;
+ int ret;
+
+ nixge_device_reset(ndev);
+
+ phy = of_phy_connect(ndev, priv->phy_node,
+ &nixge_handle_link_change, 0, priv->phy_mode);
+ if (!phy)
+ return -ENODEV;
+
+ phy_start(phy);
+
+ /* Enable tasklets for Axi DMA error handling */
+ tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
+ (unsigned long)priv);
+
+ napi_enable(&priv->napi);
+
+ /* Enable interrupts for Axi DMA Tx */
+ ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
+ if (ret)
+ goto err_tx_irq;
+ /* Enable interrupts for Axi DMA Rx */
+ ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
+ if (ret)
+ goto err_rx_irq;
+
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_rx_irq:
+ free_irq(priv->tx_irq, ndev);
+err_tx_irq:
+ phy_stop(phy);
+ phy_disconnect(phy);
+ tasklet_kill(&priv->dma_err_tasklet);
+ netdev_err(ndev, "request_irq() failed\n");
+ return ret;
+}
+
+static int nixge_stop(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ u32 cr;
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+ }
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+
+ tasklet_kill(&priv->dma_err_tasklet);
+
+ free_irq(priv->tx_irq, ndev);
+ free_irq(priv->rx_irq, ndev);
+
+ nixge_hw_dma_bd_release(ndev);
+
+ return 0;
+}
+
+static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
+ NIXGE_MAX_JUMBO_FRAME_SIZE)
+ return -EINVAL;
+
+ ndev->mtu = new_mtu;
+
+ return 0;
+}
+
+static s32 __nixge_hw_set_mac_address(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
+ (ndev->dev_addr[2]) << 24 |
+ (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) |
+ (ndev->dev_addr[5] << 0));
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
+ (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));
+
+ return 0;
+}
+
+static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
+{
+ int err;
+
+ err = eth_mac_addr(ndev, p);
+ if (!err)
+ __nixge_hw_set_mac_address(ndev);
+
+ return err;
+}
+
+static const struct net_device_ops nixge_netdev_ops = {
+ .ndo_open = nixge_open,
+ .ndo_stop = nixge_stop,
+ .ndo_start_xmit = nixge_start_xmit,
+ .ndo_change_mtu = nixge_change_mtu,
+ .ndo_set_mac_address = nixge_net_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *ed)
+{
+ strlcpy(ed->driver, "nixge", sizeof(ed->driver));
+ strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
+}
+
+static int nixge_ethtools_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ecoalesce)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ u32 regval = 0;
+
+ regval =
nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); + ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) + >> XAXIDMA_COALESCE_SHIFT; + regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); + ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) + >> XAXIDMA_COALESCE_SHIFT; + return 0; +} + +static int nixge_ethtools_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ecoalesce) +{ + struct nixge_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + netdev_err(ndev, + "Please stop netif before applying configuration\n"); + return -EBUSY; + } + + if (ecoalesce->rx_coalesce_usecs || + ecoalesce->rx_coalesce_usecs_irq || + ecoalesce->rx_max_coalesced_frames_irq || + ecoalesce->tx_coalesce_usecs || + ecoalesce->tx_coalesce_usecs_irq || + ecoalesce->tx_max_coalesced_frames_irq || + ecoalesce->stats_block_coalesce_usecs || + ecoalesce->use_adaptive_rx_coalesce || + ecoalesce->use_adaptive_tx_coalesce || + ecoalesce->pkt_rate_low || + ecoalesce->rx_coalesce_usecs_low || + ecoalesce->rx_max_coalesced_frames_low || + ecoalesce->tx_coalesce_usecs_low || + ecoalesce->tx_max_coalesced_frames_low || + ecoalesce->pkt_rate_high || + ecoalesce->rx_coalesce_usecs_high || + ecoalesce->rx_max_coalesced_frames_high || + ecoalesce->tx_coalesce_usecs_high || + ecoalesce->tx_max_coalesced_frames_high || + ecoalesce->rate_sample_interval) + return -EOPNOTSUPP; + if (ecoalesce->rx_max_coalesced_frames) + priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; + if (ecoalesce->tx_max_coalesced_frames) + priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; + + return 0; +} + +static int nixge_ethtools_set_phys_id(struct net_device *ndev, + enum ethtool_phys_id_state state) +{ + struct nixge_priv *priv = netdev_priv(ndev); + u32 ctrl; + + ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL); + switch (state) { + case ETHTOOL_ID_ACTIVE: + ctrl |= NIXGE_ID_LED_CTL_EN; + /* Enable identification LED override*/ + nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); + return 2; + + case ETHTOOL_ID_ON: + ctrl |= NIXGE_ID_LED_CTL_VAL; + nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); + break; + + case ETHTOOL_ID_OFF: + ctrl &= ~NIXGE_ID_LED_CTL_VAL; + nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + ctrl &= ~NIXGE_ID_LED_CTL_EN; + nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); + break; + } + + return 0; +} + +static const struct ethtool_ops nixge_ethtool_ops = { + .get_drvinfo = nixge_ethtools_get_drvinfo, + .get_coalesce = nixge_ethtools_get_coalesce, + .set_coalesce = nixge_ethtools_set_coalesce, + .set_phys_id = nixge_ethtools_set_phys_id, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_link = ethtool_op_get_link, +}; + +static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg) +{ + struct nixge_priv *priv = bus->priv; + u32 status, tmp; + int err; + u16 device; + + if (reg & MII_ADDR_C45) { + device = (reg >> 16) & 0x1f; + + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff); + + tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) + | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); + + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); + + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, + !status, 10, 1000); + if (err) { + dev_err(priv->dev, "timeout setting address"); + return err; + } + 
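+ /* Clause 45 access is a two-step sequence: the ADDRESS op issued
+ * above latches the register offset within the MMD, and the READ op
+ * built below then transfers the actual data.
+ */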
+ tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) | + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); + } else { + device = reg & 0x1f; + + tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) | + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); + } + + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); + + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, + !status, 10, 1000); + if (err) { + dev_err(priv->dev, "timeout setting read command"); + return err; + } + + status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA); + + return status; +} + +static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) +{ + struct nixge_priv *priv = bus->priv; + u32 status, tmp; + u16 device; + int err; + + if (reg & MII_ADDR_C45) { + device = (reg >> 16) & 0x1f; + + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff); + + tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) + | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); + + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); + + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, + !status, 10, 1000); + if (err) { + dev_err(priv->dev, "timeout setting address"); + return err; + } + + tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE) + | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); + + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, + !status, 10, 1000); + if (err) + dev_err(priv->dev, "timeout setting write command"); + } else { + device = reg & 0x1f; + + tmp = NIXGE_MDIO_CLAUSE22 | + NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) | + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); + + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); + + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, + !status, 10, 1000); + if (err) + dev_err(priv->dev, "timeout setting write command"); + } + + return err; +} + +static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np) +{ + struct mii_bus *bus; + + bus = devm_mdiobus_alloc(priv->dev); + if (!bus) + return -ENOMEM; + + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev)); + bus->priv = priv; + bus->name = "nixge_mii_bus"; + bus->read = nixge_mdio_read; + bus->write = nixge_mdio_write; + bus->parent = priv->dev; + + priv->mii_bus = bus; + + return of_mdiobus_register(bus, np); +} + +static void *nixge_get_nvmem_address(struct device *dev) +{ + struct nvmem_cell *cell; + size_t cell_size; + char *mac; + + cell = nvmem_cell_get(dev, "address"); + if (IS_ERR(cell)) + return cell; + + mac = nvmem_cell_read(cell, &cell_size); + nvmem_cell_put(cell); + + return mac; +} + +static int nixge_probe(struct platform_device *pdev) +{ + struct nixge_priv *priv; + struct net_device *ndev; + struct resource *dmares; + const char *mac_addr; + int err; + + ndev = alloc_etherdev(sizeof(*priv)); + if (!ndev) + return -ENOMEM; + + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, &pdev->dev); + + ndev->features = NETIF_F_SG; + ndev->netdev_ops = &nixge_netdev_ops; + ndev->ethtool_ops = &nixge_ethtool_ops; + + /* MTU range: 64 - 9000 */ + ndev->min_mtu = 64; + ndev->max_mtu = NIXGE_JUMBO_MTU; + + mac_addr = 
nixge_get_nvmem_address(&pdev->dev);
+ if (mac_addr && is_valid_ether_addr(mac_addr))
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ else
+ eth_hw_addr_random(ndev);
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->dev = &pdev->dev;
+
+ netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
+
+ dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
+ if (IS_ERR(priv->dma_regs)) {
+ netdev_err(ndev, "failed to map dma regs\n");
+ return PTR_ERR(priv->dma_regs);
+ }
+ priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
+ __nixge_hw_set_mac_address(ndev);
+
+ priv->tx_irq = platform_get_irq_byname(pdev, "tx");
+ if (priv->tx_irq < 0) {
+ netdev_err(ndev, "could not find 'tx' irq");
+ return priv->tx_irq;
+ }
+
+ priv->rx_irq = platform_get_irq_byname(pdev, "rx");
+ if (priv->rx_irq < 0) {
+ netdev_err(ndev, "could not find 'rx' irq");
+ return priv->rx_irq;
+ }
+
+ priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
+ priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+
+ err = nixge_mdio_setup(priv, pdev->dev.of_node);
+ if (err) {
+ netdev_err(ndev, "error registering mdio bus");
+ goto free_netdev;
+ }
+
+ priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+ if (priv->phy_mode < 0) {
+ netdev_err(ndev, "could not find \"phy-mode\" property\n");
+ err = -EINVAL;
+ goto unregister_mdio;
+ }
+
+ priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!priv->phy_node) {
+ netdev_err(ndev, "could not find \"phy-handle\" property\n");
+ err = -EINVAL;
+ goto unregister_mdio;
+ }
+
+ err = register_netdev(priv->ndev);
+ if (err) {
+ netdev_err(ndev, "register_netdev() error (%i)\n", err);
+ goto unregister_mdio;
+ }
+
+ return 0;
+
+unregister_mdio:
+ mdiobus_unregister(priv->mii_bus);
+
+free_netdev:
+ free_netdev(ndev);
+
+ return err;
+}
+
+static int nixge_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ mdiobus_unregister(priv->mii_bus);
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id nixge_dt_ids[] = {
+ { .compatible = "ni,xge-enet-2.00", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, nixge_dt_ids);
+
+static struct platform_driver nixge_driver = {
+ .probe = nixge_probe,
+ .remove = nixge_remove,
+ .driver = {
+ .name = "nixge",
+ .of_match_table = of_match_ptr(nixge_dt_ids),
+ },
+};
+module_platform_driver(nixge_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("National Instruments XGE Management MAC");
+MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>");
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 3dd973475125..0ea141ece19e 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -603,7 +603,7 @@ static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
 static int netxen_nic_validate_header(struct netxen_adapter *adapter)
- {
+{
 const u8 *unirom = adapter->fw->data;
 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
 u32 fw_file_size = adapter->fw->size;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index f2e8de607119..8259e8309320 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ 
b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2829,9 +2829,9 @@ netxen_show_bridged_mode(struct device *dev, } static const struct device_attribute dev_attr_bridged_mode = { - .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, - .show = netxen_show_bridged_mode, - .store = netxen_store_bridged_mode, + .attr = { .name = "bridged_mode", .mode = 0644 }, + .show = netxen_show_bridged_mode, + .store = netxen_store_bridged_mode, }; static ssize_t @@ -2861,7 +2861,7 @@ netxen_show_diag_mode(struct device *dev, } static const struct device_attribute dev_attr_diag_mode = { - .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "diag_mode", .mode = 0644 }, .show = netxen_show_diag_mode, .store = netxen_store_diag_mode, }; @@ -3006,14 +3006,14 @@ static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, static const struct bin_attribute bin_attr_crb = { - .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "crb", .mode = 0644 }, .size = 0, .read = netxen_sysfs_read_crb, .write = netxen_sysfs_write_crb, }; static const struct bin_attribute bin_attr_mem = { - .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "mem", .mode = 0644 }, .size = 0, .read = netxen_sysfs_read_mem, .write = netxen_sysfs_write_mem, @@ -3142,7 +3142,7 @@ out: } static const struct bin_attribute bin_attr_dimm = { - .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) }, + .attr = { .name = "dimm", .mode = 0644 }, .size = sizeof(struct netxen_dimm_cfg), .read = netxen_sysfs_read_dimm, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 69488554f4b9..e07460a68d30 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -81,6 +81,13 @@ enum qed_coalescing_mode { QED_COAL_MODE_ENABLE }; +enum qed_nvm_cmd { + QED_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, + QED_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA, + QED_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM, + QED_GET_MCP_NVM_RESP = 0xFFFFFF00 +}; + struct qed_eth_cb_ops; struct qed_dev_info; union qed_mcp_protocol_stats; @@ -437,6 +444,11 @@ enum BAR_ID { BAR_ID_1 /* Used for doorbells */ }; +struct qed_nvm_image_info { + u32 num_images; + struct bist_nvm_image_att *image_att; +}; + #define DRV_MODULE_VERSION \ __stringify(QED_MAJOR_VERSION) "." \ __stringify(QED_MINOR_VERSION) "." 
\ @@ -561,6 +573,9 @@ struct qed_hwfn { /* L2-related */ struct qed_l2_info *p_l2_info; + /* Nvm images number and attributes */ + struct qed_nvm_image_info nvm_info; + struct qed_ptt *p_arfs_ptt; struct qed_simd_fp_handler simd_proto_handler[64]; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 6f546e869d8d..00f41c145d4d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2480,7 +2480,10 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto) if (rc) return rc; - /* Free Task CXT */ + /* Free Task CXT ( Intentionally RoCE as task-id is shared between + * RoCE and iWARP ) + */ + proto = PROTOCOLID_ROCE; rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0, qed_cxt_get_proto_tid_count(p_hwfn, proto)); if (rc) diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index fdf37abee3d3..4926c5532fba 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -265,6 +265,7 @@ struct grc_param_defs { u32 min; u32 max; bool is_preset; + bool is_persistent; u32 exclude_all_preset_val; u32 crash_preset_val; }; @@ -1520,129 +1521,129 @@ static struct platform_defs s_platform_defs[] = { static struct grc_param_defs s_grc_param_defs[] = { /* DBG_GRC_PARAM_DUMP_TSTORM */ - {{1, 1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */ - {{1, 1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */ - {{1, 1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */ - {{1, 1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */ - {{1, 1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */ - {{1, 1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_REGS */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */ - {{0, 0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */ - {{0, 0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, - /* DBG_GRC_PARAM_RESERVED */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */ + {{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* 
DBG_GRC_PARAM_DUMP_BRB */ - {{0, 0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */ - {{0, 0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */ - {{0, 0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_UNSTALL */ - {{0, 0, 0}, 0, 1, false, 0, 0}, + {{0, 0, 0}, 0, 1, false, false, 0, 0}, /* DBG_GRC_PARAM_NUM_LCIDS */ - {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS, - MAX_LCIDS}, + {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false, + MAX_LCIDS, MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */ - {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS, - MAX_LTIDS}, + {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false, + MAX_LTIDS, MAX_LTIDS}, /* DBG_GRC_PARAM_EXCLUDE_ALL */ - {{0, 0, 0}, 0, 1, true, 0, 0}, + {{0, 0, 0}, 0, 1, true, false, 0, 0}, /* DBG_GRC_PARAM_CRASH */ - {{0, 0, 0}, 0, 1, true, 0, 0}, + {{0, 0, 0}, 0, 1, true, false, 0, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */ - {{0, 0, 0}, 0, 1, false, 1, 0}, + {{0, 0, 0}, 0, 1, false, false, 1, 0}, /* DBG_GRC_PARAM_DUMP_CM */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */ - {{1, 1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_NO_MCP */ - {{0, 0, 0}, 0, 1, false, 0, 0}, + {{0, 0, 0}, 0, 1, false, false, 0, 0}, /* DBG_GRC_PARAM_NO_FW_VER */ - {{0, 0, 0}, 0, 1, false, 0, 0} + {{0, 0, 0}, 0, 1, false, false, 0, 0} }; static struct rss_mem_defs s_rss_mem_defs[] = { @@ -4731,8 +4732,13 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_meta", 1); - /* Read trace meta info (trace_meta_size_bytes is dword-aligned) */ - if (mcp_access) { + /* If MCP Trace meta size parameter was set, use it. + * Otherwise, read trace meta. + * trace_meta_size_bytes is dword-aligned. 
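+ * (DBG_GRC_PARAM_MCP_TRACE_META_SIZE is marked is_persistent above,
+ * so a user-supplied value survives qed_dbg_grc_set_params_default().)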
+ */ + trace_meta_size_bytes = + qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE); + if ((!trace_meta_size_bytes || dump) && mcp_access) { status = qed_mcp_trace_get_meta_info(p_hwfn, p_ptt, trace_data_size_bytes, @@ -5063,8 +5069,9 @@ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn) u32 i; for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) - dev_data->grc.param_val[i] = - s_grc_param_defs[i].default_val[dev_data->chip_id]; + if (!s_grc_param_defs[i].is_persistent) + dev_data->grc.param_val[i] = + s_grc_param_defs[i].default_val[dev_data->chip_id]; } enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, @@ -6071,10 +6078,14 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { /******************************** Variables **********************************/ -/* MCP Trace meta data - used in case the dump doesn't contain the meta data - * (e.g. due to no NVRAM access). +/* MCP Trace meta data array - used in case the dump doesn't contain the + * meta data (e.g. due to no NVRAM access). */ -static struct user_dbg_array s_mcp_trace_meta = { NULL, 0 }; +static struct user_dbg_array s_mcp_trace_meta_arr = { NULL, 0 }; + +/* Parsed MCP Trace meta data info, based on MCP trace meta array */ +static struct mcp_trace_meta s_mcp_trace_meta; +static bool s_mcp_trace_meta_valid; /* Temporary buffer, used for print size calculations */ static char s_temp_buf[MAX_MSG_LEN]; @@ -6104,6 +6115,9 @@ static u32 qed_read_from_cyclic_buf(void *buf, val_ptr = (u8 *)&val; + /* Assume running on a LITTLE ENDIAN and the buffer is network order + * (BIG ENDIAN), as high order bytes are placed in lower memory address. + */ for (i = 0; i < num_bytes_to_read; i++) { val_ptr[i] = bytes_buf[*offset]; *offset = qed_cyclic_add(*offset, 1, buf_size); @@ -6185,7 +6199,7 @@ static u32 qed_read_param(u32 *dump_buf, offset += 4; } - return offset / 4; + return (u32)offset / 4; } /* Reads a section header from the specified buffer. @@ -6503,6 +6517,8 @@ static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn, { u32 i; + s_mcp_trace_meta_valid = false; + /* Release modules */ if (meta->modules) { for (i = 0; i < meta->modules_num; i++) @@ -6529,6 +6545,10 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, u8 *meta_buf_bytes = (u8 *)meta_buf; u32 offset = 0, signature, i; + /* Free the previous meta before loading a new one. */ + if (s_mcp_trace_meta_valid) + qed_mcp_trace_free_meta(p_hwfn, meta); + memset(meta, 0, sizeof(*meta)); /* Read first signature */ @@ -6594,31 +6614,153 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, format_len, format_ptr->format_str); } + s_mcp_trace_meta_valid = true; return DBG_STATUS_OK; } +/* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results + * are printed to it. The parsing status is returned. + * Arguments: + * trace_buf - MCP trace cyclic buffer + * trace_buf_size - MCP trace cyclic buffer size in bytes + * data_offset - offset in bytes of the data to parse in the MCP trace cyclic + * buffer. + * data_size - size in bytes of data to parse. + * parsed_buf - destination buffer for parsed data. + * parsed_bytes - size of parsed data in bytes. 
+ */ +static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, + u32 trace_buf_size, + u32 data_offset, + u32 data_size, + char *parsed_buf, + u32 *parsed_bytes) +{ + u32 param_mask, param_shift; + enum dbg_status status; + + *parsed_bytes = 0; + + if (!s_mcp_trace_meta_valid) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + + status = DBG_STATUS_OK; + + while (data_size) { + struct mcp_trace_format *format_ptr; + u8 format_level, format_module; + u32 params[3] = { 0, 0, 0 }; + u32 header, format_idx, i; + + if (data_size < MFW_TRACE_ENTRY_SIZE) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + + header = qed_read_from_cyclic_buf(trace_buf, + &data_offset, + trace_buf_size, + MFW_TRACE_ENTRY_SIZE); + data_size -= MFW_TRACE_ENTRY_SIZE; + format_idx = header & MFW_TRACE_EVENTID_MASK; + + /* Skip message if its index doesn't exist in the meta data */ + if (format_idx > s_mcp_trace_meta.formats_num) { + u8 format_size = + (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >> + MFW_TRACE_PRM_SIZE_SHIFT); + + if (data_size < format_size) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + + data_offset = qed_cyclic_add(data_offset, + format_size, + trace_buf_size); + data_size -= format_size; + continue; + } + + format_ptr = &s_mcp_trace_meta.formats[format_idx]; + + for (i = 0, + param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, + param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT; + i < MCP_TRACE_FORMAT_MAX_PARAMS; + i++, + param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH, + param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) { + /* Extract param size (0..3) */ + u8 param_size = (u8)((format_ptr->data & param_mask) >> + param_shift); + + /* If the param size is zero, there are no other + * parameters. + */ + if (!param_size) + break; + + /* Size is encoded using 2 bits, where 3 is used to + * encode 4. + */ + if (param_size == 3) + param_size = 4; + + if (data_size < param_size) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + + params[i] = qed_read_from_cyclic_buf(trace_buf, + &data_offset, + trace_buf_size, + param_size); + data_size -= param_size; + } + + format_level = (u8)((format_ptr->data & + MCP_TRACE_FORMAT_LEVEL_MASK) >> + MCP_TRACE_FORMAT_LEVEL_SHIFT); + format_module = (u8)((format_ptr->data & + MCP_TRACE_FORMAT_MODULE_MASK) >> + MCP_TRACE_FORMAT_MODULE_SHIFT); + if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + + /* Print current message to results buffer */ + *parsed_bytes += + sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes), + "%s %-8s: ", + s_mcp_trace_level_str[format_level], + s_mcp_trace_meta.modules[format_module]); + *parsed_bytes += + sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes), + format_ptr->format_str, + params[0], params[1], params[2]); + } + + /* Add string NULL terminator */ + (*parsed_bytes)++; + + return status; +} + /* Parses an MCP Trace dump buffer. * If result_buf is not NULL, the MCP Trace results are printed to it. * In any case, the required results buffer size is assigned to - * parsed_results_bytes. + * parsed_bytes. * The parsing status is returned. 
*/ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, - char *results_buf, - u32 *parsed_results_bytes) + char *parsed_buf, + u32 *parsed_bytes) { - u32 end_offset, bytes_left, trace_data_dwords, trace_meta_dwords; - u32 param_mask, param_shift, param_num_val, num_section_params; const char *section_name, *param_name, *param_str_val; - u32 offset, results_offset = 0; - struct mcp_trace_meta meta; + u32 data_size, trace_data_dwords, trace_meta_dwords; + u32 offset, results_offset, parsed_buf_bytes; + u32 param_num_val, num_section_params; struct mcp_trace *trace; enum dbg_status status; const u32 *meta_buf; u8 *trace_buf; - *parsed_results_bytes = 0; + *parsed_bytes = 0; /* Read global_params section */ dump_buf += qed_read_section_hdr(dump_buf, @@ -6629,7 +6771,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Print global params */ dump_buf += qed_print_section_params(dump_buf, num_section_params, - results_buf, &results_offset); + parsed_buf, &results_offset); /* Read trace_data section */ dump_buf += qed_read_section_hdr(dump_buf, @@ -6646,8 +6788,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, trace = (struct mcp_trace *)dump_buf; trace_buf = (u8 *)dump_buf + sizeof(*trace); offset = trace->trace_oldest; - end_offset = trace->trace_prod; - bytes_left = qed_cyclic_sub(end_offset, offset, trace->size); + data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size); dump_buf += trace_data_dwords; /* Read meta_data section */ @@ -6664,126 +6805,33 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Choose meta data buffer */ if (!trace_meta_dwords) { /* Dump doesn't include meta data */ - if (!s_mcp_trace_meta.ptr) + if (!s_mcp_trace_meta_arr.ptr) return DBG_STATUS_MCP_TRACE_NO_META; - meta_buf = s_mcp_trace_meta.ptr; + meta_buf = s_mcp_trace_meta_arr.ptr; } else { /* Dump includes meta data */ meta_buf = dump_buf; } /* Allocate meta data memory */ - status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta); + status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &s_mcp_trace_meta); if (status != DBG_STATUS_OK) - goto free_mem; - - /* Ignore the level and modules masks - just print everything that is - * already in the buffer. 
- */ - while (bytes_left) { - struct mcp_trace_format *format_ptr; - u8 format_level, format_module; - u32 params[3] = { 0, 0, 0 }; - u32 header, format_idx, i; - - if (bytes_left < MFW_TRACE_ENTRY_SIZE) { - status = DBG_STATUS_MCP_TRACE_BAD_DATA; - goto free_mem; - } - - header = qed_read_from_cyclic_buf(trace_buf, - &offset, - trace->size, - MFW_TRACE_ENTRY_SIZE); - bytes_left -= MFW_TRACE_ENTRY_SIZE; - format_idx = header & MFW_TRACE_EVENTID_MASK; - - /* Skip message if its index doesn't exist in the meta data */ - if (format_idx > meta.formats_num) { - u8 format_size = - (u8)((header & - MFW_TRACE_PRM_SIZE_MASK) >> - MFW_TRACE_PRM_SIZE_SHIFT); - - if (bytes_left < format_size) { - status = DBG_STATUS_MCP_TRACE_BAD_DATA; - goto free_mem; - } - - offset = qed_cyclic_add(offset, - format_size, trace->size); - bytes_left -= format_size; - continue; - } - - format_ptr = &meta.formats[format_idx]; - - for (i = 0, - param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift = - MCP_TRACE_FORMAT_P1_SIZE_SHIFT; - i < MCP_TRACE_FORMAT_MAX_PARAMS; - i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH, - param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) { - /* Extract param size (0..3) */ - u8 param_size = - (u8)((format_ptr->data & - param_mask) >> param_shift); - - /* If the param size is zero, there are no other - * parameters. - */ - if (!param_size) - break; - - /* Size is encoded using 2 bits, where 3 is used to - * encode 4. - */ - if (param_size == 3) - param_size = 4; - - if (bytes_left < param_size) { - status = DBG_STATUS_MCP_TRACE_BAD_DATA; - goto free_mem; - } - - params[i] = qed_read_from_cyclic_buf(trace_buf, - &offset, - trace->size, - param_size); - - bytes_left -= param_size; - } + return status; - format_level = - (u8)((format_ptr->data & - MCP_TRACE_FORMAT_LEVEL_MASK) >> - MCP_TRACE_FORMAT_LEVEL_SHIFT); - format_module = - (u8)((format_ptr->data & - MCP_TRACE_FORMAT_MODULE_MASK) >> - MCP_TRACE_FORMAT_MODULE_SHIFT); - if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) { - status = DBG_STATUS_MCP_TRACE_BAD_DATA; - goto free_mem; - } + status = qed_parse_mcp_trace_buf(trace_buf, + trace->size, + offset, + data_size, + parsed_buf ? + parsed_buf + results_offset : + NULL, + &parsed_buf_bytes); + if (status != DBG_STATUS_OK) + return status; - /* Print current message to results buffer */ - results_offset += - sprintf(qed_get_buf_ptr(results_buf, - results_offset), "%s %-8s: ", - s_mcp_trace_level_str[format_level], - meta.modules[format_module]); - results_offset += - sprintf(qed_get_buf_ptr(results_buf, - results_offset), - format_ptr->format_str, params[0], params[1], - params[2]); - } + *parsed_bytes = results_offset + parsed_buf_bytes; -free_mem: - *parsed_results_bytes = results_offset + 1; - qed_mcp_trace_free_meta(p_hwfn, &meta); - return status; + return DBG_STATUS_OK; } /* Parses a Reg FIFO dump buffer. 
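The parsing loop moved into qed_parse_mcp_trace_buf() above leans on two conventions that are easy to miss in diff form: qed_read_from_cyclic_buf() assembles a value byte-by-byte from a cyclic, network-order (big-endian) buffer on a host assumed to be little endian, and per-parameter sizes are packed two bits apiece in the format word, with the code 3 standing for 4 bytes. Below is a minimal, self-contained C sketch of both conventions; all names are local stand-ins, not the driver's code.

#include <stdint.h>
#include <stdio.h>

/* Wrap-around advance inside a cyclic buffer (the role qed_cyclic_add()
 * plays in the driver).
 */
static uint32_t cyclic_add(uint32_t a, uint32_t b, uint32_t buf_size)
{
	return (a + b) % buf_size;
}

/* Read 1, 2 or 4 bytes from a cyclic byte buffer. Per the comment added
 * in the diff, the buffer is network order (big endian) while the host
 * is assumed little endian, so high-order bytes sit at lower addresses.
 */
static uint32_t read_cyclic(const uint8_t *buf, uint32_t *offset,
			    uint32_t buf_size, uint8_t num_bytes)
{
	uint32_t val = 0;
	uint8_t *val_ptr = (uint8_t *)&val;
	uint8_t i;

	for (i = 0; i < num_bytes; i++) {
		val_ptr[i] = buf[*offset];
		*offset = cyclic_add(*offset, 1, buf_size);
	}

	return val;
}

int main(void)
{
	uint8_t trace[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	uint32_t off = 6;
	uint32_t packed = 0x2D;	/* hypothetical 2-bit size codes: 1, 3, 2 */
	unsigned int i;

	/* Read wraps from offset 6 past the end back to offset 0 */
	printf("value: 0x%08x\n",
	       (unsigned int)read_cyclic(trace, &off, sizeof(trace), 4));

	/* Decode per-parameter sizes the way the parsing loop does:
	 * code 0 ends the list, code 3 encodes a 4-byte parameter.
	 */
	for (i = 0; i < 3; i++) {
		uint8_t size = (packed >> (2 * i)) & 0x3;

		if (!size)
			break;
		if (size == 3)
			size = 4;
		printf("param %u: %u byte(s)\n", i, (unsigned int)size);
	}

	return 0;
}

This also shows why the rewritten loop is driven by a byte count (data_size, computed with qed_cyclic_sub()) rather than by comparing offsets: with wrap-around, an end offset can be numerically smaller than the current one.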
@@ -7291,8 +7339,8 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size) { - s_mcp_trace_meta.ptr = data; - s_mcp_trace_meta.size_in_dwords = size; + s_mcp_trace_meta_arr.ptr = data; + s_mcp_trace_meta_arr.size_in_dwords = size; } enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, @@ -7316,6 +7364,19 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, results_buf, &parsed_buf_size); } +enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf, + u32 num_dumped_bytes, + char *results_buf) +{ + u32 parsed_bytes; + + return qed_parse_mcp_trace_buf(dump_buf, + num_dumped_bytes, + 0, + num_dumped_bytes, + results_buf, &parsed_bytes); +} + enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, @@ -7891,6 +7952,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) } } + qed_set_debug_engine(cdev, org_engine); /* mcp_trace */ rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); @@ -7903,8 +7965,6 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc); } - qed_set_debug_engine(cdev, org_engine); - return 0; } @@ -7929,9 +7989,10 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev); } + qed_set_debug_engine(cdev, org_engine); + /* Engine common */ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev); - qed_set_debug_engine(cdev, org_engine); return regs_len; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 553a6d17260e..d2ad5e92c74f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -298,8 +298,8 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn) qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); /* rate limiting and weighted fair queueing are always enabled */ - qm_info->vport_rl_en = 1; - qm_info->vport_wfq_en = 1; + qm_info->vport_rl_en = true; + qm_info->vport_wfq_en = true; /* TC config is different for AH 4 port */ four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2; @@ -407,6 +407,7 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn, "pq overflow! 
pq %d, max pq %d\n", pq_idx, max_pq); /* init pq params */ + qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id; qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports; qm_info->qm_pq_params[pq_idx].tc_id = tc; @@ -727,8 +728,9 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn) pq = &(qm_info->qm_pq_params[i]); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, - "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", + "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", qm_info->start_pq + i, + pq->port_id, pq->vport_id, pq->tc_id, pq->wrr_group, pq->rl_valid); } @@ -1276,9 +1278,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, if (p_hwfn->mcp_info) { if (p_hwfn->mcp_info->func_info.bandwidth_max) - qm_info->pf_rl_en = 1; + qm_info->pf_rl_en = true; if (p_hwfn->mcp_info->func_info.bandwidth_min) - qm_info->pf_wfq_en = 1; + qm_info->pf_wfq_en = true; } memset(¶ms, 0, sizeof(params)); @@ -1630,7 +1632,7 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn, qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); } - p_hwfn->b_int_enabled = 1; + p_hwfn->b_int_enabled = true; return 0; } @@ -2930,6 +2932,12 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return 0; } +static void qed_nvm_info_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->nvm_info.image_att); + p_hwfn->nvm_info.image_att = NULL; +} + static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, void __iomem *p_regview, void __iomem *p_doorbells, @@ -2993,12 +3001,25 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n"); } + /* NVRAM info initialization and population */ + if (IS_LEAD_HWFN(p_hwfn)) { + rc = qed_mcp_nvm_info_populate(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to populate nvm info shadow\n"); + goto err2; + } + } + /* Allocate the init RT array and initialize the init-ops engine */ rc = qed_init_alloc(p_hwfn); if (rc) - goto err2; + goto err3; return rc; +err3: + if (IS_LEAD_HWFN(p_hwfn)) + qed_nvm_info_free(p_hwfn); err2: if (IS_LEAD_HWFN(p_hwfn)) qed_iov_free_hw_info(p_hwfn->cdev); @@ -3054,6 +3075,7 @@ int qed_hw_prepare(struct qed_dev *cdev, if (rc) { if (IS_PF(cdev)) { qed_init_free(p_hwfn); + qed_nvm_info_free(p_hwfn); qed_mcp_free(p_hwfn); qed_hw_hwfn_free(p_hwfn); } @@ -3086,6 +3108,8 @@ void qed_hw_remove(struct qed_dev *cdev) } qed_iov_free_hw_info(cdev); + + qed_nvm_info_free(p_hwfn); } static void qed_chain_free_next_ptr(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index de873d770575..7f5ec42dde48 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -612,7 +612,7 @@ struct e4_xstorm_core_conn_ag_ctx { __le16 reserved16; __le16 tx_bd_cons; __le16 tx_bd_or_spq_prod; - __le16 word5; + __le16 updated_qm_pq_id; __le16 conn_dpi; u8 byte3; u8 byte4; @@ -1005,7 +1005,9 @@ enum fw_flow_ctrl_mode { enum gft_profile_type { GFT_PROFILE_TYPE_4_TUPLE, GFT_PROFILE_TYPE_L4_DST_PORT, - GFT_PROFILE_TYPE_IP_DST_PORT, + GFT_PROFILE_TYPE_IP_DST_ADDR, + GFT_PROFILE_TYPE_IP_SRC_ADDR, + GFT_PROFILE_TYPE_TUNNEL_TYPE, MAX_GFT_PROFILE_TYPE }; @@ -1133,7 +1135,7 @@ struct protocol_dcb_data { u8 dcb_priority; u8 dcb_tc; u8 dscp_val; - u8 reserved0; + u8 dcb_dont_add_vlan0; }; /* Update tunnel configuration */ @@ -1932,7 +1934,7 @@ enum bin_dbg_buffer_type { /* Attention bit mapping */ struct dbg_attn_bit_mapping { - __le16 data; + u16 data; #define 
DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF #define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0 #define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1 @@ -1941,11 +1943,12 @@ struct dbg_attn_bit_mapping { /* Attention block per-type data */ struct dbg_attn_block_type_data { - __le16 names_offset; - __le16 reserved1; + u16 names_offset; + u16 reserved1; u8 num_regs; u8 reserved2; - __le16 regs_offset; + u16 regs_offset; + }; /* Block attentions */ @@ -1955,15 +1958,15 @@ struct dbg_attn_block { /* Attention register result */ struct dbg_attn_reg_result { - __le32 data; + u32 data; #define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF #define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 #define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF #define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24 - __le16 block_attn_offset; - __le16 reserved; - __le32 sts_val; - __le32 mask_val; + u16 block_attn_offset; + u16 reserved; + u32 sts_val; + u32 mask_val; }; /* Attention block result */ @@ -1974,13 +1977,13 @@ struct dbg_attn_block_result { #define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0 #define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F #define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2 - __le16 names_offset; + u16 names_offset; struct dbg_attn_reg_result reg_results[15]; }; /* Mode header */ struct dbg_mode_hdr { - __le16 data; + u16 data; #define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 #define DBG_MODE_HDR_EVAL_MODE_SHIFT 0 #define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF @@ -1990,14 +1993,14 @@ struct dbg_mode_hdr { /* Attention register */ struct dbg_attn_reg { struct dbg_mode_hdr mode; - __le16 block_attn_offset; - __le32 data; + u16 block_attn_offset; + u32 data; #define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF #define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 #define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF #define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24 - __le32 sts_clr_address; - __le32 mask_address; + u32 sts_clr_address; + u32 mask_address; }; /* Attention types */ @@ -2011,14 +2014,14 @@ enum dbg_attn_type { struct dbg_bus_block { u8 num_of_lines; u8 has_latency_events; - __le16 lines_offset; + u16 lines_offset; }; /* Debug Bus block user data */ struct dbg_bus_block_user_data { u8 num_of_lines; u8 has_latency_events; - __le16 names_offset; + u16 names_offset; }; /* Block Debug line data */ @@ -2042,12 +2045,12 @@ struct dbg_dump_cond_hdr { /* Memory data for registers dump */ struct dbg_dump_mem { - __le32 dword0; + u32 dword0; #define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF #define DBG_DUMP_MEM_ADDRESS_SHIFT 0 #define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF #define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 - __le32 dword1; + u32 dword1; #define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF #define DBG_DUMP_MEM_LENGTH_SHIFT 0 #define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 @@ -2058,7 +2061,7 @@ struct dbg_dump_mem { /* Register data for registers dump */ struct dbg_dump_reg { - __le32 data; + u32 data; #define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF #define DBG_DUMP_REG_ADDRESS_SHIFT 0 #define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 @@ -2069,7 +2072,7 @@ struct dbg_dump_reg { /* Split header for registers dump */ struct dbg_dump_split_hdr { - __le32 hdr; + u32 hdr; #define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF #define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 #define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF @@ -2079,33 +2082,33 @@ struct dbg_dump_split_hdr { /* Condition header for idle check */ struct dbg_idle_chk_cond_hdr { struct dbg_mode_hdr mode; /* Mode header */ - __le16 data_size; /* size in dwords of the data following this header */ + u16 data_size; /* size in dwords of the data 
following this header */ }; /* Idle Check condition register */ struct dbg_idle_chk_cond_reg { - __le32 data; + u32 data; #define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF #define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 #define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 #define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 #define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF #define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 - __le16 num_entries; + u16 num_entries; u8 entry_size; u8 start_entry; }; /* Idle Check info register */ struct dbg_idle_chk_info_reg { - __le32 data; + u32 data; #define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF #define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 #define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 #define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 #define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF #define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 - __le16 size; /* register size in dwords */ + u16 size; /* register size in dwords */ struct dbg_mode_hdr mode; /* Mode header */ }; @@ -2117,8 +2120,8 @@ union dbg_idle_chk_reg { /* Idle Check result header */ struct dbg_idle_chk_result_hdr { - __le16 rule_id; /* Failing rule index */ - __le16 mem_entry_id; /* Failing memory entry index */ + u16 rule_id; /* Failing rule index */ + u16 mem_entry_id; /* Failing memory entry index */ u8 num_dumped_cond_regs; /* number of dumped condition registers */ u8 num_dumped_info_regs; /* number of dumped condition registers */ u8 severity; /* from dbg_idle_chk_severity_types enum */ @@ -2133,29 +2136,29 @@ struct dbg_idle_chk_result_reg_hdr { #define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F #define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1 u8 start_entry; /* index of the first checked entry */ - __le16 size; /* register size in dwords */ + u16 size; /* register size in dwords */ }; /* Idle Check rule */ struct dbg_idle_chk_rule { - __le16 rule_id; /* Idle Check rule ID */ + u16 rule_id; /* Idle Check rule ID */ u8 severity; /* value from dbg_idle_chk_severity_types enum */ u8 cond_id; /* Condition ID */ u8 num_cond_regs; /* number of condition registers */ u8 num_info_regs; /* number of info registers */ u8 num_imms; /* number of immediates in the condition */ u8 reserved1; - __le16 reg_offset; /* offset of this rules registers in the idle check - * register array (in dbg_idle_chk_reg units). - */ - __le16 imm_offset; /* offset of this rules immediate values in the - * immediate values array (in dwords). - */ + u16 reg_offset; /* offset of this rules registers in the idle check + * register array (in dbg_idle_chk_reg units). + */ + u16 imm_offset; /* offset of this rules immediate values in the + * immediate values array (in dwords). 
+ */ }; /* Idle Check rule parsing data */ struct dbg_idle_chk_rule_parsing_data { - __le32 data; + u32 data; #define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 #define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 #define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF @@ -2175,7 +2178,7 @@ enum dbg_idle_chk_severity_types { /* Debug Bus block data */ struct dbg_bus_block_data { - __le16 data; + u16 data; #define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF #define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0 #define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF @@ -2238,15 +2241,15 @@ struct dbg_bus_trigger_state_data { /* Debug Bus memory address */ struct dbg_bus_mem_addr { - __le32 lo; - __le32 hi; + u32 lo; + u32 hi; }; /* Debug Bus PCI buffer data */ struct dbg_bus_pci_buf_data { struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */ struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */ - __le32 size; /* PCI buffer size in bytes */ + u32 size; /* PCI buffer size in bytes */ }; /* Debug Bus Storm EID range filter params */ @@ -2276,15 +2279,15 @@ struct dbg_bus_storm_data { u8 eid_range_not_mask; u8 cid_filter_en; union dbg_bus_storm_eid_params eid_filter_params; - __le32 cid; + u32 cid; }; /* Debug Bus data */ struct dbg_bus_data { - __le32 app_version; + u32 app_version; u8 state; u8 hw_dwords; - __le16 hw_id_mask; + u16 hw_id_mask; u8 num_enabled_blocks; u8 num_enabled_storms; u8 target; @@ -2295,7 +2298,7 @@ struct dbg_bus_data { u8 adding_filter; u8 filter_pre_trigger; u8 filter_post_trigger; - __le16 reserved; + u16 reserved; u8 trigger_en; struct dbg_bus_trigger_state_data trigger_states[3]; u8 next_trigger_state; @@ -2391,8 +2394,8 @@ enum dbg_bus_targets { struct dbg_grc_data { u8 params_initialized; u8 reserved1; - __le16 reserved2; - __le32 param_val[48]; + u16 reserved2; + u32 param_val[48]; }; /* Debug GRC params */ @@ -2414,7 +2417,7 @@ enum dbg_grc_params { DBG_GRC_PARAM_DUMP_CAU, DBG_GRC_PARAM_DUMP_QM, DBG_GRC_PARAM_DUMP_MCP, - DBG_GRC_PARAM_RESERVED, + DBG_GRC_PARAM_MCP_TRACE_META_SIZE, DBG_GRC_PARAM_DUMP_CFC, DBG_GRC_PARAM_DUMP_IGU, DBG_GRC_PARAM_DUMP_BRB, @@ -2526,10 +2529,10 @@ enum dbg_storms { /* Idle Check data */ struct idle_chk_data { - __le32 buf_size; + u32 buf_size; u8 buf_size_set; u8 reserved1; - __le16 reserved2; + u16 reserved2; }; /* Debug Tools data (per HW function) */ @@ -2543,7 +2546,7 @@ struct dbg_tools_data { u8 platform_id; u8 initialized; u8 use_dmae; - __le32 num_regs_read; + u32 num_regs_read; }; /********************************/ @@ -2555,10 +2558,10 @@ struct dbg_tools_data { /* BRB RAM init requirements */ struct init_brb_ram_req { - __le32 guranteed_per_tc; - __le32 headroom_per_tc; - __le32 min_pkt_size; - __le32 max_ports_per_engine; + u32 guranteed_per_tc; + u32 headroom_per_tc; + u32 min_pkt_size; + u32 max_ports_per_engine; u8 num_active_tcs[MAX_NUM_PORTS]; }; @@ -2566,21 +2569,21 @@ struct init_brb_ram_req { struct init_ets_tc_req { u8 use_sp; u8 use_wfq; - __le16 weight; + u16 weight; }; /* ETS init requirements */ struct init_ets_req { - __le32 mtu; + u32 mtu; struct init_ets_tc_req tc_req[NUM_OF_TCS]; }; /* NIG LB RL init requirements */ struct init_nig_lb_rl_req { - __le16 lb_mac_rate; - __le16 lb_rate; - __le32 mtu; - __le16 tc_rate[NUM_OF_PHYS_TCS]; + u16 lb_mac_rate; + u16 lb_rate; + u32 mtu; + u16 tc_rate[NUM_OF_PHYS_TCS]; }; /* NIG TC mapping for each priority */ @@ -2598,9 +2601,9 @@ struct init_nig_pri_tc_map_req { struct init_qm_port_params { u8 active; u8 active_phys_tcs; - __le16 
num_pbf_cmd_lines; - __le16 num_btb_blocks; - __le16 reserved; + u16 num_pbf_cmd_lines; + u16 num_btb_blocks; + u16 reserved; }; /* QM per-PQ init parameters */ @@ -2609,13 +2612,16 @@ struct init_qm_pq_params { u8 tc_id; u8 wrr_group; u8 rl_valid; + u8 port_id; + u8 reserved0; + u16 reserved1; }; /* QM per-vport init parameters */ struct init_qm_vport_params { - __le32 vport_rl; - __le16 vport_wfq; - __le16 first_tx_pq_id[NUM_OF_TCS]; + u32 vport_rl; + u16 vport_wfq; + u16 first_tx_pq_id[NUM_OF_TCS]; }; /**************************************/ @@ -2639,8 +2645,8 @@ enum chip_ids { }; struct fw_asserts_ram_section { - __le16 section_ram_line_offset; - __le16 section_ram_line_size; + u16 section_ram_line_offset; + u16 section_ram_line_size; u8 list_dword_offset; u8 list_element_dword_size; u8 list_num_elements; @@ -2713,8 +2719,8 @@ enum init_split_types { /* Binary buffer header */ struct bin_buffer_hdr { - __le32 offset; - __le32 length; + u32 offset; + u32 length; }; /* Binary init buffer types */ @@ -2729,7 +2735,7 @@ enum bin_init_buffer_type { /* init array header: raw */ struct init_array_raw_hdr { - __le32 data; + u32 data; #define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF #define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 #define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF @@ -2738,7 +2744,7 @@ struct init_array_raw_hdr { /* init array header: standard */ struct init_array_standard_hdr { - __le32 data; + u32 data; #define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF #define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 #define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF @@ -2747,7 +2753,7 @@ struct init_array_standard_hdr { /* init array header: zipped */ struct init_array_zipped_hdr { - __le32 data; + u32 data; #define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF #define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 #define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF @@ -2756,7 +2762,7 @@ struct init_array_zipped_hdr { /* init array header: pattern */ struct init_array_pattern_hdr { - __le32 data; + u32 data; #define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF #define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0 #define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF @@ -2783,41 +2789,41 @@ enum init_array_types { /* init operation: callback */ struct init_callback_op { - __le32 op_data; + u32 op_data; #define INIT_CALLBACK_OP_OP_MASK 0xF #define INIT_CALLBACK_OP_OP_SHIFT 0 #define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF #define INIT_CALLBACK_OP_RESERVED_SHIFT 4 - __le16 callback_id; - __le16 block_id; + u16 callback_id; + u16 block_id; }; /* init operation: delay */ struct init_delay_op { - __le32 op_data; + u32 op_data; #define INIT_DELAY_OP_OP_MASK 0xF #define INIT_DELAY_OP_OP_SHIFT 0 #define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF #define INIT_DELAY_OP_RESERVED_SHIFT 4 - __le32 delay; + u32 delay; }; /* init operation: if_mode */ struct init_if_mode_op { - __le32 op_data; + u32 op_data; #define INIT_IF_MODE_OP_OP_MASK 0xF #define INIT_IF_MODE_OP_OP_SHIFT 0 #define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF #define INIT_IF_MODE_OP_RESERVED1_SHIFT 4 #define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF #define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 - __le16 reserved2; - __le16 modes_buf_offset; + u16 reserved2; + u16 modes_buf_offset; }; /* init operation: if_phase */ struct init_if_phase_op { - __le32 op_data; + u32 op_data; #define INIT_IF_PHASE_OP_OP_MASK 0xF #define INIT_IF_PHASE_OP_OP_SHIFT 0 #define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1 @@ -2826,7 +2832,7 @@ struct init_if_phase_op { #define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5 #define 
INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF #define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 - __le32 phase_data; + u32 phase_data; #define INIT_IF_PHASE_OP_PHASE_MASK 0xFF #define INIT_IF_PHASE_OP_PHASE_SHIFT 0 #define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF @@ -2845,31 +2851,31 @@ enum init_mode_ops { /* init operation: raw */ struct init_raw_op { - __le32 op_data; + u32 op_data; #define INIT_RAW_OP_OP_MASK 0xF #define INIT_RAW_OP_OP_SHIFT 0 #define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF #define INIT_RAW_OP_PARAM1_SHIFT 4 - __le32 param2; + u32 param2; }; /* init array params */ struct init_op_array_params { - __le16 size; - __le16 offset; + u16 size; + u16 offset; }; /* Write init operation arguments */ union init_write_args { - __le32 inline_val; - __le32 zeros_count; - __le32 array_offset; + u32 inline_val; + u32 zeros_count; + u32 array_offset; struct init_op_array_params runtime; }; /* init operation: write */ struct init_write_op { - __le32 data; + u32 data; #define INIT_WRITE_OP_OP_MASK 0xF #define INIT_WRITE_OP_OP_SHIFT 0 #define INIT_WRITE_OP_SOURCE_MASK 0x7 @@ -2885,7 +2891,7 @@ struct init_write_op { /* init operation: read */ struct init_read_op { - __le32 op_data; + u32 op_data; #define INIT_READ_OP_OP_MASK 0xF #define INIT_READ_OP_OP_SHIFT 0 #define INIT_READ_OP_POLL_TYPE_MASK 0xF @@ -2894,7 +2900,7 @@ struct init_read_op { #define INIT_READ_OP_RESERVED_SHIFT 8 #define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF #define INIT_READ_OP_ADDRESS_SHIFT 9 - __le32 expected_val; + u32 expected_val; }; /* Init operations union */ @@ -2939,11 +2945,11 @@ enum init_source_types { /* Internal RAM Offsets macro data */ struct iro { - __le32 base; - __le16 m1; - __le16 m2; - __le16 m3; - __le16 size; + u32 base; + u16 m1; + u16 m2; + u16 m3; + u16 size; }; /***************************** Public Functions *******************************/ @@ -3384,6 +3390,19 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, char *results_buf); /** + * @brief print_mcp_trace_line - Prints MCP Trace results for a single line + * + * @param dump_buf - mcp trace dump buffer, starting from the header. + * @param num_dumped_bytes - number of bytes that were dumped. + * @param results_buf - buffer for printing the mcp trace results. + * + * @return error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf, + u32 num_dumped_bytes, + char *results_buf); + +/** * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size * for reg_fifo results (in bytes). 
* @@ -4005,6 +4024,9 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool eth_geneve_enable, bool ip_geneve_enable); +void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool enable); + /** * @brief qed_gft_disable - Disable GFT * @@ -4348,8 +4370,8 @@ static const struct iro iro_arr[51] = { {0x80, 0x8, 0x0, 0x0, 0x4}, {0x84, 0x8, 0x0, 0x0, 0x2}, {0x4c48, 0x0, 0x0, 0x0, 0x78}, - {0x3e18, 0x0, 0x0, 0x0, 0x78}, - {0x2b58, 0x0, 0x0, 0x0, 0x78}, + {0x3e38, 0x0, 0x0, 0x0, 0x78}, + {0x2b78, 0x0, 0x0, 0x0, 0x78}, {0x4c40, 0x0, 0x0, 0x0, 0x78}, {0x4998, 0x0, 0x0, 0x0, 0x78}, {0x7f50, 0x0, 0x0, 0x0, 0x78}, @@ -4364,7 +4386,7 @@ static const struct iro iro_arr[51] = { {0x4ba8, 0x80, 0x0, 0x0, 0x20}, {0x8158, 0x40, 0x0, 0x0, 0x30}, {0xe770, 0x60, 0x0, 0x0, 0x60}, - {0x2cf0, 0x80, 0x0, 0x0, 0x38}, + {0x2d10, 0x80, 0x0, 0x0, 0x38}, {0xf2b8, 0x78, 0x0, 0x0, 0x78}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0xaf20, 0x0, 0x0, 0x0, 0xf0}, @@ -4384,10 +4406,10 @@ static const struct iro iro_arr[51] = { {0x10300, 0x18, 0x0, 0x0, 0x10}, {0xde48, 0x48, 0x0, 0x0, 0x38}, {0x10768, 0x20, 0x0, 0x0, 0x20}, - {0x2d28, 0x80, 0x0, 0x0, 0x10}, + {0x2d48, 0x80, 0x0, 0x0, 0x10}, {0x5048, 0x10, 0x0, 0x0, 0x10}, {0xc9b8, 0x30, 0x0, 0x0, 0x10}, - {0xeee0, 0x10, 0x0, 0x0, 0x10}, + {0xed90, 0x10, 0x0, 0x0, 0x10}, {0xa3a0, 0x10, 0x0, 0x0, 0x10}, {0x13108, 0x8, 0x0, 0x0, 0x8}, }; @@ -5151,7 +5173,7 @@ struct e4_xstorm_eth_conn_ag_ctx { __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; - __le16 tx_class; + __le16 updated_qm_pq_id; __le16 conn_dpi; u8 byte3; u8 byte4; @@ -5674,7 +5696,6 @@ struct eth_vport_rx_mode { #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 #define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF #define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6 - __le16 reserved2[3]; }; /* Command for setting tpa parameters */ @@ -5712,7 +5733,6 @@ struct eth_vport_tx_mode { #define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4 #define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF #define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5 - __le16 reserved2[3]; }; /* GFT filter update action type */ @@ -5805,7 +5825,8 @@ struct rx_queue_update_ramrod_data { u8 complete_cqe_flg; u8 complete_event_flg; u8 vport_id; - u8 reserved[4]; + u8 set_default_rss_queue; + u8 reserved[3]; u8 reserved1; u8 reserved2; u8 reserved3; @@ -5843,7 +5864,7 @@ struct rx_update_gft_filter_data { u8 flow_id_valid; u8 filter_action; u8 assert_on_error; - u8 reserved; + u8 inner_vlan_removal_en; }; /* Ramrod data for rx queue start ramrod */ @@ -5927,7 +5948,7 @@ struct vport_start_ramrod_data { u8 zero_placement_offset; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; - u8 reserved[5]; + u8 reserved[1]; }; /* Ramrod data for vport stop ramrod */ @@ -5992,6 +6013,7 @@ struct vport_update_ramrod_data { struct eth_vport_rx_mode rx_mode; struct eth_vport_tx_mode tx_mode; + __le32 reserved[3]; struct eth_vport_tpa_param tpa_param; struct vport_update_ramrod_mcast approx_mcast; struct eth_vport_rss_config rss_config; @@ -6213,7 +6235,7 @@ struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart { __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; - __le16 tx_class; + __le16 updated_qm_pq_id; __le16 conn_dpi; u8 byte3; u8 byte4; @@ -6479,7 +6501,7 @@ struct e4_xstorm_eth_hw_conn_ag_ctx { __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; - __le16 tx_class; + __le16 updated_qm_pq_id; __le16 conn_dpi; }; @@ -6703,8 +6725,8 @@ struct e4_ystorm_rdma_task_ag_ctx { #define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 #define 
E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 #define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6 -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 u8 flags1; #define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 #define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 @@ -6759,8 +6781,8 @@ struct e4_mstorm_rdma_task_ag_ctx { #define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 #define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 #define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 u8 flags1; #define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 #define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 @@ -6814,7 +6836,7 @@ struct ustorm_rdma_task_st_ctx { struct e4_ustorm_rdma_task_ag_ctx { u8 reserved; - u8 byte1; + u8 state; __le16 icid; u8 flags0; #define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF @@ -6830,8 +6852,8 @@ struct e4_ustorm_rdma_task_ag_ctx { #define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0 #define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 #define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2 -#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4 #define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 #define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 u8 flags2; @@ -6841,8 +6863,8 @@ struct e4_ustorm_rdma_task_ag_ctx { #define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1 #define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 #define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2 -#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3 #define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 #define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 #define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 @@ -6864,10 +6886,17 @@ struct e4_ustorm_rdma_task_ag_ctx { #define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 __le32 dif_err_intervals; __le32 dif_error_1st_interval; - __le32 reg2; + __le32 sq_cons; __le32 dif_runt_value; - __le32 reg4; + __le32 sge_index; __le32 reg5; + u8 byte2; + u8 byte3; + __le16 word1; + __le16 word2; + __le16 word3; + __le32 reg6; + __le32 reg7; }; /* RDMA task context */ @@ -6970,7 +6999,9 @@ struct rdma_init_func_hdr { u8 vf_id; u8 vf_valid; u8 relaxed_ordering; - u8 reserved[2]; + __le16 first_reg_srq_id; + __le32 reg_srq_base_addr; + __le32 reserved; }; /* rdma function init ramrod data */ @@ -7077,13 +7108,23 @@ struct rdma_srq_context { /* rdma create qp requester ramrod data */ struct rdma_srq_create_ramrod_data { + u8 flags; +#define RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG_MASK 0x1 +#define RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG_SHIFT 0 +#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1 +#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 1 +#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED1_MASK 0x3F +#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED1_SHIFT 2 + u8 reserved2; + __le16 xrc_domain; + __le32 xrc_srq_cq_cid; struct regpair pbl_base_addr; __le16 
pages_in_srq_pbl; __le16 pd_id; struct rdma_srq_id srq_id; __le16 page_size; - __le16 reserved1; - __le32 reserved2; + __le16 reserved3; + __le32 reserved4; struct regpair producers_addr; }; @@ -7108,372 +7149,8 @@ enum rdma_tid_type { MAX_RDMA_TID_TYPE }; -struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { - u8 reserved0; - u8 state; - u8 flags0; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7 - u8 flags1; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7 - u8 flags2; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6 - u8 flags3; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6 - u8 flags4; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3 -#define 
E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6 - u8 flags5; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6 - u8 flags6; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6 - u8 flags7; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 - u8 flags8; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 - u8 flags9; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7 - u8 flags10; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0 -#define 
E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7 - u8 flags11; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 - u8 flags12; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 - u8 flags13; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 -#define 
E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 - u8 flags14; -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6 - u8 byte2; - __le16 physical_q0; - __le16 word1; - __le16 word2; - __le16 word3; - __le16 word4; - __le16 word5; - __le16 conn_dpi; - u8 byte3; - u8 byte4; - u8 byte5; - u8 byte6; - __le32 reg0; - __le32 reg1; - __le32 reg2; - __le32 snd_nxt_psn; - __le32 reg4; -}; - -struct e4_mstorm_rdma_conn_ag_ctx { - u8 byte0; - u8 byte1; - u8 flags0; -#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 - __le16 word0; - __le16 word1; - __le32 reg0; - __le32 reg1; -}; - -struct e4_tstorm_rdma_conn_ag_ctx { - u8 reserved0; - u8 byte1; - u8 flags0; -#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6 - u8 flags1; -#define 
E4_TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2 -#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 - u8 flags2; -#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6 - u8 flags3; -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6 -#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 - u8 flags4; -#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags5; -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0; - __le32 reg1; - __le32 reg2; - __le32 reg3; - __le32 reg4; - __le32 reg5; - __le32 reg6; - __le32 reg7; - __le32 reg8; - u8 byte2; - u8 byte3; - __le16 word0; - u8 byte4; - u8 byte5; - __le16 word1; - __le16 word2; - __le16 word3; - __le32 reg9; - __le32 reg10; +struct rdma_xrc_srq_context { + struct regpair temp[9]; }; 
struct e4_tstorm_rdma_task_ag_ctx { @@ -7561,8 +7238,8 @@ struct e4_ustorm_rdma_conn_ag_ctx { u8 flags0; #define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1 #define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 #define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 #define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 @@ -7624,214 +7301,214 @@ struct e4_ustorm_rdma_conn_ag_ctx { __le16 word3; }; -struct e4_xstorm_rdma_conn_ag_ctx { +struct e4_xstorm_roce_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7 +#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6 -#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1 +#define 
E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 +#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3 +#define 
E4_XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define 
E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 
flags11; -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define 
E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4 -#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3 +#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -7853,48 +7530,108 @@ struct e4_xstorm_rdma_conn_ag_ctx { __le32 reg6; }; -struct e4_ystorm_rdma_conn_ag_ctx { - u8 byte0; +struct e4_tstorm_roce_conn_ag_ctx { + u8 reserved0; u8 byte1; u8 flags0; -#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 
0x3 -#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2 +#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags2; +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 + u8 flags4; +#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1 
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; u8 byte2; u8 byte3; __le16 word0; - __le32 reg0; - __le32 reg1; + u8 byte4; + u8 byte5; __le16 word1; __le16 word2; __le16 word3; - __le16 word4; - __le32 reg2; - __le32 reg3; + __le32 reg9; + __le32 reg10; }; /* The roce storm context of Ystorm */ @@ -7933,15 +7670,15 @@ struct e4_roce_conn_context { struct regpair ystorm_st_padding[2]; struct pstorm_roce_conn_st_ctx pstorm_st_context; struct xstorm_roce_conn_st_ctx xstorm_st_context; - struct regpair xstorm_st_padding[2]; - struct e4_xstorm_rdma_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_rdma_conn_ag_ctx tstorm_ag_context; + struct e4_xstorm_roce_conn_ag_ctx xstorm_ag_context; + struct e4_tstorm_roce_conn_ag_ctx tstorm_ag_context; struct timers_context timer_context; struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context; struct tstorm_roce_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; struct mstorm_roce_conn_st_ctx mstorm_st_context; + struct regpair mstorm_st_padding[2]; struct ustorm_roce_conn_st_ctx ustorm_st_context; - struct regpair ustorm_st_padding[2]; }; /* roce create qp requester ramrod data */ @@ -7955,8 +7692,8 @@ struct roce_create_qp_req_ramrod_data { #define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG_SHIFT 7 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF #define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF @@ -7982,18 +7719,18 @@ struct roce_create_qp_req_ramrod_data { __le16 udp_src_port; __le32 src_gid[4]; __le32 dst_gid[4]; + __le32 cq_cid; struct regpair qp_handle_for_cqe; struct regpair qp_handle_for_async; u8 
stats_counter_id; u8 reserved3[7]; - __le32 cq_cid; __le16 regular_latency_phy_queue; __le16 dpi; }; /* roce create qp responder ramrod data */ struct roce_create_qp_resp_ramrod_data { - __le16 flags; + __le32 flags; #define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 @@ -8012,6 +7749,11 @@ struct roce_create_qp_resp_ramrod_data { #define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F #define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x7FFF +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 17 + __le16 xrc_domain; u8 max_ird; u8 traffic_class; u8 hop_limit; @@ -8037,7 +7779,7 @@ struct roce_create_qp_resp_ramrod_data { struct regpair qp_handle_for_cqe; struct regpair qp_handle_for_async; __le16 low_latency_phy_queue; - u8 reserved2[6]; + u8 reserved2[2]; __le32 cq_cid; __le16 regular_latency_phy_queue; __le16 dpi; @@ -8248,6 +7990,270 @@ enum roce_ramrod_cmd_id { MAX_ROCE_RAMROD_CMD_ID }; +struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { + u8 reserved0; + u8 state; + u8 flags0; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7 + u8 flags1; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7 + u8 flags2; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3 +#define 
E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6 + u8 flags3; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6 + u8 flags4; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6 + u8 flags5; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6 + u8 flags6; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6 + u8 flags7; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 + u8 flags8; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 + u8 flags9; +#define 
E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7 + u8 flags10; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7 + u8 flags11; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 + u8 flags12; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 +#define 
E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 + u8 flags13; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 snd_nxt_psn; + __le32 reg4; +}; + +struct e4_mstorm_roce_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 
0x1 +#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + struct e4_mstorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; @@ -8341,8 +8347,8 @@ struct e4_tstorm_roce_req_conn_ag_ctx { #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6 u8 flags1; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 @@ -8350,8 +8356,8 @@ struct e4_tstorm_roce_req_conn_ag_ctx { #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3 @@ -8365,8 +8371,8 @@ struct e4_tstorm_roce_req_conn_ag_ctx { #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 @@ -8374,8 +8380,8 @@ struct e4_tstorm_roce_req_conn_ag_ctx { u8 flags4; #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1 @@ -8421,7 +8427,7 @@ struct e4_tstorm_roce_req_conn_ag_ctx { u8 byte5; __le16 snd_sq_cons; __le16 conn_dpi; - __le16 word3; + __le16 force_comp_cons; __le32 reg9; __le32 reg10; }; @@ -8445,8 +8451,8 @@ struct e4_tstorm_roce_resp_conn_ag_ctx { #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 @@ -8454,8 +8460,8 @@ struct e4_tstorm_roce_resp_conn_ag_ctx { #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 @@ -8469,8 +8475,8 @@ struct e4_tstorm_roce_resp_conn_ag_ctx { #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 @@ -8478,8 +8484,8 @@ struct e4_tstorm_roce_resp_conn_ag_ctx { u8 flags4; #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2 #define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 @@ -8724,10 +8730,10 @@ struct e4_xstorm_roce_req_conn_ag_ctx { #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2 #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 @@ -8774,10 +8780,10 @@ struct e4_xstorm_roce_req_conn_ag_ctx { #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1 +#define 
E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7 u8 flags9; #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 @@ -8882,9 +8888,9 @@ struct e4_xstorm_roce_req_conn_ag_ctx { __le16 sq_cmp_cons; __le16 sq_cons; __le16 sq_prod; - __le16 word5; + __le16 dif_error_first_sq_cons; __le16 conn_dpi; - u8 byte3; + u8 dif_error_sge_index; u8 byte4; u8 byte5; u8 byte6; @@ -8892,7 +8898,7 @@ struct e4_xstorm_roce_req_conn_ag_ctx { __le32 ssn; __le32 snd_una_psn; __le32 snd_nxt_psn; - __le32 reg4; + __le32 dif_error_offset; __le32 orq_cons_th; __le32 orq_cons; }; @@ -9128,6 +9134,50 @@ struct e4_xstorm_roce_resp_conn_ag_ctx { __le32 msn_and_syndrome; }; +struct e4_ystorm_roce_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + struct e4_ystorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; @@ -9236,7 +9286,7 @@ struct pstorm_iwarp_conn_st_ctx { /* The iwarp storm context of Xstorm */ struct xstorm_iwarp_conn_st_ctx { - __le32 reserved[44]; + __le32 reserved[48]; }; struct e4_xstorm_iwarp_conn_ag_ctx { @@ -9377,8 +9427,8 @@ struct e4_xstorm_iwarp_conn_ag_ctx { #define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 #define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 #define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5 #define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 #define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 #define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 @@ -9447,8 +9497,8 @@ struct e4_xstorm_iwarp_conn_ag_ctx { #define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 #define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 #define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6 +#define 
E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 physical_q1; @@ -9466,7 +9516,7 @@ struct e4_xstorm_iwarp_conn_ag_ctx { __le32 reg2; __le32 more_to_send_seq; __le32 reg4; - __le32 rewinded_snd_max; + __le32 rewinded_snd_max_or_term_opcode; __le32 rd_msn; __le16 irq_prod_via_msdm; __le16 irq_cons; @@ -9476,8 +9526,8 @@ struct e4_xstorm_iwarp_conn_ag_ctx { __le32 orq_cons; __le32 orq_cons_th; u8 byte7; - u8 max_ord; u8 wqe_data_pad_bytes; + u8 max_ord; u8 former_hq_prod; u8 irq_prod_via_msem; u8 byte12; @@ -9506,8 +9556,8 @@ struct e4_tstorm_iwarp_conn_ag_ctx { #define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 #define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 #define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3 #define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 #define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 #define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 @@ -9622,7 +9672,6 @@ struct e4_iwarp_conn_context { struct pstorm_iwarp_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_iwarp_conn_st_ctx xstorm_st_context; - struct regpair xstorm_st_padding[2]; struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context; struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context; struct timers_context timer_context; @@ -9648,8 +9697,10 @@ struct iwarp_create_qp_ramrod_data { #define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 #define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1 #define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5 -#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3 -#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6 +#define IWARP_CREATE_QP_RAMROD_DATA_LOW_LATENCY_QUEUE_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_LOW_LATENCY_QUEUE_EN_SHIFT 6 +#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 7 u8 reserved1; __le16 pd; __le16 sq_num_pages; @@ -9698,6 +9749,7 @@ enum iwarp_eqe_sync_opcode { IWARP_EVENT_TYPE_QUERY_QP, IWARP_EVENT_TYPE_MODIFY_QP, IWARP_EVENT_TYPE_DESTROY_QP, + IWARP_EVENT_TYPE_ABORT_TCP_OFFLOAD, MAX_IWARP_EQE_SYNC_OPCODE }; @@ -9722,6 +9774,8 @@ enum iwarp_fw_return_code { IWARP_EXCEPTION_DETECTED_LLP_RESET, IWARP_EXCEPTION_DETECTED_IRQ_FULL, IWARP_EXCEPTION_DETECTED_RQ_EMPTY, + IWARP_EXCEPTION_DETECTED_SRQ_EMPTY, + IWARP_EXCEPTION_DETECTED_SRQ_LIMIT, IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT, IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR, IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW, @@ -9766,10 +9820,13 @@ struct iwarp_modify_qp_ramrod_data { #define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3 #define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 #define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4 -#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF -#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5 - __le32 reserved3[3]; - __le32 reserved4[8]; +#define IWARP_MODIFY_QP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 5 +#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x3FF +#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 6 + __le16 physical_q0; + __le16 physical_q1; + __le32 reserved1[10]; }; /* MPA params for Enhanced mode */ @@ 
-9853,6 +9910,7 @@ enum iwarp_ramrod_cmd_id { IWARP_RAMROD_CMD_ID_QUERY_QP, IWARP_RAMROD_CMD_ID_MODIFY_QP, IWARP_RAMROD_CMD_ID_DESTROY_QP, + IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD, MAX_IWARP_RAMROD_CMD_ID }; @@ -11205,7 +11263,7 @@ struct e4_tstorm_iscsi_conn_ag_ctx { #define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; - __le32 reg2; + __le32 rx_tcp_checksum_err_cnt; __le32 reg3; __le32 reg4; __le32 reg5; @@ -12210,8 +12268,11 @@ struct public_drv_mb { #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 #define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 #define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000 +#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000 +#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000 #define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 #define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000 +#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000 #define DRV_MSG_CODE_MCP_RESET 0x00090000 #define DRV_MSG_CODE_SET_VERSION 0x000f0000 #define DRV_MSG_CODE_MCP_HALT 0x00100000 @@ -12265,7 +12326,6 @@ struct public_drv_mb { #define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000 #define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000 - #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 drv_mb_param; @@ -12377,7 +12437,10 @@ struct public_drv_mb { #define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 #define FW_MSG_CODE_NVM_OK 0x00010000 +#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000 +#define FW_MSG_CODE_PHY_OK 0x00110000 #define FW_MSG_CODE_OK 0x00160000 +#define FW_MSG_CODE_ERROR 0x00170000 #define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000 #define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000 diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 18fb5062a83d..1365da7c8900 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -467,12 +467,11 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, u16 *p_first_tx_pq_id; ext_voq = qed_get_ext_voq(p_hwfn, - p_params->port_id, + pq_params[i].port_id, tc_id, p_params->max_phys_tcs_per_port); is_vf_pq = (i >= p_params->num_pf_pqs); - rl_valid = pq_params[i].rl_valid && - pq_params[i].vport_id < max_qm_global_rls; + rl_valid = pq_params[i].rl_valid > 0; /* Update first Tx PQ of VPORT/TC */ vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport; @@ -494,10 +493,11 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, } /* Check RL ID */ - if (pq_params[i].rl_valid && pq_params[i].vport_id >= - max_qm_global_rls) + if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) { DP_NOTICE(p_hwfn, "Invalid VPORT ID for rate limiter configuration\n"); + rl_valid = false; + } /* Prepare PQ map entry */ QM_INIT_TX_PQ_MAP(p_hwfn, @@ -528,7 +528,7 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id, p_params->pf_id, tc_id, - p_params->port_id, + pq_params[i].port_id, rl_valid ? 1 : 0, rl_valid ? pq_params[i].vport_id : 0); @@ -603,6 +603,7 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, * Return -1 on error. 
*/ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, + struct qed_qm_pf_rt_init_params *p_params) { u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; @@ -619,7 +620,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, for (i = 0; i < num_tx_pqs; i++) { ext_voq = qed_get_ext_voq(p_hwfn, - p_params->port_id, + pq_params[i].port_id, pq_params[i].tc_id, p_params->max_phys_tcs_per_port); crd_reg_offset = @@ -1020,7 +1021,8 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, *__p_var = (*__p_var & ~BIT(__offset)) | \ ((enable) ? BIT(__offset) : 0); \ } while (0) -#define PRS_ETH_TUNN_FIC_FORMAT -188897008 +#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008 +#define PRS_ETH_OUTPUT_FORMAT -46832 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 dest_port) @@ -1046,11 +1048,15 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT; SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); - if (reg_val) - qed_wr(p_hwfn, - p_ptt, - PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, - (u32)PRS_ETH_TUNN_FIC_FORMAT); + if (reg_val) { + reg_val = + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); + } /* Update NIG register */ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); @@ -1077,11 +1083,15 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT; SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); - if (reg_val) - qed_wr(p_hwfn, - p_ptt, - PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, - (u32)PRS_ETH_TUNN_FIC_FORMAT); + if (reg_val) { + reg_val = + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); + } /* Update NIG register */ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); @@ -1126,11 +1136,15 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT; SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); - if (reg_val) - qed_wr(p_hwfn, - p_ptt, - PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, - (u32)PRS_ETH_TUNN_FIC_FORMAT); + if (reg_val) { + reg_val = + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); + } /* Update NIG register */ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, @@ -1152,6 +1166,38 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, ip_geneve_enable ? 
1 : 0); } +#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4 +#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512 + +void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool enable) +{ + u32 reg_val, cfg_mask; + + /* read PRS config register */ + reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO); + + /* set VXLAN_NO_L2_ENABLE mask */ + cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET); + + if (enable) { + /* set VXLAN_NO_L2_ENABLE flag */ + reg_val |= cfg_mask; + + /* update PRS FIC register */ + qed_wr(p_hwfn, + p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT); + } else { + /* clear VXLAN_NO_L2_ENABLE flag */ + reg_val &= ~cfg_mask; + } + + /* write PRS config register */ + qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val); +} + #define T_ETH_PACKET_ACTION_GFT_EVENTID 23 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272 #define T_ETH_PACKET_MATCH_RFS_EVENTID 25 @@ -1268,6 +1314,10 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, ram_line_lo = 0; ram_line_hi = 0; + /* Tunnel type */ + SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); + if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1); @@ -1279,9 +1329,14 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1); - } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) { + } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) { SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) { + SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { + SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); } qed_wr(p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index d3eabcf9c86c..af3a28ec04eb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -321,7 +321,7 @@ static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn) tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); if (tmp & PGLUE_ATTENTION_ICPL_VALID) - DP_INFO(p_hwfn, "ICPL eror - %08x\n", tmp); + DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp); tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index ca4a81dc1ace..2a2b1018ed1d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1703,6 +1703,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); if (eth_type == ETH_P_IP) { + if (iph->protocol != IPPROTO_TCP) { + DP_NOTICE(p_hwfn, + "Unexpected ip protocol on ll2 %x\n", + iph->protocol); + return -EINVAL; + } + cm_info->local_ip[0] = ntohl(iph->daddr); cm_info->remote_ip[0] = ntohl(iph->saddr); cm_info->ip_version = TCP_IPV4; @@ -1711,6 +1718,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, *payload_len = ntohs(iph->tot_len) - ip_hlen; } else if (eth_type == ETH_P_IPV6) { ip6h = (struct ipv6hdr *)iph; + + if (ip6h->nexthdr != IPPROTO_TCP) { + 
DP_NOTICE(p_hwfn, + "Unexpected ip protocol on ll2 %x\n", + ip6h->nexthdr); + return -EINVAL; + } + for (i = 0; i < 4; i++) { cm_info->local_ip[i] = ntohl(ip6h->daddr.in6_u.u6_addr32[i]); @@ -1784,7 +1799,7 @@ enum qed_iwarp_mpa_pkt_type { /* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */ #define QED_IWARP_MAX_BDS_PER_FPDU 3 -char *pkt_type_str[] = { +static const char * const pkt_type_str[] = { "QED_IWARP_MPA_PKT_PACKED", "QED_IWARP_MPA_PKT_PARTIAL", "QED_IWARP_MPA_PKT_UNALIGNED" }; @@ -1928,8 +1943,8 @@ qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn, /* Missing lower byte is now available */ mpa_len = fpdu->fpdu_length | *mpa_data; fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); - fpdu->mpa_frag_len = fpdu->fpdu_length; /* one byte of hdr */ + fpdu->mpa_frag_len = 1; fpdu->incomplete_bytes = fpdu->fpdu_length - 1; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, @@ -2360,13 +2375,6 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) memset(&tx_pkt, 0, sizeof(tx_pkt)); tx_pkt.num_of_bds = 1; - tx_pkt.vlan = data->vlan; - - if (GET_FIELD(data->parse_flags, - PARSING_AND_ERR_FLAGS_TAG8021QEXIST)) - SET_FIELD(tx_pkt.bd_flags, - CORE_TX_BD_DATA_VLAN_INSERTION, 1); - tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2; tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; tx_pkt.first_frag = buf->data_phys_addr + diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 893ef08a4b39..e874504e8b28 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1974,7 +1974,7 @@ qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode) if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE) return GFT_PROFILE_TYPE_4_TUPLE; if (mode == QED_FILTER_CONFIG_MODE_IP_DEST) - return GFT_PROFILE_TYPE_IP_DST_PORT; + return GFT_PROFILE_TYPE_IP_DST_ADDR; return GFT_PROFILE_TYPE_L4_DST_PORT; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index c4f14fdc4e77..74fc626b1ec1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -591,16 +591,6 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) } } -static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags) -{ - u8 bd_flags = 0; - - if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST)) - SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1); - - return bd_flags; -} - static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { @@ -744,7 +734,6 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, struct qed_ooo_buffer *p_buffer; u16 l4_hdr_offset_w; dma_addr_t first_frag; - u16 parse_flags; u8 bd_flags; int rc; @@ -756,8 +745,6 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, first_frag = p_buffer->rx_buffer_phys_addr + p_buffer->placement_offset; - parse_flags = p_buffer->parse_flags; - bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags); SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1); SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 27832885a87f..9854aa9139af 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -45,6 +45,7 @@ #include <linux/etherdevice.h> #include <linux/vmalloc.h> #include <linux/crash_dump.h> +#include <linux/crc32.h> #include <linux/qed/qed_if.h> #include 
<linux/qed/qed_ll2_if.h> @@ -1553,6 +1554,342 @@ static int qed_drain(struct qed_dev *cdev) return 0; } +static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, + struct qed_nvm_image_att *nvm_image, + u32 *crc) +{ + u8 *buf = NULL; + int rc, j; + u32 val; + + /* Allocate a buffer for holding the nvram image */ + buf = kzalloc(nvm_image->length, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Read image into buffer */ + rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, + buf, nvm_image->length); + if (rc) { + DP_ERR(cdev, "Failed reading image from nvm\n"); + goto out; + } + + /* Convert the buffer into big-endian format (excluding the + * closing 4 bytes of CRC). + */ + for (j = 0; j < nvm_image->length - 4; j += 4) { + val = cpu_to_be32(*(u32 *)&buf[j]); + *(u32 *)&buf[j] = val; + } + + /* Calc CRC for the "actual" image buffer, i.e. not including + * the last 4 CRC bytes. + */ + *crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4))); + +out: + kfree(buf); + + return rc; +} + +/* Binary file format - + * /----------------------------------------------------------------------\ + * 0B | 0x4 [command index] | + * 4B | image_type | Options | Number of register settings | + * 8B | Value | + * 12B | Mask | + * 16B | Offset | + * \----------------------------------------------------------------------/ + * There can be several Value-Mask-Offset sets as specified by 'Number of...'. + * Options - 0'b - Calculate & Update CRC for image + */ +static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, + bool *check_resp) +{ + struct qed_nvm_image_att nvm_image; + struct qed_hwfn *p_hwfn; + bool is_crc = false; + u32 image_type; + int rc = 0, i; + u16 len; + + *data += 4; + image_type = **data; + p_hwfn = QED_LEADING_HWFN(cdev); + for (i = 0; i < p_hwfn->nvm_info.num_images; i++) + if (image_type == p_hwfn->nvm_info.image_att[i].image_type) + break; + if (i == p_hwfn->nvm_info.num_images) { + DP_ERR(cdev, "Failed to find nvram image of type %08x\n", + image_type); + return -ENOENT; + } + + nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; + nvm_image.length = p_hwfn->nvm_info.image_att[i].len; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n", + **data, image_type, nvm_image.start_addr, + nvm_image.start_addr + nvm_image.length - 1); + (*data)++; + is_crc = !!(**data & BIT(0)); + (*data)++; + len = *((u16 *)*data); + *data += 2; + if (is_crc) { + u32 crc = 0; + + rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc); + if (rc) { + DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc); + goto exit; + } + + rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, + (nvm_image.start_addr + + nvm_image.length - 4), (u8 *)&crc, 4); + if (rc) + DP_ERR(cdev, "Failed writing to %08x, rc = %d\n", + nvm_image.start_addr + nvm_image.length - 4, rc); + goto exit; + } + + /* Iterate over the values for setting */ + while (len) { + u32 offset, mask, value, cur_value; + u8 buf[4]; + + value = *((u32 *)*data); + *data += 4; + mask = *((u32 *)*data); + *data += 4; + offset = *((u32 *)*data); + *data += 4; + + rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf, + 4); + if (rc) { + DP_ERR(cdev, "Failed reading from %08x\n", + nvm_image.start_addr + offset); + goto exit; + } + + cur_value = le32_to_cpu(*((__le32 *)buf)); + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n", + nvm_image.start_addr + offset, cur_value, + (cur_value & ~mask) | (value & mask), value, mask); + 
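/* ------------------------------------------------------------------
 * [Editorial aside, not part of the patch: a minimal, self-contained
 * sketch of verifying the CRC scheme implemented above. It assumes the
 * same kernel helpers (<linux/crc32.h>, cpu_to_be32()); the function
 * name and the in-place byte-swap of the caller's buffer are
 * illustrative assumptions, not driver API.]
 */
static int example_nvm_image_crc_check(u8 *buf, u32 len)
{
	u32 calc, stored, val, j;

	/* Swap each dword to big-endian, exactly as done before hashing */
	for (j = 0; j < len - 4; j += 4) {
		val = cpu_to_be32(*(u32 *)&buf[j]);
		*(u32 *)&buf[j] = val;
	}

	/* Complemented CRC32 over everything but the 4-byte trailer */
	calc = ~cpu_to_be32(crc32(0xffffffff, buf, len - 4));

	/* Trailer holds the CRC exactly as qed_mcp_nvm_write() stored it */
	stored = *(u32 *)&buf[len - 4];

	return calc == stored ? 0 : -EINVAL;
}
/* ---------------------------------------------------------------- */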
value = (value & mask) | (cur_value & ~mask); + rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, + nvm_image.start_addr + offset, + (u8 *)&value, 4); + if (rc) { + DP_ERR(cdev, "Failed writing to %08x\n", + nvm_image.start_addr + offset); + goto exit; + } + + len--; + } +exit: + return rc; +} + +/* Binary file format - + * /----------------------------------------------------------------------\ + * 0B | 0x3 [command index] | + * 4B | b'0: check_response? | b'1-31 reserved | + * 8B | File-type | reserved | + * \----------------------------------------------------------------------/ + * Start a new file of the provided type + */ +static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, + const u8 **data, bool *check_resp) +{ + int rc; + + *data += 4; + *check_resp = !!(**data & BIT(0)); + *data += 4; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "About to start a new file of type %02x\n", **data); + rc = qed_mcp_nvm_put_file_begin(cdev, **data); + *data += 4; + + return rc; +} + +/* Binary file format - + * /----------------------------------------------------------------------\ + * 0B | 0x2 [command index] | + * 4B | Length in bytes | + * 8B | b'0: check_response? | b'1-31 reserved | + * 12B | Offset in bytes | + * 16B | Data ... | + * \----------------------------------------------------------------------/ + * Write data as part of a file that was previously started. Data should be + * of length equal to that provided in the message + */ +static int qed_nvm_flash_image_file_data(struct qed_dev *cdev, + const u8 **data, bool *check_resp) +{ + u32 offset, len; + int rc; + + *data += 4; + len = *((u32 *)(*data)); + *data += 4; + *check_resp = !!(**data & BIT(0)); + *data += 4; + offset = *((u32 *)(*data)); + *data += 4; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "About to write File-data: %08x bytes to offset %08x\n", + len, offset); + + rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset, + (char *)(*data), len); + *data += len; + + return rc; +} + +/* Binary file format [General header] - + * /----------------------------------------------------------------------\ + * 0B | QED_NVM_SIGNATURE | + * 4B | Length in bytes | + * 8B | Highest command in this batchfile | Reserved | + * \----------------------------------------------------------------------/ + */ +static int qed_nvm_flash_image_validate(struct qed_dev *cdev, + const struct firmware *image, + const u8 **data) +{ + u32 signature, len; + + /* Check minimum size */ + if (image->size < 12) { + DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size); + return -EINVAL; + } + + /* Check signature */ + signature = *((u32 *)(*data)); + if (signature != QED_NVM_SIGNATURE) { + DP_ERR(cdev, "Wrong signature '%08x'\n", signature); + return -EINVAL; + } + + *data += 4; + /* Validate internal size equals the image-size */ + len = *((u32 *)(*data)); + if (len != image->size) { + DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n", + len, (u32)image->size); + return -EINVAL; + } + + *data += 4; + /* Make sure driver familiar with all commands necessary for this */ + if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) { + DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n", + *((u16 *)(*data))); + return -EINVAL; + } + + *data += 4; + + return 0; +} + +static int qed_nvm_flash(struct qed_dev *cdev, const char *name) +{ + const struct firmware *image; + const u8 *data, *data_end; + u32 cmd_type; + int rc; + + rc = request_firmware(&image, name, &cdev->pdev->dev); + if (rc) { + DP_ERR(cdev, "Failed to find '%s'\n", name); + 
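/* ------------------------------------------------------------------
 * [Editorial aside: a hypothetical host-side view of the 12-byte
 * general header that qed_nvm_flash_image_validate() checks above.
 * The struct and its name are illustrative; only the layout (offsets
 * 0/4/8) and the three checks come from the code.]
 */
struct example_nvm_flash_hdr {
	u32 signature;		/* must equal QED_NVM_SIGNATURE */
	u32 len;		/* must equal the firmware blob size */
	u16 highest_cmd;	/* must be < QED_NVM_FLASH_CMD_NVM_MAX */
	u16 reserved;
} __packed;			/* 12 bytes - the minimum accepted size */
/* ---------------------------------------------------------------- */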
return rc; + } + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "Flashing '%s' - firmware's data at %p, size is %08x\n", + name, image->data, (u32)image->size); + data = image->data; + data_end = data + image->size; + + rc = qed_nvm_flash_image_validate(cdev, image, &data); + if (rc) + goto exit; + + while (data < data_end) { + bool check_resp = false; + + /* Parse the actual command */ + cmd_type = *((u32 *)data); + switch (cmd_type) { + case QED_NVM_FLASH_CMD_FILE_DATA: + rc = qed_nvm_flash_image_file_data(cdev, &data, + &check_resp); + break; + case QED_NVM_FLASH_CMD_FILE_START: + rc = qed_nvm_flash_image_file_start(cdev, &data, + &check_resp); + break; + case QED_NVM_FLASH_CMD_NVM_CHANGE: + rc = qed_nvm_flash_image_access(cdev, &data, + &check_resp); + break; + default: + DP_ERR(cdev, "Unknown command %08x\n", cmd_type); + rc = -EINVAL; + goto exit; + } + + if (rc) { + DP_ERR(cdev, "Command %08x failed\n", cmd_type); + goto exit; + } + + /* Check response if needed */ + if (check_resp) { + u32 mcp_response = 0; + + if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) { + DP_ERR(cdev, "Failed getting MCP response\n"); + rc = -EINVAL; + goto exit; + } + + switch (mcp_response & FW_MSG_CODE_MASK) { + case FW_MSG_CODE_OK: + case FW_MSG_CODE_NVM_OK: + case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK: + case FW_MSG_CODE_PHY_OK: + break; + default: + DP_ERR(cdev, "MFW returns error: %08x\n", + mcp_response); + rc = -EINVAL; + goto exit; + } + } + } + +exit: + release_firmware(image); + + return rc; +} + static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, u8 *buf, u16 len) { @@ -1719,6 +2056,7 @@ const struct qed_common_ops qed_common_ops_pass = { .dbg_all_data_size = &qed_dbg_all_data_size, .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, + .nvm_flash = &qed_nvm_flash, .nvm_get_image = &qed_nvm_get_image, .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 6f46cb11f349..ec0d425766a7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -569,6 +569,31 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, return 0; } +int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf) +{ + struct qed_mcp_mb_params mb_params; + int rc; + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.param = param; + mb_params.p_data_src = i_buf; + mb_params.data_src_size = (u8)i_txn_size; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + *o_mcp_resp = mb_params.mcp_resp; + *o_mcp_param = mb_params.mcp_param; + + return 0; +} + int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 cmd, @@ -2261,6 +2286,102 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) return rc; } +int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp)); + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt; + u32 resp, param; + int rc; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + rc = 
qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr, + &resp, &param); + cdev->mcp_nvm_resp = resp; + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +int qed_mcp_nvm_write(struct qed_dev *cdev, + u32 cmd, u32 addr, u8 *p_buf, u32 len) +{ + u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param; + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt; + int rc = -EINVAL; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + switch (cmd) { + case QED_PUT_FILE_DATA: + nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; + break; + case QED_NVM_WRITE_NVRAM: + nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM; + break; + default: + DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd); + rc = -EINVAL; + goto out; + } + + while (buf_idx < len) { + buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN); + nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) | + addr) + buf_idx; + rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset, + &resp, &param, buf_size, + (u32 *)&p_buf[buf_idx]); + if (rc) { + DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc); + resp = FW_MSG_CODE_ERROR; + break; + } + + if (resp != FW_MSG_CODE_OK && + resp != FW_MSG_CODE_NVM_OK && + resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) { + DP_NOTICE(cdev, + "nvm write failed, resp = 0x%08x\n", resp); + rc = -EINVAL; + break; + } + + /* This can be a lengthy process, and it's possible the scheduler + * isn't preemptible. Sleep a bit whenever the written chunk + * crosses a 4KB boundary to prevent CPU hogging. + */ + if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000) + usleep_range(1000, 2000); + + buf_idx += buf_size; + } + + cdev->mcp_nvm_resp = resp; +out: + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 drv_mb_param = 0, rsp, param; @@ -2303,9 +2424,9 @@ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return rc; } -int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *num_images) +int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *num_images) { u32 drv_mb_param = 0, rsp; int rc = 0; @@ -2324,10 +2445,10 @@ int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn, return rc; } -int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct bist_nvm_image_att *p_image_att, - u32 image_index) +int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct bist_nvm_image_att *p_image_att, + u32 image_index) { u32 buf_size = 0, param, resp = 0, resp_param = 0; int rc; @@ -2351,16 +2472,71 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, return rc; } +int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) +{ + struct qed_nvm_image_info *nvm_info = &p_hwfn->nvm_info; + struct qed_ptt *p_ptt; + int rc; + u32 i; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "failed to acquire ptt\n"); + return -EBUSY; + } + + /* Acquire from MFW the amount of available images */ + nvm_info->num_images = 0; + rc = qed_mcp_bist_nvm_get_num_images(p_hwfn, + p_ptt, &nvm_info->num_images); + if (rc == -EOPNOTSUPP) { + DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n"); + goto out; + } else if (rc || !nvm_info->num_images) { + DP_ERR(p_hwfn, "Failed getting number of images\n"); + goto err0; + } + + nvm_info->image_att = kmalloc(nvm_info->num_images * + sizeof(struct bist_nvm_image_att), + GFP_KERNEL); + if (!nvm_info->image_att) { + 
rc = -ENOMEM; + goto err0; + } + + /* Iterate over images and get their attributes */ + for (i = 0; i < nvm_info->num_images; i++) { + rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt, + &nvm_info->image_att[i], i); + if (rc) { + DP_ERR(p_hwfn, + "Failed getting image index %d attributes\n", i); + goto err1; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i, + nvm_info->image_att[i].len); + } +out: + qed_ptt_release(p_hwfn, p_ptt); + return 0; + +err1: + kfree(nvm_info->image_att); +err0: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} + static int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_nvm_images image_id, struct qed_nvm_image_att *p_image_att) { - struct bist_nvm_image_att mfw_image_att; enum nvm_image_type type; - u32 num_images, i; - int rc; + u32 i; /* Translate image_id into MFW definitions */ switch (image_id) { @@ -2376,29 +2552,18 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, return -EINVAL; } - /* Learn number of images, then traverse and see if one fits */ - rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images); - if (rc || !num_images) - return -EINVAL; - - for (i = 0; i < num_images; i++) { - rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt, - &mfw_image_att, i); - if (rc) - return rc; - - if (type == mfw_image_att.image_type) + for (i = 0; i < p_hwfn->nvm_info.num_images; i++) + if (type == p_hwfn->nvm_info.image_att[i].image_type) break; - } - if (i == num_images) { + if (i == p_hwfn->nvm_info.num_images) { DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, "Failed to find nvram image of type %08x\n", image_id); - return -EINVAL; + return -ENOENT; } - p_image_att->start_addr = mfw_image_att.nvm_start_addr; - p_image_att->length = mfw_image_att.len; + p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; + p_image_att->length = p_hwfn->nvm_info.image_att[i].len; return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index c7ec2395d1ce..8a5c988d0c3c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -443,6 +443,40 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, */ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len); +/** + * @brief Write to nvm + * + * @param cdev + * @param addr - nvm offset + * @param cmd - nvm command + * @param p_buf - nvm write buffer + * @param len - buffer len + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_nvm_write(struct qed_dev *cdev, + u32 cmd, u32 addr, u8 *p_buf, u32 len); + +/** + * @brief Put file begin + * + * @param cdev + * @param addr - nvm offset + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr); + +/** + * @brief Check latest response + * + * @param cdev + * @param p_buf - nvm write buffer + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf); + struct qed_nvm_image_att { u32 start_addr; u32 length; @@ -496,9 +530,9 @@ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, * * @return int - 0 - operation was successful. 
*/ -int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *num_images); +int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *num_images); /** * @brief Bist nvm test - get image attributes by index @@ -510,10 +544,10 @@ int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn, * * @return int - 0 - operation was successful. */ -int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct bist_nvm_image_att *p_image_att, - u32 image_index); +int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct bist_nvm_image_att *p_image_att, + u32 image_index); /* Using hwfn number (and not pf_num) is required since in CMT mode, * same pf_num may be used by two different hwfn @@ -957,4 +991,12 @@ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); * @param p_ptt */ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * @brief Populate the nvm info shadow in the given hardware function + * + * @param p_hwfn + */ +int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 5d040b873137..a411f9c702a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -379,6 +379,7 @@ static void qed_rdma_free(struct qed_hwfn *p_hwfn) DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); qed_rdma_free_reserved_lkey(p_hwfn); + qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto); qed_rdma_resc_free(p_hwfn); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c index 1bafc05db2b8..cf1d4476f9d8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c @@ -125,10 +125,11 @@ int qed_selftest_nvram(struct qed_dev *cdev) } /* Acquire from MFW the amount of available images */ - rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images); + rc = qed_mcp_bist_nvm_get_num_images(p_hwfn, p_ptt, &num_images); if (rc || !num_images) { DP_ERR(p_hwfn, "Failed getting number of images\n"); - return -EINVAL; + rc = -EINVAL; + goto err0; } /* Iterate over images and validate CRC */ @@ -136,8 +137,8 @@ int qed_selftest_nvram(struct qed_dev *cdev) /* This mailbox returns information about the image required for * reading it. 
*/ - rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt, - &image_att, i); + rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt, + &image_att, i); if (rc) { DP_ERR(p_hwfn, "Failed getting image index %d attributes\n", diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 4ca3847fffd4..ecbf1ded7a39 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -699,6 +699,14 @@ static u32 qede_get_link(struct net_device *dev) return current_link.link_up; } +static int qede_flash_device(struct net_device *dev, + struct ethtool_flash *flash) +{ + struct qede_dev *edev = netdev_priv(dev); + + return edev->ops->common->nvm_flash(edev->cdev, flash->data); +} + static int qede_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { @@ -1806,6 +1814,7 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_tunable = qede_get_tunable, .set_tunable = qede_set_tunable, + .flash_device = qede_flash_device, }; static const struct ethtool_ops qede_vf_ethtool_ops = { diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index dafc079ab6b9..14941303189d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -320,13 +320,11 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq) barrier(); writel(txq->tx_db.raw, txq->doorbell_addr); - /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. It guarantees that the write arrives to the device before - * the queue lock is released and another start_xmit is called (possibly - * on another CPU). Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. + /* Fence required to flush the write combined buffer, since another + * CPU may write to the same doorbell address and data may be lost + * due to relaxed order nature of write combined bar. */ - mmiowb(); + wmb(); } static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, @@ -1249,16 +1247,10 @@ static int qede_rx_process_cqe(struct qede_dev *edev, csum_flag = qede_check_csum(parse_flag); if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { - if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) { + if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) rxq->rx_ip_frags++; - } else { - DP_NOTICE(edev, - "CQE has error, flags = %x, dropping incoming packet\n", - parse_flag); + else rxq->rx_hw_errors++; - qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); - return 0; - } } /* Basic validation passed; Need to prepare an SKB. This would also diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 2db70eabddfe..a01e7d6e5442 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -288,7 +288,7 @@ int __init qede_init(void) } /* Must register notifier before pci ops, since we might miss - * interface rename after pci probe and netdev registeration. + * interface rename after pci probe and netdev registration. 
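/* ------------------------------------------------------------------
 * [Editorial aside: with .flash_device wired up above, the whole NVM
 * flash path becomes reachable from userspace via the standard
 * ethtool flash op ("ethtool -f <iface> <file>"); the file name is
 * resolved by request_firmware(), i.e. typically under /lib/firmware.
 * A minimal sketch of the equivalent raw ioctl - the function name is
 * illustrative, the uapi structures are not:]
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* sock: any AF_INET datagram socket; needs CAP_NET_ADMIN */
static int example_flash_nvm(int sock, const char *ifname, const char *fw)
{
	struct ethtool_flash flash = { .cmd = ETHTOOL_FLASHDEV };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(flash.data, fw, sizeof(flash.data) - 1);
	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
	ifr.ifr_data = (void *)&flash;

	return ioctl(sock, SIOCETHTOOL, &ifr);	/* 0 on success */
}
/* ---------------------------------------------------------------- */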
*/ ret = register_netdevice_notifier(&qede_netdev_notifier); if (ret) { @@ -988,7 +988,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, if (rc) goto err3; - /* Prepare the lock prior to the registeration of the netdev, + /* Prepare the lock prior to the registration of the netdev, * as once it's registered we might reach flows requiring it * [it's even possible to reach a flow needing it directly * from there, although it's unlikely]. @@ -2067,8 +2067,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, link_params.link_up = true; edev->ops->common->set_link(edev->cdev, &link_params); - qede_rdma_dev_event_open(edev); - edev->state = QEDE_STATE_OPEN; DP_INFO(edev, "Ending successfully qede load\n"); @@ -2169,12 +2167,14 @@ static void qede_link_update(void *dev, struct qed_link_output *link) DP_NOTICE(edev, "Link is up\n"); netif_tx_start_all_queues(edev->ndev); netif_carrier_on(edev->ndev); + qede_rdma_dev_event_open(edev); } } else { if (netif_carrier_ok(edev->ndev)) { DP_NOTICE(edev, "Link is down\n"); netif_tx_disable(edev->ndev); netif_carrier_off(edev->ndev); + qede_rdma_dev_event_close(edev); } } } diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 9b2280badaf7..02adb513f475 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -485,7 +485,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc) ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); if (IS_ERR(ptp->clock)) { rc = -EINVAL; - DP_ERR(edev, "PTP clock registeration failed\n"); + DP_ERR(edev, "PTP clock registration failed\n"); goto err2; } diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 9e5264d8773b..b48f76182049 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -1858,8 +1858,9 @@ static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) qdev->small_buf_release_cnt -= 8; } wmb(); - writel(qdev->small_buf_q_producer_index, - &port_regs->CommonRegs.rxSmallQProducerIndex); + writel_relaxed(qdev->small_buf_q_producer_index, + &port_regs->CommonRegs.rxSmallQProducerIndex); + mmiowb(); } } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 46b0372dd032..97c146e7698a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -478,7 +478,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter) wmb(); /* clear the interrupt trigger control register */ - writel(0, adapter->isr_int_vec); + writel_relaxed(0, adapter->isr_int_vec); intr_val = readl(adapter->isr_int_vec); do { intr_val = readl(adapter->tgt_status_reg); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 287d89dd086f..891f03a7a33d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -1175,81 +1175,81 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, } static const struct device_attribute dev_attr_bridged_mode = { - .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, - .show = qlcnic_show_bridged_mode, - .store = qlcnic_store_bridged_mode, + .attr = { .name = "bridged_mode", .mode = 0644 }, + .show = qlcnic_show_bridged_mode, + .store = 
qlcnic_store_bridged_mode, }; static const struct device_attribute dev_attr_diag_mode = { - .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "diag_mode", .mode = 0644 }, .show = qlcnic_show_diag_mode, .store = qlcnic_store_diag_mode, }; static const struct device_attribute dev_attr_beacon = { - .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "beacon", .mode = 0644 }, .show = qlcnic_show_beacon, .store = qlcnic_store_beacon, }; static const struct bin_attribute bin_attr_crb = { - .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "crb", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_read_crb, .write = qlcnic_sysfs_write_crb, }; static const struct bin_attribute bin_attr_mem = { - .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "mem", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_read_mem, .write = qlcnic_sysfs_write_mem, }; static const struct bin_attribute bin_attr_npar_config = { - .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "npar_config", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_read_npar_config, .write = qlcnic_sysfs_write_npar_config, }; static const struct bin_attribute bin_attr_pci_config = { - .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "pci_config", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_read_pci_config, .write = NULL, }; static const struct bin_attribute bin_attr_port_stats = { - .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "port_stats", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_get_port_stats, .write = qlcnic_sysfs_clear_port_stats, }; static const struct bin_attribute bin_attr_esw_stats = { - .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "esw_stats", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_get_esw_stats, .write = qlcnic_sysfs_clear_esw_stats, }; static const struct bin_attribute bin_attr_esw_config = { - .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "esw_config", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_read_esw_config, .write = qlcnic_sysfs_write_esw_config, }; static const struct bin_attribute bin_attr_pm_config = { - .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "pm_config", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_read_pm_config, .write = qlcnic_sysfs_write_pm_config, }; static const struct bin_attribute bin_attr_flash = { - .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)}, + .attr = { .name = "flash", .mode = 0644 }, .size = 0, .read = qlcnic_83xx_sysfs_flash_read_handler, .write = qlcnic_83xx_sysfs_flash_write_handler, @@ -1276,7 +1276,7 @@ static ssize_t qlcnic_hwmon_show_temp(struct device *dev, } /* hwmon-sysfs attributes */ -static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, +static SENSOR_DEVICE_ATTR(temp1_input, 0444, qlcnic_hwmon_show_temp, NULL, 1); static struct attribute *qlcnic_hwmon_attrs[] = { diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 84ac50f92c9c..3e71b65a9546 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -2185,6 +2185,22 @@ static inline void ql_write_db_reg(u32 val, void __iomem *addr) } /* + * Doorbell Registers: + * Doorbell registers are virtual registers in the PCI memory space. + * The space is allocated by the chip during PCI initialization. 
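 *
 * [Editorial aside - a sketch of the call-site pattern the relaxed
 * doorbell helper below is meant for (see qlge_send() and
 * ql_update_small_bufq_prod_index() elsewhere in this series):
 *
 *	wmb();					descriptors visible first
 *	ql_write_db_reg_relaxed(val, reg);	plain MMIO store, no barrier
 *	mmiowb();				order MMIO across CPUs
 *
 * A plain writel() would repeat, on weakly-ordered architectures, the
 * barrier that the explicit wmb() already provides; the relaxed
 * variant avoids that duplication, while mmiowb() keeps doorbell
 * writes from different CPUs from interleaving.]
 *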
The + * device driver finds the doorbell address in BAR 3 in PCI config space. + * The registers are used to control outbound and inbound queues. For + * example, the producer index for an outbound queue. Each queue uses + * 1 4k chunk of memory. The lower half of the space is for outbound + * queues. The upper half is for inbound queues. + * Caller has to guarantee ordering. + */ +static inline void ql_write_db_reg_relaxed(u32 val, void __iomem *addr) +{ + writel_relaxed(val, addr); +} + +/* * Shadow Registers: * Outbound queues have a consumer index that is maintained by the chip. * Inbound queues have a producer index that is maintained by the chip. diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 50038d946ced..8293c2028002 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2700,7 +2700,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) tx_ring->prod_idx = 0; wmb(); - ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); + ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); + mmiowb(); netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, "tx queued, slot %d, len %d\n", tx_ring->prod_idx, skb->len); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 9cbb27263742..d5a32b7c7dc5 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1194,9 +1194,9 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q) while (tx_q->tpd.consume_idx != hw_consume_idx) { tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx); if (tpbuf->dma_addr) { - dma_unmap_single(adpt->netdev->dev.parent, - tpbuf->dma_addr, tpbuf->length, - DMA_TO_DEVICE); + dma_unmap_page(adpt->netdev->dev.parent, + tpbuf->dma_addr, tpbuf->length, + DMA_TO_DEVICE); tpbuf->dma_addr = 0; } @@ -1353,9 +1353,11 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt, tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); tpbuf->length = mapped_len; - tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, - skb->data, tpbuf->length, - DMA_TO_DEVICE); + tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, + virt_to_page(skb->data), + offset_in_page(skb->data), + tpbuf->length, + DMA_TO_DEVICE); ret = dma_mapping_error(adpt->netdev->dev.parent, tpbuf->dma_addr); if (ret) @@ -1371,9 +1373,12 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt, if (mapped_len < len) { tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); tpbuf->length = len - mapped_len; - tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, - skb->data + mapped_len, - tpbuf->length, DMA_TO_DEVICE); + tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, + virt_to_page(skb->data + + mapped_len), + offset_in_page(skb->data + + mapped_len), + tpbuf->length, DMA_TO_DEVICE); ret = dma_mapping_error(adpt->netdev->dev.parent, tpbuf->dma_addr); if (ret) diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index 92b6be9c4429..51d89c86e60f 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -151,7 +151,7 @@ qcaspi_init_device_debugfs(struct qcaspi *qca) dev_name(&qca->net_dev->dev)); return; } - debugfs_create_file("info", S_IFREG | S_IRUGO, device_root, qca, + debugfs_create_file("info", S_IFREG | 0444, device_root, qca, &qcaspi_info_ops); } diff 
--git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 9c236298fe21..5803cd6db406 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -705,7 +705,6 @@ qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) tskb = skb_copy_expand(skb, QCAFRM_HEADER_LEN, QCAFRM_FOOTER_LEN + pad_len, GFP_ATOMIC); if (!tskb) { - netdev_dbg(qca->net_dev, "could not allocate tx_buff\n"); qca->stats.out_of_mem++; return NETDEV_TX_BUSY; } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 7e7704daf5f1..d33988570217 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -43,10 +43,9 @@ /* Local Definitions and Declarations */ -struct rmnet_walk_data { - struct net_device *real_dev; - struct list_head *head; - struct rmnet_port *port; +static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = { + [IFLA_RMNET_MUX_ID] = { .type = NLA_U16 }, + [IFLA_RMNET_FLAGS] = { .len = sizeof(struct ifla_rmnet_flags) }, }; static int rmnet_is_real_dev_registered(const struct net_device *real_dev) @@ -112,17 +111,14 @@ static int rmnet_register_real_device(struct net_device *real_dev) static void rmnet_unregister_bridge(struct net_device *dev, struct rmnet_port *port) { - struct net_device *rmnet_dev, *bridge_dev; struct rmnet_port *bridge_port; + struct net_device *bridge_dev; if (port->rmnet_mode != RMNET_EPMODE_BRIDGE) return; /* bridge slave handling */ if (!port->nr_rmnet_devs) { - rmnet_dev = netdev_master_upper_dev_get_rcu(dev); - netdev_upper_dev_unlink(dev, rmnet_dev); - bridge_dev = port->bridge_ep; bridge_port = rmnet_get_port_rtnl(bridge_dev); @@ -132,9 +128,6 @@ static void rmnet_unregister_bridge(struct net_device *dev, bridge_dev = port->bridge_ep; bridge_port = rmnet_get_port_rtnl(bridge_dev); - rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev); - netdev_upper_dev_unlink(bridge_dev, rmnet_dev); - rmnet_unregister_real_device(bridge_dev, bridge_port); } } @@ -143,7 +136,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - u32 data_format = RMNET_INGRESS_FORMAT_DEAGGREGATION; + u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION; struct net_device *real_dev; int mode = RMNET_EPMODE_VND; struct rmnet_endpoint *ep; @@ -155,14 +148,14 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, if (!real_dev || !dev) return -ENODEV; - if (!data[IFLA_VLAN_ID]) + if (!data[IFLA_RMNET_MUX_ID]) return -EINVAL; ep = kzalloc(sizeof(*ep), GFP_ATOMIC); if (!ep) return -ENOMEM; - mux_id = nla_get_u16(data[IFLA_VLAN_ID]); + mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); err = rmnet_register_real_device(real_dev); if (err) @@ -173,18 +166,14 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, if (err) goto err1; - err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL, extack); - if (err) - goto err2; - port->rmnet_mode = mode; hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); - if (data[IFLA_VLAN_FLAGS]) { - struct ifla_vlan_flags *flags; 
+ if (data[IFLA_RMNET_FLAGS]) { + struct ifla_rmnet_flags *flags; - flags = nla_data(data[IFLA_VLAN_FLAGS]); + flags = nla_data(data[IFLA_RMNET_FLAGS]); data_format = flags->flags & flags->mask; } @@ -193,8 +182,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, return 0; -err2: - rmnet_vnd_dellink(mux_id, port, ep); err1: rmnet_unregister_real_device(real_dev, port); err0: @@ -204,14 +191,13 @@ err0: static void rmnet_dellink(struct net_device *dev, struct list_head *head) { + struct rmnet_priv *priv = netdev_priv(dev); struct net_device *real_dev; struct rmnet_endpoint *ep; struct rmnet_port *port; u8 mux_id; - rcu_read_lock(); - real_dev = netdev_master_upper_dev_get_rcu(dev); - rcu_read_unlock(); + real_dev = priv->real_dev; if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) return; @@ -219,7 +205,6 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head) port = rmnet_get_port_rtnl(real_dev); mux_id = rmnet_vnd_get_mux(dev); - netdev_upper_dev_unlink(dev, real_dev); ep = rmnet_get_endpoint(port, mux_id); if (ep) { @@ -233,30 +218,13 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head) unregister_netdevice_queue(dev, head); } -static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data) -{ - struct rmnet_walk_data *d = data; - struct rmnet_endpoint *ep; - u8 mux_id; - - mux_id = rmnet_vnd_get_mux(rmnet_dev); - ep = rmnet_get_endpoint(d->port, mux_id); - if (ep) { - hlist_del_init_rcu(&ep->hlnode); - rmnet_vnd_dellink(mux_id, d->port, ep); - kfree(ep); - } - netdev_upper_dev_unlink(rmnet_dev, d->real_dev); - unregister_netdevice_queue(rmnet_dev, d->head); - - return 0; -} - static void rmnet_force_unassociate_device(struct net_device *dev) { struct net_device *real_dev = dev; - struct rmnet_walk_data d; + struct hlist_node *tmp_ep; + struct rmnet_endpoint *ep; struct rmnet_port *port; + unsigned long bkt_ep; LIST_HEAD(list); if (!rmnet_is_real_dev_registered(real_dev)) @@ -264,16 +232,19 @@ static void rmnet_force_unassociate_device(struct net_device *dev) ASSERT_RTNL(); - d.real_dev = real_dev; - d.head = &list; - port = rmnet_get_port_rtnl(dev); - d.port = port; rcu_read_lock(); rmnet_unregister_bridge(dev, port); - netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d); + hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { + unregister_netdevice_queue(ep->egress_dev, &list); + rmnet_vnd_dellink(ep->mux_id, port, ep); + + hlist_del_init_rcu(&ep->hlnode); + kfree(ep); + } + rcu_read_unlock(); unregister_netdevice_many(&list); @@ -310,10 +281,10 @@ static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[], { u16 mux_id; - if (!data || !data[IFLA_VLAN_ID]) + if (!data || !data[IFLA_RMNET_MUX_ID]) return -EINVAL; - mux_id = nla_get_u16(data[IFLA_VLAN_ID]); + mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); if (mux_id > (RMNET_MAX_LOGICAL_EP - 1)) return -ERANGE; @@ -338,9 +309,11 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], port = rmnet_get_port_rtnl(real_dev); - if (data[IFLA_VLAN_ID]) { - mux_id = nla_get_u16(data[IFLA_VLAN_ID]); + if (data[IFLA_RMNET_MUX_ID]) { + mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); ep = rmnet_get_endpoint(port, priv->mux_id); + if (!ep) + return -ENODEV; hlist_del_init_rcu(&ep->hlnode); hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); @@ -349,10 +322,10 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], priv->mux_id = mux_id; } - if (data[IFLA_VLAN_FLAGS]) { - struct 
ifla_vlan_flags *flags; + if (data[IFLA_RMNET_FLAGS]) { + struct ifla_rmnet_flags *flags; - flags = nla_data(data[IFLA_VLAN_FLAGS]); + flags = nla_data(data[IFLA_RMNET_FLAGS]); port->data_format = flags->flags & flags->mask; } @@ -361,13 +334,45 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], static size_t rmnet_get_size(const struct net_device *dev) { - return nla_total_size(2) /* IFLA_VLAN_ID */ + - nla_total_size(sizeof(struct ifla_vlan_flags)); /* IFLA_VLAN_FLAGS */ + return + /* IFLA_RMNET_MUX_ID */ + nla_total_size(2) + + /* IFLA_RMNET_FLAGS */ + nla_total_size(sizeof(struct ifla_rmnet_flags)); +} + +static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct net_device *real_dev; + struct ifla_rmnet_flags f; + struct rmnet_port *port; + + real_dev = priv->real_dev; + + if (!rmnet_is_real_dev_registered(real_dev)) + return -ENODEV; + + if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id)) + goto nla_put_failure; + + port = rmnet_get_port_rtnl(real_dev); + + f.flags = port->data_format; + f.mask = ~0; + + if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f)) + goto nla_put_failure; + + return 0; + +nla_put_failure: + return -EMSGSIZE; } struct rtnl_link_ops rmnet_link_ops __read_mostly = { .kind = "rmnet", - .maxtype = __IFLA_VLAN_MAX, + .maxtype = __IFLA_RMNET_MAX, .priv_size = sizeof(struct rmnet_priv), .setup = rmnet_vnd_setup, .validate = rmnet_rtnl_validate, @@ -375,6 +380,8 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = { .dellink = rmnet_dellink, .get_size = rmnet_get_size, .changelink = rmnet_changelink, + .policy = rmnet_policy, + .fill_info = rmnet_fill_info, }; /* Needs either rcu_read_lock() or rtnl lock */ @@ -422,11 +429,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, if (err) return -EBUSY; - err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL, - extack); - if (err) - return -EINVAL; - slave_port = rmnet_get_port(slave_dev); slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE; slave_port->bridge_ep = real_dev; @@ -449,7 +451,6 @@ int rmnet_del_bridge(struct net_device *rmnet_dev, port->rmnet_mode = RMNET_EPMODE_VND; port->bridge_ep = NULL; - netdev_upper_dev_unlink(slave_dev, rmnet_dev); slave_port = rmnet_get_port(slave_dev); rmnet_unregister_real_device(slave_dev, slave_port); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 00e4634100d3..0b5b5da80198 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 601edec28c5f..6fcd586e9804 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
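/* ------------------------------------------------------------------
 * [Editorial aside: the rmnet hunks above replace the borrowed
 * IFLA_VLAN_* attributes with rmnet's own netlink attributes. A
 * minimal sketch of what a netlink user now supplies when creating a
 * device; the values are illustrative, the flag names are the uapi
 * RMNET_FLAGS_* constants used by rmnet_handlers.c below:]
 */
struct ifla_rmnet_flags flags = {
	.flags = RMNET_FLAGS_INGRESS_DEAGGREGATION |
		 RMNET_FLAGS_INGRESS_MAP_CKSUMV4 |
		 RMNET_FLAGS_EGRESS_MAP_CKSUMV4,
	.mask  = RMNET_FLAGS_INGRESS_DEAGGREGATION |
		 RMNET_FLAGS_INGRESS_MAP_CKSUMV4 |
		 RMNET_FLAGS_EGRESS_MAP_CKSUMV4,
};
/* nla_put_u16(skb, IFLA_RMNET_MUX_ID, mux_id); followed by
 * nla_put(skb, IFLA_RMNET_FLAGS, sizeof(flags), &flags) - the inverse
 * of rmnet_fill_info() above. iproute2 exposes the same thing roughly
 * as (command shown as an assumption about the userspace tooling):
 *	ip link add link wwan0 name rmnet1 type rmnet mux_id 1
 */
/* ---------------------------------------------------------------- */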
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -70,7 +70,7 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, u8 mux_id; if (RMNET_MAP_GET_CD_BIT(skb)) { - if (port->data_format & RMNET_INGRESS_FORMAT_MAP_COMMANDS) + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS) return rmnet_map_command(skb, port); goto free_skb; @@ -93,7 +93,7 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, skb_pull(skb, sizeof(struct rmnet_map_header)); rmnet_set_skb_proto(skb); - if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) { + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) { if (!rmnet_map_checksum_downlink_packet(skb, len + pad)) skb->ip_summed = CHECKSUM_UNNECESSARY; } @@ -121,7 +121,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb, skb_push(skb, ETH_HLEN); } - if (port->data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) { + if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) { while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) __rmnet_map_ingress_handler(skbn, port); @@ -141,7 +141,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, additional_header_len = 0; required_headroom = sizeof(struct rmnet_map_header); - if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) { + if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) { additional_header_len = sizeof(struct rmnet_map_ul_csum_header); required_headroom += additional_header_len; } @@ -151,7 +151,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, goto fail; } - if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) + if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) rmnet_map_checksum_uplink_packet(skb, orig_dev); map_header = rmnet_map_add_map_header(skb, additional_header_len, 0); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h index 6ce31e29136d..884f1f52dcc2 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -23,8 +23,8 @@ struct rmnet_map_control_command { struct { u16 ip_family:2; u16 reserved:14; - u16 flow_control_seq_num; - u32 qos_id; + __be16 flow_control_seq_num; + __be32 qos_id; } flow_control; u8 data[0]; }; @@ -44,7 +44,7 @@ struct rmnet_map_header { u8 reserved_bit:1; u8 cd_bit:1; u8 mux_id; - u16 pkt_len; + __be16 pkt_len; } __aligned(1); struct rmnet_map_dl_csum_trailer { diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 6bc328fb88e1..78fdad0c6f76 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -38,6 +38,11 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb, } ep = rmnet_get_endpoint(port, mux_id); + if (!ep) { + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + vnd = ep->egress_dev; ip_family = cmd->flow_control.ip_family; @@ -64,7 +69,7 @@ static void rmnet_map_send_ack(struct sk_buff *skb, struct rmnet_map_control_command *cmd; int xmit_status; - if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) { + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) { if (skb->len < sizeof(struct rmnet_map_header) + RMNET_MAP_GET_LENGTH(skb) + sizeof(struct rmnet_map_dl_csum_trailer)) { diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index c74a6c56d315..a6ea09416f8d 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -309,7 +309,7 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, maph = (struct rmnet_map_header *)skb->data; packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header); - if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) packet_len += sizeof(struct rmnet_map_dl_csum_trailer); if (((int)skb->len - (int)packet_len) < 0) @@ -323,7 +323,6 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, if (!skbn) return NULL; - skbn->dev = skb->dev; skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM); skb_put(skbn, packet_len); memcpy(skbn->data, skb->data, packet_len); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h index de0143eaa05a..b9cc4f85f229 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -18,12 +18,6 @@ #define RMNET_NEEDED_HEADROOM 16 #define RMNET_TX_QUEUE_LEN 1000 -/* Constants */ -#define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(0) -#define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(1) -#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4 BIT(2) -#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4 BIT(3) - /* Replace skb->dev to a virtual rmnet device and pass up the stack */ #define RMNET_EPMODE_VND (1) /* Pass the frame directly to another device with dev_queue_xmit() */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 570a227acdd8..2ea16a088de8 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
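The guard added to rmnet_map_do_flow_control() above closes a NULL dereference: a MAP flow-control command carrying a mux_id with no configured endpoint previously fell straight through to ep->egress_dev. Freeing the skb and returning RX_HANDLER_CONSUMED follows the usual rx_handler contract for this lookup-or-drop pattern: the buffer has been disposed of and the caller must not touch it again.

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep) {
		kfree_skb(skb);			/* no endpoint owns this mux_id */
		return RX_HANDLER_CONSUMED;	/* skb is gone; do not reuse it */
	}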
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -121,7 +121,7 @@ static void rmnet_get_stats64(struct net_device *dev, memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats)); for_each_possible_cpu(cpu) { - pcpu_ptr = this_cpu_ptr(priv->pcpu_stats); + pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu); do { start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0bf7d1759250..604ae78381ae 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -99,12 +99,12 @@ static const int multicast_filter_limit = 32; #define RTL8169_PHY_TIMEOUT (10*HZ) /* write/read MMIO register */ -#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) -#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) -#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) -#define RTL_R8(reg) readb (ioaddr + (reg)) -#define RTL_R16(reg) readw (ioaddr + (reg)) -#define RTL_R32(reg) readl (ioaddr + (reg)) +#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) +#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) +#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) +#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) +#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) +#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg)) enum mac_version { RTL_GIGA_MAC_VER_01 = 0, @@ -735,12 +735,6 @@ struct ring_info { u8 __pad[sizeof(void *) - sizeof(u32)]; }; -enum features { - RTL_FEATURE_WOL = (1 << 0), - RTL_FEATURE_MSI = (1 << 1), - RTL_FEATURE_GMII = (1 << 2), -}; - struct rtl8169_counters { __le64 tx_packets; __le64 rx_packets; @@ -829,7 +823,7 @@ struct rtl8169_private { void (*phy_reset_enable)(struct rtl8169_private *tp); void (*hw_start)(struct net_device *); unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); - unsigned int (*link_ok)(void __iomem *); + unsigned int (*link_ok)(struct rtl8169_private *tp); int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *); @@ -893,6 +887,11 @@ MODULE_FIRMWARE(FIRMWARE_8168H_2); MODULE_FIRMWARE(FIRMWARE_8107E_1); MODULE_FIRMWARE(FIRMWARE_8107E_2); +static inline struct device *tp_to_dev(struct rtl8169_private *tp) +{ + return &tp->pci_dev->dev; +} + static void rtl_lock_work(struct rtl8169_private *tp) { mutex_lock(&tp->wk.mutex); @@ -903,9 +902,9 @@ static void rtl_unlock_work(struct rtl8169_private *tp) mutex_unlock(&tp->wk.mutex); } -static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) +static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force) { - pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, force); } @@ -984,56 +983,46 @@ static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg) DECLARE_RTL_COND(rtl_ocp_gphy_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(GPHY_OCP) & OCPAR_FLAG; + return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG; } static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) { - void __iomem *ioaddr = tp->mmio_addr; - if (rtl_ocp_reg_failure(tp, reg)) return; - RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); rtl_udelay_loop_wait_low(tp, 
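The one-line rmnet_vnd.c fix above is easy to miss: this_cpu_ptr() always resolves to the CPU the code happens to be running on, so the old loop summed the local CPU's counters once per possible CPU instead of walking all of them. per_cpu_ptr(ptr, cpu) indexes the instance belonging to each loop iteration. A sketch of the corrected aggregation (field names follow the driver's rmnet_pcpu_stats layout and are shown for illustration):

	for_each_possible_cpu(cpu) {
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);	/* not this_cpu_ptr() */

		do {
			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
			snapshot = pcpu_ptr->stats;	/* consistent copy */
		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

		total_stats.rx_pkts += snapshot.rx_pkts;
		total_stats.tx_pkts += snapshot.tx_pkts;
	}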
&rtl_ocp_gphy_cond, 25, 10); } static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) { - void __iomem *ioaddr = tp->mmio_addr; - if (rtl_ocp_reg_failure(tp, reg)) return 0; - RTL_W32(GPHY_OCP, reg << 15); + RTL_W32(tp, GPHY_OCP, reg << 15); return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? - (RTL_R32(GPHY_OCP) & 0xffff) : ~0; + (RTL_R32(tp, GPHY_OCP) & 0xffff) : ~0; } static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) { - void __iomem *ioaddr = tp->mmio_addr; - if (rtl_ocp_reg_failure(tp, reg)) return; - RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data); + RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data); } static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) { - void __iomem *ioaddr = tp->mmio_addr; - if (rtl_ocp_reg_failure(tp, reg)) return 0; - RTL_W32(OCPDR, reg << 15); + RTL_W32(tp, OCPDR, reg << 15); - return RTL_R32(OCPDR); + return RTL_R32(tp, OCPDR); } #define OCP_STD_PHY_BASE 0xa400 @@ -1076,16 +1065,12 @@ static int mac_mcu_read(struct rtl8169_private *tp, int reg) DECLARE_RTL_COND(rtl_phyar_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(PHYAR) & 0x80000000; + return RTL_R32(tp, PHYAR) & 0x80000000; } static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); /* @@ -1097,13 +1082,12 @@ static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) static int r8169_mdio_read(struct rtl8169_private *tp, int reg) { - void __iomem *ioaddr = tp->mmio_addr; int value; - RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16); + RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16); value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? - RTL_R32(PHYAR) & 0xffff : ~0; + RTL_R32(tp, PHYAR) & 0xffff : ~0; /* * According to hardware specs a 20us delay is required after read @@ -1116,18 +1100,14 @@ static int r8169_mdio_read(struct rtl8169_private *tp, int reg) DECLARE_RTL_COND(rtl_ocpar_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(OCPAR) & OCPAR_FLAG; + return RTL_R32(tp, OCPAR) & OCPAR_FLAG; } static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); - RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD); - RTL_W32(EPHY_RXER_NUM, 0); + RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); } @@ -1140,51 +1120,46 @@ static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) { - void __iomem *ioaddr = tp->mmio_addr; - r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); mdelay(1); - RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD); - RTL_W32(EPHY_RXER_NUM, 0); + RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? 
- RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0; + RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : ~0; } #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 -static void r8168dp_2_mdio_start(void __iomem *ioaddr) +static void r8168dp_2_mdio_start(struct rtl8169_private *tp) { - RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); } -static void r8168dp_2_mdio_stop(void __iomem *ioaddr) +static void r8168dp_2_mdio_stop(struct rtl8169_private *tp) { - RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT); + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT); } static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) { - void __iomem *ioaddr = tp->mmio_addr; - - r8168dp_2_mdio_start(ioaddr); + r8168dp_2_mdio_start(tp); r8169_mdio_write(tp, reg, value); - r8168dp_2_mdio_stop(ioaddr); + r8168dp_2_mdio_stop(tp); } static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) { - void __iomem *ioaddr = tp->mmio_addr; int value; - r8168dp_2_mdio_start(ioaddr); + r8168dp_2_mdio_start(tp); value = r8169_mdio_read(tp, reg); - r8168dp_2_mdio_stop(ioaddr); + r8168dp_2_mdio_stop(tp); return value; } @@ -1229,16 +1204,12 @@ static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) DECLARE_RTL_COND(rtl_ephyar_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(EPHYAR) & EPHYAR_FLAG; + return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; } static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); @@ -1248,41 +1219,33 @@ static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? - RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0; + RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; } DECLARE_RTL_COND(rtl_eriar_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(ERIAR) & ERIAR_FLAG; + return RTL_R32(tp, ERIAR) & ERIAR_FLAG; } static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, u32 val, int type) { - void __iomem *ioaddr = tp->mmio_addr; - BUG_ON((addr & 3) || (mask == 0)); - RTL_W32(ERIDR, val); - RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr); + RTL_W32(tp, ERIDR, val); + RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr); rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); } static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); + RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? 
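Most of the r8169.c churn from here on is a single mechanical conversion: the RTL_W*/RTL_R* macros used to expand against a local ioaddr variable, which forced nearly every function (and every DECLARE_RTL_COND body) to open with "void __iomem *ioaddr = tp->mmio_addr;". Passing tp into the macros lets them derive the base themselves, e.g.:

	/* before: the caller must capture the MMIO base first */
	void __iomem *ioaddr = tp->mmio_addr;
	RTL_W8(ChipCmd, CmdReset);

	/* after: the macro reaches through tp */
	#define RTL_W8(tp, reg, val8)	writeb((val8), tp->mmio_addr + (reg))
	RTL_W8(tp, ChipCmd, CmdReset);

The same reasoning drives the signature changes from void __iomem * to struct rtl8169_private * (link_ok, the r8168dp mdio start/stop helpers, rtl_tx_performance_tweak and friends): once everything takes tp, the helpers compose without plumbing a raw pointer around.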
- RTL_R32(ERIDR) : ~0; + RTL_R32(tp, ERIDR) : ~0; } static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, @@ -1296,11 +1259,9 @@ static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? - RTL_R32(OCPDR) : ~0; + RTL_R32(tp, OCPDR) : ~0; } static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) @@ -1328,10 +1289,8 @@ static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(OCPDR, data); - RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + RTL_W32(tp, OCPDR, data); + RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); } @@ -1393,19 +1352,15 @@ DECLARE_RTL_COND(rtl_ep_ocp_read_cond) DECLARE_RTL_COND(rtl_ocp_tx_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R8(IBISR0) & 0x20; + return RTL_R8(tp, IBISR0) & 0x20; } static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); + RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01); rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000); - RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); - RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); + RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20); + RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); } static void rtl8168dp_driver_start(struct rtl8169_private *tp) @@ -1473,19 +1428,19 @@ static void rtl8168_driver_stop(struct rtl8169_private *tp) } } -static int r8168dp_check_dash(struct rtl8169_private *tp) +static bool r8168dp_check_dash(struct rtl8169_private *tp) { u16 reg = rtl8168_get_ocp_reg(tp); - return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0; + return !!(ocp_read(tp, 0x0f, reg) & 0x00008000); } -static int r8168ep_check_dash(struct rtl8169_private *tp) +static bool r8168ep_check_dash(struct rtl8169_private *tp) { - return (ocp_read(tp, 0x0f, 0x128) & 0x00000001) ? 1 : 0; + return !!(ocp_read(tp, 0x0f, 0x128) & 0x00000001); } -static int r8168_check_dash(struct rtl8169_private *tp) +static bool r8168_check_dash(struct rtl8169_private *tp) { switch (tp->mac_version) { case RTL_GIGA_MAC_VER_27: @@ -1497,7 +1452,7 @@ static int r8168_check_dash(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_51: return r8168ep_check_dash(tp); default: - return 0; + return false; } } @@ -1518,49 +1473,37 @@ static void rtl_write_exgmac_batch(struct rtl8169_private *tp, DECLARE_RTL_COND(rtl_efusear_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(EFUSEAR) & EFUSEAR_FLAG; + return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG; } static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? 
- RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0; + RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; } static u16 rtl_get_events(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R16(IntrStatus); + return RTL_R16(tp, IntrStatus); } static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W16(IntrStatus, bits); + RTL_W16(tp, IntrStatus, bits); mmiowb(); } static void rtl_irq_disable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W16(IntrMask, 0); + RTL_W16(tp, IntrMask, 0); mmiowb(); } static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W16(IntrMask, bits); + RTL_W16(tp, IntrMask, bits); } #define RTL_EVENT_NAPI_RX (RxOK | RxErr) @@ -1574,18 +1517,14 @@ static void rtl_irq_enable_all(struct rtl8169_private *tp) static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - rtl_irq_disable(tp); rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow); - RTL_R8(ChipCmd); + RTL_R8(tp, ChipCmd); } static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(TBICSR) & TBIReset; + return RTL_R32(tp, TBICSR) & TBIReset; } static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) @@ -1593,21 +1532,19 @@ static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; } -static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr) +static unsigned int rtl8169_tbi_link_ok(struct rtl8169_private *tp) { - return RTL_R32(TBICSR) & TBILinkOk; + return RTL_R32(tp, TBICSR) & TBILinkOk; } -static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr) +static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp) { - return RTL_R8(PHYstatus) & LinkStatus; + return RTL_R8(tp, PHYstatus) & LinkStatus; } static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset); + RTL_W32(tp, TBICSR, RTL_R32(tp, TBICSR) | TBIReset); } static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) @@ -1620,7 +1557,6 @@ static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) static void rtl_link_chg_patch(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; struct net_device *dev = tp->dev; if (!netif_running(dev)) @@ -1628,12 +1564,12 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) if (tp->mac_version == RTL_GIGA_MAC_VER_34 || tp->mac_version == RTL_GIGA_MAC_VER_38) { - if (RTL_R8(PHYstatus) & _1000bpsF) { + if (RTL_R8(tp, PHYstatus) & _1000bpsF) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, ERIAR_EXGMAC); - } else if (RTL_R8(PHYstatus) & _100bps) { + } else if (RTL_R8(tp, PHYstatus) & _100bps) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, @@ -1651,7 +1587,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) ERIAR_EXGMAC); } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || tp->mac_version == RTL_GIGA_MAC_VER_36) { - if (RTL_R8(PHYstatus) & _1000bpsF) { + if (RTL_R8(tp, PHYstatus) & _1000bpsF) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, @@ -1663,7 +1599,7 @@ static void 
rtl_link_chg_patch(struct rtl8169_private *tp) ERIAR_EXGMAC); } } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { - if (RTL_R8(PHYstatus) & _10bps) { + if (RTL_R8(tp, PHYstatus) & _10bps) { rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, @@ -1676,20 +1612,21 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) } static void rtl8169_check_link_status(struct net_device *dev, - struct rtl8169_private *tp, - void __iomem *ioaddr) + struct rtl8169_private *tp) { - if (tp->link_ok(ioaddr)) { + struct device *d = tp_to_dev(tp); + + if (tp->link_ok(tp)) { rtl_link_chg_patch(tp); /* This is to cancel a scheduled suspend if there's one. */ - pm_request_resume(&tp->pci_dev->dev); + pm_request_resume(d); netif_carrier_on(dev); if (net_ratelimit()) netif_info(tp, ifup, dev, "link up\n"); } else { netif_carrier_off(dev); netif_info(tp, ifdown, dev, "link down\n"); - pm_runtime_idle(&tp->pci_dev->dev); + pm_runtime_idle(d); } } @@ -1697,15 +1634,14 @@ static void rtl8169_check_link_status(struct net_device *dev, static u32 __rtl8169_get_wol(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; u8 options; u32 wolopts = 0; - options = RTL_R8(Config1); + options = RTL_R8(tp, Config1); if (!(options & PMEnable)) return 0; - options = RTL_R8(Config3); + options = RTL_R8(tp, Config3); if (options & LinkUp) wolopts |= WAKE_PHY; switch (tp->mac_version) { @@ -1735,7 +1671,7 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) break; } - options = RTL_R8(Config5); + options = RTL_R8(tp, Config5); if (options & UWF) wolopts |= WAKE_UCAST; if (options & BWF) @@ -1749,7 +1685,7 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = &tp->pci_dev->dev; + struct device *d = tp_to_dev(tp); pm_runtime_get_noresume(d); @@ -1768,7 +1704,6 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) { - void __iomem *ioaddr = tp->mmio_addr; unsigned int i, tmp; static const struct { u32 opt; @@ -1784,7 +1719,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) }; u8 options; - RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); switch (tp->mac_version) { case RTL_GIGA_MAC_VER_34: @@ -1826,43 +1761,39 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) } for (i = 0; i < tmp; i++) { - options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; + options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; if (wolopts & cfg[i].opt) options |= cfg[i].mask; - RTL_W8(cfg[i].reg, options); + RTL_W8(tp, cfg[i].reg, options); } switch (tp->mac_version) { case RTL_GIGA_MAC_VER_01 ... 
RTL_GIGA_MAC_VER_17: - options = RTL_R8(Config1) & ~PMEnable; + options = RTL_R8(tp, Config1) & ~PMEnable; if (wolopts) options |= PMEnable; - RTL_W8(Config1, options); + RTL_W8(tp, Config1, options); break; default: - options = RTL_R8(Config2) & ~PME_SIGNAL; + options = RTL_R8(tp, Config2) & ~PME_SIGNAL; if (wolopts) options |= PME_SIGNAL; - RTL_W8(Config2, options); + RTL_W8(tp, Config2, options); break; } - RTL_W8(Cfg9346, Cfg9346_Lock); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); } static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = &tp->pci_dev->dev; + struct device *d = tp_to_dev(tp); pm_runtime_get_noresume(d); rtl_lock_work(tp); - if (wol->wolopts) - tp->features |= RTL_FEATURE_WOL; - else - tp->features &= ~RTL_FEATURE_WOL; if (pm_runtime_active(d)) __rtl8169_set_wol(tp, wol->wolopts); else @@ -1870,7 +1801,7 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) rtl_unlock_work(tp); - device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); + device_set_wakeup_enable(d, wol->wolopts); pm_runtime_put_noidle(d); @@ -1906,16 +1837,15 @@ static int rtl8169_set_speed_tbi(struct net_device *dev, u8 autoneg, u16 speed, u8 duplex, u32 ignored) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; int ret = 0; u32 reg; - reg = RTL_R32(TBICSR); + reg = RTL_R32(tp, TBICSR); if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && (duplex == DUPLEX_FULL)) { - RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart)); + RTL_W32(tp, TBICSR, reg & ~(TBINwEnable | TBINwRestart)); } else if (autoneg == AUTONEG_ENABLE) - RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart); + RTL_W32(tp, TBICSR, reg | TBINwEnable | TBINwRestart); else { netif_warn(tp, link, dev, "incorrect speed setting refused in TBI mode\n"); @@ -2040,16 +1970,15 @@ static void __rtl8169_set_features(struct net_device *dev, netdev_features_t features) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; u32 rx_config; - rx_config = RTL_R32(RxConfig); + rx_config = RTL_R32(tp, RxConfig); if (features & NETIF_F_RXALL) rx_config |= (AcceptErr | AcceptRunt); else rx_config &= ~(AcceptErr | AcceptRunt); - RTL_W32(RxConfig, rx_config); + RTL_W32(tp, RxConfig, rx_config); if (features & NETIF_F_RXCSUM) tp->cp_cmd |= RxChkSum; @@ -2061,10 +1990,10 @@ static void __rtl8169_set_features(struct net_device *dev, else tp->cp_cmd &= ~RxVlan; - tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum); + tp->cp_cmd |= RTL_R16(tp, CPlusCmd) & ~(RxVlan | RxChkSum); - RTL_W16(CPlusCmd, tp->cp_cmd); - RTL_R16(CPlusCmd); + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + RTL_R16(tp, CPlusCmd); } static int rtl8169_set_features(struct net_device *dev, @@ -2101,7 +2030,6 @@ static int rtl8169_get_link_ksettings_tbi(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; u32 status; u32 supported, advertising; @@ -2109,7 +2037,7 @@ static int rtl8169_get_link_ksettings_tbi(struct net_device *dev, SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; cmd->base.port = PORT_FIBRE; - status = RTL_R32(TBICSR); + status = RTL_R32(tp, TBICSR); advertising = (status & TBINwEnable) ? 
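rtl8169_set_wol() above also stops mirroring the WoL state into the private RTL_FEATURE_WOL bit (the enum features block was deleted earlier in the patch). device_set_wakeup_enable() already records that state in the struct device, so the PM core can be asked directly instead of keeping a shadow flag; the stock query, for reference, would be:

	bool wol_armed = device_may_wakeup(tp_to_dev(tp));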
ADVERTISED_Autoneg : 0; cmd->base.autoneg = !!(status & TBINwEnable); @@ -2224,23 +2152,20 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset) DECLARE_RTL_COND(rtl_counters_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(CounterAddrLow) & (CounterReset | CounterDump); + return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); } static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; dma_addr_t paddr = tp->counters_phys_addr; u32 cmd; - RTL_W32(CounterAddrHigh, (u64)paddr >> 32); - RTL_R32(CounterAddrHigh); + RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); + RTL_R32(tp, CounterAddrHigh); cmd = (u64)paddr & DMA_BIT_MASK(32); - RTL_W32(CounterAddrLow, cmd); - RTL_W32(CounterAddrLow, cmd | counter_cmd); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | counter_cmd); return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); } @@ -2262,13 +2187,12 @@ static bool rtl8169_reset_counters(struct net_device *dev) static bool rtl8169_update_counters(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; /* * Some chips are unable to dump tally counters when the receiver * is disabled. */ - if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) + if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0) return true; return rtl8169_do_counters(dev, CounterDump); @@ -2317,7 +2241,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = &tp->pci_dev->dev; + struct device *d = tp_to_dev(tp); struct rtl8169_counters *counters = tp->counters; ASSERT_RTNL(); @@ -2448,7 +2372,6 @@ static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev) static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; const struct rtl_coalesce_info *ci; const struct rtl_coalesce_scale *scale; struct { @@ -2468,10 +2391,10 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) if (IS_ERR(ci)) return PTR_ERR(ci); - scale = &ci->scalev[RTL_R16(CPlusCmd) & 3]; + scale = &ci->scalev[RTL_R16(tp, CPlusCmd) & 3]; /* read IntrMitigate and adjust according to scale */ - for (w = RTL_R16(IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) { + for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) { *p->max_frames = (w & RTL_COALESCE_MASK) << 2; w >>= RTL_COALESCE_SHIFT; *p->usecs = w & RTL_COALESCE_MASK; @@ -2518,7 +2441,6 @@ static const struct rtl_coalesce_scale *rtl_coalesce_choose_scale( static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; const struct rtl_coalesce_scale *scale; struct { u32 frames; @@ -2566,11 +2488,11 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) rtl_lock_work(tp); - RTL_W16(IntrMitigate, swab16(w)); + RTL_W16(tp, IntrMitigate, swab16(w)); tp->cp_cmd = (tp->cp_cmd & ~3) | cp01; - RTL_W16(CPlusCmd, tp->cp_cmd); - RTL_R16(CPlusCmd); + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + RTL_R16(tp, CPlusCmd); rtl_unlock_work(tp); @@ -2600,17 +2522,16 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { static void rtl8169_get_mac_version(struct rtl8169_private *tp, struct net_device 
*dev, u8 default_version) { - void __iomem *ioaddr = tp->mmio_addr; /* * The driver currently handles the 8168Bf and the 8168Be identically * but they can be identified more specifically through the test below * if needed: * - * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be * * Same thing for the 8101Eb and the 8101Ec: * - * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec */ static const struct rtl_mac_info { u32 mask; @@ -2708,7 +2629,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, const struct rtl_mac_info *p = mac_info; u32 reg; - reg = RTL_R32(TxConfig); + reg = RTL_R32(tp, TxConfig); while ((reg & p->mask) != p->val) p++; tp->mac_version = p->mac_version; @@ -3805,8 +3726,6 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0005); rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); - /* soft-reset phy */ - rtl_writephy(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART); /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */ rtl_rar_exgmac_set(tp, tp->dev->dev_addr); @@ -4591,7 +4510,6 @@ static void rtl_hw_phy_config(struct net_device *dev) static void rtl_phy_work(struct rtl8169_private *tp) { struct timer_list *timer = &tp->timer; - void __iomem *ioaddr = tp->mmio_addr; unsigned long timeout = RTL8169_PHY_TIMEOUT; assert(tp->mac_version > RTL_GIGA_MAC_VER_01); @@ -4605,7 +4523,7 @@ static void rtl_phy_work(struct rtl8169_private *tp) goto out_mod_timer; } - if (tp->link_ok(ioaddr)) + if (tp->link_ok(tp)) return; netif_dbg(tp, link, tp->dev, "PHY reset until link up\n"); @@ -4643,21 +4561,17 @@ static void rtl8169_phy_reset(struct net_device *dev, static bool rtl_tbi_enabled(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - return (tp->mac_version == RTL_GIGA_MAC_VER_01) && - (RTL_R8(PHYstatus) & TBI_Enable); + (RTL_R8(tp, PHYstatus) & TBI_Enable); } static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - rtl_hw_phy_config(dev); if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); - RTL_W8(0x82, 0x01); + RTL_W8(tp, 0x82, 0x01); } pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); @@ -4667,7 +4581,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) if (tp->mac_version == RTL_GIGA_MAC_VER_02) { dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); - RTL_W8(0x82, 0x01); + RTL_W8(tp, 0x82, 0x01); dprintk("Set PHY Reg 0x0bh = 0x00h\n"); rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 } @@ -4687,22 +4601,20 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) { - void __iomem *ioaddr = tp->mmio_addr; - rtl_lock_work(tp); - RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - RTL_W32(MAC4, addr[4] | addr[5] << 8); - RTL_R32(MAC4); + RTL_W32(tp, MAC4, addr[4] | addr[5] << 8); + RTL_R32(tp, MAC4); - RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); - RTL_R32(MAC0); + RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); + RTL_R32(tp, MAC0); if (tp->mac_version == RTL_GIGA_MAC_VER_34) rtl_rar_exgmac_set(tp, addr); - RTL_W8(Cfg9346, Cfg9346_Lock); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); 
rtl_unlock_work(tp); } @@ -4710,13 +4622,12 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) static int rtl_set_mac_address(struct net_device *dev, void *p) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = &tp->pci_dev->dev; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; + struct device *d = tp_to_dev(tp); + int ret; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + ret = eth_mac_addr(dev, p); + if (ret) + return ret; pm_runtime_get_noresume(d); @@ -4822,8 +4733,6 @@ static void rtl_speed_down(struct rtl8169_private *tp) static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: @@ -4847,7 +4756,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - RTL_W32(RxConfig, RTL_R32(RxConfig) | + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; default: @@ -4880,8 +4789,6 @@ static void r810x_phy_power_up(struct rtl8169_private *tp) static void r810x_pll_power_down(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - if (rtl_wol_pll_power_down(tp)) return; @@ -4896,15 +4803,13 @@ static void r810x_pll_power_down(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_16: break; default: - RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); break; } } static void r810x_pll_power_up(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - r810x_phy_power_up(tp); switch (tp->mac_version) { @@ -4917,10 +4822,10 @@ static void r810x_pll_power_up(struct rtl8169_private *tp) break; case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: - RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); break; default: - RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); break; } } @@ -4987,21 +4892,12 @@ static void r8168_phy_power_down(struct rtl8169_private *tp) static void r8168_pll_power_down(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31 || - tp->mac_version == RTL_GIGA_MAC_VER_49 || - tp->mac_version == RTL_GIGA_MAC_VER_50 || - tp->mac_version == RTL_GIGA_MAC_VER_51) && - r8168_check_dash(tp)) { + if (r8168_check_dash(tp)) return; - } if ((tp->mac_version == RTL_GIGA_MAC_VER_23 || tp->mac_version == RTL_GIGA_MAC_VER_24) && - (RTL_R16(CPlusCmd) & ASF)) { + (RTL_R16(tp, CPlusCmd) & ASF)) { return; } @@ -5027,22 +4923,20 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); break; case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_49: rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000, 0xfc000000, ERIAR_EXGMAC); - RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); break; } } static void r8168_pll_power_up(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: @@ -5051,19 +4945,19 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_31: case 
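rtl_set_mac_address() above swaps its open-coded checks for eth_mac_addr(), the stock helper for Ethernet drivers. Note it is slightly stricter than the removed code: besides rejecting invalid addresses with -EADDRNOTAVAIL, it refuses to change the address of a running device with -EBUSY unless the device advertises IFF_LIVE_ADDR_CHANGE. Roughly (simplified from net/ethernet/eth.c):

	int eth_mac_addr(struct net_device *dev, void *p)
	{
		struct sockaddr *addr = p;

		if (netif_running(dev) &&
		    !(dev->priv_flags & IFF_LIVE_ADDR_CHANGE))
			return -EBUSY;
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
		return 0;
	}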
RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: - RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); break; case RTL_GIGA_MAC_VER_44: case RTL_GIGA_MAC_VER_45: case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); break; case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_49: - RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, 0x00000000, ERIAR_EXGMAC); break; @@ -5153,8 +5047,6 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp) static void rtl_init_rxcfg(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_01: case RTL_GIGA_MAC_VER_02: @@ -5170,7 +5062,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_15: case RTL_GIGA_MAC_VER_16: case RTL_GIGA_MAC_VER_17: - RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_18: case RTL_GIGA_MAC_VER_19: @@ -5181,7 +5073,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_24: case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_35: - RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: @@ -5195,10 +5087,10 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; default: - RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); break; } } @@ -5210,102 +5102,82 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); rtl_generic_op(tp, tp->jumbo_ops.enable); - RTL_W8(Cfg9346, Cfg9346_Lock); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); } static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); rtl_generic_op(tp, tp->jumbo_ops.disable); - RTL_W8(Cfg9346, Cfg9346_Lock); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); } static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); - RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1); - rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B); } static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); - RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1); - rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + rtl_tx_performance_tweak(tp, 0x5 << 
MAX_READ_REQUEST_SHIFT); } static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); } static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); } static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(MaxTxPacketSize, 0x3f); - RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); - RTL_W8(Config4, RTL_R8(Config4) | 0x01); - rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B); + RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B); } static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(MaxTxPacketSize, 0x0c); - RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); - RTL_W8(Config4, RTL_R8(Config4) & ~0x01); - rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); + RTL_W8(tp, MaxTxPacketSize, 0x0c); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); } static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) { - rtl_tx_performance_tweak(tp->pci_dev, + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN); } static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) { - rtl_tx_performance_tweak(tp->pci_dev, + rtl_tx_performance_tweak(tp, (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); } static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - r8168b_0_hw_jumbo_enable(tp); - RTL_W8(Config4, RTL_R8(Config4) | (1 << 0)); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); } static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - r8168b_0_hw_jumbo_disable(tp); - RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); } static void rtl_init_jumbo_ops(struct rtl8169_private *tp) @@ -5372,16 +5244,12 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp) DECLARE_RTL_COND(rtl_chipcmd_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R8(ChipCmd) & CmdReset; + return RTL_R8(tp, ChipCmd) & CmdReset; } static void rtl_hw_reset(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(ChipCmd, CmdReset); + RTL_W8(tp, ChipCmd, CmdReset); rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); } @@ -5400,7 +5268,7 @@ static void rtl_request_uncached_firmware(struct rtl8169_private *tp) if (!rtl_fw) goto err_warn; - rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev); + rc = request_firmware(&rtl_fw->fw, name, tp_to_dev(tp)); if (rc < 0) goto err_free; @@ -5432,29 +5300,21 @@ static void rtl_request_firmware(struct rtl8169_private *tp) static void rtl_rx_close(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK); + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK); } DECLARE_RTL_COND(rtl_npq_cond) { - void __iomem *ioaddr = 
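The jumbo enable/disable pairs above toggle the PCIe Max_Read_Request_Size between 512 bytes for jumbo frames and the driver's usual 4096 bytes. The encoding lives in Device Control register bits 14:12, where the size is 128 << field, hence the two constants:

	/* DevCtl[14:12]: max read request = 128 << field              */
	/* PCI_EXP_DEVCTL_READRQ_512B (0x2 << 12) -> 512 bytes (jumbo) */
	/* 0x5 << MAX_READ_REQUEST_SHIFT (12)     -> 4096 bytes        */
	rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

rtl_tx_performance_tweak(), converted earlier in the patch to take tp, performs the pcie_capability_clear_and_set_word() update on tp->pci_dev.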
tp->mmio_addr; - - return RTL_R8(TxPoll) & NPQ; + return RTL_R8(tp, TxPoll) & NPQ; } DECLARE_RTL_COND(rtl_txcfg_empty_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(TxConfig) & TXCFG_EMPTY; + return RTL_R32(tp, TxConfig) & TXCFG_EMPTY; } static void rtl8169_hw_reset(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - /* Disable interrupts */ rtl8169_irq_mask_and_ack(tp); @@ -5481,10 +5341,10 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) tp->mac_version == RTL_GIGA_MAC_VER_49 || tp->mac_version == RTL_GIGA_MAC_VER_50 || tp->mac_version == RTL_GIGA_MAC_VER_51) { - RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); } else { - RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); udelay(100); } @@ -5493,10 +5353,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - /* Set DMA burst size and Interframe Gap Time */ - RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) | + RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) | (InterFrameGap << TxInterFrameGapShift)); } @@ -5509,36 +5367,35 @@ static void rtl_hw_start(struct net_device *dev) rtl_irq_enable_all(tp); } -static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp, - void __iomem *ioaddr) +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) { /* * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh * register to be written before TxDescAddrLow to work. * Switching from MMIO to I/O access fixes the issue as well. */ - RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); - RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); - RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); - RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); } -static u16 rtl_rw_cpluscmd(void __iomem *ioaddr) +static u16 rtl_rw_cpluscmd(struct rtl8169_private *tp) { u16 cmd; - cmd = RTL_R16(CPlusCmd); - RTL_W16(CPlusCmd, cmd); + cmd = RTL_R16(tp, CPlusCmd); + RTL_W16(tp, CPlusCmd, cmd); return cmd; } -static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz) +static void rtl_set_rx_max_size(struct rtl8169_private *tp, unsigned int rx_buf_sz) { /* Low hurts. Let's disable the filtering. 
*/ - RTL_W16(RxMaxSize, rx_buf_sz + 1); + RTL_W16(tp, RxMaxSize, rx_buf_sz + 1); } -static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) +static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version) { static const struct rtl_cfg2_info { u32 mac_version; @@ -5554,10 +5411,10 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) unsigned int i; u32 clk; - clk = RTL_R8(Config2) & PCI_Clock_66MHz; + clk = RTL_R8(tp, Config2) & PCI_Clock_66MHz; for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) { if ((p->mac_version == mac_version) && (p->clk == clk)) { - RTL_W32(0x7c, p->val); + RTL_W32(tp, 0x7c, p->val); break; } } @@ -5566,7 +5423,6 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) static void rtl_set_rx_mode(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; u32 mc_filter[2]; /* Multicast hash filter */ int rx_mode; u32 tmp = 0; @@ -5598,7 +5454,7 @@ static void rtl_set_rx_mode(struct net_device *dev) if (dev->features & NETIF_F_RXALL) rx_mode |= (AcceptErr | AcceptRunt); - tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; + tmp = (RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; if (tp->mac_version > RTL_GIGA_MAC_VER_06) { u32 data = mc_filter[0]; @@ -5610,35 +5466,34 @@ static void rtl_set_rx_mode(struct net_device *dev) if (tp->mac_version == RTL_GIGA_MAC_VER_35) mc_filter[1] = mc_filter[0] = 0xffffffff; - RTL_W32(MAR0 + 4, mc_filter[1]); - RTL_W32(MAR0 + 0, mc_filter[0]); + RTL_W32(tp, MAR0 + 4, mc_filter[1]); + RTL_W32(tp, MAR0 + 0, mc_filter[0]); - RTL_W32(RxConfig, tmp); + RTL_W32(tp, RxConfig, tmp); } static void rtl_hw_start_8169(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; if (tp->mac_version == RTL_GIGA_MAC_VER_05) { - RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW); + RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) | PCIMulRW); pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); } - RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); if (tp->mac_version == RTL_GIGA_MAC_VER_01 || tp->mac_version == RTL_GIGA_MAC_VER_02 || tp->mac_version == RTL_GIGA_MAC_VER_03 || tp->mac_version == RTL_GIGA_MAC_VER_04) - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); rtl_init_rxcfg(tp); - RTL_W8(EarlyTxThres, NoEarlyTx); + RTL_W8(tp, EarlyTxThres, NoEarlyTx); - rtl_set_rx_max_size(ioaddr, rx_buf_sz); + rtl_set_rx_max_size(tp, rx_buf_sz); if (tp->mac_version == RTL_GIGA_MAC_VER_01 || tp->mac_version == RTL_GIGA_MAC_VER_02 || @@ -5646,7 +5501,7 @@ static void rtl_hw_start_8169(struct net_device *dev) tp->mac_version == RTL_GIGA_MAC_VER_04) rtl_set_rx_tx_config_registers(tp); - tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; + tp->cp_cmd |= rtl_rw_cpluscmd(tp) | PCIMulRW; if (tp->mac_version == RTL_GIGA_MAC_VER_02 || tp->mac_version == RTL_GIGA_MAC_VER_03) { @@ -5655,37 +5510,37 @@ static void rtl_hw_start_8169(struct net_device *dev) tp->cp_cmd |= (1 << 14); } - RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_W16(tp, CPlusCmd, tp->cp_cmd); - rtl8169_set_magic_reg(ioaddr, tp->mac_version); + rtl8169_set_magic_reg(tp, tp->mac_version); /* * Undocumented corner. 
Supposedly: * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets */ - RTL_W16(IntrMitigate, 0x0000); + RTL_W16(tp, IntrMitigate, 0x0000); - rtl_set_rx_tx_desc_registers(tp, ioaddr); + rtl_set_rx_tx_desc_registers(tp); if (tp->mac_version != RTL_GIGA_MAC_VER_01 && tp->mac_version != RTL_GIGA_MAC_VER_02 && tp->mac_version != RTL_GIGA_MAC_VER_03 && tp->mac_version != RTL_GIGA_MAC_VER_04) { - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); rtl_set_rx_tx_config_registers(tp); } - RTL_W8(Cfg9346, Cfg9346_Lock); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ - RTL_R8(IntrMask); + RTL_R8(tp, IntrMask); - RTL_W32(RxMissed, 0); + RTL_W32(tp, RxMissed, 0); rtl_set_rx_mode(dev); /* no early-rx interrupts */ - RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); + RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); } static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) @@ -5719,17 +5574,13 @@ static void rtl_csi_access_enable_2(struct rtl8169_private *tp) DECLARE_RTL_COND(rtl_csiar_cond) { - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(CSIAR) & CSIAR_FLAG; + return RTL_R32(tp, CSIAR) & CSIAR_FLAG; } static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(CSIDR, value); - RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); @@ -5737,21 +5588,17 @@ static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value) static u32 r8169_csi_read(struct rtl8169_private *tp, int addr) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? - RTL_R32(CSIDR) : ~0; + RTL_R32(tp, CSIDR) : ~0; } static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(CSIDR, value); - RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | CSIAR_FUNC_NIC); @@ -5760,21 +5607,17 @@ static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value) static u32 r8402_csi_read(struct rtl8169_private *tp, int addr) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? 
- RTL_R32(CSIDR) : ~0; + RTL_R32(tp, CSIDR) : ~0; } static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(CSIDR, value); - RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | CSIAR_FUNC_NIC2); @@ -5783,13 +5626,11 @@ static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value) static u32 r8411_csi_read(struct rtl8169_private *tp, int addr) { - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 | + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 | CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? - RTL_R32(CSIDR) : ~0; + RTL_R32(tp, CSIDR) : ~0; } static void rtl_init_csi_ops(struct rtl8169_private *tp) @@ -5851,31 +5692,30 @@ static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e, } } -static void rtl_disable_clock_request(struct pci_dev *pdev) +static void rtl_disable_clock_request(struct rtl8169_private *tp) { - pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, + pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_CLKREQ_EN); } -static void rtl_enable_clock_request(struct pci_dev *pdev) +static void rtl_enable_clock_request(struct rtl8169_private *tp) { - pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, + pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_CLKREQ_EN); } static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) { - void __iomem *ioaddr = tp->mmio_addr; u8 data; - data = RTL_R8(Config3); + data = RTL_R8(tp, Config3); if (enable) data |= Rdy_to_L23; else data &= ~Rdy_to_L23; - RTL_W8(Config3, data); + RTL_W8(tp, Config3, data); } #define R8168_CPCMD_QUIRK_MASK (\ @@ -5891,45 +5731,37 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); - - RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); if (tp->dev->mtu <= ETH_DATA_LEN) { - rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) | + rtl_tx_performance_tweak(tp, (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); } } static void rtl_hw_start_8168bef(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - rtl_hw_start_8168bb(tp); - RTL_W8(MaxTxPacketSize, TxPacketMax); + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); } static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) { - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; - - RTL_W8(Config1, RTL_R8(Config1) | Speed_down); + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down); - RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); - rtl_disable_clock_request(pdev); + rtl_disable_clock_request(tp); - 
@@ -5851,31 +5692,30 @@ static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
 	}
 }

-static void rtl_disable_clock_request(struct pci_dev *pdev)
+static void rtl_disable_clock_request(struct rtl8169_private *tp)
 {
-	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
+	pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL,
 				   PCI_EXP_LNKCTL_CLKREQ_EN);
 }

-static void rtl_enable_clock_request(struct pci_dev *pdev)
+static void rtl_enable_clock_request(struct rtl8169_private *tp)
 {
-	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
+	pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL,
 				 PCI_EXP_LNKCTL_CLKREQ_EN);
 }

 static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
 	u8 data;

-	data = RTL_R8(Config3);
+	data = RTL_R8(tp, Config3);

 	if (enable)
 		data |= Rdy_to_L23;
 	else
 		data &= ~Rdy_to_L23;

-	RTL_W8(Config3, data);
+	RTL_W8(tp, Config3, data);
 }

 #define R8168_CPCMD_QUIRK_MASK (\
@@ -5891,45 +5731,37 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)

 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
+	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);

-	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
-
-	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+	RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

 	if (tp->dev->mtu <= ETH_DATA_LEN) {
-		rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
+		rtl_tx_performance_tweak(tp, (0x5 << MAX_READ_REQUEST_SHIFT) |
 					 PCI_EXP_DEVCTL_NOSNOOP_EN);
 	}
 }

 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-
 	rtl_hw_start_8168bb(tp);

-	RTL_W8(MaxTxPacketSize, TxPacketMax);
+	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

-	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
+	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
 }

 static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
-
-	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
+	RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down);

-	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);

 	if (tp->dev->mtu <= ETH_DATA_LEN)
-		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+		rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

-	rtl_disable_clock_request(pdev);
+	rtl_disable_clock_request(tp);

-	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+	RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }

 static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
@@ -5951,42 +5783,35 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)

 static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
-
 	rtl_csi_access_enable_2(tp);

-	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);

 	if (tp->dev->mtu <= ETH_DATA_LEN)
-		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+		rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

-	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+	RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }

 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
-
 	rtl_csi_access_enable_2(tp);

-	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);

 	/* Magic. */
-	RTL_W8(DBG_REG, 0x20);
+	RTL_W8(tp, DBG_REG, 0x20);

-	RTL_W8(MaxTxPacketSize, TxPacketMax);
+	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

 	if (tp->dev->mtu <= ETH_DATA_LEN)
-		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+		rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

-	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+	RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }

 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8168c_1[] = {
 		{ 0x02, 0x0800, 0x1000 },
 		{ 0x03, 0, 0x0002 },
@@ -5995,7 +5820,7 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)

 	rtl_csi_access_enable_2(tp);

-	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
+	RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

 	rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
@@ -6030,40 +5855,32 @@ static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)

 static void rtl_hw_start_8168d(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
-
 	rtl_csi_access_enable_2(tp);

-	rtl_disable_clock_request(pdev);
+	rtl_disable_clock_request(tp);

-	RTL_W8(MaxTxPacketSize, TxPacketMax);
+	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

 	if (tp->dev->mtu <= ETH_DATA_LEN)
-		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+		rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

-	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+	RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }

 static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
-
 	rtl_csi_access_enable_1(tp);

 	if (tp->dev->mtu <= ETH_DATA_LEN)
-		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+		rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

-	RTL_W8(MaxTxPacketSize, TxPacketMax);
+	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

-	rtl_disable_clock_request(pdev);
+	rtl_disable_clock_request(tp);
 }

 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
 	static const struct ephy_info e_info_8168d_4[] = {
 		{ 0x0b, 0x0000, 0x0048 },
 		{ 0x19, 0x0020, 0x0050 },
@@ -6072,19 +5889,17 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)

 	rtl_csi_access_enable_1(tp);

-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

-	RTL_W8(MaxTxPacketSize, TxPacketMax);
+	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

 	rtl_ephy_init(tp, e_info_8168d_4, ARRAY_SIZE(e_info_8168d_4));

-	rtl_enable_clock_request(pdev);
+	rtl_enable_clock_request(tp);
 }

 static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
 	static const struct ephy_info e_info_8168e_1[] = {
 		{ 0x00, 0x0200, 0x0100 },
 		{ 0x00, 0x0000, 0x0004 },
@@ -6106,23 +5921,21 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)

 	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

 	if (tp->dev->mtu <= ETH_DATA_LEN)
-		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+		rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

-	RTL_W8(MaxTxPacketSize, TxPacketMax);
+	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

-	rtl_disable_clock_request(pdev);
+	rtl_disable_clock_request(tp);

 	/* Reset tx FIFO pointer */
-	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
-	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
+	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST);
+	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST);

-	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
 }

 static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
 	static const struct ephy_info e_info_8168e_2[] = {
 		{ 0x09, 0x0000, 0x0080 },
 		{ 0x19, 0x0000, 0x0224 }
@@ -6133,7 +5946,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

 	if (tp->dev->mtu <= ETH_DATA_LEN)
-		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+		rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -6144,29 +5957,26 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
 	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

-	RTL_W8(MaxTxPacketSize, EarlySize);
+	RTL_W8(tp, MaxTxPacketSize, EarlySize);

-	rtl_disable_clock_request(pdev);
+	rtl_disable_clock_request(tp);

-	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
-	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+	RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
+	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);

 	/* Adjust EEE LED frequency */
-	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+	RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);

-	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
-	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
-	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
+	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
+	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
 }

 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
-
 	rtl_csi_access_enable_2(tp);

-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -6179,20 +5989,19 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
 	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);

-	RTL_W8(MaxTxPacketSize, EarlySize);
+	RTL_W8(tp, MaxTxPacketSize, EarlySize);

-	rtl_disable_clock_request(pdev);
+	rtl_disable_clock_request(tp);

-	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
-	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
-	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
-	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
-	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+	RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
+	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
+	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
+	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
+	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
 }

 static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8168f_1[] = {
 		{ 0x06, 0x00c0, 0x0020 },
 		{ 0x08, 0x0001, 0x0002 },
@@ -6207,7 +6016,7 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
 	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

 	/* Adjust EEE LED frequency */
-	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+	RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
 }

 static void rtl_hw_start_8411(struct rtl8169_private *tp)
@@ -6229,10 +6038,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)

 static void rtl_hw_start_8168g(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
-
-	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+	RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);

 	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
@@ -6241,20 +6047,20 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)

 	rtl_csi_access_enable_1(tp);

-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

 	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
 	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);

-	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
-	RTL_W8(MaxTxPacketSize, EarlySize);
+	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+	RTL_W8(tp, MaxTxPacketSize, EarlySize);

 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

 	/* Adjust EEE LED frequency */
-	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+	RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);

 	rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
 	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
@@ -6264,7 +6070,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)

 static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8168g_1[] = {
 		{ 0x00, 0x0000, 0x0008 },
 		{ 0x0c, 0x37d0, 0x0820 },
@@ -6275,14 +6080,13 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
 	rtl_hw_start_8168g(tp);

 	/* disable aspm and clock request before access ephy */
-	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
-	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+	RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
 	rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
 }

 static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8168g_2[] = {
 		{ 0x00, 0x0000, 0x0008 },
 		{ 0x0c, 0x3df0, 0x0200 },
@@ -6293,14 +6097,13 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
 	rtl_hw_start_8168g(tp);

 	/* disable aspm and clock request before access ephy */
-	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
-	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+	RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
 	rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
 }

 static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8411_2[] = {
 		{ 0x00, 0x0000, 0x0008 },
 		{ 0x0c, 0x3df0, 0x0200 },
@@ -6312,15 +6115,13 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
 	rtl_hw_start_8168g(tp);

 	/* disable aspm and clock request before access ephy */
-	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
-	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+	RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
 	rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
 }

 static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
 	int rg_saw_cnt;
 	u32 data;
 	static const struct ephy_info e_info_8168h_1[] = {
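/*
 * [Editor's sketch, not from this diff: the e_info_* tables used by these
 * rtl_hw_start_* helpers are { offset, mask, bits } triples, and
 * rtl_ephy_init() applies each one to the PCIe PHY as a read-modify-write,
 * roughly like the illustrative helper below:]
 */
static void ephy_apply_sketch(struct rtl8169_private *tp,
			      const struct ephy_info *e, int len)
{
	while (len-- > 0) {
		u16 w = rtl_ephy_read(tp, e->offset);

		/* bits in `mask` are cleared, bits in `bits` are set */
		rtl_ephy_write(tp, e->offset, (w & ~e->mask) | e->bits);
		e++;
	}
}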
@@ -6333,11 +6134,11 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
 	};

 	/* disable aspm and clock request before access ephy */
-	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
-	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+	RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
 	rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));

-	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+	RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);

 	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
@@ -6346,7 +6147,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)

 	rtl_csi_access_enable_1(tp);

-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

 	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
 	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
@@ -6357,19 +6158,19 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)

 	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);

-	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
-	RTL_W8(MaxTxPacketSize, EarlySize);
+	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+	RTL_W8(tp, MaxTxPacketSize, EarlySize);

 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

 	/* Adjust EEE LED frequency */
-	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+	RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);

-	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
-	RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);
+	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);

-	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN);
+	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);

 	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
@@ -6417,12 +6218,9 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)

 static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
-
 	rtl8168ep_stop_cmac(tp);

-	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+	RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);

 	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
@@ -6431,7 +6229,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)

 	rtl_csi_access_enable_1(tp);

-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

 	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
 	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
@@ -6440,25 +6238,24 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)

 	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);

-	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
-	RTL_W8(MaxTxPacketSize, EarlySize);
+	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+	RTL_W8(tp, MaxTxPacketSize, EarlySize);

 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

 	/* Adjust EEE LED frequency */
-	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+	RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);

 	rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);

-	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN);
+	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);

 	rtl_pcie_state_l2l3_enable(tp, false);
 }

 static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8168ep_1[] = {
 		{ 0x00, 0xffff, 0x10ab },
 		{ 0x06, 0xffff, 0xf030 },
@@ -6468,8 +6265,8 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
 	};

 	/* disable aspm and clock request before access ephy */
-	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
-	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+	RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
 	rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1));

 	rtl_hw_start_8168ep(tp);
@@ -6477,7 +6274,6 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)

 static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8168ep_2[] = {
 		{ 0x00, 0xffff, 0x10a3 },
 		{ 0x19, 0xffff, 0xfc00 },
@@ -6485,19 +6281,18 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
 	};

 	/* disable aspm and clock request before access ephy */
-	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
-	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+	RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);

 	rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2));

 	rtl_hw_start_8168ep(tp);

-	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
-	RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);
+	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
 }

 static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
 	u32 data;
 	static const struct ephy_info e_info_8168ep_3[] = {
 		{ 0x00, 0xffff, 0x10a3 },
@@ -6507,14 +6302,14 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
 	};

 	/* disable aspm and clock request before access ephy */
-	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
-	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+	RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
 	rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3));

 	rtl_hw_start_8168ep(tp);

-	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
-	RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);
+	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);

 	data = r8168_mac_ocp_read(tp, 0xd3e2);
 	data &= 0xf000;
@@ -6533,19 +6328,18 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)

 static void rtl_hw_start_8168(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	void __iomem *ioaddr = tp->mmio_addr;

-	RTL_W8(Cfg9346, Cfg9346_Unlock);
+	RTL_W8(tp, Cfg9346, Cfg9346_Unlock);

-	RTL_W8(MaxTxPacketSize, TxPacketMax);
+	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

-	rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+	rtl_set_rx_max_size(tp, rx_buf_sz);

-	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
+	tp->cp_cmd |= RTL_R16(tp, CPlusCmd) | PktCntrDisable | INTT_1;

-	RTL_W16(CPlusCmd, tp->cp_cmd);
+	RTL_W16(tp, CPlusCmd, tp->cp_cmd);

-	RTL_W16(IntrMitigate, 0x5151);
+	RTL_W16(tp, IntrMitigate, 0x5151);

 	/* Work around for RxFIFO overflow. */
 	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
@@ -6553,11 +6347,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
 		tp->event_slow &= ~RxOverflow;
 	}

-	rtl_set_rx_tx_desc_registers(tp, ioaddr);
+	rtl_set_rx_tx_desc_registers(tp);

 	rtl_set_rx_tx_config_registers(tp);

-	RTL_R8(IntrMask);
+	RTL_R8(tp, IntrMask);

 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_11:
@@ -6663,13 +6457,13 @@ static void rtl_hw_start_8168(struct net_device *dev)
 		break;
 	}

-	RTL_W8(Cfg9346, Cfg9346_Lock);
+	RTL_W8(tp, Cfg9346, Cfg9346_Lock);

-	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+	RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);

 	rtl_set_rx_mode(dev);

-	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
+	RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
 }

 #define R810X_CPCMD_QUIRK_MASK (\
@@ -6685,8 +6479,6 @@ static void rtl_hw_start_8168(struct net_device *dev)

 static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
 	static const struct ephy_info e_info_8102e_1[] = {
 		{ 0x01, 0, 0x6e65 },
 		{ 0x02, 0, 0x091f },
@@ -6701,32 +6493,29 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)

 	rtl_csi_access_enable_2(tp);

-	RTL_W8(DBG_REG, FIX_NAK_1);
+	RTL_W8(tp, DBG_REG, FIX_NAK_1);

-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

-	RTL_W8(Config1,
+	RTL_W8(tp, Config1,
 	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
-	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);

-	cfg1 = RTL_R8(Config1);
+	cfg1 = RTL_R8(tp, Config1);
 	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
-		RTL_W8(Config1, cfg1 & ~LEDS0);
+		RTL_W8(tp, Config1, cfg1 & ~LEDS0);

 	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
 }

 static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
-
 	rtl_csi_access_enable_2(tp);

-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

-	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
-	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+	RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable);
+	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
 }

 static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
@@ -6738,7 +6527,6 @@ static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)

 static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8105e_1[] = {
 		{ 0x07, 0, 0x4000 },
 		{ 0x19, 0, 0x0200 },
@@ -6751,13 +6539,13 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
 	};

 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
-	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);

 	/* Disable Early Tally Counter */
-	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
+	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000);

-	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
-	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+	RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
+	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);

 	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
@@ -6772,7 +6560,6 @@ static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)

 static void rtl_hw_start_8402(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8402[] = {
 		{ 0x19, 0xffff, 0xff64 },
 		{ 0x1e, 0, 0x4000 }
@@ -6781,14 +6568,14 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)

 	rtl_csi_access_enable_2(tp);

 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
-	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);

-	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
-	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+	RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
+	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);

 	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));

-	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);

 	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
@@ -6803,14 +6590,12 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)

 static void rtl_hw_start_8106(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-
 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
-	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);

-	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
-	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
-	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
+	RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
+	RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
+	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);

 	rtl_pcie_state_l2l3_enable(tp, false);
 }
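/*
 * [Editor's note with a sketch, not from this diff: several of these
 * rtl_hw_start_* helpers poke PCIe Link Control / Device Control bits.
 * The kernel's pcie_capability_*_word() helpers locate the PCIe capability
 * and do the read-modify-write in one call, e.g. for CLKREQ:]
 */
static void clkreq_toggle_sketch(struct pci_dev *pdev, bool on)
{
	if (on)
		pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);
	else
		pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_CLKREQ_EN);
}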
@@ -6818,7 +6603,6 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)

 static void rtl_hw_start_8101(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	void __iomem *ioaddr = tp->mmio_addr;
 	struct pci_dev *pdev = tp->pci_dev;

 	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
@@ -6829,16 +6613,16 @@ static void rtl_hw_start_8101(struct net_device *dev)
 		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
 					 PCI_EXP_DEVCTL_NOSNOOP_EN);

-	RTL_W8(Cfg9346, Cfg9346_Unlock);
+	RTL_W8(tp, Cfg9346, Cfg9346_Unlock);

-	RTL_W8(MaxTxPacketSize, TxPacketMax);
+	RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

-	rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+	rtl_set_rx_max_size(tp, rx_buf_sz);

 	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
-	RTL_W16(CPlusCmd, tp->cp_cmd);
+	RTL_W16(tp, CPlusCmd, tp->cp_cmd);

-	rtl_set_rx_tx_desc_registers(tp, ioaddr);
+	rtl_set_rx_tx_desc_registers(tp);

 	rtl_set_rx_tx_config_registers(tp);
@@ -6878,17 +6662,17 @@ static void rtl_hw_start_8101(struct net_device *dev)
 		break;
 	}

-	RTL_W8(Cfg9346, Cfg9346_Lock);
+	RTL_W8(tp, Cfg9346, Cfg9346_Lock);

-	RTL_W16(IntrMitigate, 0x0000);
+	RTL_W16(tp, IntrMitigate, 0x0000);

-	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+	RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);

 	rtl_set_rx_mode(dev);

-	RTL_R8(IntrMask);
+	RTL_R8(tp, IntrMask);

-	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
+	RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
 }

 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -6915,7 +6699,7 @@ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
 static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
 				     void **data_buff, struct RxDesc *desc)
 {
-	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
+	dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), rx_buf_sz,
 			 DMA_FROM_DEVICE);

 	kfree(*data_buff);
@@ -6950,7 +6734,7 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
 {
 	void *data;
 	dma_addr_t mapping;
-	struct device *d = &tp->pci_dev->dev;
+	struct device *d = tp_to_dev(tp);
 	struct net_device *dev = tp->dev;
 	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
@@ -7062,7 +6846,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
 		if (len) {
 			struct sk_buff *skb = tx_skb->skb;

-			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
+			rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
 					     tp->TxDescArray + entry);
 			if (skb) {
 				dev_consume_skb_any(skb);
@@ -7098,7 +6882,7 @@ static void rtl_reset_work(struct rtl8169_private *tp)
 	napi_enable(&tp->napi);
 	rtl_hw_start(dev);
 	netif_wake_queue(dev);
-	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+	rtl8169_check_link_status(dev, tp);
 }

 static void rtl8169_tx_timeout(struct net_device *dev)
@@ -7114,7 +6898,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
 	struct skb_shared_info *info = skb_shinfo(skb);
 	unsigned int cur_frag, entry;
 	struct TxDesc *uninitialized_var(txd);
-	struct device *d = &tp->pci_dev->dev;
+	struct device *d = tp_to_dev(tp);

 	entry = tp->cur_tx;
 	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
@@ -7346,8 +7130,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	struct rtl8169_private *tp = netdev_priv(dev);
 	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
 	struct TxDesc *txd = tp->TxDescArray + entry;
-	void __iomem *ioaddr = tp->mmio_addr;
-	struct device *d = &tp->pci_dev->dev;
+	struct device *d = tp_to_dev(tp);
 	dma_addr_t mapping;
 	u32 status, len;
 	u32 opts[2];
@@ -7406,7 +7189,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,

 	tp->cur_tx += frags + 1;

-	RTL_W8(TxPoll, NPQ);
+	RTL_W8(tp, TxPoll, NPQ);

 	mmiowb();
@@ -7477,11 +7260,9 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)

 	/* The infamous DAC f*ckup only happens at boot time */
 	if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
-		void __iomem *ioaddr = tp->mmio_addr;
-
 		netif_info(tp, intr, dev, "disabling PCI DAC\n");
 		tp->cp_cmd &= ~PCIDAC;
-		RTL_W16(CPlusCmd, tp->cp_cmd);
+		RTL_W16(tp, CPlusCmd, tp->cp_cmd);
 		dev->features &= ~NETIF_F_HIGHDMA;
 	}
@@ -7513,7 +7294,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 		 */
 		dma_rmb();

-		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
+		rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
 				     tp->TxDescArray + entry);
 		if (status & LastFrag) {
 			u64_stats_update_begin(&tp->tx_stats.syncp);
@@ -7547,11 +7328,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 		 * of start_xmit activity is detected (if it is not detected,
 		 * it is slow enough). -- FR
 		 */
-		if (tp->cur_tx != dirty_tx) {
-			void __iomem *ioaddr = tp->mmio_addr;
-
-			RTL_W8(TxPoll, NPQ);
-		}
+		if (tp->cur_tx != dirty_tx)
+			RTL_W8(tp, TxPoll, NPQ);
 	}
 }
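/*
 * [Editor's sketch: the tp_to_dev() helper used throughout these hunks is
 * not itself shown in this diff; presumably it is just a trivial accessor
 * for the DMA-capable struct device:]
 */
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
	return &tp->pci_dev->dev;
}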
@@ -7577,7 +7355,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
 					   dma_addr_t addr)
 {
 	struct sk_buff *skb;
-	struct device *d = &tp->pci_dev->dev;
+	struct device *d = tp_to_dev(tp);

 	data = rtl8169_align(data);
 	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
@@ -7732,7 +7510,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
 		rtl8169_pcierr_interrupt(dev);

 	if (status & LinkChg)
-		rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+		rtl8169_check_link_status(dev, tp);

 	rtl_irq_enable_all(tp);
 }
@@ -7804,21 +7582,20 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }

-static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
+static void rtl8169_rx_missed(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);

 	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
 		return;

-	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
-	RTL_W32(RxMissed, 0);
+	dev->stats.rx_missed_errors += RTL_R32(tp, RxMissed) & 0xffffff;
+	RTL_W32(tp, RxMissed, 0);
 }

 static void rtl8169_down(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	void __iomem *ioaddr = tp->mmio_addr;

 	del_timer_sync(&tp->timer);
@@ -7831,7 +7608,7 @@ static void rtl8169_down(struct net_device *dev)
 	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
 	 * and napi is disabled (rtl8169_poll).
 	 */
-	rtl8169_rx_missed(dev, ioaddr);
+	rtl8169_rx_missed(dev);

 	/* Give a racing hard_start_xmit a few cycles to complete. */
 	synchronize_sched();
@@ -7861,7 +7638,7 @@ static int rtl8169_close(struct net_device *dev)

 	cancel_work_sync(&tp->wk.work);

-	free_irq(pdev->irq, dev);
+	pci_free_irq(pdev, 0, dev);

 	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
 			  tp->RxPhyAddr);
@@ -7880,14 +7657,13 @@ static void rtl8169_netpoll(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);

-	rtl8169_interrupt(tp->pci_dev->irq, dev);
+	rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), dev);
 }
 #endif

 static int rtl_open(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	void __iomem *ioaddr = tp->mmio_addr;
 	struct pci_dev *pdev = tp->pci_dev;
 	int retval = -ENOMEM;
@@ -7917,9 +7693,8 @@ static int rtl_open(struct net_device *dev)

 	rtl_request_firmware(tp);

-	retval = request_irq(pdev->irq, rtl8169_interrupt,
-			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
-			     dev->name, dev);
+	retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, dev,
+				 dev->name);
 	if (retval < 0)
 		goto err_release_fw_2;
@@ -7947,7 +7722,7 @@ static int rtl_open(struct net_device *dev)
 	tp->saved_wolopts = 0;
 	pm_runtime_put_sync(&pdev->dev);

-	rtl8169_check_link_status(dev, tp, ioaddr);
+	rtl8169_check_link_status(dev, tp);
 out:
 	return retval;
@@ -7971,7 +7746,6 @@ static void
 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	void __iomem *ioaddr = tp->mmio_addr;
 	struct pci_dev *pdev = tp->pci_dev;
 	struct rtl8169_counters *counters = tp->counters;
 	unsigned int start;
@@ -7979,7 +7753,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	pm_runtime_get_noresume(&pdev->dev);

 	if (netif_running(dev) && pm_runtime_active(&pdev->dev))
-		rtl8169_rx_missed(dev, ioaddr);
+		rtl8169_rx_missed(dev);

 	do {
 		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
@@ -8102,7 +7876,7 @@ static int rtl8169_runtime_suspend(struct device *device)
 	rtl8169_net_suspend(dev);

 	/* Update counters before going runtime suspend */
-	rtl8169_rx_missed(dev, tp->mmio_addr);
+	rtl8169_rx_missed(dev);
 	rtl8169_update_counters(dev);

 	return 0;
@@ -8163,8 +7937,6 @@ static const struct dev_pm_ops rtl8169_pm_ops = {

 static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-
 	/* WoL fails with 8168b when the receiver is disabled. */
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_11:
@@ -8172,9 +7944,9 @@ static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
 	case RTL_GIGA_MAC_VER_17:
 		pci_clear_master(tp->pci_dev);

-		RTL_W8(ChipCmd, CmdRxEnb);
+		RTL_W8(tp, ChipCmd, CmdRxEnb);
 		/* PCI commit */
-		RTL_R8(ChipCmd);
+		RTL_R8(tp, ChipCmd);
 		break;
 	default:
 		break;
@@ -8209,15 +7981,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rtl8169_private *tp = netdev_priv(dev);

-	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_31 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_49 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_50 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_51) &&
-	    r8168_check_dash(tp)) {
+	if (r8168_check_dash(tp))
 		rtl8168_driver_stop(tp);
-	}

 	netif_napi_del(&tp->napi);
@@ -8256,7 +8021,7 @@ static const struct rtl_cfg_info {
 	unsigned int region;
 	unsigned int align;
 	u16 event_slow;
-	unsigned features;
+	unsigned int has_gmii:1;
 	const struct rtl_coalesce_info *coalesce_info;
 	u8 default_ver;
 } rtl_cfg_infos [] = {
@@ -8265,7 +8030,7 @@ static const struct rtl_cfg_info {
 		.region		= 1,
 		.align		= 0,
 		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
-		.features	= RTL_FEATURE_GMII,
+		.has_gmii	= 1,
 		.coalesce_info	= rtl_coalesce_info_8169,
 		.default_ver	= RTL_GIGA_MAC_VER_01,
 	},
@@ -8274,7 +8039,7 @@ static const struct rtl_cfg_info {
 		.region		= 2,
 		.align		= 8,
 		.event_slow	= SYSErr | LinkChg | RxOverflow,
-		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
+		.has_gmii	= 1,
 		.coalesce_info	= rtl_coalesce_info_8168_8136,
 		.default_ver	= RTL_GIGA_MAC_VER_11,
 	},
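/*
 * [Editor's sketch, not part of the patch: the hand-rolled
 * rtl_try_msi()/request_irq()/free_irq() combination is replaced below by
 * the generic PCI IRQ API, which picks MSI-X/MSI/INTx and manages the
 * vector table. A minimal probe-time flow, with made-up names:]
 */
static int irq_setup_sketch(struct pci_dev *pdev, irq_handler_t handler,
			    void *ctx)
{
	int rc;

	/* allocate exactly one vector of whatever type device/platform allow */
	rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (rc < 0)
		return rc;

	/* device-managed request; released via pci_free_irq(pdev, 0, ctx) */
	rc = pci_request_irq(pdev, 0, handler, NULL, ctx, "sketch-nic");
	if (rc < 0)
		pci_free_irq_vectors(pdev);

	/* the Linux IRQ number, if ever needed: pci_irq_vector(pdev, 0) */
	return rc;
}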
@@ -8284,56 +8049,44 @@ static const struct rtl_cfg_info {
 		.align		= 8,
 		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
 				  PCSTimeout,
-		.features	= RTL_FEATURE_MSI,
 		.coalesce_info	= rtl_coalesce_info_8168_8136,
 		.default_ver	= RTL_GIGA_MAC_VER_13,
 	}
 };

-/* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct rtl8169_private *tp,
-			    const struct rtl_cfg_info *cfg)
+static int rtl_alloc_irq(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-	unsigned msi = 0;
-	u8 cfg2;
+	unsigned int flags;

-	cfg2 = RTL_R8(Config2) & ~MSIEnable;
-	if (cfg->features & RTL_FEATURE_MSI) {
-		if (pci_enable_msi(tp->pci_dev)) {
-			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
-		} else {
-			cfg2 |= MSIEnable;
-			msi = RTL_FEATURE_MSI;
-		}
+	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
+		RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
+		RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+		flags = PCI_IRQ_LEGACY;
+	} else {
+		flags = PCI_IRQ_ALL_TYPES;
 	}
-	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
-		RTL_W8(Config2, cfg2);
-	return msi;
+
+	return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
 }

 DECLARE_RTL_COND(rtl_link_list_ready_cond)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-
-	return RTL_R8(MCU) & LINK_LIST_RDY;
+	return RTL_R8(tp, MCU) & LINK_LIST_RDY;
 }

 DECLARE_RTL_COND(rtl_rxtx_empty_cond)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-
-	return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+	return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
 }

 static void rtl_hw_init_8168g(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
 	u32 data;

 	tp->ocp_base = OCP_STD_PHY_BASE;

-	RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
+	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);

 	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
 		return;
@@ -8341,9 +8094,9 @@ static void rtl_hw_init_8168g(struct rtl8169_private *tp)
 	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
 		return;

-	RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
+	RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
 	msleep(1);
-	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);

 	data = r8168_mac_ocp_read(tp, 0xe8de);
 	data &= ~(1 << 14);
@@ -8397,7 +8150,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct rtl8169_private *tp;
 	struct mii_if_info *mii;
 	struct net_device *dev;
-	void __iomem *ioaddr;
 	int chipset, i;
 	int rc;
@@ -8423,7 +8175,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	mii->mdio_write = rtl_mdio_write;
 	mii->phy_id_mask = 0x1f;
 	mii->reg_num_mask = 0x1f;
-	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+	mii->supports_gmii = cfg->has_gmii;

 	/* disable ASPM completely as that cause random device stop working
 	 * problems as well as full system hangs for some PCIe devices users */
@@ -8455,20 +8207,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return -ENODEV;
 	}

-	rc = pci_request_regions(pdev, MODULENAME);
+	rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME);
 	if (rc < 0) {
-		netif_err(tp, probe, dev, "could not request regions\n");
+		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
 		return rc;
 	}

-	/* ioremap MMIO region */
-	ioaddr = devm_ioremap(&pdev->dev, pci_resource_start(pdev, region),
-			      R8169_REGS_SIZE);
-	if (!ioaddr) {
-		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
-		return -EIO;
-	}
-	tp->mmio_addr = ioaddr;
+	tp->mmio_addr = pcim_iomap_table(pdev)[region];

 	if (!pci_is_pcie(pdev))
 		netif_info(tp, probe, dev, "not PCI Express\n");
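/*
 * [Editor's note with sketch: pcim_iomap_regions() above is the
 * devres-managed replacement for pci_request_regions() + devm_ioremap();
 * one call requests and maps the BAR, and the mapping is torn down
 * automatically when the driver detaches, which is why the explicit -EIO
 * unwind path could be deleted. Minimal usage:]
 */
static void __iomem *map_bar_sketch(struct pci_dev *pdev, int bar)
{
	/* request + ioremap one BAR; devres unmaps/releases on detach */
	if (pcim_iomap_regions(pdev, BIT(bar), KBUILD_MODNAME))
		return NULL;

	return pcim_iomap_table(pdev)[bar];	/* valid until detach */
}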
@@ -8518,41 +8263,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	chipset = tp->mac_version;
 	tp->txd_version = rtl_chip_infos[chipset].txd_version;

-	RTL_W8(Cfg9346, Cfg9346_Unlock);
-	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
-	RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
-	switch (tp->mac_version) {
-	case RTL_GIGA_MAC_VER_34:
-	case RTL_GIGA_MAC_VER_35:
-	case RTL_GIGA_MAC_VER_36:
-	case RTL_GIGA_MAC_VER_37:
-	case RTL_GIGA_MAC_VER_38:
-	case RTL_GIGA_MAC_VER_40:
-	case RTL_GIGA_MAC_VER_41:
-	case RTL_GIGA_MAC_VER_42:
-	case RTL_GIGA_MAC_VER_43:
-	case RTL_GIGA_MAC_VER_44:
-	case RTL_GIGA_MAC_VER_45:
-	case RTL_GIGA_MAC_VER_46:
-	case RTL_GIGA_MAC_VER_47:
-	case RTL_GIGA_MAC_VER_48:
-	case RTL_GIGA_MAC_VER_49:
-	case RTL_GIGA_MAC_VER_50:
-	case RTL_GIGA_MAC_VER_51:
-		if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
-			tp->features |= RTL_FEATURE_WOL;
-		if ((RTL_R8(Config3) & LinkUp) != 0)
-			tp->features |= RTL_FEATURE_WOL;
-		break;
-	default:
-		if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
-			tp->features |= RTL_FEATURE_WOL;
-		break;
+	rc = rtl_alloc_irq(tp);
+	if (rc < 0) {
+		netif_err(tp, probe, dev, "Can't allocate interrupt\n");
+		return rc;
 	}
-	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
-		tp->features |= RTL_FEATURE_WOL;
-	tp->features |= rtl_try_msi(tp, cfg);
-	RTL_W8(Cfg9346, Cfg9346_Lock);
+
+	/* override BIOS settings, use userspace tools to enable WOL */
+	__rtl8169_set_wol(tp, 0);

 	if (rtl_tbi_enabled(tp)) {
 		tp->set_speed = rtl8169_set_speed_tbi;
@@ -8600,7 +8318,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		rtl_rar_set(tp, (u8 *)mac_addr);
 	}
 	for (i = 0; i < ETH_ALEN; i++)
-		dev->dev_addr[i] = RTL_R8(MAC0 + i);
+		dev->dev_addr[i] = RTL_R8(tp, MAC0 + i);

 	dev->ethtool_ops = &rtl8169_ethtool_ops;
 	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
@@ -8660,15 +8378,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!tp->counters)
 		return -ENOMEM;

+	pci_set_drvdata(pdev, dev);
+
 	rc = register_netdev(dev);
 	if (rc < 0)
 		return rc;

-	pci_set_drvdata(pdev, dev);
-
 	netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
-		   rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
-		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
+		   rtl_chip_infos[chipset].name, tp->mmio_addr, dev->dev_addr,
+		   (u32)(RTL_R32(tp, TxConfig) & 0x9cf0f8ff),
+		   pci_irq_vector(pdev, 0));
 	if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
 		netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
 			   "tx checksumming: %s]\n",
@@ -8676,15 +8395,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			   rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
 	}

-	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_31 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_49 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_50 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_51) &&
-	    r8168_check_dash(tp)) {
+	if (r8168_check_dash(tp))
 		rtl8168_driver_start(tp);
-	}

 	netif_carrier_off(dev);

diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 96a27b00c90e..b81f4faf7b10 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1018,6 +1018,7 @@ struct ravb_private {
 	u32 dirty_rx[NUM_RX_QUEUE];	/* Producer ring indices */
 	u32 cur_tx[NUM_TX_QUEUE];
 	u32 dirty_tx[NUM_TX_QUEUE];
+	u32 rx_buf_sz;			/* Based on MTU+slack. */
 	struct napi_struct napi[NUM_RX_QUEUE];
 	struct work_struct work;
 	/* MII transceiver section. */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c87f57ca4437..68f122140966 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -238,7 +238,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 					 le32_to_cpu(desc->dptr)))
 				dma_unmap_single(ndev->dev.parent,
 						 le32_to_cpu(desc->dptr),
-						 PKT_BUF_SZ,
+						 priv->rx_buf_sz,
 						 DMA_FROM_DEVICE);
 		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -300,9 +300,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
-		rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
+		rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-					  PKT_BUF_SZ,
+					  priv->rx_buf_sz,
 					  DMA_FROM_DEVICE);
 		/* We just set the data size to 0 for a failed mapping which
 		 * should prevent DMA from happening...
@@ -346,6 +346,9 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	int ring_size;
 	int i;

+	priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
+			  ETH_HLEN + VLAN_HLEN;
+
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
 				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
@@ -355,7 +358,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 		goto error;

 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+		skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
 		if (!skb)
 			goto error;
 		ravb_set_buffer_align(skb);
@@ -586,7 +589,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 			skb = priv->rx_skb[q][entry];
 			priv->rx_skb[q][entry] = NULL;
 			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 PKT_BUF_SZ,
+					 priv->rx_buf_sz,
 					 DMA_FROM_DEVICE);
 			get_ts &= (q == RAVB_NC) ?
 					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
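/*
 * [Editor's worked example for the ravb_ring_init() formula above, assuming
 * PKT_BUF_SZ is 1538 as defined in ravb.h: the RX buffer must hold the L2
 * payload (MTU) plus the Ethernet and VLAN headers, while small MTUs keep
 * the legacy fixed buffer size.]
 */
static unsigned int rx_buf_sz_sketch(unsigned int mtu)
{
	/* mtu=1500 -> 1500 + 14 + 4 = 1518; mtu=1492 -> 1538 + 18 = 1556 */
	return (mtu <= 1492 ? 1538 /* PKT_BUF_SZ */ : mtu)
		+ 14 /* ETH_HLEN */ + 4 /* VLAN_HLEN */;
}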
@@ -619,11 +622,12 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 		desc = &priv->rx_ring[q][entry];
-		desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
+		desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);

 		if (!priv->rx_skb[q][entry]) {
 			skb = netdev_alloc_skb(ndev,
-					       PKT_BUF_SZ + RAVB_ALIGN - 1);
+					       priv->rx_buf_sz +
+					       RAVB_ALIGN - 1);
 			if (!skb)
 				break;	/* Better luck next round. */
 			ravb_set_buffer_align(skb);
@@ -1854,6 +1858,17 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
 	return phy_mii_ioctl(phydev, req, cmd);
 }

+static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	ndev->mtu = new_mtu;
+	netdev_update_features(ndev);
+
+	return 0;
+}
+
 static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
@@ -1895,6 +1910,7 @@ static const struct net_device_ops ravb_netdev_ops = {
 	.ndo_set_rx_mode	= ravb_set_rx_mode,
 	.ndo_tx_timeout		= ravb_tx_timeout,
 	.ndo_do_ioctl		= ravb_do_ioctl,
+	.ndo_change_mtu		= ravb_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_set_features	= ravb_set_features,
@@ -2117,6 +2133,9 @@ static int ravb_probe(struct platform_device *pdev)
 		goto out_release;
 	}

+	ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+	ndev->min_mtu = ETH_MIN_MTU;
+
 	/* Set function */
 	ndev->netdev_ops = &ravb_netdev_ops;
 	ndev->ethtool_ops = &ravb_ethtool_ops;
@@ -2255,9 +2274,6 @@ static int ravb_wol_setup(struct net_device *ndev)
 	/* Enable MagicPacket */
 	ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);

-	/* Increased clock usage so device won't be suspended */
-	clk_enable(priv->clk);
-
 	return enable_irq_wake(priv->emac_irq);
 }

@@ -2276,9 +2292,6 @@ static int ravb_wol_restore(struct net_device *ndev)
 	if (ret < 0)
 		return ret;

-	/* Restore clock usage count */
-	clk_disable(priv->clk);
-
 	return disable_irq_wake(priv->emac_irq);
 }
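/*
 * [Editor's note on the MTU bounds added in ravb_probe() above: the 2 KiB
 * RX descriptor buffer bounds the MTU, so
 *	max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
 *	        = 2048 - (14 + 4 + 4) = 2026.
 * Since ravb_change_mtu() returns -EBUSY on a running interface, the MTU is
 * expected to be changed while the device is down, e.g. (interface name
 * illustrative):
 *	ip link set dev eth0 down
 *	ip link set dev eth0 mtu 2026
 *	ip link set dev eth0 up
 */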
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a197e11f3a56..b6b90a6314e3 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -40,7 +40,6 @@
 #include <linux/slab.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
-#include <linux/clk.h>
 #include <linux/sh_eth.h>
 #include <linux/of_mdio.h>
@@ -124,8 +123,8 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[TSU_FWSL0]	= 0x0030,
 	[TSU_FWSL1]	= 0x0034,
 	[TSU_FWSLC]	= 0x0038,
-	[TSU_QTAG0]	= 0x0040,
-	[TSU_QTAG1]	= 0x0044,
+	[TSU_QTAGM0]	= 0x0040,
+	[TSU_QTAGM1]	= 0x0044,
 	[TSU_FWSR]	= 0x0050,
 	[TSU_FWINMK]	= 0x0054,
 	[TSU_ADQT0]	= 0x0048,
@@ -440,14 +439,15 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
 		     enum_index);
 }

-static bool sh_eth_is_gether(struct sh_eth_private *mdp)
+static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
+			     int enum_index)
 {
-	return mdp->reg_offset == sh_eth_offset_gigabit;
+	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
 }

-static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
+static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
 {
-	return mdp->reg_offset == sh_eth_offset_fast_rz;
+	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
 }

 static void sh_eth_select_mii(struct net_device *ndev)
@@ -491,6 +491,62 @@ static void sh_eth_chip_reset(struct net_device *ndev)
 	mdelay(1);
 }

+static int sh_eth_soft_reset(struct net_device *ndev)
+{
+	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
+	mdelay(3);
+	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
+
+	return 0;
+}
+
+static int sh_eth_check_soft_reset(struct net_device *ndev)
+{
+	int cnt;
+
+	for (cnt = 100; cnt > 0; cnt--) {
+		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
+			return 0;
+		mdelay(1);
+	}
+
+	netdev_err(ndev, "Device reset failed\n");
+	return -ETIMEDOUT;
+}
+
+static int sh_eth_soft_reset_gether(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int ret;
+
+	sh_eth_write(ndev, EDSR_ENALL, EDSR);
+	sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);
+
+	ret = sh_eth_check_soft_reset(ndev);
+	if (ret)
+		return ret;
+
+	/* Table Init */
+	sh_eth_write(ndev, 0, TDLAR);
+	sh_eth_write(ndev, 0, TDFAR);
+	sh_eth_write(ndev, 0, TDFXR);
+	sh_eth_write(ndev, 0, TDFFR);
+	sh_eth_write(ndev, 0, RDLAR);
+	sh_eth_write(ndev, 0, RDFAR);
+	sh_eth_write(ndev, 0, RDFXR);
+	sh_eth_write(ndev, 0, RDFFR);
+
+	/* Reset HW CRC register */
+	if (mdp->cd->hw_checksum)
+		sh_eth_write(ndev, 0, CSMR);
+
+	/* Select MII mode */
+	if (mdp->cd->select_mii)
+		sh_eth_select_mii(ndev);
+
+	return ret;
+}
+
 static void sh_eth_set_rate_gether(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -511,11 +567,14 @@ static void sh_eth_set_rate_gether(struct net_device *ndev)
 #ifdef CONFIG_OF
 /* R7S72100 */
 static struct sh_eth_cpu_data r7s72100_data = {
+	.soft_reset	= sh_eth_soft_reset_gether,
+
 	.chip_reset	= sh_eth_chip_reset,
 	.set_duplex	= sh_eth_set_duplex,

 	.register_type	= SH_ETH_REG_FAST_RZ,

+	.edtrr_trns	= EDTRR_TRNS_GETHER,
 	.ecsr_value	= ECSR_ICD,
 	.ecsipr_value	= ECSIPR_ICDIP,
 	.eesipr_value	= EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
@@ -542,8 +601,10 @@ static struct sh_eth_cpu_data r7s72100_data = {
 	.rpadir_value	= 2 << 16,
 	.no_trimd	= 1,
 	.no_ade		= 1,
+	.xdfar_rw	= 1,
 	.hw_checksum	= 1,
 	.tsu		= 1,
+	.no_tx_cntrs	= 1,
 };

 static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
@@ -555,12 +616,15 @@ static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)

 /* R8A7740 */
 static struct sh_eth_cpu_data r8a7740_data = {
+	.soft_reset	= sh_eth_soft_reset_gether,
+
 	.chip_reset	= sh_eth_chip_reset_r8a7740,
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_gether,

 	.register_type	= SH_ETH_REG_GIGABIT,

+	.edtrr_trns	= EDTRR_TRNS_GETHER,
 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
@@ -587,10 +651,12 @@ static struct sh_eth_cpu_data r8a7740_data = {
 	.rpadir_value	= 2 << 16,
 	.no_trimd	= 1,
 	.no_ade		= 1,
+	.xdfar_rw	= 1,
 	.hw_checksum	= 1,
 	.tsu		= 1,
 	.select_mii	= 1,
 	.magic		= 1,
+	.cexcr		= 1,
 };

 /* There is CPU dependent code */
@@ -610,11 +676,14 @@ static void sh_eth_set_rate_rcar(struct net_device *ndev)

 /* R-Car Gen1 */
 static struct sh_eth_cpu_data rcar_gen1_data = {
+	.soft_reset	= sh_eth_soft_reset,
+
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_rcar,

 	.register_type	= SH_ETH_REG_FAST_RCAR,

+	.edtrr_trns	= EDTRR_TRNS_ETHER,
 	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
 	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
 	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
@@ -633,15 +702,19 @@ static struct sh_eth_cpu_data rcar_gen1_data = {
 	.mpr		= 1,
 	.tpauser	= 1,
 	.hw_swap	= 1,
+	.no_xdfar	= 1,
 };

 /* R-Car Gen2 and RZ/G1 */
 static struct sh_eth_cpu_data rcar_gen2_data = {
+	.soft_reset	= sh_eth_soft_reset,
+
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_rcar,

 	.register_type	= SH_ETH_REG_FAST_RCAR,

+	.edtrr_trns	= EDTRR_TRNS_ETHER,
 	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
 	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
 			  ECSIPR_MPDIP,
@@ -663,6 +736,7 @@ static struct sh_eth_cpu_data rcar_gen2_data = {
 	.mpr		= 1,
 	.tpauser	= 1,
 	.hw_swap	= 1,
+	.no_xdfar	= 1,
 	.rmiimode	= 1,
 	.magic		= 1,
 };
@@ -684,11 +758,14 @@ static void sh_eth_set_rate_sh7724(struct net_device *ndev)

 /* SH7724 */
 static struct sh_eth_cpu_data sh7724_data = {
+	.soft_reset	= sh_eth_soft_reset,
+
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_sh7724,

 	.register_type	= SH_ETH_REG_FAST_SH4,

+	.edtrr_trns	= EDTRR_TRNS_ETHER,
 	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
 	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
 	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
@@ -726,11 +803,14 @@ static void sh_eth_set_rate_sh7757(struct net_device *ndev)

 /* SH7757 */
 static struct sh_eth_cpu_data sh7757_data = {
+	.soft_reset	= sh_eth_soft_reset,
+
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_sh7757,

 	.register_type	= SH_ETH_REG_FAST_SH4,

+	.edtrr_trns	= EDTRR_TRNS_ETHER,
 	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
 			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
@@ -753,6 +833,7 @@ static struct sh_eth_cpu_data sh7757_data = {
 	.rpadir		= 1,
 	.rpadir_value	= 2 << 16,
 	.rtrate		= 1,
+	.dual_port	= 1,
 };

 #define SH_GIGA_ETH_BASE 0xfee00000UL
@@ -797,12 +878,15 @@ static void sh_eth_set_rate_giga(struct net_device *ndev)

 /* SH7757(GETHERC) */
 static struct sh_eth_cpu_data sh7757_data_giga = {
+	.soft_reset	= sh_eth_soft_reset_gether,
+
 	.chip_reset	= sh_eth_chip_reset_giga,
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_giga,

 	.register_type	= SH_ETH_REG_GIGABIT,

+	.edtrr_trns	= EDTRR_TRNS_GETHER,
 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
@@ -830,17 +914,23 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
 	.rpadir_value	= 2 << 16,
 	.no_trimd	= 1,
 	.no_ade		= 1,
+	.xdfar_rw	= 1,
 	.tsu		= 1,
+	.cexcr		= 1,
+	.dual_port	= 1,
 };

 /* SH7734 */
 static struct sh_eth_cpu_data sh7734_data = {
+	.soft_reset	= sh_eth_soft_reset_gether,
+
 	.chip_reset	= sh_eth_chip_reset,
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_gether,

 	.register_type	= SH_ETH_REG_GIGABIT,

+	.edtrr_trns	= EDTRR_TRNS_GETHER,
 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
@@ -863,20 +953,25 @@ static struct sh_eth_cpu_data sh7734_data = {
 	.hw_swap	= 1,
 	.no_trimd	= 1,
 	.no_ade		= 1,
+	.xdfar_rw	= 1,
 	.tsu		= 1,
 	.hw_checksum	= 1,
 	.select_mii	= 1,
 	.magic		= 1,
+	.cexcr		= 1,
 };

 /* SH7763 */
 static struct sh_eth_cpu_data sh7763_data = {
+	.soft_reset	= sh_eth_soft_reset_gether,
+
 	.chip_reset	= sh_eth_chip_reset,
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_gether,

 	.register_type	= SH_ETH_REG_GIGABIT,

+	.edtrr_trns	= EDTRR_TRNS_GETHER,
 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
@@ -898,14 +993,20 @@ static struct sh_eth_cpu_data sh7763_data = {
 	.hw_swap	= 1,
 	.no_trimd	= 1,
 	.no_ade		= 1,
+	.xdfar_rw	= 1,
 	.tsu		= 1,
 	.irq_flags	= IRQF_SHARED,
 	.magic		= 1,
+	.cexcr		= 1,
+	.dual_port	= 1,
 };

 static struct sh_eth_cpu_data sh7619_data = {
+	.soft_reset	= sh_eth_soft_reset,
+
 	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

+	.edtrr_trns	= EDTRR_TRNS_ETHER,
 	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
 			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
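/*
 * [Editor's sketch — the common thread in the sh_eth_cpu_data hunks above:
 * SoC differences move out of code (sh_eth_is_gether() and friends) and
 * into per-chip data. Abridged to the fields added in this diff, with
 * field meanings inferred from their use:]
 */
struct sh_eth_cpu_data_sketch {			/* illustrative, abridged */
	int (*soft_reset)(struct net_device *ndev);	/* E-DMAC soft reset */
	u32 edtrr_trns;		/* EDTRR value that starts TX DMA */
	unsigned xdfar_rw:1;	/* RDFAR/TDFAR exist and are writable */
	unsigned no_xdfar:1;	/* RDFAR/TDFAR absent (R-Car) */
	unsigned no_tx_cntrs:1;	/* no TX error counter registers */
	unsigned cexcr:1;	/* has CERCR/CEECR counters */
	unsigned dual_port:1;	/* TSU serves two ports */
};
/* Callers then dispatch through the data instead of testing the SoC type,
 * e.g.:	ret = mdp->cd->soft_reset(ndev);
 */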
@@ -922,8 +1023,11 @@ static struct sh_eth_cpu_data sh7619_data = {
 };

 static struct sh_eth_cpu_data sh771x_data = {
+	.soft_reset	= sh_eth_soft_reset,
+
 	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

+	.edtrr_trns	= EDTRR_TRNS_ETHER,
 	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
 			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
@@ -933,6 +1037,7 @@ static struct sh_eth_cpu_data sh771x_data = {
 			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
 			  EESIPR_PREIP | EESIPR_CERFIP,
 	.tsu		= 1,
+	.dual_port	= 1,
 };

 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
@@ -960,63 +1065,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
 		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
 }

-static int sh_eth_check_reset(struct net_device *ndev)
-{
-	int ret = 0;
-	int cnt = 100;
-
-	while (cnt > 0) {
-		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
-			break;
-		mdelay(1);
-		cnt--;
-	}
-	if (cnt <= 0) {
-		netdev_err(ndev, "Device reset failed\n");
-		ret = -ETIMEDOUT;
-	}
-	return ret;
-}
-
-static int sh_eth_reset(struct net_device *ndev)
-{
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-	int ret = 0;
-
-	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
-		sh_eth_write(ndev, EDSR_ENALL, EDSR);
-		sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);
-
-		ret = sh_eth_check_reset(ndev);
-		if (ret)
-			return ret;
-
-		/* Table Init */
-		sh_eth_write(ndev, 0x0, TDLAR);
-		sh_eth_write(ndev, 0x0, TDFAR);
-		sh_eth_write(ndev, 0x0, TDFXR);
-		sh_eth_write(ndev, 0x0, TDFFR);
-		sh_eth_write(ndev, 0x0, RDLAR);
-		sh_eth_write(ndev, 0x0, RDFAR);
-		sh_eth_write(ndev, 0x0, RDFXR);
-		sh_eth_write(ndev, 0x0, RDFFR);
-
-		/* Reset HW CRC register */
-		if (mdp->cd->hw_checksum)
-			sh_eth_write(ndev, 0x0, CSMR);
-
-		/* Select MII mode */
-		if (mdp->cd->select_mii)
-			sh_eth_select_mii(ndev);
-	} else {
-		sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
-		mdelay(3);
-		sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
-	}
-
-	return ret;
-}
-
 static void sh_eth_set_receive_align(struct sk_buff *skb)
 {
 	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
@@ -1059,14 +1107,6 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
 	}
 }

-static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
-{
-	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
-		return EDTRR_TRNS_GETHER;
-	else
-		return EDTRR_TRNS_ETHER;
-}
-
 struct bb_info {
 	void (*set_gate)(void *addr);
 	struct mdiobb_ctrl ctrl;
@@ -1263,8 +1303,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		/* Rx descriptor address set */
 		if (i == 0) {
 			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
-			if (sh_eth_is_gether(mdp) ||
-			    sh_eth_is_rz_fast_ether(mdp))
+			if (mdp->cd->xdfar_rw)
 				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
 		}
 	}
@@ -1286,8 +1325,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		if (i == 0) {
 			/* Tx descriptor address set */
 			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
-			if (sh_eth_is_gether(mdp) ||
-			    sh_eth_is_rz_fast_ether(mdp))
+			if (mdp->cd->xdfar_rw)
 				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
 		}
 	}
@@ -1352,7 +1390,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
 	int ret;

 	/* Soft Reset */
-	ret = sh_eth_reset(ndev);
+	ret = mdp->cd->soft_reset(ndev);
 	if (ret)
 		return ret;
@@ -1453,7 +1491,7 @@ static void sh_eth_dev_exit(struct net_device *ndev)
 	 */
 	msleep(2); /* max frame time at 10 Mbps < 1250 us */
 	sh_eth_get_stats(ndev);
-	sh_eth_reset(ndev);
+	mdp->cd->soft_reset(ndev);

 	/* Set MAC address again */
 	update_mac_address(ndev);
@@ -1579,8 +1617,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	/* If we don't need to check status, don't. -KDU */
 	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
 		/* fix the values for the next receiving if RDE is set */
-		if (intr_status & EESR_RDE &&
-		    mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) {
+		if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) {
 			u32 count = (sh_eth_read(ndev, RDFAR) -
 				     sh_eth_read(ndev, RDLAR)) >> 4;
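/*
 * [Editor's note on the RDE recovery in sh_eth_rx() above: RDFAR holds the
 * address of the descriptor the DMA engine will fetch next and RDLAR the
 * ring base. An extended RX descriptor is 16 bytes, so
 *	count = (RDFAR - RDLAR) >> 4
 * converts the byte offset back into a descriptor index, from which the
 * driver resynchronizes its cur_rx/dirty_rx counters. On SoCs flagged
 * no_xdfar (R-Car Gen1/Gen2 above) RDFAR does not exist, hence the new
 * feature-bit test replacing the reg_offset probe.]
 */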
-KDU */ if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { /* fix the values for the next receiving if RDE is set */ - if (intr_status & EESR_RDE && - mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) { + if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) { u32 count = (sh_eth_read(ndev, RDFAR) - sh_eth_read(ndev, RDLAR)) >> 4; @@ -1706,9 +1743,9 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status) sh_eth_tx_free(ndev, true); /* SH7712 BUG */ - if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { + if (edtrr ^ mdp->cd->edtrr_trns) { /* tx dma start */ - sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); + sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); } /* wakeup */ netif_wake_queue(ndev); @@ -2102,8 +2139,6 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) add_tsu_reg(TSU_FWSL0); add_tsu_reg(TSU_FWSL1); add_tsu_reg(TSU_FWSLC); - add_tsu_reg(TSU_QTAG0); - add_tsu_reg(TSU_QTAG1); add_tsu_reg(TSU_QTAGM0); add_tsu_reg(TSU_QTAGM1); add_tsu_reg(TSU_FWSR); @@ -2118,22 +2153,17 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) add_tsu_reg(TSU_POST2); add_tsu_reg(TSU_POST3); add_tsu_reg(TSU_POST4); - if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) { - /* This is the start of a table, not just a single - * register. - */ - if (buf) { - unsigned int i; - - mark_reg_valid(TSU_ADRH0); - for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++) - *buf++ = ioread32( - mdp->tsu_addr + - mdp->reg_offset[TSU_ADRH0] + - i * 4); - } - len += SH_ETH_TSU_CAM_ENTRIES * 2; + /* This is the start of a table, not just a single register. */ + if (buf) { + unsigned int i; + + mark_reg_valid(TSU_ADRH0); + for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++) + *buf++ = ioread32(mdp->tsu_addr + + mdp->reg_offset[TSU_ADRH0] + + i * 4); } + len += SH_ETH_TSU_CAM_ENTRIES * 2; } #undef mark_reg_valid @@ -2304,7 +2334,7 @@ static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) wol->supported = 0; wol->wolopts = 0; - if (mdp->cd->magic && mdp->clk) { + if (mdp->cd->magic) { wol->supported = WAKE_MAGIC; wol->wolopts = mdp->wol_enabled ? 
WAKE_MAGIC : 0; } @@ -2314,7 +2344,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { struct sh_eth_private *mdp = netdev_priv(ndev); - if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC) + if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP; mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); @@ -2469,8 +2499,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) mdp->cur_tx++; - if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp))) - sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); + if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns)) + sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); return NETDEV_TX_OK; } @@ -2495,7 +2525,7 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - if (sh_eth_is_rz_fast_ether(mdp)) + if (mdp->cd->no_tx_cntrs) return &ndev->stats; if (!mdp->is_opened) @@ -2505,7 +2535,7 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR); sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR); - if (sh_eth_is_gether(mdp)) { + if (mdp->cd->cexcr) { sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, CERCR); sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, @@ -2922,7 +2952,7 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, /* SuperH's TSU register init function */ static void sh_eth_tsu_init(struct sh_eth_private *mdp) { - if (sh_eth_is_rz_fast_ether(mdp)) { + if (!mdp->cd->dual_port) { sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); /* Enable POST registers */ @@ -2939,13 +2969,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp) sh_eth_tsu_write(mdp, 0, TSU_FWSL0); sh_eth_tsu_write(mdp, 0, TSU_FWSL1); sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); - if (sh_eth_is_gether(mdp)) { - sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */ - sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */ - } else { - sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ - sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ - } + sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ + sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ @@ -3153,11 +3178,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) goto out_release; } - /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */ - mdp->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(mdp->clk)) - mdp->clk = NULL; - ndev->base_addr = res->start; spin_lock_init(&mdp->lock); @@ -3278,7 +3298,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) if (ret) goto out_napi_del; - if (mdp->cd->magic && mdp->clk) + if (mdp->cd->magic) device_set_wakeup_capable(&pdev->dev, 1); /* print device information */ @@ -3331,9 +3351,6 @@ static int sh_eth_wol_setup(struct net_device *ndev) /* Enable MagicPacket */ sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); - /* Increased clock usage so device won't be suspended */ - clk_enable(mdp->clk); - return enable_irq_wake(ndev->irq); } @@ -3359,9 +3376,6 @@ static int sh_eth_wol_restore(struct net_device *ndev) if (ret 
< 0) return ret; - /* Restore clock usage count */ - clk_disable(mdp->clk); - return disable_irq_wake(ndev->irq); } diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a6753ccba711..a5b792ce2ae7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -118,8 +118,8 @@ enum { TSU_FWSL0, TSU_FWSL1, TSU_FWSLC, - TSU_QTAG0, - TSU_QTAG1, + TSU_QTAG0, /* Same as TSU_QTAGM0 */ + TSU_QTAG1, /* Same as TSU_QTAGM1 */ TSU_QTAGM0, TSU_QTAGM1, TSU_FWSR, @@ -469,6 +469,9 @@ struct sh_eth_rxdesc { /* This structure is used by each CPU dependency handling. */ struct sh_eth_cpu_data { + /* mandatory functions */ + int (*soft_reset)(struct net_device *ndev); + /* optional functions */ void (*chip_reset)(struct net_device *ndev); void (*set_duplex)(struct net_device *ndev); @@ -476,6 +479,7 @@ struct sh_eth_cpu_data { /* mandatory initialize value */ int register_type; + u32 edtrr_trns; u32 eesipr_value; /* optional initialize value */ @@ -504,11 +508,16 @@ struct sh_eth_cpu_data { unsigned rpadir:1; /* E-DMAC have RPADIR */ unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ + unsigned no_xdfar:1; /* E-DMAC DOES NOT have RDFAR/TDFAR */ + unsigned xdfar_rw:1; /* E-DMAC has writeable RDFAR/TDFAR */ unsigned hw_checksum:1; /* E-DMAC has CSMR */ unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ unsigned rmiimode:1; /* EtherC has RMIIMODE register */ unsigned rtrate:1; /* EtherC has RTRATE register */ unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */ + unsigned no_tx_cntrs:1; /* EtherC DOES NOT have TX error counters */ + unsigned cexcr:1; /* EtherC has CERCR/CEECR */ + unsigned dual_port:1; /* Dual EtherC/E-DMAC */ }; struct sh_eth_private { @@ -567,15 +576,4 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, return mdp->tsu_addr + mdp->reg_offset[enum_index]; } -static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, - int enum_index) -{ - iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); -} - -static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) -{ - return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); -} - #endif /* #ifndef __SH_ETH_H__ */ diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index fd35d8004a78..a9da1ad4b4f2 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -57,9 +57,9 @@ static int debug = -1; static int eee_timer = SXGBE_DEFAULT_LPI_TIMER; -module_param(eee_timer, int, S_IRUGO | S_IWUSR); +module_param(eee_timer, int, 0644); -module_param(debug, int, S_IRUGO | S_IWUSR); +module_param(debug, int, 0644); static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 75fbf58e421c..50daad0a1482 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -28,9 +28,6 @@ enum { EFX_EF10_TEST = 1, EFX_EF10_REFILL, }; - -/* The reserved RSS context value */ -#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff /* The maximum size of a shared RSS context */ /* TODO: this should really be from the mcdi protocol export */ #define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL @@ -99,17 +96,15 @@ struct efx_ef10_filter_table { 
MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2]; unsigned int rx_match_count; + struct rw_semaphore lock; /* Protects entries */ struct { unsigned long spec; /* pointer to spec plus flag bits */ -/* BUSY flag indicates that an update is in progress. AUTO_OLD is - * used to mark and sweep MAC filters for the device address lists. - */ -#define EFX_EF10_FILTER_FLAG_BUSY 1UL +/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */ +/* unused flag 1UL */ #define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL #define EFX_EF10_FILTER_FLAGS 3UL u64 handle; /* firmware handle */ } *entry; - wait_queue_head_t waitq; /* Shadow of net_device address lists, guarded by mac_lock */ struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX]; struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; @@ -697,7 +692,7 @@ static int efx_ef10_probe(struct efx_nic *efx) } nic_data->warm_boot_count = rc; - nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; + efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID; nic_data->vport_id = EVB_PORT_ID_ASSIGNED; @@ -1489,8 +1484,8 @@ static int efx_ef10_init_nic(struct efx_nic *efx) } /* don't fail init if RSS setup doesn't work */ - rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL); - efx->rss_active = (rc == 0); + rc = efx->type->rx_push_rss_config(efx, false, + efx->rss_context.rx_indir_table, NULL); return 0; } @@ -1504,10 +1499,11 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) /* All our allocations have been reset */ nic_data->must_realloc_vis = true; + nic_data->must_restore_rss_contexts = true; nic_data->must_restore_filters = true; nic_data->must_restore_piobufs = true; efx_ef10_forget_old_piobufs(efx); - nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; + efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID; /* Driver-created vswitches and vports must be re-created */ nic_data->must_probe_vswitching = true; @@ -1670,7 +1666,6 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1), EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2), EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3), - EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START), EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK), EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS), EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL), @@ -1781,7 +1776,6 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { * These bits are in the second u64 of the raw mask. */ #define EF10_CTPIO_STAT_MASK ( \ - (1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) | \ (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \ (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \ (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \ @@ -2703,27 +2697,30 @@ static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags) * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we * just need to set the UDP ports flags (for both IP versions). 
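The comment above describes a firmware read-modify-write: fetch the RSS context's current flags, OR in the port-hashing mode for UDP over IPv4 and IPv6, write the flags back, and record rx_hash_udp_4tuple in the software context only if the write succeeded. A hedged sketch of that shape, with fw_get_flags()/fw_set_flags() and the bit positions as stand-ins for the real MCDI calls and constants:

#include <stdbool.h>
#include <stdio.h>

#define RSS_MODE_HASH_PORTS	0x5	/* stand-in value */
#define UDP_IPV4_MODE_LBN	8	/* stand-in bit positions */
#define UDP_IPV6_MODE_LBN	16

struct rss_ctx { unsigned int id; bool rx_hash_udp_4tuple; };

/* Hypothetical firmware accessors standing in for the MCDI RPCs. */
static int fw_get_flags(unsigned int id, unsigned int *flags) { *flags = 0; return 0; }
static int fw_set_flags(unsigned int id, unsigned int flags)  { return 0; }

static void set_rss_flags(struct rss_ctx *ctx)
{
	unsigned int flags;

	if (fw_get_flags(ctx->id, &flags))
		return;			/* keep hardware defaults on failure */
	flags |= RSS_MODE_HASH_PORTS << UDP_IPV4_MODE_LBN;
	flags |= RSS_MODE_HASH_PORTS << UDP_IPV6_MODE_LBN;
	if (!fw_set_flags(ctx->id, flags))
		ctx->rx_hash_udp_4tuple = true;	/* only record on success */
}

int main(void)
{
	struct rss_ctx ctx = { .id = 1 };

	set_rss_flags(&ctx);
	printf("udp 4-tuple: %d\n", ctx.rx_hash_udp_4tuple);
	return 0;
}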
*/ -static void efx_ef10_set_rss_flags(struct efx_nic *efx, u32 context) +static void efx_ef10_set_rss_flags(struct efx_nic *efx, + struct efx_rss_context *ctx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN); u32 flags; BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0); - if (efx_ef10_get_rss_flags(efx, context, &flags) != 0) + if (efx_ef10_get_rss_flags(efx, ctx->context_id, &flags) != 0) return; - MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, context); + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, + ctx->context_id); flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN; flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN; MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags); if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf), NULL, 0, NULL)) /* Succeeded, so UDP 4-tuple is now enabled */ - efx->rx_hash_udp_4tuple = true; + ctx->rx_hash_udp_4tuple = true; } -static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context, - bool exclusive, unsigned *context_size) +static int efx_ef10_alloc_rss_context(struct efx_nic *efx, bool exclusive, + struct efx_rss_context *ctx, + unsigned *context_size) { MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); @@ -2739,7 +2736,7 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context, EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE); if (!exclusive && rss_spread == 1) { - *context = EFX_EF10_RSS_CONTEXT_INVALID; + ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID; if (context_size) *context_size = 1; return 0; @@ -2762,29 +2759,26 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context, if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) return -EIO; - *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); + ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); if (context_size) *context_size = rss_spread; if (nic_data->datapath_caps & 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN) - efx_ef10_set_rss_flags(efx, *context); + efx_ef10_set_rss_flags(efx, ctx); return 0; } -static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) +static int efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) { MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN); - int rc; MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, context); - - rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), + return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), NULL, 0, NULL); - WARN_ON(rc != 0); } static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, @@ -2796,15 +2790,15 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, context); - BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); - /* This iterates over the length of efx->rx_indir_table, but copies - * bytes from rx_indir_table. That's because the latter is a pointer - * rather than an array, but should have the same length. - * The efx->rx_hash_key loop below is similar. + /* This iterates over the length of efx->rss_context.rx_indir_table, but + * copies bytes from rx_indir_table. 
That's because the latter is a + * pointer rather than an array, but should have the same length. + * The efx->rss_context.rx_hash_key loop below is similar. */ - for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) + for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i) MCDI_PTR(tablebuf, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = (u8) rx_indir_table[i]; @@ -2816,9 +2810,9 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, context); - BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != + BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); - for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) + for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i) MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i]; return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, @@ -2827,27 +2821,27 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) { - struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; - if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) - efx_ef10_free_rss_context(efx, nic_data->rx_rss_context); - nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; + if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID) { + rc = efx_ef10_free_rss_context(efx, efx->rss_context.context_id); + WARN_ON(rc != 0); + } + efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID; } static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, unsigned *context_size) { - u32 new_rx_rss_context; struct efx_ef10_nic_data *nic_data = efx->nic_data; - int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, - false, context_size); + int rc = efx_ef10_alloc_rss_context(efx, false, &efx->rss_context, + context_size); if (rc != 0) return rc; - nic_data->rx_rss_context = new_rx_rss_context; nic_data->rx_rss_context_exclusive = false; - efx_set_default_rx_indir_table(efx); + efx_set_default_rx_indir_table(efx, &efx->rss_context); return 0; } @@ -2855,65 +2849,98 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, const u32 *rx_indir_table, const u8 *key) { + u32 old_rx_rss_context = efx->rss_context.context_id; struct efx_ef10_nic_data *nic_data = efx->nic_data; int rc; - u32 new_rx_rss_context; - if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID || + if (efx->rss_context.context_id == EFX_EF10_RSS_CONTEXT_INVALID || !nic_data->rx_rss_context_exclusive) { - rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, - true, NULL); + rc = efx_ef10_alloc_rss_context(efx, true, &efx->rss_context, + NULL); if (rc == -EOPNOTSUPP) return rc; else if (rc != 0) goto fail1; - } else { - new_rx_rss_context = nic_data->rx_rss_context; } - rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context, + rc = efx_ef10_populate_rss_table(efx, efx->rss_context.context_id, rx_indir_table, key); if (rc != 0) goto fail2; - if (nic_data->rx_rss_context != new_rx_rss_context) - efx_ef10_rx_free_indir_table(efx); - nic_data->rx_rss_context = new_rx_rss_context; + if (efx->rss_context.context_id != old_rx_rss_context && + old_rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) + WARN_ON(efx_ef10_free_rss_context(efx, old_rx_rss_context) != 0); nic_data->rx_rss_context_exclusive = true; - if (rx_indir_table != efx->rx_indir_table) - memcpy(efx->rx_indir_table, rx_indir_table, - sizeof(efx->rx_indir_table)); - if (key != 
efx->rx_hash_key) - memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size); + if (rx_indir_table != efx->rss_context.rx_indir_table) + memcpy(efx->rss_context.rx_indir_table, rx_indir_table, + sizeof(efx->rss_context.rx_indir_table)); + if (key != efx->rss_context.rx_hash_key) + memcpy(efx->rss_context.rx_hash_key, key, + efx->type->rx_hash_key_size); return 0; fail2: - if (new_rx_rss_context != nic_data->rx_rss_context) - efx_ef10_free_rss_context(efx, new_rx_rss_context); + if (old_rx_rss_context != efx->rss_context.context_id) { + WARN_ON(efx_ef10_free_rss_context(efx, efx->rss_context.context_id) != 0); + efx->rss_context.context_id = old_rx_rss_context; + } fail1: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } -static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx) +static int efx_ef10_rx_push_rss_context_config(struct efx_nic *efx, + struct efx_rss_context *ctx, + const u32 *rx_indir_table, + const u8 *key) +{ + int rc; + + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + + if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) { + rc = efx_ef10_alloc_rss_context(efx, true, ctx, NULL); + if (rc) + return rc; + } + + if (!rx_indir_table) /* Delete this context */ + return efx_ef10_free_rss_context(efx, ctx->context_id); + + rc = efx_ef10_populate_rss_table(efx, ctx->context_id, + rx_indir_table, key); + if (rc) + return rc; + + memcpy(ctx->rx_indir_table, rx_indir_table, + sizeof(efx->rss_context.rx_indir_table)); + memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size); + + return 0; +} + +static int efx_ef10_rx_pull_rss_context_config(struct efx_nic *efx, + struct efx_rss_context *ctx) { - struct efx_ef10_nic_data *nic_data = efx->nic_data; MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN); MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN); MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN); size_t outlen; int rc, i; + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); - if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) + if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) return -ENOENT; MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, - nic_data->rx_rss_context); - BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + ctx->context_id); + BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), tablebuf, sizeof(tablebuf), &outlen); @@ -2923,13 +2950,13 @@ static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx) if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN)) return -EIO; - for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) - efx->rx_indir_table[i] = MCDI_PTR(tablebuf, + for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++) + ctx->rx_indir_table[i] = MCDI_PTR(tablebuf, RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, - nic_data->rx_rss_context); - BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != + ctx->context_id); + BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), keybuf, sizeof(keybuf), &outlen); @@ -2939,13 +2966,50 @@ static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx) if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN)) return -EIO; - for (i = 0; i < 
ARRAY_SIZE(efx->rx_hash_key); ++i) - efx->rx_hash_key[i] = MCDI_PTR( + for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i) + ctx->rx_hash_key[i] = MCDI_PTR( keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i]; return 0; } +static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx) +{ + int rc; + + mutex_lock(&efx->rss_lock); + rc = efx_ef10_rx_pull_rss_context_config(efx, &efx->rss_context); + mutex_unlock(&efx->rss_lock); + return rc; +} + +static void efx_ef10_rx_restore_rss_contexts(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_rss_context *ctx; + int rc; + + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + + if (!nic_data->must_restore_rss_contexts) + return; + + list_for_each_entry(ctx, &efx->rss_context.list, list) { + /* previous NIC RSS context is gone */ + ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID; + /* so try to allocate a new one */ + rc = efx_ef10_rx_push_rss_context_config(efx, ctx, + ctx->rx_indir_table, + ctx->rx_hash_key); + if (rc) + netif_warn(efx, probe, efx->net_dev, + "failed to restore RSS context %u, rc=%d" + "; RSS filters may fail to be applied\n", + ctx->user_id, rc); + } + nic_data->must_restore_rss_contexts = false; +} + static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, const u32 *rx_indir_table, const u8 *key) @@ -2956,7 +3020,7 @@ static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, return 0; if (!key) - key = efx->rx_hash_key; + key = efx->rss_context.rx_hash_key; rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key); @@ -2965,7 +3029,8 @@ static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, bool mismatch = false; size_t i; - for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch; + for (i = 0; + i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch; i++) mismatch = rx_indir_table[i] != ethtool_rxfh_indir_default(i, efx->rss_spread); @@ -3000,11 +3065,9 @@ static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, const u8 *key __attribute__ ((unused))) { - struct efx_ef10_nic_data *nic_data = efx->nic_data; - if (user) return -EOPNOTSUPP; - if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) + if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID) return 0; return efx_ef10_rx_push_shared_rss_config(efx, NULL); } @@ -4109,6 +4172,7 @@ efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx, static void efx_ef10_filter_push_prep(struct efx_nic *efx, const struct efx_filter_spec *spec, efx_dword_t *inbuf, u64 handle, + struct efx_rss_context *ctx, bool replacing) { struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -4116,11 +4180,16 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN); - /* Remove RSS flag if we don't have an RSS context. */ - if (flags & EFX_FILTER_FLAG_RX_RSS && - spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT && - nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) - flags &= ~EFX_FILTER_FLAG_RX_RSS; + /* If RSS filter, caller better have given us an RSS context */ + if (flags & EFX_FILTER_FLAG_RX_RSS) { + /* We don't have the ability to return an error, so we'll just + * log a warning and disable RSS for the filter. 
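The new comment above documents a deliberate degrade path: efx_ef10_filter_push_prep() cannot return an error at that point, so a missing or never-allocated RSS context produces a one-time warning and the RSS flag is simply dropped from the filter rather than failing the insert. A small model of that guard, where warn_once() is a hypothetical stand-in for the kernel's WARN_ON_ONCE():

#include <stdbool.h>
#include <stdio.h>

#define FLAG_RX_RSS	0x1u
#define CTX_INVALID	0xffffffffu

struct rss_ctx { unsigned int context_id; };

/* Hypothetical stand-in for WARN_ON_ONCE(): complain the first time only. */
static bool warn_once(bool cond, const char *msg)
{
	static bool warned;

	if (cond && !warned) {
		warned = true;
		fprintf(stderr, "WARNING: %s\n", msg);
	}
	return cond;
}

static unsigned int fixup_flags(unsigned int flags, const struct rss_ctx *ctx)
{
	if (flags & FLAG_RX_RSS) {
		if (warn_once(!ctx, "RSS filter without a context") ||
		    warn_once(ctx->context_id == CTX_INVALID,
			      "RSS context was never allocated"))
			flags &= ~FLAG_RX_RSS;	/* degrade instead of failing */
	}
	return flags;
}

int main(void)
{
	printf("flags=%#x\n", fixup_flags(FLAG_RX_RSS, NULL));
	return 0;
}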
+ */ + if (WARN_ON_ONCE(!ctx)) + flags &= ~EFX_FILTER_FLAG_RX_RSS; + else if (WARN_ON_ONCE(ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID)) + flags &= ~EFX_FILTER_FLAG_RX_RSS; + } if (replacing) { MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, @@ -4146,21 +4215,18 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, MC_CMD_FILTER_OP_IN_RX_MODE_RSS : MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); if (flags & EFX_FILTER_FLAG_RX_RSS) - MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, - spec->rss_context != - EFX_FILTER_RSS_CONTEXT_DEFAULT ? - spec->rss_context : nic_data->rx_rss_context); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id); } static int efx_ef10_filter_push(struct efx_nic *efx, - const struct efx_filter_spec *spec, - u64 *handle, bool replacing) + const struct efx_filter_spec *spec, u64 *handle, + struct efx_rss_context *ctx, bool replacing) { MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); int rc; - efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); + efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing); rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), NULL); if (rc == 0) @@ -4249,25 +4315,35 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec, bool replace_equal) { - struct efx_ef10_filter_table *table = efx->filter_state; DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_ef10_filter_table *table; struct efx_filter_spec *saved_spec; + struct efx_rss_context *ctx = NULL; unsigned int match_pri, hash; unsigned int priv_flags; + bool rss_locked = false; bool replacing = false; + unsigned int depth, i; int ins_index = -1; DEFINE_WAIT(wait); bool is_mc_recip; s32 rc; + down_read(&efx->filter_sem); + table = efx->filter_state; + down_write(&table->lock); + /* For now, only support RX filters */ if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != - EFX_FILTER_FLAG_RX) - return -EINVAL; + EFX_FILTER_FLAG_RX) { + rc = -EINVAL; + goto out_unlock; + } rc = efx_ef10_filter_pri(table, spec); if (rc < 0) - return rc; + goto out_unlock; match_pri = rc; hash = efx_ef10_filter_hash(spec); @@ -4275,80 +4351,71 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, if (is_mc_recip) bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); + if (spec->flags & EFX_FILTER_FLAG_RX_RSS) { + mutex_lock(&efx->rss_lock); + rss_locked = true; + if (spec->rss_context) + ctx = efx_find_rss_context_entry(efx, spec->rss_context); + else + ctx = &efx->rss_context; + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) { + rc = -EOPNOTSUPP; + goto out_unlock; + } + } + /* Find any existing filters with the same match tuple or - * else a free slot to insert at. If any of them are busy, - * we have to wait and retry. + * else a free slot to insert at. 
*/ - for (;;) { - unsigned int depth = 1; - unsigned int i; - - spin_lock_bh(&efx->filter_lock); - - for (;;) { - i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); - saved_spec = efx_ef10_filter_entry_spec(table, i); + for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { + i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); + saved_spec = efx_ef10_filter_entry_spec(table, i); - if (!saved_spec) { - if (ins_index < 0) - ins_index = i; - } else if (efx_ef10_filter_equal(spec, saved_spec)) { - if (table->entry[i].spec & - EFX_EF10_FILTER_FLAG_BUSY) - break; - if (spec->priority < saved_spec->priority && - spec->priority != EFX_FILTER_PRI_AUTO) { - rc = -EPERM; - goto out_unlock; - } - if (!is_mc_recip) { - /* This is the only one */ - if (spec->priority == - saved_spec->priority && - !replace_equal) { - rc = -EEXIST; - goto out_unlock; - } - ins_index = i; - goto found; - } else if (spec->priority > - saved_spec->priority || - (spec->priority == - saved_spec->priority && - replace_equal)) { - if (ins_index < 0) - ins_index = i; - else - __set_bit(depth, mc_rem_map); - } + if (!saved_spec) { + if (ins_index < 0) + ins_index = i; + } else if (efx_ef10_filter_equal(spec, saved_spec)) { + if (spec->priority < saved_spec->priority && + spec->priority != EFX_FILTER_PRI_AUTO) { + rc = -EPERM; + goto out_unlock; } - - /* Once we reach the maximum search depth, use - * the first suitable slot or return -EBUSY if - * there was none - */ - if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { - if (ins_index < 0) { - rc = -EBUSY; + if (!is_mc_recip) { + /* This is the only one */ + if (spec->priority == + saved_spec->priority && + !replace_equal) { + rc = -EEXIST; goto out_unlock; } - goto found; + ins_index = i; + break; + } else if (spec->priority > + saved_spec->priority || + (spec->priority == + saved_spec->priority && + replace_equal)) { + if (ins_index < 0) + ins_index = i; + else + __set_bit(depth, mc_rem_map); } - - ++depth; } - - prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); - spin_unlock_bh(&efx->filter_lock); - schedule(); } -found: - /* Create a software table entry if necessary, and mark it - * busy. We might yet fail to insert, but any attempt to - * insert a conflicting filter while we're waiting for the - * firmware must find the busy entry. + /* Once we reach the maximum search depth, use the first suitable + * slot, or return -EBUSY if there was none */ + if (ins_index < 0) { + rc = -EBUSY; + goto out_unlock; + } + + /* Create a software table entry if necessary. 
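The rewritten loop above is a bounded open-addressing probe: starting at hash + 1, scan at most EFX_EF10_FILTER_SEARCH_LIMIT slots, remember the first free slot, stop early on an equal match, and fail with -EBUSY if neither turns up — no more sleeping on a BUSY entry. A compact standalone version of the search (sizes are illustrative; the real loop layers priority and multicast handling on top of this skeleton):

#include <errno.h>
#include <stdio.h>

#define ROWS		8192	/* must be a power of two */
#define SEARCH_LIMIT	200	/* illustrative search depth bound */

struct entry { int in_use; unsigned int key; };
static struct entry table[ROWS];

/* Return a slot index for 'key': an existing match wins, otherwise the
 * first free slot seen; -EBUSY if the bounded probe found neither. */
static int find_slot(unsigned int hash, unsigned int key)
{
	int ins_index = -1;
	unsigned int depth, i;

	for (depth = 1; depth < SEARCH_LIMIT; depth++) {
		i = (hash + depth) & (ROWS - 1);
		if (!table[i].in_use) {
			if (ins_index < 0)
				ins_index = i;
		} else if (table[i].key == key) {
			return i;		/* replace in place */
		}
	}
	return ins_index < 0 ? -EBUSY : ins_index;
}

int main(void)
{
	printf("slot=%d\n", find_slot(42, 0xabcd));
	return 0;
}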
*/ saved_spec = efx_ef10_filter_entry_spec(table, ins_index); if (saved_spec) { if (spec->priority == EFX_FILTER_PRI_AUTO && @@ -4372,28 +4439,19 @@ found: *saved_spec = *spec; priv_flags = 0; } - efx_ef10_filter_set_entry(table, ins_index, saved_spec, - priv_flags | EFX_EF10_FILTER_FLAG_BUSY); - - /* Mark lower-priority multicast recipients busy prior to removal */ - if (is_mc_recip) { - unsigned int depth, i; - - for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { - i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); - if (test_bit(depth, mc_rem_map)) - table->entry[i].spec |= - EFX_EF10_FILTER_FLAG_BUSY; - } - } - - spin_unlock_bh(&efx->filter_lock); + efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); + /* Actually insert the filter on the HW */ rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle, - replacing); + ctx, replacing); + + if (rc == -EINVAL && nic_data->must_realloc_vis) + /* The MC rebooted under us, causing it to reject our filter + * insertion as pointing to an invalid VI (spec->dmaq_id). + */ + rc = -EAGAIN; /* Finalise the software table entry */ - spin_lock_bh(&efx->filter_lock); if (rc == 0) { if (replacing) { /* Update the fields that may differ */ @@ -4409,6 +4467,12 @@ found: } else if (!replacing) { kfree(saved_spec); saved_spec = NULL; + } else { + /* We failed to replace, so the old filter is still present. + * Roll back the software table to reflect this. In fact the + * efx_ef10_filter_set_entry() call below will do the right + * thing, so nothing extra is needed here. + */ } efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); @@ -4430,7 +4494,6 @@ found: priv_flags = efx_ef10_filter_entry_flags(table, i); if (rc == 0) { - spin_unlock_bh(&efx->filter_lock); MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, @@ -4438,15 +4501,12 @@ found: rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), NULL, 0, NULL); - spin_lock_bh(&efx->filter_lock); } if (rc == 0) { kfree(saved_spec); saved_spec = NULL; priv_flags = 0; - } else { - priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY; } efx_ef10_filter_set_entry(table, i, saved_spec, priv_flags); @@ -4457,10 +4517,11 @@ found: if (rc == 0) rc = efx_ef10_make_filter_id(match_pri, ins_index); - wake_up_all(&table->waitq); out_unlock: - spin_unlock_bh(&efx->filter_lock); - finish_wait(&table->waitq, &wait); + if (rss_locked) + mutex_unlock(&efx->rss_lock); + up_write(&table->lock); + up_read(&efx->filter_sem); return rc; } @@ -4473,6 +4534,8 @@ static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) * If !by_index, remove by ID * If by_index, remove by index * Filter ID may come from userland and must be range-checked. + * Caller must hold efx->filter_sem for read, and efx->filter_state->lock + * for write. */ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, unsigned int priority_mask, @@ -4487,45 +4550,23 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, DEFINE_WAIT(wait); int rc; - /* Find the software table entry and mark it busy. Don't - * remove it yet; any attempt to update while we're waiting - * for the firmware must find the busy entry. 
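With the BUSY flag and waitqueue gone, every filter-table path in this patch follows the same two-level locking pattern: efx->filter_sem taken for read to pin the table pointer itself, then the table's own rw_semaphore taken for read (lookups, counting) or write (insert, remove, restore). A userspace pthread model of that nesting, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t filter_sem = PTHREAD_RWLOCK_INITIALIZER;

struct filter_table {
	pthread_rwlock_t lock;	/* protects entries, like table->lock */
	int nentries;
};

static struct filter_table *filter_state;	/* swapped under filter_sem write */

static int filter_remove(int idx)
{
	struct filter_table *table;
	int rc = 0;

	pthread_rwlock_rdlock(&filter_sem);	/* table pointer stays valid */
	table = filter_state;
	pthread_rwlock_wrlock(&table->lock);	/* we will modify entries */
	if (idx >= table->nentries)
		rc = -1;
	else
		table->nentries--;		/* stand-in for the real removal */
	pthread_rwlock_unlock(&table->lock);
	pthread_rwlock_unlock(&filter_sem);
	return rc;
}

int main(void)
{
	static struct filter_table t = { PTHREAD_RWLOCK_INITIALIZER, 4 };
	int rc;

	filter_state = &t;
	rc = filter_remove(0);
	printf("rc=%d nentries=%d\n", rc, t.nentries);
	return 0;
}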
- */ - for (;;) { - spin_lock_bh(&efx->filter_lock); - if (!(table->entry[filter_idx].spec & - EFX_EF10_FILTER_FLAG_BUSY)) - break; - prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); - spin_unlock_bh(&efx->filter_lock); - schedule(); - } - spec = efx_ef10_filter_entry_spec(table, filter_idx); if (!spec || (!by_index && efx_ef10_filter_pri(table, spec) != - efx_ef10_filter_get_unsafe_pri(filter_id))) { - rc = -ENOENT; - goto out_unlock; - } + efx_ef10_filter_get_unsafe_pri(filter_id))) + return -ENOENT; if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO && priority_mask == (1U << EFX_FILTER_PRI_AUTO)) { /* Just remove flags */ spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO; table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD; - rc = 0; - goto out_unlock; - } - - if (!(priority_mask & (1U << spec->priority))) { - rc = -ENOENT; - goto out_unlock; + return 0; } - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; - spin_unlock_bh(&efx->filter_lock); + if (!(priority_mask & (1U << spec->priority))) + return -ENOENT; if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { /* Reset to an automatic filter */ @@ -4534,15 +4575,15 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, new_spec.priority = EFX_FILTER_PRI_AUTO; new_spec.flags = (EFX_FILTER_FLAG_RX | - (efx_rss_enabled(efx) ? + (efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0)); new_spec.dmaq_id = 0; - new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; + new_spec.rss_context = 0; rc = efx_ef10_filter_push(efx, &new_spec, &table->entry[filter_idx].handle, + &efx->rss_context, true); - spin_lock_bh(&efx->filter_lock); if (rc == 0) *spec = new_spec; } else { @@ -4557,7 +4598,6 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), NULL, 0, NULL); - spin_lock_bh(&efx->filter_lock); if ((rc == 0) || (rc == -ENOENT)) { /* Filter removed OK or didn't actually exist */ kfree(spec); @@ -4569,11 +4609,6 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, } } - table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; - wake_up_all(&table->waitq); -out_unlock: - spin_unlock_bh(&efx->filter_lock); - finish_wait(&table->waitq, &wait); return rc; } @@ -4581,17 +4616,33 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id) { - return efx_ef10_filter_remove_internal(efx, 1U << priority, - filter_id, false); + struct efx_ef10_filter_table *table; + int rc; + + down_read(&efx->filter_sem); + table = efx->filter_state; + down_write(&table->lock); + rc = efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, + false); + up_write(&table->lock); + up_read(&efx->filter_sem); + return rc; } +/* Caller must hold efx->filter_sem for read */ static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id) { + struct efx_ef10_filter_table *table = efx->filter_state; + if (filter_id == EFX_EF10_FILTER_ID_INVALID) return; - efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true); + + down_write(&table->lock); + efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, + true); + up_write(&table->lock); } static int efx_ef10_filter_get_safe(struct efx_nic *efx, @@ -4599,11 +4650,13 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx, u32 filter_id, struct efx_filter_spec *spec) { unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id); - struct efx_ef10_filter_table 
*table = efx->filter_state; const struct efx_filter_spec *saved_spec; + struct efx_ef10_filter_table *table; int rc; - spin_lock_bh(&efx->filter_lock); + down_read(&efx->filter_sem); + table = efx->filter_state; + down_read(&table->lock); saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); if (saved_spec && saved_spec->priority == priority && efx_ef10_filter_pri(table, saved_spec) == @@ -4613,13 +4666,15 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx, } else { rc = -ENOENT; } - spin_unlock_bh(&efx->filter_lock); + up_read(&table->lock); + up_read(&efx->filter_sem); return rc; } static int efx_ef10_filter_clear_rx(struct efx_nic *efx, - enum efx_filter_priority priority) + enum efx_filter_priority priority) { + struct efx_ef10_filter_table *table; unsigned int priority_mask; unsigned int i; int rc; @@ -4627,31 +4682,40 @@ static int efx_ef10_filter_clear_rx(struct efx_nic *efx, priority_mask = (((1U << (priority + 1)) - 1) & ~(1U << EFX_FILTER_PRI_AUTO)); + down_read(&efx->filter_sem); + table = efx->filter_state; + down_write(&table->lock); for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { rc = efx_ef10_filter_remove_internal(efx, priority_mask, i, true); if (rc && rc != -ENOENT) - return rc; + break; + rc = 0; } - return 0; + up_write(&table->lock); + up_read(&efx->filter_sem); + return rc; } static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx, enum efx_filter_priority priority) { - struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_filter_table *table; unsigned int filter_idx; s32 count = 0; - spin_lock_bh(&efx->filter_lock); + down_read(&efx->filter_sem); + table = efx->filter_state; + down_read(&table->lock); for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { if (table->entry[filter_idx].spec && efx_ef10_filter_entry_spec(table, filter_idx)->priority == priority) ++count; } - spin_unlock_bh(&efx->filter_lock); + up_read(&table->lock); + up_read(&efx->filter_sem); return count; } @@ -4666,12 +4730,15 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, enum efx_filter_priority priority, u32 *buf, u32 size) { - struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_filter_table *table; struct efx_filter_spec *spec; unsigned int filter_idx; s32 count = 0; - spin_lock_bh(&efx->filter_lock); + down_read(&efx->filter_sem); + table = efx->filter_state; + down_read(&table->lock); + for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { spec = efx_ef10_filter_entry_spec(table, filter_idx); if (spec && spec->priority == priority) { @@ -4685,201 +4752,42 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, filter_idx); } } - spin_unlock_bh(&efx->filter_lock); + up_read(&table->lock); + up_read(&efx->filter_sem); return count; } #ifdef CONFIG_RFS_ACCEL -static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete; - -static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, - struct efx_filter_spec *spec) -{ - struct efx_ef10_filter_table *table = efx->filter_state; - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); - struct efx_filter_spec *saved_spec; - unsigned int hash, i, depth = 1; - bool replacing = false; - int ins_index = -1; - u64 cookie; - s32 rc; - - /* Must be an RX filter without RSS and not for a multicast - * destination address (RFS only works for connected sockets). - * These restrictions allow us to pass only a tiny amount of - * data through to the completion function. 
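The ARFS insert path being deleted here could hand only a single integer to its MCDI completion callback, so it packed three values into one cookie: replacing << 31 | ins_index << 16 | dmaq_id, unpacked on the other side with shifts and masks. Now that insertion runs synchronously under the table lock, the dance disappears; for reference, a self-contained pack/unpack sketch of that encoding (the index mask is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Layout used by the (now removed) async path:
 *   bit 31     : replacing flag
 *   bits 30-16 : table index
 *   bits 15-0  : dmaq id
 */
static uint64_t pack(int replacing, unsigned int idx, unsigned int dmaq)
{
	return (uint64_t)(replacing & 1) << 31 | (uint64_t)idx << 16 | dmaq;
}

static void unpack(uint64_t cookie, int *replacing,
		   unsigned int *idx, unsigned int *dmaq)
{
	*replacing = cookie >> 31 & 1;
	*idx = cookie >> 16 & 0x7fff;
	*dmaq = cookie & 0xffff;
}

int main(void)
{
	unsigned int i, q;
	int r;

	unpack(pack(1, 1234, 7), &r, &i, &q);
	printf("replacing=%d idx=%u dmaq=%u\n", r, i, q);
	return 0;
}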
- */ - EFX_WARN_ON_PARANOID(spec->flags != - (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER)); - EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT); - EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec)); - - hash = efx_ef10_filter_hash(spec); - - spin_lock_bh(&efx->filter_lock); - - /* Find any existing filter with the same match tuple or else - * a free slot to insert at. If an existing filter is busy, - * we have to give up. - */ - for (;;) { - i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); - saved_spec = efx_ef10_filter_entry_spec(table, i); - - if (!saved_spec) { - if (ins_index < 0) - ins_index = i; - } else if (efx_ef10_filter_equal(spec, saved_spec)) { - if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) { - rc = -EBUSY; - goto fail_unlock; - } - if (spec->priority < saved_spec->priority) { - rc = -EPERM; - goto fail_unlock; - } - ins_index = i; - break; - } - - /* Once we reach the maximum search depth, use the - * first suitable slot or return -EBUSY if there was - * none - */ - if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { - if (ins_index < 0) { - rc = -EBUSY; - goto fail_unlock; - } - break; - } - - ++depth; - } - - /* Create a software table entry if necessary, and mark it - * busy. We might yet fail to insert, but any attempt to - * insert a conflicting filter while we're waiting for the - * firmware must find the busy entry. - */ - saved_spec = efx_ef10_filter_entry_spec(table, ins_index); - if (saved_spec) { - replacing = true; - } else { - saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); - if (!saved_spec) { - rc = -ENOMEM; - goto fail_unlock; - } - *saved_spec = *spec; - } - efx_ef10_filter_set_entry(table, ins_index, saved_spec, - EFX_EF10_FILTER_FLAG_BUSY); - - spin_unlock_bh(&efx->filter_lock); - - /* Pack up the variables needed on completion */ - cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id; - - efx_ef10_filter_push_prep(efx, spec, inbuf, - table->entry[ins_index].handle, replacing); - efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), - MC_CMD_FILTER_OP_OUT_LEN, - efx_ef10_filter_rfs_insert_complete, cookie); - - return ins_index; - -fail_unlock: - spin_unlock_bh(&efx->filter_lock); - return rc; -} - -static void -efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie, - int rc, efx_dword_t *outbuf, - size_t outlen_actual) -{ - struct efx_ef10_filter_table *table = efx->filter_state; - unsigned int ins_index, dmaq_id; - struct efx_filter_spec *spec; - bool replacing; - - /* Unpack the cookie */ - replacing = cookie >> 31; - ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1); - dmaq_id = cookie & 0xffff; - - spin_lock_bh(&efx->filter_lock); - spec = efx_ef10_filter_entry_spec(table, ins_index); - if (rc == 0) { - table->entry[ins_index].handle = - MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); - if (replacing) - spec->dmaq_id = dmaq_id; - } else if (!replacing) { - kfree(spec); - spec = NULL; - } - efx_ef10_filter_set_entry(table, ins_index, spec, 0); - spin_unlock_bh(&efx->filter_lock); - - wake_up_all(&table->waitq); -} - -static void -efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, - unsigned long filter_idx, - int rc, efx_dword_t *outbuf, - size_t outlen_actual); - static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, unsigned int filter_idx) { - struct efx_ef10_filter_table *table = efx->filter_state; - struct efx_filter_spec *spec = - efx_ef10_filter_entry_spec(table, filter_idx); - MCDI_DECLARE_BUF(inbuf, - MC_CMD_FILTER_OP_IN_HANDLE_OFST + - 
MC_CMD_FILTER_OP_IN_HANDLE_LEN); - - if (!spec || - (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) || - spec->priority != EFX_FILTER_PRI_HINT || - !rps_may_expire_flow(efx->net_dev, spec->dmaq_id, - flow_id, filter_idx)) - return false; - - MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, - MC_CMD_FILTER_OP_IN_OP_REMOVE); - MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, - table->entry[filter_idx].handle); - if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0, - efx_ef10_filter_rfs_expire_complete, filter_idx)) - return false; + struct efx_ef10_filter_table *table; + struct efx_filter_spec *spec; + bool ret; - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; - return true; -} + down_read(&efx->filter_sem); + table = efx->filter_state; + down_write(&table->lock); + spec = efx_ef10_filter_entry_spec(table, filter_idx); -static void -efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, - unsigned long filter_idx, - int rc, efx_dword_t *outbuf, - size_t outlen_actual) -{ - struct efx_ef10_filter_table *table = efx->filter_state; - struct efx_filter_spec *spec = - efx_ef10_filter_entry_spec(table, filter_idx); + if (!spec || spec->priority != EFX_FILTER_PRI_HINT) { + ret = true; + goto out_unlock; + } - spin_lock_bh(&efx->filter_lock); - if (rc == 0) { - kfree(spec); - efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); + if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, + flow_id, filter_idx)) { + ret = false; + goto out_unlock; } - table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; - wake_up_all(&table->waitq); - spin_unlock_bh(&efx->filter_lock); + + ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, + filter_idx, true) == 0; +out_unlock: + up_write(&table->lock); + up_read(&efx->filter_sem); + return ret; } #endif /* CONFIG_RFS_ACCEL */ @@ -5074,9 +4982,9 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) table->vlan_filter = !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); INIT_LIST_HEAD(&table->vlan_list); + init_rwsem(&table->lock); efx->filter_state = table; - init_waitqueue_head(&table->waitq); list_for_each_entry(vlan, &nic_data->vlan_list, list) { rc = efx_ef10_filter_add_vlan(efx, vlan->vid); @@ -5104,6 +5012,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) unsigned int invalid_filters = 0, failed = 0; struct efx_ef10_filter_vlan *vlan; struct efx_filter_spec *spec; + struct efx_rss_context *ctx; unsigned int filter_idx; u32 mcdi_flags; int match_pri; @@ -5117,7 +5026,8 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) if (!table) return; - spin_lock_bh(&efx->filter_lock); + down_write(&table->lock); + mutex_lock(&efx->rss_lock); for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { spec = efx_ef10_filter_entry_spec(table, filter_idx); @@ -5133,20 +5043,32 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) invalid_filters++; goto not_restored; } - if (spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT && - spec->rss_context != nic_data->rx_rss_context) - netif_warn(efx, drv, efx->net_dev, - "Warning: unable to restore a filter with specific RSS context.\n"); - - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; - spin_unlock_bh(&efx->filter_lock); + if (spec->rss_context) + ctx = efx_find_rss_context_entry(efx, spec->rss_context); + else + ctx = &efx->rss_context; + if (spec->flags & EFX_FILTER_FLAG_RX_RSS) { + if (!ctx) { + netif_warn(efx, drv, efx->net_dev, + "Warning: unable to restore a filter with nonexistent RSS 
context %u.\n", + spec->rss_context); + invalid_filters++; + goto not_restored; + } + if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) { + netif_warn(efx, drv, efx->net_dev, + "Warning: unable to restore a filter with RSS context %u as it was not created.\n", + spec->rss_context); + invalid_filters++; + goto not_restored; + } + } rc = efx_ef10_filter_push(efx, spec, &table->entry[filter_idx].handle, - false); + ctx, false); if (rc) failed++; - spin_lock_bh(&efx->filter_lock); if (rc) { not_restored: @@ -5158,13 +5080,11 @@ not_restored: kfree(spec); efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); - } else { - table->entry[filter_idx].spec &= - ~EFX_EF10_FILTER_FLAG_BUSY; } } - spin_unlock_bh(&efx->filter_lock); + mutex_unlock(&efx->rss_lock); + up_write(&table->lock); /* This can happen validly if the MC's capabilities have changed, so * is not an error. @@ -5232,6 +5152,8 @@ static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id) struct efx_ef10_filter_table *table = efx->filter_state; unsigned int filter_idx; + efx_rwsem_assert_write_locked(&table->lock); + if (*id != EFX_EF10_FILTER_ID_INVALID) { filter_idx = efx_ef10_filter_get_unsafe_id(*id); if (!table->entry[filter_idx].spec) @@ -5267,10 +5189,10 @@ static void efx_ef10_filter_mark_old(struct efx_nic *efx) struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_filter_vlan *vlan; - spin_lock_bh(&efx->filter_lock); + down_write(&table->lock); list_for_each_entry(vlan, &table->vlan_list, list) _efx_ef10_filter_vlan_mark_old(efx, vlan); - spin_unlock_bh(&efx->filter_lock); + up_write(&table->lock); } static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) @@ -5547,10 +5469,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, return rc; } -/* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD - * flag or removes these filters, we don't need to hold the filter_lock while - * scanning for these filters. - */ +/* Remove filters that weren't renewed. 
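AUTO_OLD implements a mark-and-sweep resync of the device address lists: mark every auto-installed filter old, reinstall the current addresses (which clears the flag on filters still wanted), then remove whatever stayed marked — which is what efx_ef10_filter_remove_old() below does, now under table->lock. A compact model of that cycle:

#include <stdbool.h>
#include <stdio.h>

#define NFILTERS 4

struct filt { bool present; bool auto_old; unsigned int addr; };
static struct filt table[NFILTERS] = {
	{ true, false, 0xaa }, { true, false, 0xbb }, { true, false, 0xcc },
};

static void mark_old(void)
{
	for (int i = 0; i < NFILTERS; i++)
		table[i].auto_old = table[i].present;
}

static void renew(const unsigned int *addrs, int n)	/* current address list */
{
	for (int i = 0; i < NFILTERS; i++)
		for (int j = 0; j < n; j++)
			if (table[i].present && table[i].addr == addrs[j])
				table[i].auto_old = false;
}

static void remove_old(void)
{
	for (int i = 0; i < NFILTERS; i++)
		if (table[i].present && table[i].auto_old)
			table[i].present = false;	/* wasn't renewed */
}

int main(void)
{
	unsigned int want[] = { 0xaa, 0xcc };	/* 0xbb has gone away */

	mark_old();
	renew(want, 2);
	remove_old();
	for (int i = 0; i < NFILTERS; i++)
		if (table[i].present)
			printf("kept %#x\n", table[i].addr);
	return 0;
}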
*/ static void efx_ef10_filter_remove_old(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; @@ -5559,6 +5478,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx) int rc; int i; + down_write(&table->lock); for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { if (READ_ONCE(table->entry[i].spec) & EFX_EF10_FILTER_FLAG_AUTO_OLD) { @@ -5570,6 +5490,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx) remove_failed++; } } + up_write(&table->lock); if (remove_failed) netif_info(efx, drv, efx->net_dev, @@ -6698,7 +6619,6 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, #ifdef CONFIG_RFS_ACCEL - .filter_rfs_insert = efx_ef10_filter_rfs_insert, .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, #endif #ifdef CONFIG_SFC_MTD @@ -6784,6 +6704,9 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .tx_limit_len = efx_ef10_tx_limit_len, .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, + .rx_push_rss_context_config = efx_ef10_rx_push_rss_context_config, + .rx_pull_rss_context_config = efx_ef10_rx_pull_rss_context_config, + .rx_restore_rss_contexts = efx_ef10_rx_restore_rss_contexts, .rx_probe = efx_ef10_rx_probe, .rx_init = efx_ef10_rx_init, .rx_remove = efx_ef10_rx_remove, @@ -6808,7 +6731,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, #ifdef CONFIG_RFS_ACCEL - .filter_rfs_insert = efx_ef10_filter_rfs_insert, .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, #endif #ifdef CONFIG_SFC_MTD diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 16757cfc5b29..692dd729ee2a 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -340,7 +340,10 @@ static int efx_poll(struct napi_struct *napi, int budget) efx_update_irq_mod(efx, channel); } - efx_filter_rfs_expire(channel); +#ifdef CONFIG_RFS_ACCEL + /* Perhaps expire some ARFS filters */ + schedule_work(&channel->filter_work); +#endif /* There is no race here; although napi_disable() will * only wait for napi_complete(), this isn't a problem @@ -470,6 +473,10 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) tx_queue->channel = channel; } +#ifdef CONFIG_RFS_ACCEL + INIT_WORK(&channel->filter_work, efx_filter_rfs_expire); +#endif + rx_queue = &channel->rx_queue; rx_queue->efx = efx; timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); @@ -512,6 +519,9 @@ efx_copy_channel(const struct efx_channel *old_channel) rx_queue->buffer = NULL; memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); +#ifdef CONFIG_RFS_ACCEL + INIT_WORK(&channel->filter_work, efx_filter_rfs_expire); +#endif return channel; } @@ -1353,12 +1363,13 @@ static void efx_fini_io(struct efx_nic *efx) pci_disable_device(efx->pci_dev); } -void efx_set_default_rx_indir_table(struct efx_nic *efx) +void efx_set_default_rx_indir_table(struct efx_nic *efx, + struct efx_rss_context *ctx) { size_t i; - for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) - efx->rx_indir_table[i] = + for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++) + ctx->rx_indir_table[i] = ethtool_rxfh_indir_default(i, efx->rss_spread); } @@ -1739,9 +1750,9 @@ static int efx_probe_nic(struct efx_nic *efx) } while (rc == -EAGAIN); if (efx->n_channels > 1) 
- netdev_rss_key_fill(&efx->rx_hash_key, - sizeof(efx->rx_hash_key)); - efx_set_default_rx_indir_table(efx); + netdev_rss_key_fill(efx->rss_context.rx_hash_key, + sizeof(efx->rss_context.rx_hash_key)); + efx_set_default_rx_indir_table(efx, &efx->rss_context); netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); @@ -1772,7 +1783,6 @@ static int efx_probe_filters(struct efx_nic *efx) { int rc; - spin_lock_init(&efx->filter_lock); init_rwsem(&efx->filter_sem); mutex_lock(&efx->mac_lock); down_write(&efx->filter_sem); @@ -2647,6 +2657,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) efx_disable_interrupts(efx); mutex_lock(&efx->mac_lock); + mutex_lock(&efx->rss_lock); if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && method != RESET_TYPE_DATAPATH) efx->phy_op->fini(efx); @@ -2700,6 +2711,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) " VFs may not function\n", rc); #endif + if (efx->type->rx_restore_rss_contexts) + efx->type->rx_restore_rss_contexts(efx); + mutex_unlock(&efx->rss_lock); down_read(&efx->filter_sem); efx_restore_filters(efx); up_read(&efx->filter_sem); @@ -2718,6 +2732,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) fail: efx->port_initialized = false; + mutex_unlock(&efx->rss_lock); mutex_unlock(&efx->mac_lock); return rc; @@ -3003,11 +3018,16 @@ static int efx_init_struct(struct efx_nic *efx, efx->type->rx_hash_offset - efx->type->rx_prefix_size; efx->rx_packet_ts_offset = efx->type->rx_ts_offset - efx->type->rx_prefix_size; + INIT_LIST_HEAD(&efx->rss_context.list); + mutex_init(&efx->rss_lock); spin_lock_init(&efx->stats_lock); efx->vi_stride = EFX_DEFAULT_VI_STRIDE; efx->num_mac_stats = MC_CMD_MAC_NSTATS; BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END); mutex_init(&efx->mac_lock); +#ifdef CONFIG_RFS_ACCEL + mutex_init(&efx->rps_mutex); +#endif efx->phy_op = &efx_dummy_phy_operations; efx->mdio.dev = net_dev; INIT_WORK(&efx->mac_work, efx_mac_work); @@ -3072,6 +3092,61 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); } +/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because + * (a) this is an infrequent control-plane operation and (b) n is small (max 64) + */ +struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx) +{ + struct list_head *head = &efx->rss_context.list; + struct efx_rss_context *ctx, *new; + u32 id = 1; /* Don't use zero, that refers to the master RSS context */ + + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + + /* Search for first gap in the numbering */ + list_for_each_entry(ctx, head, list) { + if (ctx->user_id != id) + break; + id++; + /* Check for wrap. If this happens, we have nearly 2^32 + * allocated RSS contexts, which seems unlikely. 
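efx_alloc_rss_context_entry() above hands out user-visible IDs by walking the sorted context list for the first gap in the numbering, starting at 1 because ID 0 names the master RSS context, and giving up if the counter ever wraps. A standalone linked-list version of the same first-gap search, deliberately O(n) per the comment's "n is small" justification:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { uint32_t user_id; struct ctx *next; };

/* Insert a new context with the lowest free id >= 1; list stays sorted. */
static struct ctx *alloc_ctx(struct ctx **head)
{
	struct ctx **link = head, *new;
	uint32_t id = 1;	/* 0 is reserved for the master RSS context */

	for (; *link; link = &(*link)->next) {
		if ((*link)->user_id != id)
			break;		/* found a gap in the numbering */
		if (++id == 0)
			return NULL;	/* 2^32 contexts: give up on wrap */
	}
	new = malloc(sizeof(*new));
	if (!new)
		return NULL;
	new->user_id = id;
	new->next = *link;	/* splice into the gap, keeping order */
	*link = new;
	return new;
}

int main(void)
{
	struct ctx *head = NULL, *c;

	for (int i = 0; i < 3; i++) {
		c = alloc_ctx(&head);
		if (c)
			printf("allocated id %u\n", c->user_id);
	}
	return 0;
}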
+ */ + if (WARN_ON_ONCE(!id)) + return NULL; + } + + /* Create the new entry */ + new = kmalloc(sizeof(struct efx_rss_context), GFP_KERNEL); + if (!new) + return NULL; + new->context_id = EFX_EF10_RSS_CONTEXT_INVALID; + new->rx_hash_udp_4tuple = false; + + /* Insert the new entry into the gap */ + new->user_id = id; + list_add_tail(&new->list, &ctx->list); + return new; +} + +struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id) +{ + struct list_head *head = &efx->rss_context.list; + struct efx_rss_context *ctx; + + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + + list_for_each_entry(ctx, head, list) + if (ctx->user_id == id) + return ctx; + return NULL; +} + +void efx_free_rss_context_entry(struct efx_rss_context *ctx) +{ + list_del(&ctx->list); + kfree(ctx); +} + /************************************************************************** * * PCI interface diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 0cddc5ad77b1..a3140e16fcef 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -34,7 +34,8 @@ extern unsigned int efx_piobuf_size; extern bool efx_separate_tx_channels; /* RX */ -void efx_set_default_rx_indir_table(struct efx_nic *efx); +void efx_set_default_rx_indir_table(struct efx_nic *efx, + struct efx_rss_context *ctx); void efx_rx_config_page_split(struct efx_nic *efx); int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); @@ -169,19 +170,31 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx, int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id); bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota); -static inline void efx_filter_rfs_expire(struct efx_channel *channel) +static inline void efx_filter_rfs_expire(struct work_struct *data) { + struct efx_channel *channel = container_of(data, struct efx_channel, + filter_work); + if (channel->rfs_filters_added >= 60 && __efx_filter_rfs_expire(channel->efx, 100)) channel->rfs_filters_added -= 60; } #define efx_filter_rfs_enabled() 1 #else -static inline void efx_filter_rfs_expire(struct efx_channel *channel) {} +static inline void efx_filter_rfs_expire(struct work_struct *data) {} #define efx_filter_rfs_enabled() 0 #endif bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); +/* RSS contexts */ +struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); +struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); +void efx_free_rss_context_entry(struct efx_rss_context *ctx); +static inline bool efx_rss_active(struct efx_rss_context *ctx) +{ + return ctx->context_id != EFX_EF10_RSS_CONTEXT_INVALID; +} + /* Channels */ int efx_channel_dummy_op_int(struct efx_channel *channel); void efx_channel_dummy_op_void(struct efx_channel *channel); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 4db2dc2bf52f..3143588ffd77 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -808,7 +808,8 @@ static inline void ip6_fill_mask(__be32 *mask) } static int efx_ethtool_get_class_rule(struct efx_nic *efx, - struct ethtool_rx_flow_spec *rule) + struct ethtool_rx_flow_spec *rule, + u32 *rss_context) { struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; @@ -964,6 +965,11 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, 
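
efx_alloc_rss_context_entry() above keeps the context list sorted by user_id and hands out the lowest unused ID, so IDs are recycled after deletion and lookup stays a simple walk. A self-contained userspace rendering of that first-gap search (the ctx type and function names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	uint32_t user_id;
	struct ctx *next;	/* list kept sorted by ascending user_id */
};

/* Allocate the lowest unused ID >= 1 (0 denotes the default context)
 * and splice the node into the sorted list, mirroring the first-gap
 * search in efx_alloc_rss_context_entry(). */
static struct ctx *ctx_alloc(struct ctx **head)
{
	struct ctx **link = head;
	struct ctx *node;
	uint32_t id = 1;

	while (*link && (*link)->user_id == id) {
		link = &(*link)->next;
		if (++id == 0)		/* ~2^32 live contexts: give up */
			return NULL;
	}
	node = malloc(sizeof(*node));
	if (!node)
		return NULL;
	node->user_id = id;
	node->next = *link;
	*link = node;
	return node;
}

int main(void)
{
	struct ctx *head = NULL, *c;
	int i;

	for (i = 0; i < 3; i++) {	/* prints user_id 1, 2, 3 */
		c = ctx_alloc(&head);
		if (c)
			printf("allocated user_id %u\n", c->user_id);
	}
	return 0;	/* demo only: nodes intentionally not freed */
}
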
rule->m_ext.vlan_tci = htons(0xfff); } + if (spec.flags & EFX_FILTER_FLAG_RX_RSS) { + rule->flow_type |= FLOW_RSS; + *rss_context = spec.rss_context; + } + return rc; } @@ -972,6 +978,8 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev, struct ethtool_rxnfc *info, u32 *rule_locs) { struct efx_nic *efx = netdev_priv(net_dev); + u32 rss_context = 0; + s32 rc = 0; switch (info->cmd) { case ETHTOOL_GRXRINGS: @@ -979,12 +987,22 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev, return 0; case ETHTOOL_GRXFH: { + struct efx_rss_context *ctx = &efx->rss_context; + + mutex_lock(&efx->rss_lock); + if (info->flow_type & FLOW_RSS && info->rss_context) { + ctx = efx_find_rss_context_entry(efx, info->rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + } info->data = 0; - if (!efx->rss_active) /* No RSS */ - return 0; - switch (info->flow_type) { + if (!efx_rss_active(ctx)) /* No RSS */ + goto out_unlock; + switch (info->flow_type & ~FLOW_RSS) { case UDP_V4_FLOW: - if (efx->rx_hash_udp_4tuple) + if (ctx->rx_hash_udp_4tuple) /* fall through */ case TCP_V4_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; @@ -995,7 +1013,7 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev, info->data |= RXH_IP_SRC | RXH_IP_DST; break; case UDP_V6_FLOW: - if (efx->rx_hash_udp_4tuple) + if (ctx->rx_hash_udp_4tuple) /* fall through */ case TCP_V6_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; @@ -1008,7 +1026,9 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev, default: break; } - return 0; +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; } case ETHTOOL_GRXCLSRLCNT: @@ -1023,10 +1043,14 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev, case ETHTOOL_GRXCLSRULE: if (efx_filter_get_rx_id_limit(efx) == 0) return -EOPNOTSUPP; - return efx_ethtool_get_class_rule(efx, &info->fs); + rc = efx_ethtool_get_class_rule(efx, &info->fs, &rss_context); + if (rc < 0) + return rc; + if (info->fs.flow_type & FLOW_RSS) + info->rss_context = rss_context; + return 0; - case ETHTOOL_GRXCLSRLALL: { - s32 rc; + case ETHTOOL_GRXCLSRLALL: info->data = efx_filter_get_rx_id_limit(efx); if (info->data == 0) return -EOPNOTSUPP; @@ -1036,7 +1060,6 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev, return rc; info->rule_cnt = rc; return 0; - } default: return -EOPNOTSUPP; @@ -1054,7 +1077,8 @@ static inline bool ip6_mask_is_empty(__be32 mask[4]) } static int efx_ethtool_set_class_rule(struct efx_nic *efx, - struct ethtool_rx_flow_spec *rule) + struct ethtool_rx_flow_spec *rule, + u32 rss_context) { struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; @@ -1064,8 +1088,10 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; + u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS); struct ethhdr *mac_entry = &rule->h_u.ether_spec; struct ethhdr *mac_mask = &rule->m_u.ether_spec; + enum efx_filter_flags flags = 0; struct efx_filter_spec spec; int rc; @@ -1084,19 +1110,26 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, rule->m_ext.data[1])) return -EINVAL; - efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, - efx->rx_scatter ? 
EFX_FILTER_FLAG_RX_SCATTER : 0, + if (efx->rx_scatter) + flags |= EFX_FILTER_FLAG_RX_SCATTER; + if (rule->flow_type & FLOW_RSS) + flags |= EFX_FILTER_FLAG_RX_RSS; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, flags, (rule->ring_cookie == RX_CLS_FLOW_DISC) ? EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie); - switch (rule->flow_type & ~FLOW_EXT) { + if (rule->flow_type & FLOW_RSS) + spec.rss_context = rss_context; + + switch (flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO); spec.ether_type = htons(ETH_P_IP); - spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ? - IPPROTO_TCP : IPPROTO_UDP); + spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP + : IPPROTO_UDP; if (ip_mask->ip4dst) { if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK) return -EINVAL; @@ -1130,8 +1163,8 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO); spec.ether_type = htons(ETH_P_IPV6); - spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V6_FLOW ? - IPPROTO_TCP : IPPROTO_UDP); + spec.ip_proto = flow_type == TCP_V6_FLOW ? IPPROTO_TCP + : IPPROTO_UDP; if (!ip6_mask_is_empty(ip6_mask->ip6dst)) { if (!ip6_mask_is_full(ip6_mask->ip6dst)) return -EINVAL; @@ -1265,7 +1298,8 @@ static int efx_ethtool_set_rxnfc(struct net_device *net_dev, switch (info->cmd) { case ETHTOOL_SRXCLSRLINS: - return efx_ethtool_set_class_rule(efx, &info->fs); + return efx_ethtool_set_class_rule(efx, &info->fs, + info->rss_context); case ETHTOOL_SRXCLSRLDEL: return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, @@ -1280,7 +1314,9 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); - return (efx->n_rx_channels == 1) ? 
0 : ARRAY_SIZE(efx->rx_indir_table); + if (efx->n_rx_channels == 1) + return 0; + return ARRAY_SIZE(efx->rss_context.rx_indir_table); } static u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev) @@ -1303,9 +1339,11 @@ static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (indir) - memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); + memcpy(indir, efx->rss_context.rx_indir_table, + sizeof(efx->rss_context.rx_indir_table)); if (key) - memcpy(key, efx->rx_hash_key, efx->type->rx_hash_key_size); + memcpy(key, efx->rss_context.rx_hash_key, + efx->type->rx_hash_key_size); return 0; } @@ -1321,13 +1359,109 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, return 0; if (!key) - key = efx->rx_hash_key; + key = efx->rss_context.rx_hash_key; if (!indir) - indir = efx->rx_indir_table; + indir = efx->rss_context.rx_indir_table; return efx->type->rx_push_rss_config(efx, true, indir, key); } +static int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_rss_context *ctx; + int rc = 0; + + if (!efx->type->rx_pull_rss_context_config) + return -EOPNOTSUPP; + + mutex_lock(&efx->rss_lock); + ctx = efx_find_rss_context_entry(efx, rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + rc = efx->type->rx_pull_rss_context_config(efx, ctx); + if (rc) + goto out_unlock; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (indir) + memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table)); + if (key) + memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size); +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; +} + +static int efx_ethtool_set_rxfh_context(struct net_device *net_dev, + const u32 *indir, const u8 *key, + const u8 hfunc, u32 *rss_context, + bool delete) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_rss_context *ctx; + bool allocated = false; + int rc; + + if (!efx->type->rx_push_rss_context_config) + return -EOPNOTSUPP; + /* Hash function is Toeplitz, cannot be changed */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + mutex_lock(&efx->rss_lock); + + if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { + if (delete) { + /* alloc + delete == Nothing to do */ + rc = -EINVAL; + goto out_unlock; + } + ctx = efx_alloc_rss_context_entry(efx); + if (!ctx) { + rc = -ENOMEM; + goto out_unlock; + } + ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID; + /* Initialise indir table and key to defaults */ + efx_set_default_rx_indir_table(efx, ctx); + netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key)); + allocated = true; + } else { + ctx = efx_find_rss_context_entry(efx, *rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + } + + if (delete) { + /* delete this context */ + rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL); + if (!rc) + efx_free_rss_context_entry(ctx); + goto out_unlock; + } + + if (!key) + key = ctx->rx_hash_key; + if (!indir) + indir = ctx->rx_indir_table; + + rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key); + if (rc && allocated) + efx_free_rss_context_entry(ctx); + else + *rss_context = ctx->user_id; +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; +} + static int efx_ethtool_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info) { @@ -1375,6 +1509,36 @@ static int 
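
Taken together, the get_rxfh_context/set_rxfh_context pair above makes additional RSS contexts reachable from userspace. Assuming an ethtool(8) build recent enough to know about RSS contexts (the command syntax below comes from its manual page, not from this patch), the round trip looks roughly like:

# ethtool -X eth0 context new
New RSS context is 1
# ethtool -X eth0 weight 0 0 1 1 context 1
# ethtool -N eth0 flow-type tcp4 dst-port 4242 context 1
# ethtool -X eth0 context 1 delete

Allocation picks the lowest free user_id; requesting allocation and deletion in one call is rejected with -EINVAL in efx_ethtool_set_rxfh_context(), and a context whose push to the MC fails is freed again only if it was freshly allocated.
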
efx_ethtool_get_module_info(struct net_device *net_dev, return ret; } +static int efx_ethtool_get_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + if (!efx->phy_op || !efx->phy_op->get_fecparam) + return -EOPNOTSUPP; + mutex_lock(&efx->mac_lock); + rc = efx->phy_op->get_fecparam(efx, fecparam); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +static int efx_ethtool_set_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + if (!efx->phy_op || !efx->phy_op->get_fecparam) + return -EOPNOTSUPP; + mutex_lock(&efx->mac_lock); + rc = efx->phy_op->set_fecparam(efx, fecparam); + mutex_unlock(&efx->mac_lock); + + return rc; +} + const struct ethtool_ops efx_ethtool_ops = { .get_drvinfo = efx_ethtool_get_drvinfo, .get_regs_len = efx_ethtool_get_regs_len, @@ -1403,9 +1567,13 @@ const struct ethtool_ops efx_ethtool_ops = { .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, + .get_rxfh_context = efx_ethtool_get_rxfh_context, + .set_rxfh_context = efx_ethtool_set_rxfh_context, .get_ts_info = efx_ethtool_get_ts_info, .get_module_info = efx_ethtool_get_module_info, .get_module_eeprom = efx_ethtool_get_module_eeprom, .get_link_ksettings = efx_ethtool_get_link_ksettings, .set_link_ksettings = efx_ethtool_set_link_ksettings, + .get_fecparam = efx_ethtool_get_fecparam, + .set_fecparam = efx_ethtool_set_fecparam, }; diff --git a/drivers/net/ethernet/sfc/falcon/enum.h b/drivers/net/ethernet/sfc/falcon/enum.h index 30a1136fc909..4824fcf5c3d4 100644 --- a/drivers/net/ethernet/sfc/falcon/enum.h +++ b/drivers/net/ethernet/sfc/falcon/enum.h @@ -81,7 +81,6 @@ enum ef4_loopback_mode { (1 << LOOPBACK_XAUI) | \ (1 << LOOPBACK_GMII) | \ (1 << LOOPBACK_SGMII) | \ - (1 << LOOPBACK_SGMII) | \ (1 << LOOPBACK_XGBR) | \ (1 << LOOPBACK_XFI) | \ (1 << LOOPBACK_XAUI_FAR) | \ diff --git a/drivers/net/ethernet/sfc/falcon/mtd.c b/drivers/net/ethernet/sfc/falcon/mtd.c index cde593cb1052..2d67e4621a3d 100644 --- a/drivers/net/ethernet/sfc/falcon/mtd.c +++ b/drivers/net/ethernet/sfc/falcon/mtd.c @@ -24,17 +24,8 @@ static int ef4_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) { struct ef4_nic *efx = mtd->priv; - int rc; - rc = efx->type->mtd_erase(mtd, erase->addr, erase->len); - if (rc == 0) { - erase->state = MTD_ERASE_DONE; - } else { - erase->state = MTD_ERASE_FAILED; - erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN; - } - mtd_erase_callback(erase); - return rc; + return efx->type->mtd_erase(mtd, erase->addr, erase->len); } static void ef4_mtd_sync(struct mtd_info *mtd) diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 266b9bee1f3a..4a19c7efdf8d 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1630,12 +1630,12 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx) size_t i = 0; efx_dword_t dword; - BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != FR_BZ_RX_INDIRECTION_TBL_ROWS); for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, - efx->rx_indir_table[i]); + efx->rss_context.rx_indir_table[i]); efx_writed(efx, &dword, FR_BZ_RX_INDIRECTION_TBL + FR_BZ_RX_INDIRECTION_TBL_STEP * i); @@ -1647,14 +1647,14 @@ void efx_farch_rx_pull_indir_table(struct efx_nic *efx) size_t i = 0; efx_dword_t dword; - 
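
The two fecparam hooks above are driven by the ETHTOOL_GFECPARAM/ETHTOOL_SFECPARAM ioctls (the same pair that `ethtool --show-fec` and `--set-fec` wrap). A minimal userspace query, with the interface name as an assumption, might look like:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_fecparam fp = { .cmd = ETHTOOL_GFECPARAM };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative name */
	ifr.ifr_data = (void *)&fp;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GFECPARAM");
		close(fd);
		return 1;
	}
	/* fp.fec = configured FEC modes, fp.active_fec = mode in use */
	printf("configured %#x active %#x\n", fp.fec, fp.active_fec);
	close(fd);
	return 0;
}
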
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != FR_BZ_RX_INDIRECTION_TBL_ROWS); for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { efx_readd(efx, &dword, FR_BZ_RX_INDIRECTION_TBL + FR_BZ_RX_INDIRECTION_TBL_STEP * i); - efx->rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE); + efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE); } } @@ -1878,6 +1878,7 @@ struct efx_farch_filter_table { }; struct efx_farch_filter_state { + struct rw_semaphore lock; /* Protects table contents */ struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; }; @@ -2032,8 +2033,7 @@ efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, { bool is_full = false; - if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && - gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT) + if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context) return -EINVAL; spec->priority = gen_spec->priority; @@ -2398,9 +2398,13 @@ s32 efx_farch_filter_insert(struct efx_nic *efx, if (rc) return rc; + down_write(&state->lock); + table = &state->table[efx_farch_filter_spec_table_id(&spec)]; - if (table->size == 0) - return -EINVAL; + if (table->size == 0) { + rc = -EINVAL; + goto out_unlock; + } netif_vdbg(efx, hw, efx->net_dev, "%s: type %d search_limit=%d", __func__, spec.type, @@ -2413,8 +2417,6 @@ s32 efx_farch_filter_insert(struct efx_nic *efx, EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF); rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; ins_index = rep_index; - - spin_lock_bh(&efx->filter_lock); } else { /* Search concurrently for * (1) a filter to be replaced (rep_index): any filter @@ -2444,8 +2446,6 @@ s32 efx_farch_filter_insert(struct efx_nic *efx, ins_index = -1; depth = 1; - spin_lock_bh(&efx->filter_lock); - for (;;) { if (!test_bit(i, table->used_bitmap)) { if (ins_index < 0) @@ -2464,7 +2464,7 @@ s32 efx_farch_filter_insert(struct efx_nic *efx, /* Case (b) */ if (ins_index < 0) { rc = -EBUSY; - goto out; + goto out_unlock; } rep_index = -1; break; @@ -2484,11 +2484,11 @@ s32 efx_farch_filter_insert(struct efx_nic *efx, if (spec.priority == saved_spec->priority && !replace_equal) { rc = -EEXIST; - goto out; + goto out_unlock; } if (spec.priority < saved_spec->priority) { rc = -EPERM; - goto out; + goto out_unlock; } if (saved_spec->priority == EFX_FILTER_PRI_AUTO || saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) @@ -2529,8 +2529,8 @@ s32 efx_farch_filter_insert(struct efx_nic *efx, __func__, spec.type, ins_index, spec.dmaq_id); rc = efx_farch_filter_make_id(&spec, ins_index); -out: - spin_unlock_bh(&efx->filter_lock); +out_unlock: + up_write(&state->lock); return rc; } @@ -2605,11 +2605,11 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx, filter_idx = efx_farch_filter_id_index(filter_id); if (filter_idx >= table->size) return -ENOENT; + down_write(&state->lock); spec = &table->spec[filter_idx]; - spin_lock_bh(&efx->filter_lock); rc = efx_farch_filter_remove(efx, table, filter_idx, priority); - spin_unlock_bh(&efx->filter_lock); + up_write(&state->lock); return rc; } @@ -2623,30 +2623,28 @@ int efx_farch_filter_get_safe(struct efx_nic *efx, struct efx_farch_filter_table *table; struct efx_farch_filter_spec *spec; unsigned int filter_idx; - int rc; + int rc = -ENOENT; + + down_read(&state->lock); table_id = efx_farch_filter_id_table_id(filter_id); if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) - return -ENOENT; + goto out_unlock; table = &state->table[table_id]; 
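
From here the farch filter paths swap the old efx->filter_lock spinlock for a rw_semaphore embedded in efx_farch_filter_state: lookups (get, count, get_rx_ids) take it shared, while insert/remove/restore take it exclusively and are now allowed to sleep. The shape of the locking, with hypothetical names:

#include <linux/rwsem.h>
#include <linux/types.h>

struct my_filter_state {
	struct rw_semaphore lock;	/* protects the tables below */
	/* ... filter tables, used bitmaps ... */
};

static void my_state_init(struct my_filter_state *st)
{
	init_rwsem(&st->lock);
}

/* Read-side paths (get/count/list) can run concurrently. */
static u32 my_count_used(struct my_filter_state *st)
{
	u32 n = 0;

	down_read(&st->lock);
	/* walk the tables counting used entries */
	up_read(&st->lock);
	return n;
}

/* Write-side paths are exclusive and, unlike the old spin_lock_bh()
 * critical sections, may sleep while holding the lock. */
static int my_insert(struct my_filter_state *st)
{
	down_write(&st->lock);
	/* search for a slot, program the hardware */
	up_write(&st->lock);
	return 0;
}
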
filter_idx = efx_farch_filter_id_index(filter_id); if (filter_idx >= table->size) - return -ENOENT; + goto out_unlock; spec = &table->spec[filter_idx]; - spin_lock_bh(&efx->filter_lock); - if (test_bit(filter_idx, table->used_bitmap) && spec->priority == priority) { efx_farch_filter_to_gen_spec(spec_buf, spec); rc = 0; - } else { - rc = -ENOENT; } - spin_unlock_bh(&efx->filter_lock); - +out_unlock: + up_read(&state->lock); return rc; } @@ -2659,13 +2657,13 @@ efx_farch_filter_table_clear(struct efx_nic *efx, struct efx_farch_filter_table *table = &state->table[table_id]; unsigned int filter_idx; - spin_lock_bh(&efx->filter_lock); + down_write(&state->lock); for (filter_idx = 0; filter_idx < table->size; ++filter_idx) { if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO) efx_farch_filter_remove(efx, table, filter_idx, priority); } - spin_unlock_bh(&efx->filter_lock); + up_write(&state->lock); } int efx_farch_filter_clear_rx(struct efx_nic *efx, @@ -2689,7 +2687,7 @@ u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, unsigned int filter_idx; u32 count = 0; - spin_lock_bh(&efx->filter_lock); + down_read(&state->lock); for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; @@ -2702,7 +2700,7 @@ u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, } } - spin_unlock_bh(&efx->filter_lock); + up_read(&state->lock); return count; } @@ -2717,7 +2715,7 @@ s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, unsigned int filter_idx; s32 count = 0; - spin_lock_bh(&efx->filter_lock); + down_read(&state->lock); for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; @@ -2736,7 +2734,7 @@ s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, } } out: - spin_unlock_bh(&efx->filter_lock); + up_read(&state->lock); return count; } @@ -2750,7 +2748,7 @@ void efx_farch_filter_table_restore(struct efx_nic *efx) efx_oword_t filter; unsigned int filter_idx; - spin_lock_bh(&efx->filter_lock); + down_write(&state->lock); for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { table = &state->table[table_id]; @@ -2771,7 +2769,7 @@ void efx_farch_filter_table_restore(struct efx_nic *efx) efx_farch_filter_push_rx_config(efx); efx_farch_filter_push_tx_limits(efx); - spin_unlock_bh(&efx->filter_lock); + up_write(&state->lock); } void efx_farch_filter_table_remove(struct efx_nic *efx) @@ -2865,7 +2863,7 @@ void efx_farch_filter_update_rx_scatter(struct efx_nic *efx) efx_oword_t filter; unsigned int filter_idx; - spin_lock_bh(&efx->filter_lock); + down_write(&state->lock); for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; @@ -2897,33 +2895,30 @@ void efx_farch_filter_update_rx_scatter(struct efx_nic *efx) efx_farch_filter_push_rx_config(efx); - spin_unlock_bh(&efx->filter_lock); + up_write(&state->lock); } #ifdef CONFIG_RFS_ACCEL -s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, - struct efx_filter_spec *gen_spec) -{ - return efx_farch_filter_insert(efx, gen_spec, true); -} - bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, unsigned int index) { struct efx_farch_filter_state *state = efx->filter_state; - struct efx_farch_filter_table *table = - &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; + struct efx_farch_filter_table *table; + bool ret = false; + down_write(&state->lock); + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; if (test_bit(index, table->used_bitmap) && table->spec[index].priority == EFX_FILTER_PRI_HINT && rps_may_expire_flow(efx->net_dev, 
table->spec[index].dmaq_id, flow_id, index)) { efx_farch_filter_table_clear_entry(efx, table, index); - return true; + ret = true; } - return false; + up_write(&state->lock); + return ret; } #endif /* CONFIG_RFS_ACCEL */ diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index 8189a1cd973f..59021ad6d98d 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -125,7 +125,9 @@ enum efx_encap_type { * @match_flags: Match type flags, from &enum efx_filter_match_flags * @priority: Priority of the filter, from &enum efx_filter_priority * @flags: Miscellaneous flags, from &enum efx_filter_flags - * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set + * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set. This + * is a user_id (with 0 meaning the driver/default RSS context), not an + * MCFW context_id. * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for * an RX drop filter * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set @@ -173,7 +175,6 @@ struct efx_filter_spec { }; enum { - EFX_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff, EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff }; @@ -185,7 +186,7 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec, memset(spec, 0, sizeof(*spec)); spec->priority = priority; spec->flags = EFX_FILTER_FLAG_RX | flags; - spec->rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; + spec->rss_context = 0; spec->dmaq_id = rxq_id; } diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index f97da05952c7..f17751559ccc 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c @@ -298,7 +298,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, attr->limit_value = limit_value; sysfs_attr_init(&attr->dev_attr.attr); attr->dev_attr.attr.name = attr->name; - attr->dev_attr.attr.mode = S_IRUGO; + attr->dev_attr.attr.mode = 0444; attr->dev_attr.show = reader; hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr; } diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 869d76f8f589..3839eec783ea 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -273,7 +273,8 @@ #define MC_CMD_ERR_NO_PRIVILEGE 0x1013 /* Workaround 26807 could not be turned on/off because some functions * have already installed filters. See the comment at - * MC_CMD_WORKAROUND_BUG26807. */ + * MC_CMD_WORKAROUND_BUG26807. + * May also be returned for other operations such as sub-variant switching. */ #define MC_CMD_ERR_FILTERS_PRESENT 0x1014 /* The clock whose frequency you've attempted to set * doesn't exist on this NIC */ @@ -292,6 +293,10 @@ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the * datapath absence may be temporary */ #define MC_CMD_ERR_NO_DATAPATH 0x1019 +/* The operation could not complete because some VIs are allocated */ +#define MC_CMD_ERR_VIS_PRESENT 0x101a +/* The operation could not complete because some PIO buffers are allocated */ +#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b #define MC_CMD_ERR_CODE_OFST 0 @@ -312,10 +317,17 @@ #define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4) #define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4) #define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4) -/* Points to the recovery mode entry point. */ +/* Points to the recovery mode entry point. Misnamed but kept for compatibility.
*/ #define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4) #define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4) #define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4) +/* Points to the recovery mode entry point. Same as above, but the right name. */ +#define SIENA_MC_BOOTROM_RECOVERY_VEC (0x800 - 2 * 0x4) +#define HUNT_MC_BOOTROM_RECOVERY_VEC (0x8000 - 2 * 0x4) +#define MEDFORD_MC_BOOTROM_RECOVERY_VEC (0x10000 - 2 * 0x4) + +/* Points to noflash mode entry point. */ +#define MEDFORD_MC_BOOTROM_REAL_NOFLASH_VEC (0x10000 - 4 * 0x4) /* The command set exported by the boot ROM (MCDI v0) */ #define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ @@ -365,7 +377,7 @@ #define MCDI_EVENT_LEVEL_LBN 33 #define MCDI_EVENT_LEVEL_WIDTH 3 /* enum: Info. */ -#define MCDI_EVENT_LEVEL_INFO 0x0 +#define MCDI_EVENT_LEVEL_INFO 0x0 /* enum: Warning. */ #define MCDI_EVENT_LEVEL_WARN 0x1 /* enum: Error. */ @@ -385,21 +397,21 @@ #define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 #define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 /* enum: Link is down or link speed could not be determined */ -#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0 +#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0 /* enum: 100Mbs */ -#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 +#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum: 1Gbs */ -#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 +#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 /* enum: 10Gbs */ -#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 +#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum: 40Gbs */ -#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4 +#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4 /* enum: 25Gbs */ -#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5 +#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5 /* enum: 50Gbs */ -#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6 +#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6 /* enum: 100Gbs */ -#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7 +#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7 #define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 #define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 @@ -606,23 +618,23 @@ /* enum: Transmit error */ #define MCDI_EVENT_CODE_TX_ERR 0xb /* enum: Tx flush has completed */ -#define MCDI_EVENT_CODE_TX_FLUSH 0xc +#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum: PTP packet received timestamp */ -#define MCDI_EVENT_CODE_PTP_RX 0xd +#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum: PTP NIC failure */ -#define MCDI_EVENT_CODE_PTP_FAULT 0xe +#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum: PTP PPS event */ -#define MCDI_EVENT_CODE_PTP_PPS 0xf +#define MCDI_EVENT_CODE_PTP_PPS 0xf /* enum: Rx flush has completed */ -#define MCDI_EVENT_CODE_RX_FLUSH 0x10 +#define MCDI_EVENT_CODE_RX_FLUSH 0x10 /* enum: Receive error */ #define MCDI_EVENT_CODE_RX_ERR 0x11 /* enum: AOE fault */ -#define MCDI_EVENT_CODE_AOE 0x12 +#define MCDI_EVENT_CODE_AOE 0x12 /* enum: Network port calibration failed (VCAL). */ -#define MCDI_EVENT_CODE_VCAL_FAIL 0x13 +#define MCDI_EVENT_CODE_VCAL_FAIL 0x13 /* enum: HW PPS event */ -#define MCDI_EVENT_CODE_HW_PPS 0x14 +#define MCDI_EVENT_CODE_HW_PPS 0x14 /* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and * a different format) */ @@ -654,7 +666,7 @@ /* enum: Artificial event generated by host and posted via MC for test * purposes. 
*/ -#define MCDI_EVENT_CODE_TESTGEN 0xfa +#define MCDI_EVENT_CODE_TESTGEN 0xfa #define MCDI_EVENT_CMDDONE_DATA_OFST 0 #define MCDI_EVENT_CMDDONE_DATA_LEN 4 #define MCDI_EVENT_CMDDONE_DATA_LBN 0 @@ -784,7 +796,7 @@ #define FCDI_EVENT_LEVEL_LBN 33 #define FCDI_EVENT_LEVEL_WIDTH 3 /* enum: Info. */ -#define FCDI_EVENT_LEVEL_INFO 0x0 +#define FCDI_EVENT_LEVEL_INFO 0x0 /* enum: Warning. */ #define FCDI_EVENT_LEVEL_WARN 0x1 /* enum: Error. */ @@ -916,7 +928,7 @@ #define MUM_EVENT_LEVEL_LBN 33 #define MUM_EVENT_LEVEL_WIDTH 3 /* enum: Info. */ -#define MUM_EVENT_LEVEL_INFO 0x0 +#define MUM_EVENT_LEVEL_INFO 0x0 /* enum: Warning. */ #define MUM_EVENT_LEVEL_WARN 0x1 /* enum: Error. */ @@ -1002,7 +1014,9 @@ /***********************************/ /* MC_CMD_READ32 - * Read multiple 32byte words from MC memory. + * Read multiple 32byte words from MC memory. Note - this command really + * belongs to INSECURE category but is required by shmboot. The command handler + * has additional checks to reject insecure calls. */ #define MC_CMD_READ32 0x1 @@ -1050,7 +1064,9 @@ /***********************************/ /* MC_CMD_COPYCODE - * Copy MC code between two locations and jump. + * Copy MC code between two locations and jump. Note - this command really + * belongs to INSECURE category but is required by shmboot. The command handler + * has additional checks to reject insecure calls. */ #define MC_CMD_COPYCODE 0x3 @@ -1139,7 +1155,7 @@ #define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0 #define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4 /* enum: indicates that the MC wasn't flash booted */ -#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0 @@ -1555,11 +1571,10 @@ #define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8 #define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4 -/* MC_CMD_PTP_IN_RESET_STATS msgrequest */ +/* MC_CMD_PTP_IN_RESET_STATS msgrequest: Reset PTP statistics */ #define MC_CMD_PTP_IN_RESET_STATS_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ /* MC_CMD_PTP_IN_CMD_LEN 4 */ -/* Reset PTP statistics */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ @@ -1710,11 +1725,10 @@ /* enum: External. */ #define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1 -/* MC_CMD_PTP_IN_RST_CLK msgrequest */ +/* MC_CMD_PTP_IN_RST_CLK msgrequest: Reset value of Timer Reg. */ #define MC_CMD_PTP_IN_RST_CLK_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ /* MC_CMD_PTP_IN_CMD_LEN 4 */ -/* Reset value of Timer Reg. 
*/ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ @@ -2687,8 +2701,16 @@ #define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4 #define MC_CMD_DRV_ATTACH_LBN 0 #define MC_CMD_DRV_ATTACH_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1 #define MC_CMD_DRV_PREBOOT_LBN 1 #define MC_CMD_DRV_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1 /* 1 to set new state, or 0 to just report the existing state */ #define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 #define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4 @@ -2711,8 +2733,14 @@ * support */ #define MC_CMD_FW_RULES_ENGINE 0x5 +/* enum: Prefer to use firmware with additional DPDK support */ +#define MC_CMD_FW_DPDK 0x6 +/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and + * bug69716) + */ +#define MC_CMD_FW_L3XUDP 0x7 /* enum: Only this option is allowed for non-admin functions */ -#define MC_CMD_FW_DONT_CARE 0xffffffff +#define MC_CMD_FW_DONT_CARE 0xffffffff /* MC_CMD_DRV_ATTACH_OUT msgresponse */ #define MC_CMD_DRV_ATTACH_OUT_LEN 4 @@ -2740,6 +2768,11 @@ * refers to the Sorrento external FPGA port. */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3 +/* enum: If set, indicates that VI spreading is currently enabled. Will always + * indicate the current state, regardless of the value in the WANT_VI_SPREADING + * input. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4 /***********************************/ @@ -3294,83 +3327,83 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0 #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4 /* enum: None. */ -#define MC_CMD_LOOPBACK_NONE 0x0 +#define MC_CMD_LOOPBACK_NONE 0x0 /* enum: Data. */ -#define MC_CMD_LOOPBACK_DATA 0x1 +#define MC_CMD_LOOPBACK_DATA 0x1 /* enum: GMAC. */ -#define MC_CMD_LOOPBACK_GMAC 0x2 +#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum: XGMII. */ #define MC_CMD_LOOPBACK_XGMII 0x3 /* enum: XGXS. */ -#define MC_CMD_LOOPBACK_XGXS 0x4 +#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum: XAUI. */ -#define MC_CMD_LOOPBACK_XAUI 0x5 +#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum: GMII. */ -#define MC_CMD_LOOPBACK_GMII 0x6 +#define MC_CMD_LOOPBACK_GMII 0x6 /* enum: SGMII. */ -#define MC_CMD_LOOPBACK_SGMII 0x7 +#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum: XGBR. */ -#define MC_CMD_LOOPBACK_XGBR 0x8 +#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum: XFI. */ -#define MC_CMD_LOOPBACK_XFI 0x9 +#define MC_CMD_LOOPBACK_XFI 0x9 /* enum: XAUI Far. */ -#define MC_CMD_LOOPBACK_XAUI_FAR 0xa +#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum: GMII Far. */ -#define MC_CMD_LOOPBACK_GMII_FAR 0xb +#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum: SGMII Far. */ -#define MC_CMD_LOOPBACK_SGMII_FAR 0xc +#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum: XFI Far. */ -#define MC_CMD_LOOPBACK_XFI_FAR 0xd +#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum: GPhy. */ -#define MC_CMD_LOOPBACK_GPHY 0xe +#define MC_CMD_LOOPBACK_GPHY 0xe /* enum: PhyXS. */ -#define MC_CMD_LOOPBACK_PHYXS 0xf +#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum: PCS. */ -#define MC_CMD_LOOPBACK_PCS 0x10 +#define MC_CMD_LOOPBACK_PCS 0x10 /* enum: PMA-PMD. */ -#define MC_CMD_LOOPBACK_PMAPMD 0x11 +#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum: Cross-Port. 
*/ -#define MC_CMD_LOOPBACK_XPORT 0x12 +#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum: XGMII-Wireside. */ -#define MC_CMD_LOOPBACK_XGMII_WS 0x13 +#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum: XAUI Wireside. */ -#define MC_CMD_LOOPBACK_XAUI_WS 0x14 +#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum: XAUI Wireside Far. */ -#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 +#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum: XAUI Wireside near. */ -#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 +#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum: GMII Wireside. */ -#define MC_CMD_LOOPBACK_GMII_WS 0x17 +#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum: XFI Wireside. */ -#define MC_CMD_LOOPBACK_XFI_WS 0x18 +#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum: XFI Wireside Far. */ -#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 +#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum: PhyXS Wireside. */ -#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a +#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum: PMA lanes MAC-Serdes. */ -#define MC_CMD_LOOPBACK_PMA_INT 0x1b +#define MC_CMD_LOOPBACK_PMA_INT 0x1b /* enum: KR Serdes Parallel (Encoder). */ -#define MC_CMD_LOOPBACK_SD_NEAR 0x1c +#define MC_CMD_LOOPBACK_SD_NEAR 0x1c /* enum: KR Serdes Serial. */ -#define MC_CMD_LOOPBACK_SD_FAR 0x1d +#define MC_CMD_LOOPBACK_SD_FAR 0x1d /* enum: PMA lanes MAC-Serdes Wireside. */ -#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e +#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e /* enum: KR Serdes Parallel Wireside (Full PCS). */ -#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f +#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f /* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ -#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 +#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 /* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ -#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21 +#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21 /* enum: KR Serdes Serial Wireside. */ -#define MC_CMD_LOOPBACK_SD_FES_WS 0x22 +#define MC_CMD_LOOPBACK_SD_FES_WS 0x22 /* enum: Near side of AOE Siena side port */ -#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 +#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 /* enum: Medford Wireside datapath loopback */ -#define MC_CMD_LOOPBACK_DATA_WS 0x24 +#define MC_CMD_LOOPBACK_DATA_WS 0x24 /* enum: Force link up without setting up any physical loopback (snapper use * only) */ -#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 +#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 /* Supported loopbacks. */ #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 @@ -3410,83 +3443,83 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4 /* enum: None. */ -/* MC_CMD_LOOPBACK_NONE 0x0 */ +/* MC_CMD_LOOPBACK_NONE 0x0 */ /* enum: Data. */ -/* MC_CMD_LOOPBACK_DATA 0x1 */ +/* MC_CMD_LOOPBACK_DATA 0x1 */ /* enum: GMAC. */ -/* MC_CMD_LOOPBACK_GMAC 0x2 */ +/* MC_CMD_LOOPBACK_GMAC 0x2 */ /* enum: XGMII. */ /* MC_CMD_LOOPBACK_XGMII 0x3 */ /* enum: XGXS. */ -/* MC_CMD_LOOPBACK_XGXS 0x4 */ +/* MC_CMD_LOOPBACK_XGXS 0x4 */ /* enum: XAUI. */ -/* MC_CMD_LOOPBACK_XAUI 0x5 */ +/* MC_CMD_LOOPBACK_XAUI 0x5 */ /* enum: GMII. */ -/* MC_CMD_LOOPBACK_GMII 0x6 */ +/* MC_CMD_LOOPBACK_GMII 0x6 */ /* enum: SGMII. */ -/* MC_CMD_LOOPBACK_SGMII 0x7 */ +/* MC_CMD_LOOPBACK_SGMII 0x7 */ /* enum: XGBR. */ -/* MC_CMD_LOOPBACK_XGBR 0x8 */ +/* MC_CMD_LOOPBACK_XGBR 0x8 */ /* enum: XFI. */ -/* MC_CMD_LOOPBACK_XFI 0x9 */ +/* MC_CMD_LOOPBACK_XFI 0x9 */ /* enum: XAUI Far. */ -/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */ +/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */ /* enum: GMII Far. 
*/ -/* MC_CMD_LOOPBACK_GMII_FAR 0xb */ +/* MC_CMD_LOOPBACK_GMII_FAR 0xb */ /* enum: SGMII Far. */ -/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */ +/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */ /* enum: XFI Far. */ -/* MC_CMD_LOOPBACK_XFI_FAR 0xd */ +/* MC_CMD_LOOPBACK_XFI_FAR 0xd */ /* enum: GPhy. */ -/* MC_CMD_LOOPBACK_GPHY 0xe */ +/* MC_CMD_LOOPBACK_GPHY 0xe */ /* enum: PhyXS. */ -/* MC_CMD_LOOPBACK_PHYXS 0xf */ +/* MC_CMD_LOOPBACK_PHYXS 0xf */ /* enum: PCS. */ -/* MC_CMD_LOOPBACK_PCS 0x10 */ +/* MC_CMD_LOOPBACK_PCS 0x10 */ /* enum: PMA-PMD. */ -/* MC_CMD_LOOPBACK_PMAPMD 0x11 */ +/* MC_CMD_LOOPBACK_PMAPMD 0x11 */ /* enum: Cross-Port. */ -/* MC_CMD_LOOPBACK_XPORT 0x12 */ +/* MC_CMD_LOOPBACK_XPORT 0x12 */ /* enum: XGMII-Wireside. */ -/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */ +/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */ /* enum: XAUI Wireside. */ -/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */ +/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */ /* enum: XAUI Wireside Far. */ -/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */ +/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */ /* enum: XAUI Wireside near. */ -/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */ +/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */ /* enum: GMII Wireside. */ -/* MC_CMD_LOOPBACK_GMII_WS 0x17 */ +/* MC_CMD_LOOPBACK_GMII_WS 0x17 */ /* enum: XFI Wireside. */ -/* MC_CMD_LOOPBACK_XFI_WS 0x18 */ +/* MC_CMD_LOOPBACK_XFI_WS 0x18 */ /* enum: XFI Wireside Far. */ -/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */ +/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */ /* enum: PhyXS Wireside. */ -/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */ +/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */ /* enum: PMA lanes MAC-Serdes. */ -/* MC_CMD_LOOPBACK_PMA_INT 0x1b */ +/* MC_CMD_LOOPBACK_PMA_INT 0x1b */ /* enum: KR Serdes Parallel (Encoder). */ -/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */ +/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */ /* enum: KR Serdes Serial. */ -/* MC_CMD_LOOPBACK_SD_FAR 0x1d */ +/* MC_CMD_LOOPBACK_SD_FAR 0x1d */ /* enum: PMA lanes MAC-Serdes Wireside. */ -/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */ +/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */ /* enum: KR Serdes Parallel Wireside (Full PCS). */ -/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */ +/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */ /* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ -/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */ +/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */ /* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ -/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */ +/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */ /* enum: KR Serdes Serial Wireside. */ -/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */ +/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */ /* enum: Near side of AOE Siena side port */ -/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */ +/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */ /* enum: Medford Wireside datapath loopback */ -/* MC_CMD_LOOPBACK_DATA_WS 0x24 */ +/* MC_CMD_LOOPBACK_DATA_WS 0x24 */ /* enum: Force link up without setting up any physical loopback (snapper use * only) */ -/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */ +/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */ /* Supported loopbacks. 
*/ #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8 @@ -3537,6 +3570,37 @@ /* Enum values, see field(s): */ /* 100M */ +/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */ +#define AN_TYPE_LEN 4 +#define AN_TYPE_TYPE_OFST 0 +#define AN_TYPE_TYPE_LEN 4 +/* enum: None, AN disabled or not supported */ +#define MC_CMD_AN_NONE 0x0 +/* enum: Clause 28 - BASE-T */ +#define MC_CMD_AN_CLAUSE28 0x1 +/* enum: Clause 37 - BASE-X */ +#define MC_CMD_AN_CLAUSE37 0x2 +/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable + * assemblies. Includes Clause 72/Clause 92 link-training. + */ +#define MC_CMD_AN_CLAUSE73 0x3 +#define AN_TYPE_TYPE_LBN 0 +#define AN_TYPE_TYPE_WIDTH 32 + +/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3 + */ +#define FEC_TYPE_LEN 4 +#define FEC_TYPE_TYPE_OFST 0 +#define FEC_TYPE_TYPE_LEN 4 +/* enum: No FEC */ +#define MC_CMD_FEC_NONE 0x0 +/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */ +#define MC_CMD_FEC_BASER 0x1 +/* enum: Clause 91/Clause 108 Reed-Solomon FEC */ +#define MC_CMD_FEC_RS 0x2 +#define FEC_TYPE_TYPE_LBN 0 +#define FEC_TYPE_TYPE_WIDTH 32 + /***********************************/ /* MC_CMD_GET_LINK @@ -3552,10 +3616,14 @@ /* MC_CMD_GET_LINK_OUT msgresponse */ #define MC_CMD_GET_LINK_OUT_LEN 28 -/* near-side advertised capabilities */ +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ #define MC_CMD_GET_LINK_OUT_CAP_OFST 0 #define MC_CMD_GET_LINK_OUT_CAP_LEN 4 -/* link-partner advertised capabilities */ +/* Link-partner advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ #define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 #define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4 /* Autonegotiated speed in mbit/s. The link may still be down even if this @@ -3598,6 +3666,97 @@ #define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 #define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 +/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */ +#define MC_CMD_GET_LINK_OUT_V2_LEN 44 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0 +#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4 +/* Link-partner advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4 +/* Autonegotiated speed in mbit/s. The link may still be down even if this + * reads non-zero. + */ +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4 +/* Current loopback setting. 
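
The FEC_TYPE structuredef above is what GET_LINK_OUT_V2 reports in its FEC_TYPE field. A hypothetical decoder into the ethtool FEC mode bits (mcdi_fec_to_ethtool() is illustrative, not the driver's actual helper) might read:

#include <linux/ethtool.h>

/* Map the MCDI FEC_TYPE enum to ethtool FEC mode bits. */
static __u32 mcdi_fec_to_ethtool(__u32 mcdi_fec)
{
	switch (mcdi_fec) {
	case 0x0:	/* MC_CMD_FEC_NONE */
		return ETHTOOL_FEC_OFF;
	case 0x1:	/* MC_CMD_FEC_BASER, Clause 74 "Firecode" */
		return ETHTOOL_FEC_BASER;
	case 0x2:	/* MC_CMD_FEC_RS, Clause 91/108 Reed-Solomon */
		return ETHTOOL_FEC_RS;
	default:
		return ETHTOOL_FEC_NONE;	/* unknown/unreported */
	}
}
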
*/ +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1 +/* This returns the negotiated flow control value. */ +#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20 +#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24 +#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4 +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */ +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */ +/* True local device capabilities (taking into account currently used PMD/MDI, + * e.g. plugged-in module). In general, subset of + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP, but may include extra _FEC_REQUEST + * bits, if the PMD requires FEC. 0 if unknown (e.g. module unplugged). Equal + * to SUPPORTED_CAP for non-pluggable PMDs. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. 
+ */ +#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_OFST 28 +#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_LEN 4 +/* Auto-negotiation type used on the link */ +#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_OFST 32 +#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* AN_TYPE/TYPE */ +/* Forward error correction used on the link */ +#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST 36 +#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* FEC_TYPE/TYPE */ +#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1 + /***********************************/ /* MC_CMD_SET_LINK @@ -3610,7 +3769,9 @@ /* MC_CMD_SET_LINK_IN msgrequest */ #define MC_CMD_SET_LINK_IN_LEN 16 -/* ??? */ +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ #define MC_CMD_SET_LINK_IN_CAP_OFST 0 #define MC_CMD_SET_LINK_IN_CAP_LEN 4 /* Flags */ @@ -3650,9 +3811,9 @@ /* Set LED state. */ #define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 #define MC_CMD_SET_ID_LED_IN_STATE_LEN 4 -#define MC_CMD_LED_OFF 0x0 /* enum */ -#define MC_CMD_LED_ON 0x1 /* enum */ -#define MC_CMD_LED_DEFAULT 0x2 /* enum */ +#define MC_CMD_LED_OFF 0x0 /* enum */ +#define MC_CMD_LED_ON 0x1 /* enum */ +#define MC_CMD_LED_DEFAULT 0x2 /* enum */ /* MC_CMD_SET_ID_LED_OUT msgresponse */ #define MC_CMD_SET_ID_LED_OUT_LEN 0 @@ -3802,53 +3963,53 @@ #define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4 #define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS /* enum: OUI. */ -#define MC_CMD_OUI 0x0 +#define MC_CMD_OUI 0x0 /* enum: PMA-PMD Link Up. */ -#define MC_CMD_PMA_PMD_LINK_UP 0x1 +#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum: PMA-PMD RX Fault. */ -#define MC_CMD_PMA_PMD_RX_FAULT 0x2 +#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum: PMA-PMD TX Fault. */ -#define MC_CMD_PMA_PMD_TX_FAULT 0x3 +#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum: PMA-PMD Signal */ -#define MC_CMD_PMA_PMD_SIGNAL 0x4 +#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum: PMA-PMD SNR A. */ -#define MC_CMD_PMA_PMD_SNR_A 0x5 +#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum: PMA-PMD SNR B. */ -#define MC_CMD_PMA_PMD_SNR_B 0x6 +#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum: PMA-PMD SNR C. */ -#define MC_CMD_PMA_PMD_SNR_C 0x7 +#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum: PMA-PMD SNR D. */ -#define MC_CMD_PMA_PMD_SNR_D 0x8 +#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum: PCS Link Up. */ -#define MC_CMD_PCS_LINK_UP 0x9 +#define MC_CMD_PCS_LINK_UP 0x9 /* enum: PCS RX Fault. */ -#define MC_CMD_PCS_RX_FAULT 0xa +#define MC_CMD_PCS_RX_FAULT 0xa /* enum: PCS TX Fault. 
*/ -#define MC_CMD_PCS_TX_FAULT 0xb +#define MC_CMD_PCS_TX_FAULT 0xb /* enum: PCS BER. */ -#define MC_CMD_PCS_BER 0xc +#define MC_CMD_PCS_BER 0xc /* enum: PCS Block Errors. */ -#define MC_CMD_PCS_BLOCK_ERRORS 0xd +#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum: PhyXS Link Up. */ -#define MC_CMD_PHYXS_LINK_UP 0xe +#define MC_CMD_PHYXS_LINK_UP 0xe /* enum: PhyXS RX Fault. */ -#define MC_CMD_PHYXS_RX_FAULT 0xf +#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum: PhyXS TX Fault. */ -#define MC_CMD_PHYXS_TX_FAULT 0x10 +#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum: PhyXS Align. */ -#define MC_CMD_PHYXS_ALIGN 0x11 +#define MC_CMD_PHYXS_ALIGN 0x11 /* enum: PhyXS Sync. */ -#define MC_CMD_PHYXS_SYNC 0x12 +#define MC_CMD_PHYXS_SYNC 0x12 /* enum: AN link-up. */ -#define MC_CMD_AN_LINK_UP 0x13 +#define MC_CMD_AN_LINK_UP 0x13 /* enum: AN Complete. */ -#define MC_CMD_AN_COMPLETE 0x14 +#define MC_CMD_AN_COMPLETE 0x14 /* enum: AN 10GBaseT Status. */ -#define MC_CMD_AN_10GBT_STATUS 0x15 +#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum: Clause 22 Link-Up. */ -#define MC_CMD_CL22_LINK_UP 0x16 +#define MC_CMD_CL22_LINK_UP 0x16 /* enum: (Last entry) */ -#define MC_CMD_PHY_NSTATS 0x17 +#define MC_CMD_PHY_NSTATS 0x17 /***********************************/ @@ -3910,139 +4071,139 @@ #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS -#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */ -#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */ -#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */ -#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */ -#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */ -#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */ -#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */ -#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */ -#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */ -#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */ -#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */ -#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */ -#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */ -#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */ -#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */ -#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */ -#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */ -#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */ -#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */ -#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */ -#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */ -#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */ -#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */ -#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */ -#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */ -#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */ -#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */ -#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */ -#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */ -#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */ -#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */ -#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */ -#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */ -#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */ -#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */ -#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */ -#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */ -#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */ -#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */ -#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */ -#define 
MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */ -#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */ -#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */ -#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */ -#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */ -#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */ -#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */ -#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */ -#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */ -#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */ -#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */ -#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */ -#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */ -#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */ -#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */ -#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */ -#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */ -#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */ -#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ -#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ -#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ +#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */ +#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */ +#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */ +#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */ +#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */ +#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */ +#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */ +#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */ +#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */ +#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */ +#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */ +#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */ +#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */ +#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */ +#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */ +#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */ +#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */ +#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */ +#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */ +#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */ +#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */ +#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */ +#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */ +#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */ +#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */ +#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */ +#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */ +#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */ +#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */ +#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */ +#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */ +#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */ +#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */ +#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */ +#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */ +#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */ +#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */ +#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */ +#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */ +#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */ +#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */ +#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */ +#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */ +#define 
MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */ +#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */ +#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */ +#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */ +#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */ +#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */ +#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */ +#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */ +#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */ +#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */ +#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */ +#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */ +#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */ +#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ +#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ +#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ /* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c +#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c /* enum: PM discard_bb_overflow counter. Valid for EF10 with * PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d +#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d /* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e +#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e /* enum: PM discard_vfifo_full counter. Valid for EF10 with * PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f +#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f /* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_TRUNC_QBB 0x40 +#define MC_CMD_MAC_PM_TRUNC_QBB 0x40 /* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_DISCARD_QBB 0x41 +#define MC_CMD_MAC_PM_DISCARD_QBB 0x41 /* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42 +#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42 /* enum: RXDP counter: Number of packets dropped due to the queue being * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43 +#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43 /* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10 * with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45 +#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45 /* enum: RXDP counter: Number of non-host packets. Valid for EF10 with * PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 +#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 /* enum: RXDP counter: Number of times an hlb descriptor fetch was performed. * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47 +#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47 /* enum: RXDP counter: Number of times the DPCPU waited for an existing * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. 
*/ -#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48 -#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */ +#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48 +#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */ /* enum: Start of GMAC stats buffer space, for Siena only. */ -#define MC_CMD_GMAC_DMABUF_START 0x40 +#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum: End of GMAC stats buffer space, for Siena only. */ -#define MC_CMD_GMAC_DMABUF_END 0x5f +#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum: GENERATION_END value, used together with GENERATION_START to verify * consistency of DMAd data. For legacy firmware / drivers without extended * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS * @@ -4054,7 +4215,7 @@ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details. 
*/ #define MC_CMD_MAC_GENERATION_END 0x60 -#define MC_CMD_MAC_NSTATS 0x61 /* enum */ +#define MC_CMD_MAC_NSTATS 0x61 /* enum */ /* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */ #define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0 @@ -4067,25 +4228,25 @@ #define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4 #define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2 /* enum: Start of FEC stats buffer space, Medford2 and up */ -#define MC_CMD_MAC_FEC_DMABUF_START 0x61 +#define MC_CMD_MAC_FEC_DMABUF_START 0x61 /* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2) */ -#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61 +#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61 /* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2) */ -#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62 +#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62 /* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */ -#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63 +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63 /* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */ -#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64 +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64 /* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */ -#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65 +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65 /* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */ -#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66 +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66 /* enum: This includes the space at offset 103 which is the final * GENERATION_END in a MAC_STATS_V2 response and otherwise unused. */ -#define MC_CMD_MAC_NSTATS_V2 0x68 +#define MC_CMD_MAC_NSTATS_V2 0x68 /* Other enum values, see field(s): */ /* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */ @@ -4100,66 +4261,66 @@ #define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4 #define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3 /* enum: Start of CTPIO stats buffer space, Medford2 and up */ -#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68 +#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68 /* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the * target VI */ -#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68 +#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68 /* enum: Number of times a CTPIO send wrote beyond frame end (informational * only) */ -#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69 +#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69 /* enum: Number of CTPIO failures because the TX doorbell was written before * the end of the frame data */ -#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a +#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a /* enum: Number of CTPIO failures because the internal FIFO overflowed */ -#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b +#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b /* enum: Number of CTPIO failures because the host did not deliver data fast * enough to avoid MAC underflow */ -#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c +#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c /* enum: Number of CTPIO failures because the host did not deliver all the * frame data within the timeout */ -#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d +#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d /* enum: Number of CTPIO failures because the frame data arrived out of order * or with gaps */ -#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e +#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e /* enum: 
Number of CTPIO failures because the host started a new frame before * completing the previous one */ -#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f +#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f /* enum: Number of CTPIO failures because a write was not a multiple of 32 bits * or not 32-bit aligned */ -#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70 +#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70 /* enum: Number of CTPIO fallbacks because another VI on the same port was * sending a CTPIO frame */ -#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71 +#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71 /* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled */ -#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72 +#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72 /* enum: Number of CTPIO fallbacks because length in header was less than 29 * bytes */ -#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73 +#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73 /* enum: Total number of successful CTPIO sends on this port */ -#define MC_CMD_MAC_CTPIO_SUCCESS 0x74 +#define MC_CMD_MAC_CTPIO_SUCCESS 0x74 /* enum: Total number of CTPIO fallbacks on this port */ -#define MC_CMD_MAC_CTPIO_FALLBACK 0x75 +#define MC_CMD_MAC_CTPIO_FALLBACK 0x75 /* enum: Total number of CTPIO poisoned frames on this port, whether erased or * not */ -#define MC_CMD_MAC_CTPIO_POISON 0x76 +#define MC_CMD_MAC_CTPIO_POISON 0x76 /* enum: Total number of CTPIO erased frames on this port */ -#define MC_CMD_MAC_CTPIO_ERASE 0x77 +#define MC_CMD_MAC_CTPIO_ERASE 0x77 /* enum: This includes the space at offset 120 which is the final * GENERATION_END in a MAC_STATS_V3 response and otherwise unused. */ -#define MC_CMD_MAC_NSTATS_V3 0x79 +#define MC_CMD_MAC_NSTATS_V3 0x79 /* Other enum values, see field(s): */ /* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */ @@ -4268,25 +4429,25 @@ #define MC_CMD_WOL_FILTER_SET_IN_LEN 192 #define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 #define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 -#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ +#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ #define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */ /* A type value of 1 is unused. 
*/ #define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 #define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 /* enum: Magic */ -#define MC_CMD_WOL_TYPE_MAGIC 0x0 +#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum: MS Windows Magic */ #define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum: IPv4 Syn */ -#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 +#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum: IPv6 Syn */ -#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 +#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum: Bitmap */ -#define MC_CMD_WOL_TYPE_BITMAP 0x5 +#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum: Link */ -#define MC_CMD_WOL_TYPE_LINK 0x6 +#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum: (Above this for future use) */ -#define MC_CMD_WOL_TYPE_MAX 0x7 +#define MC_CMD_WOL_TYPE_MAX 0x7 #define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4 #define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46 @@ -4515,6 +4676,8 @@ #define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1 #define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5 #define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6 @@ -4542,6 +4705,8 @@ #define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1 #define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5 #define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7 @@ -4560,7 +4725,11 @@ /* MC_CMD_NVRAM_UPDATE_START * Start a group of update operations on a virtual NVRAM partition. Locks * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if - * PHY_LOCK required and not held). + * PHY_LOCK required and not held). In an adapter bound to a TSA controller, + * MC_CMD_NVRAM_UPDATE_START can only be used on a subset of partition types + * i.e. static config, dynamic config and expansion ROM config. Attempting to + * perform this operation on a restricted partition will return the error + * EPERM. */ #define MC_CMD_NVRAM_UPDATE_START 0x38 @@ -4720,8 +4889,12 @@ /***********************************/ /* MC_CMD_NVRAM_UPDATE_FINISH * Finish a group of update operations on a virtual NVRAM partition. Locks - * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad - * type/offset/length), EACCES (if PHY_LOCK required and not held) + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type/offset/ + * length), EACCES (if PHY_LOCK required and not held). In an adapter bound to + * a TSA controller, MC_CMD_NVRAM_UPDATE_FINISH can only be used on a subset of + * partition types i.e. static config, dynamic config and expansion ROM config. + * Attempting to perform this operation on a restricted partition will return + * the error EPERM. 
*/ #define MC_CMD_NVRAM_UPDATE_FINISH 0x3c @@ -4958,181 +5131,181 @@ #define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 #define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4 /* enum: Controller temperature: degC */ -#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 +#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum: Phy common temperature: degC */ -#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 +#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum: Controller cooling: bool */ -#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 +#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum: Phy 0 temperature: degC */ -#define MC_CMD_SENSOR_PHY0_TEMP 0x3 +#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum: Phy 0 cooling: bool */ -#define MC_CMD_SENSOR_PHY0_COOLING 0x4 +#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum: Phy 1 temperature: degC */ -#define MC_CMD_SENSOR_PHY1_TEMP 0x5 +#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum: Phy 1 cooling: bool */ -#define MC_CMD_SENSOR_PHY1_COOLING 0x6 +#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum: 1.0v power: mV */ -#define MC_CMD_SENSOR_IN_1V0 0x7 +#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum: 1.2v power: mV */ -#define MC_CMD_SENSOR_IN_1V2 0x8 +#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum: 1.8v power: mV */ -#define MC_CMD_SENSOR_IN_1V8 0x9 +#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum: 2.5v power: mV */ -#define MC_CMD_SENSOR_IN_2V5 0xa +#define MC_CMD_SENSOR_IN_2V5 0xa /* enum: 3.3v power: mV */ -#define MC_CMD_SENSOR_IN_3V3 0xb +#define MC_CMD_SENSOR_IN_3V3 0xb /* enum: 12v power: mV */ -#define MC_CMD_SENSOR_IN_12V0 0xc +#define MC_CMD_SENSOR_IN_12V0 0xc /* enum: 1.2v analogue power: mV */ -#define MC_CMD_SENSOR_IN_1V2A 0xd +#define MC_CMD_SENSOR_IN_1V2A 0xd /* enum: reference voltage: mV */ -#define MC_CMD_SENSOR_IN_VREF 0xe +#define MC_CMD_SENSOR_IN_VREF 0xe /* enum: AOE FPGA power: mV */ -#define MC_CMD_SENSOR_OUT_VAOE 0xf +#define MC_CMD_SENSOR_OUT_VAOE 0xf /* enum: AOE FPGA temperature: degC */ -#define MC_CMD_SENSOR_AOE_TEMP 0x10 +#define MC_CMD_SENSOR_AOE_TEMP 0x10 /* enum: AOE FPGA PSU temperature: degC */ -#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11 +#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11 /* enum: AOE PSU temperature: degC */ -#define MC_CMD_SENSOR_PSU_TEMP 0x12 +#define MC_CMD_SENSOR_PSU_TEMP 0x12 /* enum: Fan 0 speed: RPM */ -#define MC_CMD_SENSOR_FAN_0 0x13 +#define MC_CMD_SENSOR_FAN_0 0x13 /* enum: Fan 1 speed: RPM */ -#define MC_CMD_SENSOR_FAN_1 0x14 +#define MC_CMD_SENSOR_FAN_1 0x14 /* enum: Fan 2 speed: RPM */ -#define MC_CMD_SENSOR_FAN_2 0x15 +#define MC_CMD_SENSOR_FAN_2 0x15 /* enum: Fan 3 speed: RPM */ -#define MC_CMD_SENSOR_FAN_3 0x16 +#define MC_CMD_SENSOR_FAN_3 0x16 /* enum: Fan 4 speed: RPM */ -#define MC_CMD_SENSOR_FAN_4 0x17 +#define MC_CMD_SENSOR_FAN_4 0x17 /* enum: AOE FPGA input power: mV */ -#define MC_CMD_SENSOR_IN_VAOE 0x18 +#define MC_CMD_SENSOR_IN_VAOE 0x18 /* enum: AOE FPGA current: mA */ -#define MC_CMD_SENSOR_OUT_IAOE 0x19 +#define MC_CMD_SENSOR_OUT_IAOE 0x19 /* enum: AOE FPGA input current: mA */ -#define MC_CMD_SENSOR_IN_IAOE 0x1a +#define MC_CMD_SENSOR_IN_IAOE 0x1a /* enum: NIC power consumption: W */ -#define MC_CMD_SENSOR_NIC_POWER 0x1b +#define MC_CMD_SENSOR_NIC_POWER 0x1b /* enum: 0.9v power voltage: mV */ -#define MC_CMD_SENSOR_IN_0V9 0x1c +#define MC_CMD_SENSOR_IN_0V9 0x1c /* enum: 0.9v power current: mA */ -#define MC_CMD_SENSOR_IN_I0V9 0x1d +#define MC_CMD_SENSOR_IN_I0V9 0x1d /* enum: 1.2v power current: mA */ -#define MC_CMD_SENSOR_IN_I1V2 0x1e +#define MC_CMD_SENSOR_IN_I1V2 0x1e /* enum: Not a sensor: reserved for the next page flag */ -#define MC_CMD_SENSOR_PAGE0_NEXT 
0x1f +#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f /* enum: 0.9v power voltage (at ADC): mV */ -#define MC_CMD_SENSOR_IN_0V9_ADC 0x20 +#define MC_CMD_SENSOR_IN_0V9_ADC 0x20 /* enum: Controller temperature 2: degC */ -#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21 +#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21 /* enum: Voltage regulator internal temperature: degC */ -#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22 +#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22 /* enum: 0.9V voltage regulator temperature: degC */ -#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23 +#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23 /* enum: 1.2V voltage regulator temperature: degC */ -#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24 +#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24 /* enum: controller internal temperature sensor voltage (internal ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25 +#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25 /* enum: controller internal temperature (internal ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26 +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26 /* enum: controller internal temperature sensor voltage (external ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27 +#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27 /* enum: controller internal temperature (external ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28 +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28 /* enum: ambient temperature: degC */ -#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29 +#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29 /* enum: air flow: bool */ -#define MC_CMD_SENSOR_AIRFLOW 0x2a +#define MC_CMD_SENSOR_AIRFLOW 0x2a /* enum: voltage between VDD08D and VSS08D at CSR: mV */ -#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b /* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */ -#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c /* enum: Hotpoint temperature: degC */ -#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d +#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d /* enum: Port 0 PHY power switch over-current: bool */ -#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e +#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e /* enum: Port 1 PHY power switch over-current: bool */ -#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f -/* enum: Mop-up microcontroller reference voltage (millivolts) */ -#define MC_CMD_SENSOR_MUM_VCC 0x30 +#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f +/* enum: Mop-up microcontroller reference voltage: mV */ +#define MC_CMD_SENSOR_MUM_VCC 0x30 /* enum: 0.9v power phase A voltage: mV */ -#define MC_CMD_SENSOR_IN_0V9_A 0x31 +#define MC_CMD_SENSOR_IN_0V9_A 0x31 /* enum: 0.9v power phase A current: mA */ -#define MC_CMD_SENSOR_IN_I0V9_A 0x32 +#define MC_CMD_SENSOR_IN_I0V9_A 0x32 /* enum: 0.9V voltage regulator phase A temperature: degC */ -#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33 +#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33 /* enum: 0.9v power phase B voltage: mV */ -#define MC_CMD_SENSOR_IN_0V9_B 0x34 +#define MC_CMD_SENSOR_IN_0V9_B 0x34 /* enum: 0.9v power phase B current: mA */ -#define MC_CMD_SENSOR_IN_I0V9_B 0x35 +#define MC_CMD_SENSOR_IN_I0V9_B 0x35 /* enum: 0.9V voltage regulator phase B temperature: degC */ -#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36 +#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36 /* enum: CCOM AVREG 1v2 supply (internal ADC): mV */ -#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37 +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37 /* enum: CCOM AVREG 1v2 supply (external ADC):
mV */ -#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38 +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38 /* enum: CCOM AVREG 1v8 supply (internal ADC): mV */ -#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39 +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39 /* enum: CCOM AVREG 1v8 supply (external ADC): mV */ -#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a /* enum: CCOM RTS temperature: degC */ -#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b +#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b /* enum: Not a sensor: reserved for the next page flag */ -#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f +#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f /* enum: controller internal temperature sensor voltage on master core * (internal ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40 +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40 /* enum: controller internal temperature on master core (internal ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41 +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41 /* enum: controller internal temperature sensor voltage on master core * (external ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42 +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42 /* enum: controller internal temperature on master core (external ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43 +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43 /* enum: controller internal temperature on slave core sensor voltage (internal * ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44 +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44 /* enum: controller internal temperature on slave core (internal ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45 +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45 /* enum: controller internal temperature on slave core sensor voltage (external * ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46 +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46 /* enum: controller internal temperature on slave core (external ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47 +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47 /* enum: Voltage supplied to the SODIMMs from their power supply: mV */ -#define MC_CMD_SENSOR_SODIMM_VOUT 0x49 +#define MC_CMD_SENSOR_SODIMM_VOUT 0x49 /* enum: Temperature of SODIMM 0 (if installed): degC */ -#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a +#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a /* enum: Temperature of SODIMM 1 (if installed): degC */ -#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b +#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b /* enum: Voltage supplied to the QSFP #0 from its power supply: mV */ -#define MC_CMD_SENSOR_PHY0_VCC 0x4c +#define MC_CMD_SENSOR_PHY0_VCC 0x4c /* enum: Voltage supplied to the QSFP #1 from its power supply: mV */ -#define MC_CMD_SENSOR_PHY1_VCC 0x4d +#define MC_CMD_SENSOR_PHY1_VCC 0x4d /* enum: Controller die temperature (TDIODE): degC */ -#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e +#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e /* enum: Board temperature (front): degC */ -#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f +#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f /* enum: Board temperature (back): degC */ -#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50 +#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50 /* enum: 1.8v power current: mA */ -#define MC_CMD_SENSOR_IN_I1V8
0x51 +#define MC_CMD_SENSOR_IN_I1V8 0x51 /* enum: 2.5v power current: mA */ -#define MC_CMD_SENSOR_IN_I2V5 0x52 +#define MC_CMD_SENSOR_IN_I2V5 0x52 /* enum: 3.3v power current: mA */ -#define MC_CMD_SENSOR_IN_I3V3 0x53 +#define MC_CMD_SENSOR_IN_I3V3 0x53 /* enum: 12v power current: mA */ -#define MC_CMD_SENSOR_IN_I12V0 0x54 +#define MC_CMD_SENSOR_IN_I12V0 0x54 /* enum: 1.3v power: mV */ -#define MC_CMD_SENSOR_IN_1V3 0x55 +#define MC_CMD_SENSOR_IN_1V3 0x55 /* enum: 1.3v power current: mA */ -#define MC_CMD_SENSOR_IN_I1V3 0x56 +#define MC_CMD_SENSOR_IN_I1V3 0x56 /* enum: Not a sensor: reserved for the next page flag */ -#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f +#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ #define MC_CMD_SENSOR_ENTRY_OFST 4 #define MC_CMD_SENSOR_ENTRY_LEN 8 @@ -5234,17 +5407,17 @@ #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1 /* enum: Ok. */ -#define MC_CMD_SENSOR_STATE_OK 0x0 +#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum: Breached warning threshold. */ -#define MC_CMD_SENSOR_STATE_WARNING 0x1 +#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum: Breached fatal threshold. */ -#define MC_CMD_SENSOR_STATE_FATAL 0x2 +#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum: Fault with sensor. */ -#define MC_CMD_SENSOR_STATE_BROKEN 0x3 +#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum: Sensor is working but does not currently have a reading. */ -#define MC_CMD_SENSOR_STATE_NO_READING 0x4 +#define MC_CMD_SENSOR_STATE_NO_READING 0x4 /* enum: Sensor initialisation failed. */ -#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5 +#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3 @@ -5327,7 +5500,7 @@ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 #define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */ -#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */ +#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1 @@ -5416,17 +5589,17 @@ /* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless * you're testing firmware, this is what you want. 
*/ -#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0 +#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0 /* enum: Assert using assert(0); */ -#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1 +#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1 /* enum: Deliberately trigger a watchdog */ -#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2 +#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2 /* enum: Deliberately trigger a trap by loading from an invalid address */ -#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3 +#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3 /* enum: Deliberately trigger a trap by storing to an invalid address */ -#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4 +#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4 /* enum: Jump to an invalid address */ -#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5 +#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5 /* MC_CMD_TESTASSERT_V2_OUT msgresponse */ #define MC_CMD_TESTASSERT_V2_OUT_LEN 0 @@ -5969,7 +6142,7 @@ /* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_LOG_OP_OFST 4 #define MC_CMD_MUM_IN_LOG_OP_LEN 4 -#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ +#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ /* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */ #define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12 @@ -6464,17 +6637,17 @@ #define EVB_PORT_ID_PORT_ID_OFST 0 #define EVB_PORT_ID_PORT_ID_LEN 4 /* enum: An invalid port handle. */ -#define EVB_PORT_ID_NULL 0x0 +#define EVB_PORT_ID_NULL 0x0 /* enum: The port assigned to this function. */ -#define EVB_PORT_ID_ASSIGNED 0x1000000 +#define EVB_PORT_ID_ASSIGNED 0x1000000 /* enum: External network port 0 */ -#define EVB_PORT_ID_MAC0 0x2000000 +#define EVB_PORT_ID_MAC0 0x2000000 /* enum: External network port 1 */ -#define EVB_PORT_ID_MAC1 0x2000001 +#define EVB_PORT_ID_MAC1 0x2000001 /* enum: External network port 2 */ -#define EVB_PORT_ID_MAC2 0x2000002 +#define EVB_PORT_ID_MAC2 0x2000002 /* enum: External network port 3 */ -#define EVB_PORT_ID_MAC3 0x2000003 +#define EVB_PORT_ID_MAC3 0x2000003 #define EVB_PORT_ID_PORT_ID_LBN 0 #define EVB_PORT_ID_PORT_ID_WIDTH 32 @@ -6486,7 +6659,7 @@ #define EVB_VLAN_TAG_MODE_LBN 12 #define EVB_VLAN_TAG_MODE_WIDTH 4 /* enum: Insert the VLAN. */ -#define EVB_VLAN_TAG_INSERT 0x0 +#define EVB_VLAN_TAG_INSERT 0x0 /* enum: Replace the VLAN if already present.
*/ #define EVB_VLAN_TAG_REPLACE 0x1 @@ -6515,110 +6688,110 @@ #define NVRAM_PARTITION_TYPE_ID_OFST 0 #define NVRAM_PARTITION_TYPE_ID_LEN 2 /* enum: Primary MC firmware partition */ -#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100 +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100 /* enum: Secondary MC firmware partition */ -#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200 +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200 /* enum: Expansion ROM partition */ -#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300 +#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300 /* enum: Static configuration TLV partition */ -#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400 +#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400 /* enum: Dynamic configuration TLV partition */ -#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500 +#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500 /* enum: Expansion ROM configuration data for port 0 */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600 /* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600 /* enum: Expansion ROM configuration data for port 1 */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601 /* enum: Expansion ROM configuration data for port 2 */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602 /* enum: Expansion ROM configuration data for port 3 */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603 /* enum: Non-volatile log output partition */ -#define NVRAM_PARTITION_TYPE_LOG 0x700 +#define NVRAM_PARTITION_TYPE_LOG 0x700 /* enum: Non-volatile log output of second core on dual-core device */ -#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701 +#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701 /* enum: Device state dump output partition */ -#define NVRAM_PARTITION_TYPE_DUMP 0x800 +#define NVRAM_PARTITION_TYPE_DUMP 0x800 /* enum: Application license key storage partition */ -#define NVRAM_PARTITION_TYPE_LICENSE 0x900 +#define NVRAM_PARTITION_TYPE_LICENSE 0x900 /* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */ -#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 +#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 /* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */ -#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff +#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff /* enum: Primary FPGA partition */ -#define NVRAM_PARTITION_TYPE_FPGA 0xb00 +#define NVRAM_PARTITION_TYPE_FPGA 0xb00 /* enum: Secondary FPGA partition */ -#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 +#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 /* enum: FC firmware partition */ -#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 +#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 /* enum: FC License partition */ -#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 +#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 /* enum: Non-volatile log output partition for FC */ -#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 +#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 /* enum: MUM firmware partition */ -#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 /* enum: SUC firmware partition (this is intentionally an alias of * MUM_FIRMWARE) */ -#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00 +#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 
0xc00 /* enum: MUM Non-volatile log output partition. */ -#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 +#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 /* enum: MUM Application table partition. */ -#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 +#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 /* enum: MUM boot rom partition. */ -#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 +#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 /* enum: MUM production signatures & calibration rom partition. */ -#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 +#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 /* enum: MUM user signatures & calibration rom partition. */ -#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 +#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 /* enum: MUM fuses and lockbits partition. */ -#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 +#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 /* enum: UEFI expansion ROM if separate from PXE */ -#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00 +#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00 /* enum: Used by the expansion ROM for logging */ -#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000 +#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000 /* enum: Used for XIP code of shmbooted images */ -#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100 +#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100 /* enum: Spare partition 2 */ -#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200 +#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200 /* enum: Manufacturing partition. Used during manufacture to pass information * between XJTAG and Manftest. */ -#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300 +#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300 /* enum: Spare partition 4 */ -#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400 +#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400 /* enum: Spare partition 5 */ -#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500 +#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500 /* enum: Partition for reporting MC status. See mc_flash_layout.h * medford_mc_status_hdr_t for layout on Medford. */ -#define NVRAM_PARTITION_TYPE_STATUS 0x1600 +#define NVRAM_PARTITION_TYPE_STATUS 0x1600 /* enum: Spare partition 13 */ -#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700 +#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700 /* enum: Spare partition 14 */ -#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800 +#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800 /* enum: Spare partition 15 */ -#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900 +#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900 /* enum: Spare partition 16 */ -#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00 +#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00 /* enum: Factory defaults for dynamic configuration */ -#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00 +#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00 /* enum: Factory defaults for expansion ROM configuration */ -#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00 +#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00 /* enum: Field Replaceable Unit inventory information for use on IPMI * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a * subset of the information stored in this partition. 
*/ -#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00 +#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00 /* enum: Start of reserved value range (firmware may use for any purpose) */ -#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ -#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd /* enum: Recovery partition map (provided if real map is missing or corrupt) */ -#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe +#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe /* enum: Partition map (real map as stored in flash) */ -#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff +#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff #define NVRAM_PARTITION_TYPE_ID_LBN 0 #define NVRAM_PARTITION_TYPE_ID_WIDTH 16 @@ -6627,37 +6800,37 @@ #define LICENSED_APP_ID_ID_OFST 0 #define LICENSED_APP_ID_ID_LEN 4 /* enum: OpenOnload */ -#define LICENSED_APP_ID_ONLOAD 0x1 +#define LICENSED_APP_ID_ONLOAD 0x1 /* enum: PTP timestamping */ -#define LICENSED_APP_ID_PTP 0x2 +#define LICENSED_APP_ID_PTP 0x2 /* enum: SolarCapture Pro */ -#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 /* enum: SolarSecure filter engine */ -#define LICENSED_APP_ID_SOLARSECURE 0x8 +#define LICENSED_APP_ID_SOLARSECURE 0x8 /* enum: Performance monitor */ -#define LICENSED_APP_ID_PERF_MONITOR 0x10 +#define LICENSED_APP_ID_PERF_MONITOR 0x10 /* enum: SolarCapture Live */ -#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 +#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 /* enum: Capture SolarSystem */ -#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 /* enum: Network Access Control */ -#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 +#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 /* enum: TCP Direct */ -#define LICENSED_APP_ID_TCP_DIRECT 0x100 +#define LICENSED_APP_ID_TCP_DIRECT 0x100 /* enum: Low Latency */ -#define LICENSED_APP_ID_LOW_LATENCY 0x200 +#define LICENSED_APP_ID_LOW_LATENCY 0x200 /* enum: SolarCapture Tap */ -#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400 +#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400 /* enum: Capture SolarSystem 40G */ #define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800 /* enum: Capture SolarSystem 1G */ -#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000 +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000 /* enum: ScaleOut Onload */ -#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000 +#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000 /* enum: SCS Network Analytics Dashboard */ -#define LICENSED_APP_ID_DSHBRD 0x4000 +#define LICENSED_APP_ID_DSHBRD 0x4000 /* enum: SolarCapture Trading Analytics */ -#define LICENSED_APP_ID_SCATRD 0x8000 +#define LICENSED_APP_ID_SCATRD 0x8000 #define LICENSED_APP_ID_ID_LBN 0 #define LICENSED_APP_ID_ID_WIDTH 32 @@ -6775,23 +6948,23 @@ #define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3 #define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1 /* enum: This is a TX completion event, not a timestamp */ -#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 +#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 /* enum: This is a TX completion event for a CTPIO transmit. The event format * is the same as for TX_EV_COMPLETION. */ -#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11 +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11 /* enum: This is the low part of a TX timestamp for a CTPIO transmission. 
The * event format is the same as for TX_EV_TSTAMP_LO */ -#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12 +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12 /* enum: This is the high part of a TX timestamp for a CTPIO transmission. The * event format is the same as for TX_EV_TSTAMP_HI */ -#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13 +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13 /* enum: This is the low part of a TX timestamp event */ -#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51 +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51 /* enum: This is the high part of a TX timestamp event */ -#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52 +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52 #define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24 #define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8 /* upper 16 bits of timestamp data */ @@ -7071,17 +7244,17 @@ #define QUEUE_CRC_MODE_MODE_LBN 0 #define QUEUE_CRC_MODE_MODE_WIDTH 4 /* enum: No CRC. */ -#define QUEUE_CRC_MODE_NONE 0x0 +#define QUEUE_CRC_MODE_NONE 0x0 /* enum: CRC Fiber channel over ethernet. */ -#define QUEUE_CRC_MODE_FCOE 0x1 +#define QUEUE_CRC_MODE_FCOE 0x1 /* enum: CRC (digest) iSCSI header only. */ -#define QUEUE_CRC_MODE_ISCSI_HDR 0x2 +#define QUEUE_CRC_MODE_ISCSI_HDR 0x2 /* enum: CRC (digest) iSCSI header and payload. */ -#define QUEUE_CRC_MODE_ISCSI 0x3 +#define QUEUE_CRC_MODE_ISCSI 0x3 /* enum: CRC Fiber channel over IP over ethernet. */ -#define QUEUE_CRC_MODE_FCOIPOE 0x4 +#define QUEUE_CRC_MODE_FCOIPOE 0x4 /* enum: CRC MPA. */ -#define QUEUE_CRC_MODE_MPA 0x5 +#define QUEUE_CRC_MODE_MPA 0x5 #define QUEUE_CRC_MODE_SPARE_LBN 4 #define QUEUE_CRC_MODE_SPARE_WIDTH 4 @@ -7157,11 +7330,15 @@ /* Size, in entries */ #define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0 #define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4 -/* The EVQ to send events to. This is an index originally specified to INIT_EVQ +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. */ #define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 #define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4 -/* The value to put in the event data. Check hardware spec. for valid range. */ +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_PACKED_STREAM or DMA_MODE + * == PACKED_STREAM. + */ #define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 #define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function @@ -7189,18 +7366,25 @@ #define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10 #define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4 /* enum: One packet per descriptor (for normal networking) */ -#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 +#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 /* enum: Pack multiple packets into large descriptors (for SolarCapture) */ -#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. 
+ */ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 #define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 #define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19 @@ -7221,12 +7405,122 @@ #define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540 #define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4 +/* MC_CMD_INIT_RXQ_V3_IN msgrequest */ +#define MC_CMD_INIT_RXQ_V3_IN_LEN 560 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_PACKED_STREAM or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V3_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. 
For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_PACKED_STREAM format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within a + * EQUAL_STRIDE_PACKED_STREAM format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_PACKED_STREAM. 
+ */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 +#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 + /* MC_CMD_INIT_RXQ_OUT msgresponse */ #define MC_CMD_INIT_RXQ_OUT_LEN 0 /* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */ #define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0 +/* MC_CMD_INIT_RXQ_V3_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V3_OUT_LEN 0 + /***********************************/ /* MC_CMD_INIT_TXQ @@ -7466,7 +7760,7 @@ #define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16 #define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16 #define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16 -#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */ +#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */ /* MC_CMD_PROXY_CMD_OUT msgresponse */ #define MC_CMD_PROXY_CMD_OUT_LEN 0 @@ -7479,7 +7773,7 @@ #define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0 #define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4 /* enum: An invalid handle. */ -#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0 #define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0 #define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32 /* The requesting physical function number */ @@ -7748,17 +8042,17 @@ #define MC_CMD_FILTER_OP_IN_OP_OFST 0 #define MC_CMD_FILTER_OP_IN_OP_LEN 4 /* enum: single-recipient filter insert */ -#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 +#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 /* enum: single-recipient filter remove */ -#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1 +#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1 /* enum: multi-recipient filter subscribe */ -#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2 +#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2 /* enum: multi-recipient filter unsubscribe */ -#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3 +#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3 /* enum: replace one recipient with another (warning - the filter handle may * change) */ -#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4 +#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4 /* filter handle (for remove / unsubscribe operations) */ #define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4 #define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8 @@ -7803,15 +8097,15 @@ #define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20 #define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4 /* enum: drop packets */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 +#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 /* enum: receive to host */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 +#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 /* enum: receive to MC */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 +#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 /* enum: loop back to TXDP 0 */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 /* enum: loop back to TXDP 1 */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 /* receive queue handle (for multiple queue modes, this is the base queue) */ #define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 #define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4 @@ -7819,14 +8113,14 @@ #define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28 #define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ -#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0 +#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ -#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1 +#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1 /* enum: receive to multiple queues using .1p mapping */ -#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2 +#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 
0x2 /* enum: install a filter entry that will never match; for test purposes only */ -#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or * MC_CMD_DOT1P_MAPPING_ALLOC. @@ -7843,7 +8137,7 @@ #define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40 #define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4 /* enum: request default behaviour (based on filter type) */ -#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff #define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0 #define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1 #define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1 @@ -7971,15 +8265,15 @@ #define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20 #define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4 /* enum: drop packets */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 /* enum: receive to host */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1 /* enum: receive to MC */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2 /* enum: loop back to TXDP 0 */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3 /* enum: loop back to TXDP 1 */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 /* receive queue handle (for multiple queue modes, this is the base queue) */ #define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24 #define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4 @@ -7987,14 +8281,14 @@ #define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28 #define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1 /* enum: receive to multiple queues using .1p mapping */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2 /* enum: install a filter entry that will never match; for test purposes only */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or * MC_CMD_DOT1P_MAPPING_ALLOC. 
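The new MC_CMD_INIT_RXQ_V3 request added earlier in this diff describes the EQUAL_STRIDE_PACKED_STREAM mode purely through offset/width constants, which is hard to read cold. The fragment below is a hedged illustration of how the ES_* fields fit together, not driver code from this patch: it assumes the sfc driver's usual MCDI buffer helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_POPULATE_DWORD_1, efx_mcdi_rpc from drivers/net/ethernet/sfc/mcdi.h), and the function name, parameters and ring size are hypothetical.

/* Hypothetical sketch, not part of this patch: initialise an RX queue in
 * EQUAL_STRIDE_PACKED_STREAM mode using the MC_CMD_INIT_RXQ_V3 layout above.
 */
static int example_init_rxq_esps(struct efx_nic *efx, u32 instance,
				 u32 target_evq, u32 port_id,
				 u32 bufs_per_bucket, u32 packet_stride,
				 u32 max_dma_len)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V3_IN_LEN);

	MCDI_SET_DWORD(inbuf, INIT_RXQ_V3_IN_SIZE, 512);	/* ring entries */
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V3_IN_TARGET_EVQ, target_evq);
	/* LABEL is ignored in this DMA mode (see the field comment above);
	 * MCDI_DECLARE_BUF zero-initialises the buffer, so it stays zero.
	 */
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V3_IN_INSTANCE, instance);
	MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_V3_IN_FLAGS,
			      INIT_RXQ_V3_IN_DMA_MODE,
			      MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V3_IN_PORT_ID, port_id);
	/* The ES_* fields are only interpreted in this DMA mode */
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET,
		       bufs_per_bucket);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, max_dma_len);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V3_IN_ES_PACKET_STRIDE, packet_stride);
	/* 0 = never block the datapath waiting for RX descriptors */
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT, 0);
	/* DMA_ADDR entries for the descriptor ring would be filled in here */

	return efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}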
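In the same spirit, the FILTER_OP_EXT RX_DEST/RX_MODE/RX_CONTEXT fields in the hunk just above combine as follows when inserting a multi-queue RSS filter. Again a hedged sketch rather than code from this patch: the MCDI helpers are assumed as before, the function name is hypothetical, and population of MATCH_FIELDS and the address/port match criteria is elided.

/* Hypothetical sketch, not part of this patch: insert a filter that spreads
 * matching traffic over multiple queues via an RSS context previously
 * obtained from MC_CMD_RSS_CONTEXT_ALLOC.
 */
static int example_insert_rss_filter(struct efx_nic *efx, u32 rss_context,
				     u32 base_rxq, u64 *handle_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FILTER_OP_EXT_IN_OP,
		       MC_CMD_FILTER_OP_IN_OP_INSERT);
	MCDI_SET_DWORD(inbuf, FILTER_OP_EXT_IN_RX_DEST,
		       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST);
	MCDI_SET_DWORD(inbuf, FILTER_OP_EXT_IN_RX_QUEUE, base_rxq);
	MCDI_SET_DWORD(inbuf, FILTER_OP_EXT_IN_RX_MODE,
		       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS);
	MCDI_SET_DWORD(inbuf, FILTER_OP_EXT_IN_RX_CONTEXT, rss_context);
	/* ... MATCH_FIELDS and the match values themselves go here ... */

	rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_FILTER_OP_EXT_OUT_LEN)
		return -EIO;
	*handle_out = MCDI_QWORD(outbuf, FILTER_OP_EXT_OUT_HANDLE);
	return 0;
}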
@@ -8011,7 +8305,7 @@ #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40 #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4 /* enum: request default behaviour (based on filter type) */ -#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1 #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1 @@ -8054,17 +8348,17 @@ #define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24 #define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8 /* enum: Match VXLAN traffic with this VNI */ -#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0 /* enum: Match Geneve traffic with this VNI */ -#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 /* enum: Reserved for experimental development use */ -#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe #define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24 #define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24 #define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8 /* enum: Match NVGRE traffic with this VSID */ -#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0 /* source IP address to match (as bytes in network order; set last 12 bytes to * 0 for IPv4 address) */ @@ -8131,6 +8425,273 @@ #define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156 #define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16 +/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional + * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via + * its rte_flow API. This extension is only useful with the sfc_efx driver + * included as part of DPDK, used in conjunction with the dpdk datapath + * firmware variant. + */ +#define MC_CMD_FILTER_OP_V3_IN_LEN 180 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_V3_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_V3_IN_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. 
+ */ +#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_LEN 4 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define 
MC_CMD_FILTER_OP_V3_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_LEN 4 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. + */ +#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_LEN 4 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_LEN 4 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) 
*/ +#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_LEN 4 +/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP + * protocol is GRE) to match (as bytes in network order; set last byte to 0 for + * VXLAN/NVGRE, or 1 for Geneve) + */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8 +/* enum: Match VXLAN traffic with this VNI */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_VXLAN 0x0 +/* enum: Match Geneve traffic with this VNI */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1 +/* enum: Reserved for experimental development use */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8 +/* enum: Match NVGRE traffic with this VSID */ +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_NVGRE 0x0 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_V3_IN_DST_IP_LEN 16 +/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_OFST 108 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_LEN 6 +/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_OFST 114 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_LEN 2 +/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in + * network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_OFST 116 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_LEN 6 +/* VXLAN/NVGRE inner frame destination port to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_OFST 122 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_LEN 2 +/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_OFST 124 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_LEN 2 +/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_OFST 126 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_OFST 128 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to + * 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_OFST 130 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_LEN 2 +/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_OFST 132 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_LEN 4 +/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_OFST 136 +#define 
MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_LEN 4 +/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_OFST 140 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_LEN 16 +/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16 +/* Set an action for all packets matching this filter. The DPDK driver and dpdk + * f/w variant use their own specific delivery structures, which are documented + * in the DPDK Firmware Driver Interface (SF-119419-TC). Requesting anything + * other than MATCH_ACTION_NONE when the NIC is running another f/w variant + * will cause the filter insertion to fail with ENOTSUP. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4 +/* enum: do nothing extra */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_NONE 0x0 +/* enum: Set the match flag in the packet prefix for packets matching the + * filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to + * support the DPDK rte_flow "FLAG" action. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG 0x1 +/* enum: Insert MATCH_MARK_VALUE into the packet prefix for packets matching + * the filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to + * support the DPDK rte_flow "MARK" action. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2 +/* the mark value for MATCH_ACTION_MARK */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4 + /* MC_CMD_FILTER_OP_OUT msgresponse */ #define MC_CMD_FILTER_OP_OUT_LEN 12 /* identifies the type of operation requested */ @@ -8147,9 +8708,9 @@ #define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 #define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 /* enum: guaranteed invalid filter handle (low 32 bits) */ -#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff /* enum: guaranteed invalid filter handle (high 32 bits) */ -#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff /* MC_CMD_FILTER_OP_EXT_OUT msgresponse */ #define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 @@ -8184,20 +8745,20 @@ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4 /* enum: read the list of supported RX filter matches */ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 /* enum: read flags indicating restrictions on filter insertion for the calling * client */ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 /* enum: read properties relating to security rules (Medford-only; for use by * SolarSecure apps, not directly by drivers. See SF-114946-SW.) 
*/ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3 +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3 /* enum: read the list of supported RX filter matches for VXLAN/NVGRE * encapsulated frames, which follow a different match sequence to normal * frames (Medford only) */ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4 +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 @@ -8238,7 +8799,9 @@ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging. * Please note that this interface is only of use to debug tools which have * knowledge of firmware and hardware data structures; nothing here is intended - * for use by normal driver code. + * for use by normal driver code. Note that although this command is in the + * Admin privilege group, in tamperproof adapters, only read operations are + * permitted. */ #define MC_CMD_PARSER_DISP_RW 0xe5 @@ -8250,32 +8813,36 @@ #define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0 #define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4 /* enum: RX dispatcher CPU */ -#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 +#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 /* enum: TX dispatcher CPU */ -#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 +#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 /* enum: Lookup engine (with original metadata format). Deprecated; used only * by cmdclient as a fallback for very old Huntington firmware, and not * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA * instead. */ -#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 +#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 /* enum: Lookup engine (with requested metadata format) */ -#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 +#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 /* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */ -#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0 +#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0 /* enum: RX1 dispatcher CPU (only valid for Medford) */ -#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4 +#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4 /* enum: Miscellaneous other state (only valid for Medford) */ -#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5 +#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5 /* identifies the type of operation requested */ #define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 #define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4 /* enum: Read a word of DICPU DMEM or a LUE entry */ -#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0 -/* enum: Write a word of DICPU DMEM or a LUE entry. */ -#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 -/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). */ -#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 +#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0 +/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on + * tamperproof adapters. + */ +#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 +/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not + * permitted on tamperproof adapters. 
+ */ +#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 /* data memory address (DICPU targets) or LUE index (LUE targets) */ #define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8 #define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4 @@ -8283,7 +8850,7 @@ #define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8 #define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4 /* enum: Port to datapath mapping */ -#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1 +#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1 /* value to write (for DMEM writes) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12 #define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4 @@ -8317,8 +8884,8 @@ #define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0 #define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4 #define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4 -#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */ -#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */ +#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */ +#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */ /***********************************/ @@ -8783,13 +9350,13 @@ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4 /* enum: MISC. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0 /* enum: IDO. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1 /* enum: RO. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2 /* enum: TPH Type. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3 /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8 @@ -8920,57 +9487,57 @@ */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0 #define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */ /* Target for download. (These match the blob numbers defined in * mc_flash_layout.h.) 
*/ #define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4 #define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb /* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc /* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd /* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe /* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf /* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff /* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8 #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4 /* enum: Last chunk, containing checksum rather than data */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff /* enum: Abort download of this item */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 
0xfffffffe /* Length of this chunk in bytes */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12 #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4 @@ -8989,21 +9556,21 @@ #define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4 #define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4 /* enum: Code download OK, completed. */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0 /* enum: Code download aborted as requested. */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1 /* enum: Code download OK so far, send next chunk. */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2 /* enum: Download phases out of sequence */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100 /* enum: Bad target for this phase */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101 /* enum: Chunk ID out of sequence */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200 /* enum: Chunk length zero or too large */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201 /* enum: Checksum was incorrect */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300 /***********************************/ @@ -9087,54 +9654,58 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 /* enum: Rules engine RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define 
MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b /* enum: RXDP Test firmware image 10 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 /* enum: Rules engine TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0 @@ -9144,41 +9715,43 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define 
MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 -/* enum: reserved value - do not use (bug69716) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED_9 0x9 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0 @@ -9188,34 +9761,36 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define 
MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 -/* enum: reserved value - do not use (bug69716) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED_9 0x9 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12 #define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4 @@ -9293,54 +9868,58 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 /* enum: Rules engine RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b /* enum: RXDP Test firmware image 10 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 /* enum: Rules engine TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0 @@ -9350,41 +9929,43 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant 
RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 -/* enum: reserved value - do not use (bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED_9 0x9 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0 @@ -9394,34 +9975,36 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 -/* enum: reserved value - do not use (bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED_9 0x9 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12 #define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4 @@ -9469,6 +10052,18 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 #define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present * on older firmware (check the length). */ @@ -9482,18 +10077,18 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff /* enum: PF does not exist. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe /* enum: PF does exist but is not assigned to any external port. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd /* enum: This value indicates that PF is assigned, but it cannot be expressed * in this field. It is intended for a possible future situation where a more * complex scheme of PFs to ports mapping is being used. The future driver * should look for a new field supporting the new scheme. 
The current/old * driver should treat this value as PF_NOT_ASSIGNED. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc /* One byte per PF containing the number of its VFs, indexed by PF number. A * special value indicates that a PF is not present. */ @@ -9501,9 +10096,9 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */ /* enum: PF does not exist. */ -/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */ /* Number of VIs available for each external port */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58 #define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2 @@ -9592,54 +10187,58 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 /* enum: Rules engine RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware 
image 9 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b /* enum: RXDP Test firmware image 10 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 /* enum: Rules engine TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0 @@ -9649,41 +10248,43 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 -/* enum: reserved value - do not use (bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED_9 0x9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0 @@ -9693,34 +10294,36 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 -/* enum: reserved value - do not use (bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED_9 0x9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12 #define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4 @@ -9768,6 +10371,18 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 #define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present * on older firmware (check the length). */ @@ -9781,18 +10396,18 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff /* enum: PF does not exist. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe /* enum: PF does exist but is not assigned to any external port. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd /* enum: This value indicates that PF is assigned, but it cannot be expressed * in this field. It is intended for a possible future situation where a more * complex scheme of PFs to ports mapping is being used. The future driver * should look for a new field supporting the new scheme. The current/old * driver should treat this value as PF_NOT_ASSIGNED. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc /* One byte per PF containing the number of its VFs, indexed by PF number. A * special value indicates that a PF is not present. 
*/ @@ -9800,9 +10415,9 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */ /* enum: PF does not exist. */ -/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */ /* Number of VIs available for each external port */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58 #define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2 @@ -9833,11 +10448,11 @@ /* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. * CTPIO is not mapped. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0 /* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 /* Number of vFIFOs per adapter that can be used for VFIFO Stuffing * (SF-115995-SW) in the present configuration of firmware and port mode. */ @@ -9916,54 +10531,58 @@ #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2 /* enum: Rules engine RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b /* enum: RXDP Test firmware image 10 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3 /* enum: Rules engine TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0 @@ -9973,41 +10592,43 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the 
FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 -/* enum: reserved value - do not use (bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED_9 0x9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0 @@ -10017,34 +10638,36 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ 
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 -/* enum: reserved value - do not use (bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED_9 0x9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12 #define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4 @@ -10092,6 +10715,18 @@ #define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 #define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present * on older firmware (check the length). */ @@ -10105,18 +10740,18 @@ #define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 #define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff /* enum: PF does not exist. */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe /* enum: PF does exist but is not assigned to any external port. */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd /* enum: This value indicates that PF is assigned, but it cannot be expressed * in this field. 
It is intended for a possible future situation where a more * complex scheme of PFs to ports mapping is being used. The future driver * should look for a new field supporting the new scheme. The current/old * driver should treat this value as PF_NOT_ASSIGNED. */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc /* One byte per PF containing the number of its VFs, indexed by PF number. A * special value indicates that a PF is not present. */ @@ -10124,9 +10759,9 @@ #define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1 #define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */ /* enum: PF does not exist. */ -/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */ /* Number of VIs available for each external port */ #define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58 #define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2 @@ -10157,11 +10792,11 @@ /* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. * CTPIO is not mapped. */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0 /* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2 /* Number of vFIFOs per adapter that can be used for VFIFO Stuffing * (SF-115995-SW) in the present configuration of firmware and port mode. */ @@ -10201,7 +10836,16 @@ #define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16 #define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10 #define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26 -#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6 +#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2 +/* Type of command/response */ +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28 +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4 +/* enum: MCDI command directed to or response originating from the MC. */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0 +/* enum: MCDI command directed to a TSA controller. MCDI responses of this type + * are not defined. 
+ */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1 /***********************************/ @@ -10412,15 +11056,15 @@ #define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4 #define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4 /* enum: VLAN */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1 /* enum: VEB */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2 /* enum: VEPA (obsolete) */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3 /* enum: MUX */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4 /* enum: Snapper specific; semantics TBD */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5 /* Flags controlling v-port creation */ #define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8 #define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4 @@ -10495,23 +11139,23 @@ #define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4 #define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4 /* enum: VLAN (obsolete) */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1 /* enum: VEB (obsolete) */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2 /* enum: VEPA (obsolete) */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3 /* enum: A normal v-port receives packets which match a specified MAC and/or * VLAN. */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4 /* enum: An expansion v-port receives traffic which does not match any other * v-port. */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5 /* enum: A test v-port receives packets which match any filters installed by * its downstream components. */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6 /* Flags controlling v-port creation */ #define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8 #define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4 @@ -10595,7 +11239,7 @@ #define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24 #define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6 /* enum: Derive the MAC address from the upstream port */ -#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0 +#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0 /* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */ #define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0 @@ -10809,12 +11453,12 @@ /* enum: Allocate a context for exclusive use. The key and indirection table * must be explicitly configured. */ -#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0 /* enum: Allocate a context for shared use; this will spread across a range of * queues, but the key and indirection table are pre-configured and may not be * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64. */ -#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1 /* Number of queues spanned by this context, in the range 1-64; valid offsets * in the indirection table will be in the range 0 to NUM_QUEUES-1.
*/ @@ -10830,7 +11474,7 @@ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0 #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4 /* enum: guaranteed invalid RSS context handle value */ -#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff /***********************************/ @@ -11073,7 +11717,7 @@ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0 #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4 /* enum: guaranteed invalid .1p mapping handle value */ -#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff /***********************************/ @@ -11385,11 +12029,11 @@ #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2 /* enum: pad to 64 bytes */ -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0 /* enum: pad to 128 bytes (Medford only) */ -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1 /* enum: pad to 256 bytes (Medford only) */ -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2 /* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */ #define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0 @@ -11453,37 +12097,37 @@ #define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0 #define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4 /* enum: Leave the system clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for inter-core clock domain */ #define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4 #define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4 /* enum: Leave the inter-core clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for DPCPU clock domain */ #define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8 #define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4 /* enum: Leave the DPCPU clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for PCS clock domain */ #define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12 #define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4 /* enum: Leave the PCS clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for MC clock domain */ #define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16 #define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4 /* enum: Leave the MC clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for rmon clock domain */ #define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20 #define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4 /* enum: Leave the rmon clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for vswitch clock domain */ #define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24 #define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4 /* enum: Leave the vswitch clock domain frequency unchanged */ -#define 
MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 /* MC_CMD_SET_CLOCK_OUT msgresponse */ #define MC_CMD_SET_CLOCK_OUT_LEN 28 @@ -11491,37 +12135,37 @@ #define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0 #define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4 /* enum: The system clock domain doesn't exist */ -#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 /* Resulting inter-core frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4 #define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4 /* enum: The inter-core clock domain doesn't exist / isn't used */ -#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 /* Resulting DPCPU frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8 #define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4 /* enum: The dpcpu clock domain doesn't exist */ -#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 /* Resulting PCS frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12 #define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4 /* enum: The PCS clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 /* Resulting MC frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16 #define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4 /* enum: The MC clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 /* Resulting rmon frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20 #define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4 /* enum: The rmon clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 /* Resulting vswitch frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24 #define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4 /* enum: The vswitch clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 /***********************************/ @@ -11537,21 +12181,21 @@ #define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0 #define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4 /* enum: RxDPCPU0 */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 /* enum: TxDPCPU0 */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 /* enum: TxDPCPU1 */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 /* enum: RxDPCPU1 (Medford only) */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3 /* enum: RxDPCPU (will be for the calling function; for now, just an alias of * DPCPU_RX0) */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80 /* enum: TxDPCPU (will be for the calling function; for now, just an alias of * DPCPU_TX0) */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81 /* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be * initialised to zero */ @@ -11559,15 +12203,15 @@ #define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8 
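A note on the _LBN/_WIDTH pairs that dominate these hunks: for each bitfield, LBN gives the lowest bit number and WIDTH the size in bits within the little-endian dwords of an MCDI payload, so callers compose and parse message words by masking and shifting. The sketch below is a minimal illustration of that convention, exercised on the MESSAGE_TYPE field (LBN 28, WIDTH 4) that this patch carves out of the formerly unused top bits of the v2 extension header; mcdi_set_field() and mcdi_get_field() are hypothetical helper names for this sketch, not the driver's real MCDI accessor macros.

#include <stdint.h>

/* Hypothetical helpers modelling the header's _LBN/_WIDTH convention:
 * LBN = lowest bit number of a field, WIDTH = its size in bits, both
 * relative to a little-endian 32-bit MCDI dword. */
static inline uint32_t mcdi_field_mask(unsigned int width)
{
	return width < 32 ? (1u << width) - 1 : 0xffffffffu;
}

static inline void mcdi_set_field(uint32_t *dword, unsigned int lbn,
				  unsigned int width, uint32_t value)
{
	uint32_t mask = mcdi_field_mask(width);

	*dword = (*dword & ~(mask << lbn)) | ((value & mask) << lbn);
}

static inline uint32_t mcdi_get_field(uint32_t dword, unsigned int lbn,
				      unsigned int width)
{
	return (dword >> lbn) & mcdi_field_mask(width);
}

/* Example: tag a v2 extension header dword as a TSA-directed command
 * via the new MESSAGE_TYPE field (LBN 28, WIDTH 4, enum TSA = 0x1). */
static uint32_t tag_as_tsa(uint32_t hdr)
{
	mcdi_set_field(&hdr, 28, 4, 0x1);
	return hdr;
}

The same two helpers cover any LBN/WIDTH field in this header; for instance, mcdi_get_field(word, 8, 8) would pull HDR_CMD_CMDNUM (LBN 8, WIDTH 8) out of a DPCPU RPC word like the ones defined just above.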
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */ #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16 @@ -11578,11 +12222,11 @@ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64 @@ -11591,9 +12235,9 @@ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16 #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16 #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */ #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64 #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16 #define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12 @@ -11660,7 +12304,7 @@ #define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0 #define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4 /* enum: Copy slave_data section to the slave core. 
(Greenport only) */ -#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 +#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 /* MC_CMD_SHMBOOT_OP_OUT msgresponse */ #define MC_CMD_SHMBOOT_OP_OUT_LEN 0 @@ -11709,14 +12353,14 @@ #define MC_CMD_DUMP_DO_IN_PADDING_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4 -#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4 -#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 @@ -11727,24 +12371,24 @@ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 -#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */ +#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 -#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4 /* enum: The uart port this command was received over (if using a uart * transport) */ -#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff +#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4 -#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4 /* Enum values, see field(s): */ @@ -11854,11 +12498,11 @@ #define MC_CMD_SET_PSU_IN_LEN 12 #define MC_CMD_SET_PSU_IN_PARAM_OFST 0 #define MC_CMD_SET_PSU_IN_PARAM_LEN 4 -#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */ #define MC_CMD_SET_PSU_IN_RAIL_OFST 
4 #define MC_CMD_SET_PSU_IN_RAIL_LEN 4 -#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */ -#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */ /* desired value, eg voltage in mV */ #define MC_CMD_SET_PSU_IN_VALUE_OFST 8 #define MC_CMD_SET_PSU_IN_VALUE_LEN 4 @@ -12031,26 +12675,30 @@ #define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0 #define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1 /* enum: Get current RXEQ settings */ -#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0 +#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0 /* enum: Override RXEQ settings */ -#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1 +#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1 /* enum: Get current TX Driver settings */ -#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2 +#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2 /* enum: Override TX Driver settings */ -#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3 +#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3 /* enum: Force KR Serdes reset / recalibration */ -#define MC_CMD_KR_TUNE_IN_RECAL 0x4 +#define MC_CMD_KR_TUNE_IN_RECAL 0x4 /* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid * signal. */ -#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5 +#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5 /* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The * caller should call this command repeatedly after starting eye plot, until no * more data is returned. */ -#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 +#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 /* enum: Read Figure Of Merit (eye quality, higher is better). */ -#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7 +#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7 +/* enum: Start/stop link training frames */ +#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_RUN 0x8 +/* enum: Issue KR link training command (control training coefficients) */ +#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_CMD 0x9 /* Align the arguments to 32 bits */ #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3 @@ -12084,98 +12732,98 @@ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: Attenuation (0-15, Huntington) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 /* enum: CTLE Boost (0-15, Huntington) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 /* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max * positive, Medford - 0-31) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 /* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max * positive, Medford - 0-31) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 /* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max * positive, Medford - 0-16) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 /* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max * positive, Medford - 0-16) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 /* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max * positive, Medford - 0-16) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 /* enum: Edge DFE DLEV (0-128 for Medford) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 /* enum: Variable Gain Amplifier (0-15, Medford) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8 /* enum: CTLE EQ Capacitor (0-15, Medford) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 /* enum: CTLE EQ Resistor (0-7, Medford) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa /* enum: CTLE gain (0-31, Medford2) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb /* enum: CTLE pole (0-31, Medford2) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc /* enum: CTLE peaking (0-31, Medford2) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd /* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe /* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf /* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10 /* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11 /* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12 /* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13 /* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14 /* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15 /* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16 /* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17 /* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18 /* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19 /* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a /* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5))) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b /* enum: Negative h1 polarity data sampler offset calibration code, even path * (Medford2 - 6 bit signed (-29 - +29))) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c /* enum: Negative h1 polarity data sampler offset calibration code, odd path * (Medford2 - 6 bit signed (-29 
- +29))) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d /* enum: Positive h1 polarity data sampler offset calibration code, even path * (Medford2 - 6 bit signed (-29 - +29))) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e /* enum: Positive h1 polarity data sampler offset calibration code, odd path * (Medford2 - 6 bit signed (-29 - +29))) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f /* enum: CDR calibration loop code (Medford2) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20 /* enum: CDR integral loop code (Medford2) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12 @@ -12241,38 +12889,38 @@ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: TX Amplitude (Huntington, Medford, Medford2) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 /* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 /* enum: De-Emphasis Tap1 Fine */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2 /* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3 /* enum: De-Emphasis Tap2 Fine (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4 /* enum: Pre-Emphasis Magnitude (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5 /* enum: Pre-Emphasis Fine (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6 /* enum: TX Slew Rate Coarse control (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 /* enum: TX Slew Rate Fine control (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 /* enum: TX Termination Impedance control (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 /* enum: TX Amplitude Fine control (Medford) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa /* enum: Pre-shoot 
Tap (Medford, Medford2) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb /* enum: De-emphasis Tap (Medford, Medford2) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16 @@ -12345,9 +12993,12 @@ /* Align the arguments to 32 bits */ #define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3 -/* Port-relative lane to scan eye on */ #define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4 #define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_LBN 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_WIDTH 8 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_LBN 31 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_WIDTH 1 /* Scan duration / cycle count */ #define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8 #define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4 @@ -12383,12 +13034,91 @@ #define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3 #define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4 #define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_LBN 0 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_WIDTH 8 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_LBN 31 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_WIDTH 1 /* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */ #define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4 #define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0 #define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4 +/* MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN msgrequest */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_OFST 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_STOP 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_START 0x1 /* enum */ + +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN msgrequest */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LEN 28 +/* Requested operation */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_OFST 4 +#define 
MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_LEN 4 +/* Set INITIALIZE state */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_OFST 8 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_LEN 4 +/* Set PRESET state */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_OFST 12 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_LEN 4 +/* C(-1) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_OFST 16 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_INCREMENT 0x1 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_DECREMENT 0x2 /* enum */ +/* C(0) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_OFST 20 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(+1) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_OFST 24 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ + +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT msgresponse */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN 24 +/* C(-1) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_NOT_UPDATED 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_UPDATED 0x1 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MINIMUM 0x2 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MAXIMUM 0x3 /* enum */ +/* C(0) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_OFST 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(+1) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_OFST 8 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(-1) value */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_OFST 12 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_LEN 4 +/* C(0) value */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_OFST 16 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_LEN 4 +/* C(+1) value */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_OFST 20 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_LEN 4 + /***********************************/ /* MC_CMD_PCIE_TUNE @@ -12406,22 +13136,22 @@ #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0 #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1 /* enum: Get current RXEQ settings */ -#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0 +#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0 /* enum: Override RXEQ settings */ -#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1 +#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1 /* enum: Get current TX Driver settings */ -#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2 +#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2 /* enum: Override TX Driver settings */ -#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3 +#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3 /* enum: Start PCIe Serdes Eye diagram plot on a given lane. */ -#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5 +#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5 /* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The * caller should call this command repeatedly after starting eye plot, until no * more data is returned.
*/ -#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6 +#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6 /* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */ -#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7 +#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7 /* Align the arguments to 32 bits */ #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1 #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3 @@ -12455,46 +13185,46 @@ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: Attenuation (0-15) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0 /* enum: CTLE Boost (0-15) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1 /* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2 /* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3 /* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4 /* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 /* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 /* enum: DFE DLev */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7 /* enum: Figure of Merit */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8 /* enum: CTLE EQ Capacitor (HF Gain) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 /* enum: CTLE EQ Resistor (DC Gain) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ 
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14 @@ -12558,15 +13288,15 @@ #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: TxMargin (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0 /* enum: TxSwing (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1 /* enum: De-emphasis coefficient C(-1) (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2 /* enum: De-emphasis coefficient C(0) (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3 /* enum: De-emphasis coefficient C(+1) (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4 /* Enum values, see field(s): */ @@ -12632,9 +13362,9 @@ /* enum: re-read and apply licenses after a license key partition update; note * that this operation returns a zero-length response */ -#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0 +#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0 /* enum: report counts of installed licenses */ -#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1 +#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1 /* MC_CMD_LICENSING_OUT msgresponse */ #define MC_CMD_LICENSING_OUT_LEN 28 @@ -12665,9 +13395,9 @@ #define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24 #define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4 /* enum: licensing subsystem self-test failed */ -#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0 +#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0 /* enum: licensing subsystem self-test passed */ -#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1 +#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1 /***********************************/ @@ -12687,11 +13417,11 @@ /* enum: re-read and apply licenses after a license key partition update; note * that this operation returns a zero-length response */ -#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0 +#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0 /* enum: report counts of installed licenses Returns EAGAIN if license * processing (updating) has been started but not yet completed. 
*/ -#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1 +#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1 /* MC_CMD_LICENSING_V3_OUT msgresponse */ #define MC_CMD_LICENSING_V3_OUT_LEN 88 @@ -12718,9 +13448,9 @@ #define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20 #define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4 /* enum: licensing subsystem self-test failed */ -#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0 +#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0 /* enum: licensing subsystem self-test passed */ -#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1 +#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1 /* bitmask of licensed applications */ #define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24 #define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8 @@ -12806,9 +13536,9 @@ #define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0 #define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4 /* enum: no (or invalid) license is present for the application */ -#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0 +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0 /* enum: a valid license is present for the application */ -#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1 +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1 /***********************************/ @@ -12837,9 +13567,9 @@ #define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0 #define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4 /* enum: no (or invalid) license is present for the application */ -#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0 /* enum: a valid license is present for the application */ -#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1 /***********************************/ @@ -12891,9 +13621,9 @@ #define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4 #define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4 /* enum: validate application */ -#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 +#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 /* enum: mask application */ -#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1 +#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1 /* arguments specific to this particular operation */ #define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8 #define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4 @@ -12984,9 +13714,9 @@ #define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100 #define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4 /* enum: expiry units are accounting units */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0 /* enum: expiry units are calendar days */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1 /* base MAC address of the NIC stored in NVRAM (note that this is a constant * value for a given NIC regardless which function is calling, effectively this * is PF0 base MAC address) @@ -13019,9 +13749,9 @@ #define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8 #define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4 /* enum: turn the features off */ -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0 /* enum: turn the features back on */ -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1 /* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */ #define 
MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0 @@ -13048,15 +13778,15 @@ * This is an asynchronous operation owing to the time taken to validate an * ECDSA license */ -#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0 +#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0 /* enum: clear the license immediately rather than waiting for the next power * cycle */ -#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1 +#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1 /* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET * operation */ -#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2 +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */ #define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164 @@ -13082,13 +13812,13 @@ #define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0 #define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4 /* enum: finished validating and installing license */ -#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0 +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0 /* enum: license validation and installation in progress */ -#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1 +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1 /* enum: licensing error. More specific error messages are not provided to * avoid exposing details of the licensing system to the client */ -#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2 +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2 /* bitmask of licensed features */ #define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4 #define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8 @@ -13124,9 +13854,9 @@ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 /* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note * that these handles should be considered opaque to the host, although a value * of 0xFFFFFFFF is guaranteed never to be a valid handle. @@ -13146,7 +13876,7 @@ */ #define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8 -#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0 @@ -13167,9 +13897,9 @@ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4 /* enum: receiving to just the specified queue */ -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 /* enum: receiving to multiple queues using RSS context */ -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 /* RSS context (for RX_MODE_RSS) */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4 @@ -13193,12 +13923,12 @@ /* enum: Per-TXQ enable for multicast UDP destination lookup for possible * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.) 
*/ -#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0 /* enum: Per-v-adaptor enable for suppression of self-transmissions on the * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single * boolean.) */ -#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1 /* handle for the entity to update: queue handle, EVB port ID, etc. depending * on the type of configuration setting being changed */ @@ -13278,9 +14008,9 @@ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 /* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note * that these handles should be considered opaque to the host, although a value * of 0xFFFFFFFF is guaranteed never to be a valid handle. @@ -13300,7 +14030,7 @@ */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc -#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0 @@ -13319,9 +14049,9 @@ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4 /* enum: receiving to just the specified queue */ -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 /* enum: receiving to multiple queues using RSS context */ -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 /* RSS context (for RX_MODE_RSS) */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4 @@ -13431,9 +14161,9 @@ #define MC_CMD_READ_ATB_IN_LEN 16 #define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0 #define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4 -#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */ -#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */ -#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */ #define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4 #define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4 #define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8 @@ -13503,46 +14233,46 @@ #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16 -#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */ /* New privilege mask to be set. The mask will only be changed if the MSB is * set to 1. 
*/ #define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4 #define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4 -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */ /* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */ /* enum: Allows to set the TX packets' source MAC address to any arbitrary MAC * adress. */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800 /* enum: Privilege that allows a Function to change the MAC address configured * in its associated vAdapter/vPort. */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000 /* enum: Privilege that allows a Function to install filters that specify VLANs * that are not in the permit list for the associated vPort. This privilege is * primarily to support ESX where vPorts are created that restrict traffic to * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT. */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000 /* enum: Privilege for insecure commands. Commands that belong to this group * are not permitted on secure adapters regardless of the privilege mask. */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000 /* enum: Set this bit to indicate that a new privilege mask is to be set, * otherwise the command will only read the existing mask. 
*/ -#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000 +#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000 /* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */ #define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4 @@ -13573,12 +14303,12 @@ /* New link state mode to be set */ #define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4 #define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4 -#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */ -#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */ -#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */ /* enum: Use this value to just read the existing setting without modifying it. */ -#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff +#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff /* MC_CMD_LINK_STATE_MODE_OUT msgresponse */ #define MC_CMD_LINK_STATE_MODE_OUT_LEN 4 @@ -13674,12 +14404,12 @@ /* The groups of functions to have their privilege masks modified. */ #define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0 #define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4 -#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */ /* For VFS_OF_PF specify the PF, for ONE specify the target function */ #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4 #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4 @@ -13782,11 +14512,11 @@ /* Sector type */ #define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0 #define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4 -#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */ -#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */ -#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */ -#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */ -#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */ /* Sector data */ #define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4 #define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1 @@ -14001,18 +14731,18 @@ #define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0 #define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2 /* enum: the IANA allocated UDP port for VXLAN */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5 /* enum: the IANA allocated UDP port for Geneve */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1 #define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0 #define 
TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16 /* tunnel encapsulation protocol (only those named below are supported) */ #define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2 #define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2 /* enum: This port will be used for VXLAN on both IPv4 and IPv6 */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0 /* enum: This port will be used for Geneve on both IPv4 and IPv6 */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1 #define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16 #define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16 @@ -14180,10 +14910,10 @@ /* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */ #define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12 #define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4 -#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */ -#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */ -#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */ -#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */ /* MC_CMD_SET_EVQ_TMR_OUT msgresponse */ #define MC_CMD_SET_EVQ_TMR_OUT_LEN 8 @@ -14269,7 +14999,7 @@ */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d -#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20 @@ -14281,9 +15011,9 @@ /* Will the common pool be used as TX_vFIFO_ULL (1) */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4 #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4 -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */ /* enum: Using this interface without TX_vFIFO_ULL is not supported for now */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0 /* Number of buffers to reserve for the common pool */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8 #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4 @@ -14291,20 +15021,20 @@ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12 #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4 /* enum: Extracts information from function */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 /* Network port or RX Engine to which the common pool connects. 
*/ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16 #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4 /* enum: Extracts information from function */ -/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */ +/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */ /* enum: To enable Switch loopback with Rx engine 0 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4 /* enum: To enable Switch loopback with Rx engine 1 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5 /* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4 @@ -14320,7 +15050,7 @@ */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e -#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20 @@ -14332,20 +15062,20 @@ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4 #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4 /* enum: Extracts information from common pool */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1 -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */ /* enum: To enable Switch loopback with Rx engine 0 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4 /* enum: To enable Switch loopback with Rx engine 1 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5 /* Minimum number of buffers that the pool must have */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8 #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4 /* enum: Do not check the space available */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0 /* Will the vFIFO be used as TX_vFIFO_ULL */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12 #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4 @@ -14353,7 +15083,7 @@ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16 #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4 /* enum: Search for the lowest unused priority */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1 /* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8 @@ -14372,7 +15102,7 
@@ */ #define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f -#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */ #define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4 @@ -14391,7 +15121,7 @@ */ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121 -#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4 @@ -14410,7 +15140,7 @@ */ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124 -#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0 diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index ce8aabf9091e..9382bb0b4d5a 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -352,6 +352,64 @@ static void efx_mcdi_phy_decode_link(struct efx_nic *efx, link_state->speed = speed; } +/* The semantics of the ethtool FEC mode bitmask are not well defined, + * particularly the meaning of combinations of bits. Which means we get to + * define our own semantics, as follows: + * OFF overrides any other bits, and means "disable all FEC" (with the + * exception of 25G KR4/CR4, where it is not possible to reject it if AN + * partner requests it). + * AUTO on its own means use cable requirements and link partner autoneg with + * fw-default preferences for the cable type. + * AUTO and either RS or BASER means use the specified FEC type if cable and + * link partner support it, otherwise autoneg/fw-default. + * RS or BASER alone means use the specified FEC type if cable and link partner + * support it and either requests it, otherwise no FEC. + * Both RS and BASER (whether AUTO or not) means use FEC if cable and link + * partner support it, preferring RS to BASER. + */ +static u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap) +{ + u32 ret = 0; + + if (ethtool_cap & ETHTOOL_FEC_OFF) + return 0; + + if (ethtool_cap & ETHTOOL_FEC_AUTO) + ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_RS_FEC_LBN); + if (ethtool_cap & ETHTOOL_FEC_RS) + ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN); + if (ethtool_cap & ETHTOOL_FEC_BASER) + ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) | + (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN); + return ret; +} + +/* Invert ethtool_fec_caps_to_mcdi. There are two combinations that function + * can never produce, (baser xor rs) and neither req; the implementation below + * maps both of those to AUTO. This should never matter, and it's not clear + * what a better mapping would be anyway. + */ +static u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g) +{ + bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN), + rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN), + baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) + : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN), + baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN) + : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN); + + if (!baser && !rs) + return ETHTOOL_FEC_OFF; + return (rs_req ? ETHTOOL_FEC_RS : 0) | + (baser_req ? 
ETHTOOL_FEC_BASER : 0) | + (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO); +} + static int efx_mcdi_phy_probe(struct efx_nic *efx) { struct efx_mcdi_phy_data *phy_data; @@ -438,6 +496,13 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx) MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); + /* Record the initial FEC configuration (or nearest approximation + * representable in the ethtool configuration space) + */ + efx->fec_config = mcdi_fec_caps_to_ethtool(caps, + efx->link_state.speed == 25000 || + efx->link_state.speed == 50000); + /* Default to Autonegotiated flow control if the PHY supports it */ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) @@ -458,6 +523,8 @@ int efx_mcdi_port_reconfigure(struct efx_nic *efx) ethtool_linkset_to_mcdi_cap(efx->link_advertising) : phy_cfg->forced_cap); + caps |= ethtool_fec_caps_to_mcdi(efx->fec_config); + return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), efx->loopback_mode, 0); } @@ -584,6 +651,8 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, } } + caps |= ethtool_fec_caps_to_mcdi(efx->fec_config); + rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), efx->loopback_mode, 0); if (rc) @@ -599,6 +668,85 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, return 0; } +static int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, + struct ethtool_fecparam *fec) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN); + u32 caps, active, speed; /* MCDI format */ + bool is_25g = false; + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN) + return -EOPNOTSUPP; + + /* behaviour for 25G/50G links depends on 25G BASER bit */ + speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED); + is_25g = speed == 25000 || speed == 50000; + + caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP); + fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g); + /* BASER is never supported on 100G */ + if (speed == 100000) + fec->fec &= ~ETHTOOL_FEC_BASER; + + active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE); + switch (active) { + case MC_CMD_FEC_NONE: + fec->active_fec = ETHTOOL_FEC_OFF; + break; + case MC_CMD_FEC_BASER: + fec->active_fec = ETHTOOL_FEC_BASER; + break; + case MC_CMD_FEC_RS: + fec->active_fec = ETHTOOL_FEC_RS; + break; + default: + netif_warn(efx, hw, efx->net_dev, + "Firmware reports unrecognised FEC_TYPE %u\n", + active); + /* We don't know what firmware has picked. AUTO is as good a + * "can't happen" value as any other. 
+ */ + fec->active_fec = ETHTOOL_FEC_AUTO; + break; + } + + return 0; +} + +static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, + const struct ethtool_fecparam *fec) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps; + int rc; + + /* Work out what efx_mcdi_phy_set_link_ksettings() would produce from + * saved advertising bits + */ + if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, efx->link_advertising)) + caps = (ethtool_linkset_to_mcdi_cap(efx->link_advertising) | + 1 << MC_CMD_PHY_CAP_AN_LBN); + else + caps = phy_cfg->forced_cap; + + caps |= ethtool_fec_caps_to_mcdi(fec->fec); + rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, 0); + if (rc) + return rc; + + /* Record the new FEC setting for subsequent set_link calls */ + efx->fec_config = fec->fec; + return 0; +} + static int efx_mcdi_phy_test_alive(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN); @@ -977,6 +1125,8 @@ static const struct efx_phy_operations efx_mcdi_phy_ops = { .remove = efx_mcdi_phy_remove, .get_link_ksettings = efx_mcdi_phy_get_link_ksettings, .set_link_ksettings = efx_mcdi_phy_set_link_ksettings, + .get_fecparam = efx_mcdi_phy_get_fecparam, + .set_fecparam = efx_mcdi_phy_set_fecparam, .test_alive = efx_mcdi_phy_test_alive, .run_tests = efx_mcdi_phy_run_tests, .test_name = efx_mcdi_phy_test_name, diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c index a77a8bd2dd70..4ac30b6e5dab 100644 --- a/drivers/net/ethernet/sfc/mtd.c +++ b/drivers/net/ethernet/sfc/mtd.c @@ -24,17 +24,8 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) { struct efx_nic *efx = mtd->priv; - int rc; - rc = efx->type->mtd_erase(mtd, erase->addr, erase->len); - if (rc == 0) { - erase->state = MTD_ERASE_DONE; - } else { - erase->state = MTD_ERASE_FAILED; - erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN; - } - mtd_erase_callback(erase); - return rc; + return efx->type->mtd_erase(mtd, erase->addr, erase->len); } static void efx_mtd_sync(struct mtd_info *mtd) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index d20a8660ee48..5e379a83c729 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -430,6 +430,7 @@ enum efx_sync_events_state { * @event_test_cpu: Last CPU to handle interrupt or test event for this channel * @irq_count: Number of IRQs since last adaptive moderation decision * @irq_mod_score: IRQ moderation score + * @filter_work: Work item for efx_filter_rfs_expire() * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, * indexed by filter ID * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors @@ -475,6 +476,7 @@ struct efx_channel { unsigned int irq_mod_score; #ifdef CONFIG_RFS_ACCEL unsigned int rfs_filters_added; + struct work_struct filter_work; #define RPS_FLOW_ID_INVALID 0xFFFFFFFF u32 *rps_flow_id; #endif @@ -627,6 +629,8 @@ static inline bool efx_link_state_equal(const struct efx_link_state *left, * Serialised by the mac_lock. * @get_link_ksettings: Get ethtool settings. Serialised by the mac_lock. * @set_link_ksettings: Set ethtool settings. Serialised by the mac_lock. + * @get_fecparam: Get Forward Error Correction settings. Serialised by mac_lock. + * @set_fecparam: Set Forward Error Correction settings. Serialised by mac_lock. 
* @set_npage_adv: Set abilities advertised in (Extended) Next Page * (only needed where AN bit is set in mmds) * @test_alive: Test that PHY is 'alive' (online) @@ -645,6 +649,9 @@ struct efx_phy_operations { struct ethtool_link_ksettings *cmd); int (*set_link_ksettings)(struct efx_nic *efx, const struct ethtool_link_ksettings *cmd); + int (*get_fecparam)(struct efx_nic *efx, struct ethtool_fecparam *fec); + int (*set_fecparam)(struct efx_nic *efx, + const struct ethtool_fecparam *fec); void (*set_npage_adv) (struct efx_nic *efx, u32); int (*test_alive) (struct efx_nic *efx); const char *(*test_name) (struct efx_nic *efx, unsigned int index); @@ -704,6 +711,28 @@ union efx_multicast_hash { struct vfdi_status; +/* The reserved RSS context value */ +#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff +/** + * struct efx_rss_context - A user-defined RSS context for filtering + * @list: node of linked list on which this struct is stored + * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or + * %EFX_EF10_RSS_CONTEXT_INVALID if this context is not present on the NIC. + * For Siena, 0 if RSS is active, else %EFX_EF10_RSS_CONTEXT_INVALID. + * @user_id: the rss_context ID exposed to userspace over ethtool. + * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled + * @rx_hash_key: Toeplitz hash key for this RSS context + * @indir_table: Indirection table for this RSS context + */ +struct efx_rss_context { + struct list_head list; + u32 context_id; + u32 user_id; + bool rx_hash_udp_4tuple; + u8 rx_hash_key[40]; + u32 rx_indir_table[128]; +}; + /** * struct efx_nic - an Efx NIC * @name: Device name (net device name or bus id before net device registered) @@ -764,11 +793,10 @@ struct vfdi_status; * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative) * @rx_packet_ts_offset: Offset of timestamp from start of packet data * (valid only if channel->sync_timestamps_enabled; always negative) - * @rx_hash_key: Toeplitz hash key for RSS - * @rx_indir_table: Indirection table for RSS * @rx_scatter: Scatter mode enabled for receives - * @rss_active: RSS enabled on hardware - * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled + * @rss_context: Main RSS context. Its @list member is the head of the list of + * RSS contexts created by user requests + * @rss_lock: Protects custom RSS context software state in @rss_context.list * @int_error_count: Number of internal errors seen recently * @int_error_expire: Time at which error count will be expired * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will @@ -800,6 +828,8 @@ struct vfdi_status; * @mdio_bus: PHY MDIO bus ID (only used by Siena) * @phy_mode: PHY operating mode. Serialised by @mac_lock. * @link_advertising: Autonegotiation advertising flags + * @fec_config: Forward Error Correction configuration flags. For bit positions + * see &enum ethtool_fec_config_bits. * @link_state: Current state of the link * @n_link_state_changes: Number of times the link has changed state * @unicast_filter: Flag for Falcon-arch simple unicast filter. 
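
The FEC capability translation added in mcdi_port.c above is easiest to check as a round trip. Below is a minimal standalone sketch of the same mapping semantics; the CAP_*_LBN bit positions are illustrative placeholders, not the real mcdi_pcol.h values, and the ETHTOOL_FEC_* flags follow linux/ethtool.h.

/* fec_map_sketch.c: standalone model of the ethtool <-> MCDI FEC mapping.
 * CAP_*_LBN values below are illustrative placeholders only.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define ETHTOOL_FEC_AUTO  (1 << 1)	/* as in linux/ethtool.h */
#define ETHTOOL_FEC_OFF   (1 << 2)
#define ETHTOOL_FEC_RS    (1 << 3)
#define ETHTOOL_FEC_BASER (1 << 4)

enum {	/* placeholder bit positions in the MCDI PHY capability word */
	CAP_BASER_FEC_LBN,
	CAP_BASER_FEC_REQUESTED_LBN,
	CAP_RS_FEC_LBN,
	CAP_RS_FEC_REQUESTED_LBN,
	CAP_25G_BASER_FEC_LBN,
	CAP_25G_BASER_FEC_REQUESTED_LBN,
};

static uint32_t fec_to_mcdi(uint32_t cap)
{
	uint32_t ret = 0;

	if (cap & ETHTOOL_FEC_OFF)	/* OFF overrides all other bits */
		return 0;
	if (cap & ETHTOOL_FEC_AUTO)
		ret |= (1 << CAP_BASER_FEC_LBN) | (1 << CAP_25G_BASER_FEC_LBN) |
		       (1 << CAP_RS_FEC_LBN);
	if (cap & ETHTOOL_FEC_RS)
		ret |= (1 << CAP_RS_FEC_LBN) | (1 << CAP_RS_FEC_REQUESTED_LBN);
	if (cap & ETHTOOL_FEC_BASER)
		ret |= (1 << CAP_BASER_FEC_LBN) | (1 << CAP_25G_BASER_FEC_LBN) |
		       (1 << CAP_BASER_FEC_REQUESTED_LBN) |
		       (1 << CAP_25G_BASER_FEC_REQUESTED_LBN);
	return ret;
}

static uint32_t mcdi_to_fec(uint32_t caps, bool is_25g)
{
	bool rs = caps & (1 << CAP_RS_FEC_LBN);
	bool rs_req = caps & (1 << CAP_RS_FEC_REQUESTED_LBN);
	bool baser = caps & (1 << (is_25g ? CAP_25G_BASER_FEC_LBN
					  : CAP_BASER_FEC_LBN));
	bool baser_req = caps & (1 << (is_25g ? CAP_25G_BASER_FEC_REQUESTED_LBN
					      : CAP_BASER_FEC_REQUESTED_LBN));

	if (!baser && !rs)
		return ETHTOOL_FEC_OFF;
	return (rs_req ? ETHTOOL_FEC_RS : 0) |
	       (baser_req ? ETHTOOL_FEC_BASER : 0) |
	       (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
}

int main(void)
{
	/* OFF wins even when combined with other bits */
	assert(fec_to_mcdi(ETHTOOL_FEC_OFF | ETHTOOL_FEC_RS) == 0);
	/* an explicit request round-trips exactly */
	assert(mcdi_to_fec(fec_to_mcdi(ETHTOOL_FEC_RS), false) == ETHTOOL_FEC_RS);
	/* ability advertised without a request decodes back to AUTO */
	assert(mcdi_to_fec(fec_to_mcdi(ETHTOOL_FEC_AUTO), false) == ETHTOOL_FEC_AUTO);
	return 0;
}

The third assertion exercises the convention documented in the patch comment: advertising a FEC type without requesting it maps back to AUTO rather than to a specific mode.
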
@@ -814,9 +844,9 @@ struct vfdi_status; * @loopback_mode: Loopback status * @loopback_modes: Supported loopback mode bitmask * @loopback_selftest: Offline self-test private state - * @filter_sem: Filter table rw_semaphore, for freeing the table - * @filter_lock: Filter table lock, for mere content changes + * @filter_sem: Filter table rw_semaphore, protects existence of @filter_state * @filter_state: Architecture-dependent filter table state + * @rps_mutex: Protects RPS state of all channels * @rps_expire_channel: Next channel to check for expiry * @rps_expire_index: Next index to check for expiry in * @rps_expire_channel's @rps_flow_id @@ -909,11 +939,9 @@ struct efx_nic { int rx_packet_hash_offset; int rx_packet_len_offset; int rx_packet_ts_offset; - u8 rx_hash_key[40]; - u32 rx_indir_table[128]; bool rx_scatter; - bool rss_active; - bool rx_hash_udp_4tuple; + struct efx_rss_context rss_context; + struct mutex rss_lock; unsigned int_error_count; unsigned long int_error_expire; @@ -955,6 +983,7 @@ struct efx_nic { enum efx_phy_mode phy_mode; __ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising); + u32 fec_config; struct efx_link_state link_state; unsigned int n_link_state_changes; @@ -970,9 +999,9 @@ struct efx_nic { void *loopback_selftest; struct rw_semaphore filter_sem; - spinlock_t filter_lock; void *filter_state; #ifdef CONFIG_RFS_ACCEL + struct mutex rps_mutex; unsigned int rps_expire_channel; unsigned int rps_expire_index; #endif @@ -1099,6 +1128,10 @@ struct efx_udp_tunnel { * @tx_write: Write TX descriptors and doorbell * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC + * @rx_push_rss_context_config: Write RSS hash key and indirection table for + * user RSS context to the NIC + * @rx_pull_rss_context_config: Read RSS hash key and indirection table for user + * RSS context back from the NIC * @rx_probe: Allocate resources for RX queue * @rx_init: Initialise RX queue on the NIC * @rx_remove: Free resources for RX queue @@ -1123,10 +1156,6 @@ struct efx_udp_tunnel { * @filter_count_rx_used: Get the number of filters in use at a given priority * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1 * @filter_get_rx_ids: Get list of RX filters at a given priority - * @filter_rfs_insert: Add or replace a filter for RFS. This must be - * atomic. The hardware change may be asynchronous but should - * not be delayed for long. It may fail if this can't be done - * atomically. * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS. * This must check whether the specified table entry is used by RFS * and that rps_may_expire_flow() returns true for it. 
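
The struct efx_rss_context introduced above replaces the single global rx_hash_key/rx_indir_table pair with per-context state kept on a list protected by @rss_lock. A minimal sketch of that bookkeeping, with find_rss_context() as a hypothetical helper for illustration rather than anything this patch adds:

/* Per-context RSS bookkeeping: user contexts hang off the main context's
 * @list, keyed by the user-visible ID.
 */
#include <linux/list.h>
#include <linux/types.h>

#define RSS_CONTEXT_INVALID 0xffffffff	/* mirrors EFX_EF10_RSS_CONTEXT_INVALID */

struct rss_context {
	struct list_head list;
	u32 context_id;			/* firmware handle, or INVALID */
	u32 user_id;			/* ID exposed to userspace over ethtool */
	u8 rx_hash_key[40];
	u32 rx_indir_table[128];
};

/* Caller must hold the lock guarding the list (rss_lock in the driver);
 * the returned pointer is only valid while that lock is held.
 */
static struct rss_context *find_rss_context(struct list_head *head, u32 user_id)
{
	struct rss_context *ctx;

	list_for_each_entry(ctx, head, list)
		if (ctx->user_id == user_id)
			return ctx;
	return NULL;
}
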
@@ -1237,6 +1266,13 @@ struct efx_nic_type { int (*rx_push_rss_config)(struct efx_nic *efx, bool user, const u32 *rx_indir_table, const u8 *key); int (*rx_pull_rss_config)(struct efx_nic *efx); + int (*rx_push_rss_context_config)(struct efx_nic *efx, + struct efx_rss_context *ctx, + const u32 *rx_indir_table, + const u8 *key); + int (*rx_pull_rss_context_config)(struct efx_nic *efx, + struct efx_rss_context *ctx); + void (*rx_restore_rss_contexts)(struct efx_nic *efx); int (*rx_probe)(struct efx_rx_queue *rx_queue); void (*rx_init)(struct efx_rx_queue *rx_queue); void (*rx_remove)(struct efx_rx_queue *rx_queue); @@ -1270,8 +1306,6 @@ struct efx_nic_type { enum efx_filter_priority priority, u32 *buf, u32 size); #ifdef CONFIG_RFS_ACCEL - s32 (*filter_rfs_insert)(struct efx_nic *efx, - struct efx_filter_spec *spec); bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); #endif diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 6549fc685a48..5cca0556b47f 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -332,7 +332,6 @@ enum { EF10_STAT_fec_corrected_symbols_lane1, EF10_STAT_fec_corrected_symbols_lane2, EF10_STAT_fec_corrected_symbols_lane3, - EF10_STAT_ctpio_dmabuf_start, EF10_STAT_ctpio_vi_busy_fallback, EF10_STAT_ctpio_long_write_success, EF10_STAT_ctpio_missing_dbell_fail, @@ -365,6 +364,8 @@ enum { * @vi_base: Absolute index of first VI in this function * @n_allocated_vis: Number of VIs allocated to this function * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot + * @must_restore_rss_contexts: Flag: RSS contexts have yet to be restored after + * MC reboot * @must_restore_filters: Flag: filters have yet to be restored after MC reboot * @n_piobufs: Number of PIO buffers allocated to this function * @wc_membase: Base address of write-combining mapping of the memory BAR @@ -374,7 +375,6 @@ enum { * @piobuf_size: size of a single PIO buffer * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC * reboot - * @rx_rss_context: Firmware handle for our RSS context * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared * @stats: Hardware statistics * @workaround_35388: Flag: firmware supports workaround for bug 35388 @@ -408,6 +408,7 @@ struct efx_ef10_nic_data { unsigned int vi_base; unsigned int n_allocated_vis; bool must_realloc_vis; + bool must_restore_rss_contexts; bool must_restore_filters; unsigned int n_piobufs; void __iomem *wc_membase, *pio_write_base; @@ -415,7 +416,6 @@ struct efx_ef10_nic_data { unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT]; u16 piobuf_size; bool must_restore_piobufs; - u32 rx_rss_context; bool rx_rss_context_exclusive; u64 stats[EF10_STAT_COUNT]; bool workaround_35388; @@ -603,8 +603,6 @@ s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, enum efx_filter_priority priority, u32 *buf, u32 size); #ifdef CONFIG_RFS_ACCEL -s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, - struct efx_filter_spec *spec); bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, unsigned int index); #endif diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index cfe76aad79ee..95682831484e 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -827,14 +827,67 @@ MODULE_PARM_DESC(rx_refill_threshold, #ifdef CONFIG_RFS_ACCEL +/** + * struct efx_async_filter_insertion - Request to asynchronously insert a filter + * @net_dev: Reference to the netdevice + * @spec: The 
filter to insert + * @work: Workitem for this request + * @rxq_index: Identifies the channel for which this request was made + * @flow_id: Identifies the kernel-side flow for which this request was made + */ +struct efx_async_filter_insertion { + struct net_device *net_dev; + struct efx_filter_spec spec; + struct work_struct work; + u16 rxq_index; + u32 flow_id; +}; + +static void efx_filter_rfs_work(struct work_struct *data) +{ + struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, + work); + struct efx_nic *efx = netdev_priv(req->net_dev); + struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); + int rc; + + rc = efx->type->filter_insert(efx, &req->spec, false); + if (rc >= 0) { + /* Remember this so we can check whether to expire the filter + * later. + */ + mutex_lock(&efx->rps_mutex); + channel->rps_flow_id[rc] = req->flow_id; + ++channel->rfs_filters_added; + mutex_unlock(&efx->rps_mutex); + + if (req->spec.ether_type == htons(ETH_P_IP)) + netif_info(efx, rx_status, efx->net_dev, + "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc); + else + netif_info(efx, rx_status, efx->net_dev, + "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc); + } + + /* Release references */ + dev_put(req->net_dev); + kfree(req); +} + int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id) { struct efx_nic *efx = netdev_priv(net_dev); - struct efx_channel *channel; - struct efx_filter_spec spec; + struct efx_async_filter_insertion *req; struct flow_keys fk; - int rc; if (flow_id == RPS_FLOW_ID_INVALID) return -EINVAL; @@ -847,50 +900,39 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) return -EPROTONOSUPPORT; - efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, + req = kmalloc(sizeof(*req), GFP_ATOMIC); + if (!req) + return -ENOMEM; + + efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, efx->rx_scatter ? 
EFX_FILTER_FLAG_RX_SCATTER : 0, rxq_index); - spec.match_flags = + req->spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; - spec.ether_type = fk.basic.n_proto; - spec.ip_proto = fk.basic.ip_proto; + req->spec.ether_type = fk.basic.n_proto; + req->spec.ip_proto = fk.basic.ip_proto; if (fk.basic.n_proto == htons(ETH_P_IP)) { - spec.rem_host[0] = fk.addrs.v4addrs.src; - spec.loc_host[0] = fk.addrs.v4addrs.dst; + req->spec.rem_host[0] = fk.addrs.v4addrs.src; + req->spec.loc_host[0] = fk.addrs.v4addrs.dst; } else { - memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr)); - memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr)); + memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src, + sizeof(struct in6_addr)); + memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst, + sizeof(struct in6_addr)); } - spec.rem_port = fk.ports.src; - spec.loc_port = fk.ports.dst; - - rc = efx->type->filter_rfs_insert(efx, &spec); - if (rc < 0) - return rc; + req->spec.rem_port = fk.ports.src; + req->spec.loc_port = fk.ports.dst; - /* Remember this so we can check whether to expire the filter later */ - channel = efx_get_channel(efx, rxq_index); - channel->rps_flow_id[rc] = flow_id; - ++channel->rfs_filters_added; - - if (spec.ether_type == htons(ETH_P_IP)) - netif_info(efx, rx_status, efx->net_dev, - "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", - (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", - spec.rem_host, ntohs(spec.rem_port), spec.loc_host, - ntohs(spec.loc_port), rxq_index, flow_id, rc); - else - netif_info(efx, rx_status, efx->net_dev, - "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", - (spec.ip_proto == IPPROTO_TCP) ? 
"TCP" : "UDP", - spec.rem_host, ntohs(spec.rem_port), spec.loc_host, - ntohs(spec.loc_port), rxq_index, flow_id, rc); - - return rc; + dev_hold(req->net_dev = net_dev); + INIT_WORK(&req->work, efx_filter_rfs_work); + req->rxq_index = rxq_index; + req->flow_id = flow_id; + schedule_work(&req->work); + return 0; } bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) @@ -899,9 +941,8 @@ bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) unsigned int channel_idx, index, size; u32 flow_id; - if (!spin_trylock_bh(&efx->filter_lock)) + if (!mutex_trylock(&efx->rps_mutex)) return false; - expire_one = efx->type->filter_rfs_expire_one; channel_idx = efx->rps_expire_channel; index = efx->rps_expire_index; @@ -926,7 +967,7 @@ bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) efx->rps_expire_channel = channel_idx; efx->rps_expire_index = index; - spin_unlock_bh(&efx->filter_lock); + mutex_unlock(&efx->rps_mutex); return true; } diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index ae8645ae4492..65161f68265a 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -350,11 +350,11 @@ static int siena_rx_pull_rss_config(struct efx_nic *efx) * siena_rx_push_rss_config, below) */ efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); - memcpy(efx->rx_hash_key, &temp, sizeof(temp)); + memcpy(efx->rss_context.rx_hash_key, &temp, sizeof(temp)); efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); - memcpy(efx->rx_hash_key + sizeof(temp), &temp, sizeof(temp)); + memcpy(efx->rss_context.rx_hash_key + sizeof(temp), &temp, sizeof(temp)); efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); - memcpy(efx->rx_hash_key + 2 * sizeof(temp), &temp, + memcpy(efx->rss_context.rx_hash_key + 2 * sizeof(temp), &temp, FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); efx_farch_rx_pull_indir_table(efx); return 0; @@ -367,26 +367,26 @@ static int siena_rx_push_rss_config(struct efx_nic *efx, bool user, /* Set hash key for IPv4 */ if (key) - memcpy(efx->rx_hash_key, key, sizeof(temp)); - memcpy(&temp, efx->rx_hash_key, sizeof(temp)); + memcpy(efx->rss_context.rx_hash_key, key, sizeof(temp)); + memcpy(&temp, efx->rss_context.rx_hash_key, sizeof(temp)); efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); /* Enable IPv6 RSS */ - BUILD_BUG_ON(sizeof(efx->rx_hash_key) < + BUILD_BUG_ON(sizeof(efx->rss_context.rx_hash_key) < 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); - memcpy(&temp, efx->rx_hash_key, sizeof(temp)); + memcpy(&temp, efx->rss_context.rx_hash_key, sizeof(temp)); efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); - memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp)); + memcpy(&temp, efx->rss_context.rx_hash_key + sizeof(temp), sizeof(temp)); efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); - memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp), + memcpy(&temp, efx->rss_context.rx_hash_key + 2 * sizeof(temp), FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); - memcpy(efx->rx_indir_table, rx_indir_table, - sizeof(efx->rx_indir_table)); + memcpy(efx->rss_context.rx_indir_table, rx_indir_table, + sizeof(efx->rss_context.rx_indir_table)); efx_farch_rx_push_indir_table(efx); return 0; @@ -432,8 +432,8 @@ static int siena_init_nic(struct efx_nic *efx) EFX_RX_USR_BUF_SIZE >> 5); efx_writeo(efx, &temp, FR_AZ_RX_CFG); - siena_rx_push_rss_config(efx, false, 
efx->rx_indir_table, NULL); - efx->rss_active = true; + siena_rx_push_rss_config(efx, false, efx->rss_context.rx_indir_table, NULL); + efx->rss_context.context_id = 0; /* indicates RSS is active */ /* Enable event logging */ rc = efx_mcdi_log_ctrl(efx, true, false, 0); @@ -1035,7 +1035,6 @@ const struct efx_nic_type siena_a0_nic_type = { .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit, .filter_get_rx_ids = efx_farch_filter_get_rx_ids, #ifdef CONFIG_RFS_ACCEL - .filter_rfs_insert = efx_farch_filter_rfs_insert, .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one, #endif #ifdef CONFIG_SFC_MTD diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index 63aca9f847e1..358820282ef0 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -5,8 +5,8 @@ config NET_VENDOR_SMSC bool "SMC (SMSC)/Western Digital devices" default y - depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \ - ISA || M32R || MAC || MIPS || MN10300 || NIOS2 || PCI || \ + depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \ + ISA || MAC || MIPS || NIOS2 || PCI || \ PCMCIA || SUPERH || XTENSA || H8300 ---help--- If you have a network (Ethernet) card belonging to this class, say Y. @@ -20,7 +20,7 @@ if NET_VENDOR_SMSC config SMC9194 tristate "SMC 9194 support" - depends on (ISA || MAC && BROKEN) + depends on ISA select CRC32 ---help--- This is support for the SMC9xxx based Ethernet cards. Choose this @@ -37,8 +37,8 @@ config SMC91X select CRC32 select MII depends on !OF || GPIOLIB - depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \ - M32R || MIPS || MN10300 || NIOS2 || SUPERH || XTENSA || H8300 + depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \ + MIPS || NIOS2 || SUPERH || XTENSA || H8300 ---help--- This is a driver for SMC's 91x series of Ethernet chipsets, including the SMC91C94 and the SMC91C111. Say Y if you want it @@ -77,7 +77,7 @@ config SMC911X tristate "SMSC LAN911[5678] support" select CRC32 select MII - depends on (ARM || SUPERH || MN10300) + depends on (ARM || SUPERH) ---help--- This is a driver for SMSC's LAN911x series of Ethernet chipsets including the new LAN9115, LAN9116, LAN9117, and LAN9118. 
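
Returning to the rx.c hunks above: they replace the atomic filter_rfs_insert callback with a deferred work item, so the potentially sleeping firmware filter insert runs in process context while ndo_rx_flow_steer itself stays atomic. A minimal sketch of that hand-off, with blocking_insert() as a hypothetical stand-in for the driver's real filter_insert hook (the real worker also records the flow ID for later expiry):

/* Atomic-to-process-context hand-off for flow steering requests. */
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct steer_request {
	struct work_struct work;
	struct net_device *net_dev;	/* reference held until the worker runs */
	u32 flow_id;
};

static int blocking_insert(struct net_device *dev, u32 flow_id)
{
	return 0;	/* stand-in: the real insert may sleep on firmware */
}

static void steer_work(struct work_struct *work)
{
	struct steer_request *req =
		container_of(work, struct steer_request, work);

	blocking_insert(req->net_dev, req->flow_id);	/* process context */
	dev_put(req->net_dev);	/* drop the reference taken at submit time */
	kfree(req);
}

/* Called from the atomic steering hook: only a GFP_ATOMIC allocation and a
 * work submission happen here, nothing that can sleep.
 */
static int submit_steer(struct net_device *net_dev, u32 flow_id)
{
	struct steer_request *req = kmalloc(sizeof(*req), GFP_ATOMIC);

	if (!req)
		return -ENOMEM;
	dev_hold(net_dev);
	req->net_dev = net_dev;
	req->flow_id = flow_id;
	INIT_WORK(&req->work, steer_work);
	schedule_work(&req->work);
	return 0;
}

Returning 0 before the insert completes matches the patch's behaviour: the steering hook only needs to know the request was queued, and failures are absorbed by the worker.
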
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index 08b17adf0a65..b337ee97e0c0 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -144,32 +144,6 @@ static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg, #define SMC_IRQ_FLAGS (0) -#elif defined(CONFIG_M32R) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 - -#define SMC_inb(a, r) inb(((u32)a) + (r)) -#define SMC_inw(a, r) inw(((u32)a) + (r)) -#define SMC_outb(v, a, r) outb(v, ((u32)a) + (r)) -#define SMC_outw(lp, v, a, r) outw(v, ((u32)a) + (r)) -#define SMC_insw(a, r, p, l) insw(((u32)a) + (r), p, l) -#define SMC_outsw(a, r, p, l) outsw(((u32)a) + (r), p, l) - -#define SMC_IRQ_FLAGS (0) - -#define RPC_LSA_DEFAULT RPC_LED_TX_RX -#define RPC_LSB_DEFAULT RPC_LED_100_10 - -#elif defined(CONFIG_MN10300) - -/* - * MN10300/AM33 configuration - */ - -#include <unit/smc91111.h> - #elif defined(CONFIG_ATARI) #define SMC_CAN_USE_8BIT 1 diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 012fb66eed8d..f0afb88d7bc2 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2335,14 +2335,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev) pdata = netdev_priv(dev); BUG_ON(!pdata); BUG_ON(!pdata->ioaddr); - WARN_ON(dev->phydev); SMSC_TRACE(pdata, ifdown, "Stopping driver"); + unregister_netdev(dev); + mdiobus_unregister(pdata->mii_bus); mdiobus_free(pdata->mii_bus); - unregister_netdev(dev); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smsc911x-memory"); if (!res) diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 111e7ca9df56..0b3b7a460641 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1295,7 +1295,7 @@ static int ave_open(struct net_device *ndev) val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16); writel(val, priv->base + AVE_IIRQC); - val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX; + val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP; ave_irq_restore(ndev, val); napi_enable(&priv->napi_rx); @@ -1701,6 +1701,10 @@ static const struct ave_soc_data ave_ld20_data = { .is_desc_64bit = true, }; +static const struct ave_soc_data ave_pxs3_data = { + .is_desc_64bit = false, +}; + static const struct of_device_id of_ave_match[] = { { .compatible = "socionext,uniphier-pro4-ave4", @@ -1718,6 +1722,10 @@ static const struct of_device_id of_ave_match[] = { .compatible = "socionext,uniphier-ld20-ave4", .data = &ave_ld20_data, }, + { + .compatible = "socionext,uniphier-pxs3-ave4", + .data = &ave_pxs3_data, + }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, of_ave_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index ff3f83b86d10..972e4ef6d414 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -4,7 +4,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ - dwmac4_dma.o dwmac4_lib.o dwmac4_core.o $(stmmac-y) + dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o $(stmmac-y) # Ordering matters. Generic driver must be last. 
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 2ffe76c0ff74..ad2388aee463 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -38,6 +38,8 @@ #define DWMAC_CORE_3_40 0x34 #define DWMAC_CORE_3_50 0x35 #define DWMAC_CORE_4_00 0x40 +#define DWMAC_CORE_5_00 0x50 +#define DWMAC_CORE_5_10 0x51 #define STMMAC_CHAN0 0 /* Always supported and default for all chips */ /* These need to be power of two, and >= 4 */ @@ -174,6 +176,17 @@ struct stmmac_extra_stats { unsigned long tx_tso_nfrags; }; +/* Safety Feature statistics exposed by ethtool */ +struct stmmac_safety_stats { + unsigned long mac_errors[32]; + unsigned long mtl_errors[32]; + unsigned long dma_errors[32]; +}; + +/* Number of fields in Safety Stats */ +#define STMMAC_SAFETY_FEAT_SIZE \ + (sizeof(struct stmmac_safety_stats) / sizeof(unsigned long)) + /* CSR Frequency Access Defines*/ #define CSR_F_35M 35000000 #define CSR_F_60M 60000000 @@ -336,6 +349,8 @@ struct dma_features { /* TX and RX FIFO sizes */ unsigned int tx_fifo_size; unsigned int rx_fifo_size; + /* Automotive Safety Package */ + unsigned int asp; }; /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ @@ -532,6 +547,13 @@ struct stmmac_ops { bool loopback); void (*pcs_rane)(void __iomem *ioaddr, bool restart); void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); + /* Safety Features */ + int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp); + bool (*safety_feat_irq_status)(struct net_device *ndev, + void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_stats *stats); + const char *(*safety_feat_dump)(struct stmmac_safety_stats *stats, + int index, unsigned long *count); }; /* PTP and HW Timer helpers */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 5270d26f0bc6..7cb794094a70 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -1,5 +1,5 @@ /* - * Amlogic Meson8b and GXBB DWMAC glue layer + * Amlogic Meson8b, Meson8m2 and GXBB DWMAC glue layer * * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com> * @@ -48,26 +48,18 @@ #define MUX_CLK_NUM_PARENTS 2 struct meson8b_dwmac { - struct platform_device *pdev; - + struct device *dev; void __iomem *regs; - phy_interface_t phy_mode; + struct clk *rgmii_tx_clk; + u32 tx_delay_ns; +}; +struct meson8b_dwmac_clk_configs { struct clk_mux m250_mux; - struct clk *m250_mux_clk; - struct clk *m250_mux_parent[MUX_CLK_NUM_PARENTS]; - struct clk_divider m250_div; - struct clk *m250_div_clk; - struct clk_fixed_factor fixed_div2; - struct clk *fixed_div2_clk; - struct clk_gate rgmii_tx_en; - struct clk *rgmii_tx_en_clk; - - u32 tx_delay_ns; }; static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg, @@ -82,106 +74,99 @@ static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg, writel(data, dwmac->regs + reg); } -static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) +static struct clk *meson8b_dwmac_register_clk(struct meson8b_dwmac *dwmac, + const char *name_suffix, + const char **parent_names, + int num_parents, + const struct clk_ops *ops, + struct clk_hw *hw) { struct clk_init_data init; - int i, ret; - struct device *dev = &dwmac->pdev->dev; char clk_name[32]; - const char *clk_div_parents[1]; - const char 
*mux_parent_names[MUX_CLK_NUM_PARENTS]; + + snprintf(clk_name, sizeof(clk_name), "%s#%s", dev_name(dwmac->dev), + name_suffix); + + init.name = clk_name; + init.ops = ops; + init.flags = CLK_SET_RATE_PARENT; + init.parent_names = parent_names; + init.num_parents = num_parents; + + hw->init = &init; + + return devm_clk_register(dwmac->dev, hw); +} + +static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) +{ + int i, ret; + struct clk *clk; + struct device *dev = dwmac->dev; + const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS]; + struct meson8b_dwmac_clk_configs *clk_configs; + + clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL); + if (!clk_configs) + return -ENOMEM; /* get the mux parents from DT */ for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) { char name[16]; snprintf(name, sizeof(name), "clkin%d", i); - dwmac->m250_mux_parent[i] = devm_clk_get(dev, name); - if (IS_ERR(dwmac->m250_mux_parent[i])) { - ret = PTR_ERR(dwmac->m250_mux_parent[i]); + clk = devm_clk_get(dev, name); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); if (ret != -EPROBE_DEFER) dev_err(dev, "Missing clock %s\n", name); return ret; } - mux_parent_names[i] = - __clk_get_name(dwmac->m250_mux_parent[i]); + mux_parent_names[i] = __clk_get_name(clk); } - /* create the m250_mux */ - snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev)); - init.name = clk_name; - init.ops = &clk_mux_ops; - init.flags = CLK_SET_RATE_PARENT; - init.parent_names = mux_parent_names; - init.num_parents = MUX_CLK_NUM_PARENTS; - - dwmac->m250_mux.reg = dwmac->regs + PRG_ETH0; - dwmac->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT; - dwmac->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK; - dwmac->m250_mux.flags = 0; - dwmac->m250_mux.table = NULL; - dwmac->m250_mux.hw.init = &init; - - dwmac->m250_mux_clk = devm_clk_register(dev, &dwmac->m250_mux.hw); - if (WARN_ON(IS_ERR(dwmac->m250_mux_clk))) - return PTR_ERR(dwmac->m250_mux_clk); - - /* create the m250_div */ - snprintf(clk_name, sizeof(clk_name), "%s#m250_div", dev_name(dev)); - init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL); - init.ops = &clk_divider_ops; - init.flags = CLK_SET_RATE_PARENT; - clk_div_parents[0] = __clk_get_name(dwmac->m250_mux_clk); - init.parent_names = clk_div_parents; - init.num_parents = ARRAY_SIZE(clk_div_parents); - - dwmac->m250_div.reg = dwmac->regs + PRG_ETH0; - dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT; - dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH; - dwmac->m250_div.hw.init = &init; - dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | + clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0; + clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT; + clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK; + clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parent_names, + MUX_CLK_NUM_PARENTS, &clk_mux_ops, + &clk_configs->m250_mux.hw); + if (WARN_ON(IS_ERR(clk))) + return PTR_ERR(clk); + + parent_name = __clk_get_name(clk); + clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0; + clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT; + clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH; + clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | CLK_DIVIDER_ROUND_CLOSEST; - - dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw); - if (WARN_ON(IS_ERR(dwmac->m250_div_clk))) - return PTR_ERR(dwmac->m250_div_clk); - - /* create the fixed_div2 */ - snprintf(clk_name, sizeof(clk_name), "%s#fixed_div2", dev_name(dev)); - init.name = devm_kstrdup(dev, clk_name, 
GFP_KERNEL); - init.ops = &clk_fixed_factor_ops; - init.flags = CLK_SET_RATE_PARENT; - clk_div_parents[0] = __clk_get_name(dwmac->m250_div_clk); - init.parent_names = clk_div_parents; - init.num_parents = ARRAY_SIZE(clk_div_parents); - - dwmac->fixed_div2.mult = 1; - dwmac->fixed_div2.div = 2; - dwmac->fixed_div2.hw.init = &init; - - dwmac->fixed_div2_clk = devm_clk_register(dev, &dwmac->fixed_div2.hw); - if (WARN_ON(IS_ERR(dwmac->fixed_div2_clk))) - return PTR_ERR(dwmac->fixed_div2_clk); - - /* create the rgmii_tx_en */ - init.name = devm_kasprintf(dev, GFP_KERNEL, "%s#rgmii_tx_en", - dev_name(dev)); - init.ops = &clk_gate_ops; - init.flags = CLK_SET_RATE_PARENT; - clk_div_parents[0] = __clk_get_name(dwmac->fixed_div2_clk); - init.parent_names = clk_div_parents; - init.num_parents = ARRAY_SIZE(clk_div_parents); - - dwmac->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0; - dwmac->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN; - dwmac->rgmii_tx_en.hw.init = &init; - - dwmac->rgmii_tx_en_clk = devm_clk_register(dev, - &dwmac->rgmii_tx_en.hw); - if (WARN_ON(IS_ERR(dwmac->rgmii_tx_en_clk))) - return PTR_ERR(dwmac->rgmii_tx_en_clk); + clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1, + &clk_divider_ops, + &clk_configs->m250_div.hw); + if (WARN_ON(IS_ERR(clk))) + return PTR_ERR(clk); + + parent_name = __clk_get_name(clk); + clk_configs->fixed_div2.mult = 1; + clk_configs->fixed_div2.div = 2; + clk = meson8b_dwmac_register_clk(dwmac, "fixed_div2", &parent_name, 1, + &clk_fixed_factor_ops, + &clk_configs->fixed_div2.hw); + if (WARN_ON(IS_ERR(clk))) + return PTR_ERR(clk); + + parent_name = __clk_get_name(clk); + clk_configs->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0; + clk_configs->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN; + clk = meson8b_dwmac_register_clk(dwmac, "rgmii_tx_en", &parent_name, 1, + &clk_gate_ops, + &clk_configs->rgmii_tx_en.hw); + if (WARN_ON(IS_ERR(clk))) + return PTR_ERR(clk); + + dwmac->rgmii_tx_clk = clk; return 0; } @@ -219,19 +204,23 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) * a register) based on the line-speed (125MHz for Gbit speeds, * 25MHz for 100Mbit/s and 2.5MHz for 10Mbit/s). 
*/ - ret = clk_set_rate(dwmac->rgmii_tx_en_clk, 125 * 1000 * 1000); + ret = clk_set_rate(dwmac->rgmii_tx_clk, 125 * 1000 * 1000); if (ret) { - dev_err(&dwmac->pdev->dev, + dev_err(dwmac->dev, "failed to set RGMII TX clock\n"); return ret; } - ret = clk_prepare_enable(dwmac->rgmii_tx_en_clk); + ret = clk_prepare_enable(dwmac->rgmii_tx_clk); if (ret) { - dev_err(&dwmac->pdev->dev, + dev_err(dwmac->dev, "failed to enable the RGMII TX clock\n"); return ret; } + + devm_add_action_or_reset(dwmac->dev, + (void(*)(void *))clk_disable_unprepare, + dwmac->rgmii_tx_clk); break; case PHY_INTERFACE_MODE_RMII: @@ -251,7 +240,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) break; default: - dev_err(&dwmac->pdev->dev, "unsupported phy-mode %s\n", + dev_err(dwmac->dev, "unsupported phy-mode %s\n", phy_modes(dwmac->phy_mode)); return -EINVAL; } @@ -292,7 +281,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } - dwmac->pdev = pdev; + dwmac->dev = &pdev->dev; dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node); if (dwmac->phy_mode < 0) { dev_err(&pdev->dev, "missing phy-mode property\n"); @@ -317,31 +306,19 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) - goto err_clk_disable; + goto err_remove_config_dt; return 0; -err_clk_disable: - if (phy_interface_mode_is_rgmii(dwmac->phy_mode)) - clk_disable_unprepare(dwmac->rgmii_tx_en_clk); err_remove_config_dt: stmmac_remove_config_dt(pdev, plat_dat); return ret; } -static int meson8b_dwmac_remove(struct platform_device *pdev) -{ - struct meson8b_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev); - - if (phy_interface_mode_is_rgmii(dwmac->phy_mode)) - clk_disable_unprepare(dwmac->rgmii_tx_en_clk); - - return stmmac_pltfr_remove(pdev); -} - static const struct of_device_id meson8b_dwmac_match[] = { { .compatible = "amlogic,meson8b-dwmac" }, + { .compatible = "amlogic,meson8m2-dwmac" }, { .compatible = "amlogic,meson-gxbb-dwmac" }, { } }; @@ -349,7 +326,7 @@ MODULE_DEVICE_TABLE(of, meson8b_dwmac_match); static struct platform_driver meson8b_dwmac_driver = { .probe = meson8b_dwmac_probe, - .remove = meson8b_dwmac_remove, + .remove = stmmac_pltfr_remove, .driver = { .name = "meson8b-dwmac", .pm = &stmmac_pltfr_pm_ops, @@ -359,5 +336,5 @@ static struct platform_driver meson8b_dwmac_driver = { module_platform_driver(meson8b_dwmac_driver); MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); -MODULE_DESCRIPTION("Amlogic Meson8b and GXBB DWMAC glue layer"); +MODULE_DESCRIPTION("Amlogic Meson8b, Meson8m2 and GXBB DWMAC glue layer"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 7761a26ec9c5..c7bff596c665 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -39,6 +39,7 @@ #define GMAC_HW_FEATURE0 0x0000011c #define GMAC_HW_FEATURE1 0x00000120 #define GMAC_HW_FEATURE2 0x00000124 +#define GMAC_HW_FEATURE3 0x00000128 #define GMAC_MDIO_ADDR 0x00000200 #define GMAC_MDIO_DATA 0x00000204 #define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) @@ -192,6 +193,9 @@ enum power_event { #define GMAC_HW_FEAT_TXQCNT GENMASK(9, 6) #define GMAC_HW_FEAT_RXQCNT GENMASK(3, 0) +/* MAC HW features3 bitmap */ +#define GMAC_HW_FEAT_ASP GENMASK(29, 28) + /* MAC HW ADDR regs */ #define GMAC_HI_DCS GENMASK(18, 16) #define GMAC_HI_DCS_SHIFT 16 diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 63795ecafc8d..a3af92ebbca8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -20,6 +20,7 @@ #include <net/dsa.h> #include "stmmac_pcs.h" #include "dwmac4.h" +#include "dwmac5.h" static void dwmac4_core_init(struct mac_device_info *hw, struct net_device *dev) @@ -120,7 +121,7 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw, writel(value, ioaddr + base_register); } -static void dwmac4_tx_queue_routing(struct mac_device_info *hw, +static void dwmac4_rx_queue_routing(struct mac_device_info *hw, u8 packet, u32 queue) { void __iomem *ioaddr = hw->pcsr; @@ -713,7 +714,7 @@ static const struct stmmac_ops dwmac4_ops = { .rx_queue_enable = dwmac4_rx_queue_enable, .rx_queue_prio = dwmac4_rx_queue_priority, .tx_queue_prio = dwmac4_tx_queue_priority, - .rx_queue_routing = dwmac4_tx_queue_routing, + .rx_queue_routing = dwmac4_rx_queue_routing, .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, @@ -744,7 +745,7 @@ static const struct stmmac_ops dwmac410_ops = { .rx_queue_enable = dwmac4_rx_queue_enable, .rx_queue_prio = dwmac4_rx_queue_priority, .tx_queue_prio = dwmac4_tx_queue_priority, - .rx_queue_routing = dwmac4_tx_queue_routing, + .rx_queue_routing = dwmac4_rx_queue_routing, .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, @@ -768,6 +769,40 @@ static const struct stmmac_ops dwmac410_ops = { .set_filter = dwmac4_set_filter, }; +static const struct stmmac_ops dwmac510_ops = { + .core_init = dwmac4_core_init, + .set_mac = stmmac_dwmac4_set_mac, + .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, + .rx_queue_prio = dwmac4_rx_queue_priority, + .tx_queue_prio = dwmac4_tx_queue_priority, + .rx_queue_routing = dwmac4_rx_queue_routing, + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, + .map_mtl_to_dma = dwmac4_map_mtl_dma, + .config_cbs = dwmac4_config_cbs, + .dump_regs = dwmac4_dump_regs, + .host_irq_status = dwmac4_irq_status, + .host_mtl_irq_status = dwmac4_irq_mtl_status, + .flow_ctrl = dwmac4_flow_ctrl, + .pmt = dwmac4_pmt, + .set_umac_addr = dwmac4_set_umac_addr, + .get_umac_addr = dwmac4_get_umac_addr, + .set_eee_mode = dwmac4_set_eee_mode, + .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_timer = dwmac4_set_eee_timer, + .set_eee_pls = dwmac4_set_eee_pls, + .pcs_ctrl_ane = dwmac4_ctrl_ane, + .pcs_rane = dwmac4_rane, + .pcs_get_adv_lp = dwmac4_get_adv_lp, + .debug = dwmac4_debug, + .set_filter = dwmac4_set_filter, + .safety_feat_config = dwmac5_safety_feat_config, + .safety_feat_irq_status = dwmac5_safety_feat_irq_status, + .safety_feat_dump = dwmac5_safety_feat_dump, +}; + struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins, int perfect_uc_entries, int *synopsys_id) { @@ -808,7 +843,9 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins, else mac->dma = &dwmac4_dma_ops; - if (*synopsys_id >= DWMAC_CORE_4_00) + if (*synopsys_id >= DWMAC_CORE_5_10) + mac->mac = &dwmac510_ops; + else if (*synopsys_id >= DWMAC_CORE_4_00) mac->mac = &dwmac410_ops; else mac->mac = 
&dwmac4_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index c728ffa095de..2a6521d33e43 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -389,6 +389,8 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs, static void dwmac4_release_tx_desc(struct dma_desc *p, int mode) { + p->des0 = 0; + p->des1 = 0; p->des2 = 0; p->des3 = 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index c110f6850ffa..d37d457306d1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -373,6 +373,12 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, /* IEEE 1588-2002 */ dma_cap->time_stamp = 0; + + /* MAC HW feature3 */ + hw_cap = readl(ioaddr + GMAC_HW_FEATURE3); + + /* 5.10 Features */ + dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28; } /* Enable/disable TSO feature and set MSS */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c new file mode 100644 index 000000000000..860de39999c7 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates. +// stmmac Support for 5.xx Ethernet QoS cores + +#include <linux/bitops.h> +#include <linux/iopoll.h> +#include "common.h" +#include "dwmac4.h" +#include "dwmac5.h" + +struct dwmac5_error_desc { + bool valid; + const char *desc; + const char *detailed_desc; +}; + +#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field) + +static void dwmac5_log_error(struct net_device *ndev, u32 value, bool corr, + const char *module_name, const struct dwmac5_error_desc *desc, + unsigned long field_offset, struct stmmac_safety_stats *stats) +{ + unsigned long loc, mask; + u8 *bptr = (u8 *)stats; + unsigned long *ptr; + + ptr = (unsigned long *)(bptr + field_offset); + + mask = value; + for_each_set_bit(loc, &mask, 32) { + netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ? 
+ "correctable" : "uncorrectable", module_name, + desc[loc].desc, desc[loc].detailed_desc); + + /* Update counters */ + ptr[loc]++; + } +} + +static const struct dwmac5_error_desc dwmac5_mac_errors[32]= { + { true, "ATPES", "Application Transmit Interface Parity Check Error" }, + { true, "TPES", "TSO Data Path Parity Check Error" }, + { true, "RDPES", "Read Descriptor Parity Check Error" }, + { true, "MPES", "MTL Data Path Parity Check Error" }, + { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" }, + { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" }, + { true, "CWPES", "CSR Write Data Path Parity Check Error" }, + { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" }, + { true, "TTES", "TX FSM Timeout Error" }, + { true, "RTES", "RX FSM Timeout Error" }, + { true, "CTES", "CSR FSM Timeout Error" }, + { true, "ATES", "APP FSM Timeout Error" }, + { true, "PTES", "PTP FSM Timeout Error" }, + { true, "T125ES", "TX125 FSM Timeout Error" }, + { true, "R125ES", "RX125 FSM Timeout Error" }, + { true, "RVCTES", "REV MDC FSM Timeout Error" }, + { true, "MSTTES", "Master Read/Write Timeout Error" }, + { true, "SLVTES", "Slave Read/Write Timeout Error" }, + { true, "ATITES", "Application Timeout on ATI Interface Error" }, + { true, "ARITES", "Application Timeout on ARI Interface Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ + { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { true, "FSMPES", "FSM State Parity Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +static void dwmac5_handle_mac_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + MAC_DPP_FSM_INT_STATUS); + writel(value, ioaddr + MAC_DPP_FSM_INT_STATUS); + + dwmac5_log_error(ndev, value, correctable, "MAC", dwmac5_mac_errors, + STAT_OFF(mac_errors), stats); +} + +static const struct dwmac5_error_desc dwmac5_mtl_errors[32]= { + { true, "TXCES", "MTL TX Memory Error" }, + { true, "TXAMS", "MTL TX Memory Address Mismatch Error" }, + { true, "TXUES", "MTL TX Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 3 */ + { true, "RXCES", "MTL RX Memory Error" }, + { true, "RXAMS", "MTL RX Memory Address Mismatch Error" }, + { true, "RXUES", "MTL RX Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 7 */ + { true, "ECES", "MTL EST Memory Error" }, + { true, "EAMS", "MTL EST Memory Address Mismatch Error" }, + { true, "EUES", "MTL EST Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 11 */ + { true, "RPCES", "MTL RX Parser Memory Error" }, + { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" }, + { true, "RPUES", "MTL RX Parser Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 15 */ + { false, "UNKNOWN", "Unknown Error" }, /* 16 */ + { false, "UNKNOWN", "Unknown Error" }, /* 17 */ + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ 
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { false, "UNKNOWN", "Unknown Error" }, /* 24 */ + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +static void dwmac5_handle_mtl_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + MTL_ECC_INT_STATUS); + writel(value, ioaddr + MTL_ECC_INT_STATUS); + + dwmac5_log_error(ndev, value, correctable, "MTL", dwmac5_mtl_errors, + STAT_OFF(mtl_errors), stats); +} + +static const struct dwmac5_error_desc dwmac5_dma_errors[32]= { + { true, "TCES", "DMA TSO Memory Error" }, + { true, "TAMS", "DMA TSO Memory Address Mismatch Error" }, + { true, "TUES", "DMA TSO Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 3 */ + { false, "UNKNOWN", "Unknown Error" }, /* 4 */ + { false, "UNKNOWN", "Unknown Error" }, /* 5 */ + { false, "UNKNOWN", "Unknown Error" }, /* 6 */ + { false, "UNKNOWN", "Unknown Error" }, /* 7 */ + { false, "UNKNOWN", "Unknown Error" }, /* 8 */ + { false, "UNKNOWN", "Unknown Error" }, /* 9 */ + { false, "UNKNOWN", "Unknown Error" }, /* 10 */ + { false, "UNKNOWN", "Unknown Error" }, /* 11 */ + { false, "UNKNOWN", "Unknown Error" }, /* 12 */ + { false, "UNKNOWN", "Unknown Error" }, /* 13 */ + { false, "UNKNOWN", "Unknown Error" }, /* 14 */ + { false, "UNKNOWN", "Unknown Error" }, /* 15 */ + { false, "UNKNOWN", "Unknown Error" }, /* 16 */ + { false, "UNKNOWN", "Unknown Error" }, /* 17 */ + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ + { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { false, "UNKNOWN", "Unknown Error" }, /* 24 */ + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +static void dwmac5_handle_dma_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + DMA_ECC_INT_STATUS); + writel(value, ioaddr + DMA_ECC_INT_STATUS); + + dwmac5_log_error(ndev, value, correctable, "DMA", dwmac5_dma_errors, + STAT_OFF(dma_errors), stats); +} + +int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp) +{ + u32 value; + + if (!asp) + return -EINVAL; + + /* 1. Enable Safety Features */ + value = readl(ioaddr + MTL_ECC_CONTROL); + value |= TSOEE; /* TSO ECC */ + value |= MRXPEE; /* MTL RX Parser ECC */ + value |= MESTEE; /* MTL EST ECC */ + value |= MRXEE; /* MTL RX FIFO ECC */ + value |= MTXEE; /* MTL TX FIFO ECC */ + writel(value, ioaddr + MTL_ECC_CONTROL); + + /* 2. 
Enable MTL Safety Interrupts */
+	value = readl(ioaddr + MTL_ECC_INT_ENABLE);
+	value |= RPCEIE; /* RX Parser Memory Correctable Error */
+	value |= ECEIE; /* EST Memory Correctable Error */
+	value |= RXCEIE; /* RX Memory Correctable Error */
+	value |= TXCEIE; /* TX Memory Correctable Error */
+	writel(value, ioaddr + MTL_ECC_INT_ENABLE);
+
+	/* 3. Enable DMA Safety Interrupts */
+	value = readl(ioaddr + DMA_ECC_INT_ENABLE);
+	value |= TCEIE; /* TSO Memory Correctable Error */
+	writel(value, ioaddr + DMA_ECC_INT_ENABLE);
+
+	/* Only ECC Protection for External Memory feature is selected */
+	if (asp <= 0x1)
+		return 0;
+
+	/* 4. Enable Parity and Timeout for FSM */
+	value = readl(ioaddr + MAC_FSM_CONTROL);
+	value |= PRTYEN; /* FSM Parity Feature */
+	value |= TMOUTEN; /* FSM Timeout Feature */
+	writel(value, ioaddr + MAC_FSM_CONTROL);
+
+	/* 5. Enable Data Parity Protection */
+	value = readl(ioaddr + MTL_DPP_CONTROL);
+	value |= EDPP;
+	writel(value, ioaddr + MTL_DPP_CONTROL);
+
+	/*
+	 * All the Automotive Safety features are selected without the "Parity
+	 * Port Enable for external interface" feature.
+	 */
+	if (asp <= 0x2)
+		return 0;
+
+	value |= EPSI;
+	writel(value, ioaddr + MTL_DPP_CONTROL);
+	return 0;
+}
+
+bool dwmac5_safety_feat_irq_status(struct net_device *ndev,
+		void __iomem *ioaddr, unsigned int asp,
+		struct stmmac_safety_stats *stats)
+{
+	bool ret = false, err, corr;
+	u32 mtl, dma;
+
+	if (!asp)
+		return false;
+
+	mtl = readl(ioaddr + MTL_SAFETY_INT_STATUS);
+	dma = readl(ioaddr + DMA_SAFETY_INT_STATUS);
+
+	err = (mtl & MCSIS) || (dma & MCSIS);
+	corr = false;
+	if (err) {
+		dwmac5_handle_mac_err(ndev, ioaddr, corr, stats);
+		ret |= !corr;
+	}
+
+	err = (mtl & (MEUIS | MECIS)) || (dma & (MSUIS | MSCIS));
+	corr = (mtl & MECIS) || (dma & MSCIS);
+	if (err) {
+		dwmac5_handle_mtl_err(ndev, ioaddr, corr, stats);
+		ret |= !corr;
+	}
+
+	err = dma & (DEUIS | DECIS);
+	corr = dma & DECIS;
+	if (err) {
+		dwmac5_handle_dma_err(ndev, ioaddr, corr, stats);
+		ret |= !corr;
+	}
+
+	return ret;
+}
+
+static const struct dwmac5_error {
+	const struct dwmac5_error_desc *desc;
+} dwmac5_all_errors[] = {
+	{ dwmac5_mac_errors },
+	{ dwmac5_mtl_errors },
+	{ dwmac5_dma_errors },
+};
+
+const char *dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
+			int index, unsigned long *count)
+{
+	int module = index / 32, offset = index % 32;
+	unsigned long *ptr = (unsigned long *)stats;
+
+	if (module >= ARRAY_SIZE(dwmac5_all_errors))
+		return NULL;
+	if (!dwmac5_all_errors[module].desc[offset].valid)
+		return NULL;
+	if (count)
+		*count = *(ptr + index);
+	return dwmac5_all_errors[module].desc[offset].desc;
+}
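
Since each of the three error tables has 32 entries, dwmac5_safety_feat_dump() flattens them into a single 0-95 index space (module = index / 32, bit = index % 32) and returns NULL for reserved slots. A minimal sketch of how a caller walks that space; dwmac5_print_safety_stats() is a hypothetical helper assumed to live next to the code above, not part of this patch.

/* Hypothetical helper: walk the flattened index space of
 * dwmac5_safety_feat_dump(). Indices 0..31 map to mac_errors,
 * 32..63 to mtl_errors, 64..95 to dma_errors; a NULL return
 * marks a reserved slot with no counter behind it.
 */
static void dwmac5_print_safety_stats(struct stmmac_safety_stats *stats)
{
	unsigned long count;
	const char *name;
	int i;

	for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
		name = dwmac5_safety_feat_dump(stats, i, &count);
		if (!name)
			continue;	/* reserved bit, skip */
		pr_info("%s: %lu\n", name, count);
	}
}

This is the same shape as the ethtool plumbing added later in this patch, where stmmac_get_sset_count() and stmmac_get_ethtool_stats() probe the dump callback to count and export only the valid entries.

diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
new file mode 100644
index 000000000000..a0d2c44711b9
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates.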
+// stmmac Support for 5.xx Ethernet QoS cores + +#ifndef __DWMAC5_H__ +#define __DWMAC5_H__ + +#define MAC_DPP_FSM_INT_STATUS 0x00000140 +#define MAC_AXI_SLV_DPE_ADDR_STATUS 0x00000144 +#define MAC_FSM_CONTROL 0x00000148 +#define PRTYEN BIT(1) +#define TMOUTEN BIT(0) + +#define MTL_ECC_CONTROL 0x00000cc0 +#define TSOEE BIT(4) +#define MRXPEE BIT(3) +#define MESTEE BIT(2) +#define MRXEE BIT(1) +#define MTXEE BIT(0) + +#define MTL_SAFETY_INT_STATUS 0x00000cc4 +#define MCSIS BIT(31) +#define MEUIS BIT(1) +#define MECIS BIT(0) +#define MTL_ECC_INT_ENABLE 0x00000cc8 +#define RPCEIE BIT(12) +#define ECEIE BIT(8) +#define RXCEIE BIT(4) +#define TXCEIE BIT(0) +#define MTL_ECC_INT_STATUS 0x00000ccc +#define MTL_DPP_CONTROL 0x00000ce0 +#define EPSI BIT(2) +#define OPE BIT(1) +#define EDPP BIT(0) + +#define DMA_SAFETY_INT_STATUS 0x00001080 +#define MSUIS BIT(29) +#define MSCIS BIT(28) +#define DEUIS BIT(1) +#define DECIS BIT(0) +#define DMA_ECC_INT_ENABLE 0x00001084 +#define TCEIE BIT(0) +#define DMA_ECC_INT_STATUS 0x00001088 + +int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp); +bool dwmac5_safety_feat_irq_status(struct net_device *ndev, + void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_stats *stats); +const char *dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, + int index, unsigned long *count); + +#endif /* __DWMAC5_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index a916e13624eb..da50451f8999 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -58,6 +58,7 @@ struct stmmac_tx_queue { unsigned int dirty_tx; dma_addr_t dma_tx_phy; u32 tx_tail_addr; + u32 mss; }; struct stmmac_rx_queue { @@ -113,6 +114,7 @@ struct stmmac_priv { int mii_irq[PHY_MAX_ADDR]; struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp; + struct stmmac_safety_stats sstats; struct plat_stmmacenet_data *plat; struct dma_features dma_cap; struct stmmac_counters mmc; @@ -138,13 +140,23 @@ struct stmmac_priv { spinlock_t ptp_lock; void __iomem *mmcaddr; void __iomem *ptpaddr; - u32 mss; #ifdef CONFIG_DEBUG_FS struct dentry *dbgfs_dir; struct dentry *dbgfs_rings_status; struct dentry *dbgfs_dma_cap; #endif + + unsigned long state; + struct workqueue_struct *wq; + struct work_struct service_task; +}; + +enum stmmac_state { + STMMAC_DOWN, + STMMAC_RESET_REQUESTED, + STMMAC_RESETING, + STMMAC_SERVICE_SCHED, }; int stmmac_mdio_unregister(struct net_device *ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index af30b4857c3b..2c6ed47704fc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -523,11 +523,23 @@ stmmac_set_pauseparam(struct net_device *netdev, static void stmmac_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *data) { + const char *(*dump)(struct stmmac_safety_stats *stats, int index, + unsigned long *count); struct stmmac_priv *priv = netdev_priv(dev); u32 rx_queues_count = priv->plat->rx_queues_to_use; u32 tx_queues_count = priv->plat->tx_queues_to_use; + unsigned long count; int i, j = 0; + if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) { + dump = priv->hw->mac->safety_feat_dump; + + for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { + if (dump(&priv->sstats, i, &count)) + data[j++] = count; + } + } + /* Update the DMA HW counters for dwmac10/100 */ if 
(priv->hw->dma->dma_diagnostic_fr) priv->hw->dma->dma_diagnostic_fr(&dev->stats, @@ -569,7 +581,9 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, static int stmmac_get_sset_count(struct net_device *netdev, int sset) { struct stmmac_priv *priv = netdev_priv(netdev); - int len; + const char *(*dump)(struct stmmac_safety_stats *stats, int index, + unsigned long *count); + int i, len, safety_len = 0; switch (sset) { case ETH_SS_STATS: @@ -577,6 +591,16 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset) if (priv->dma_cap.rmon) len += STMMAC_MMC_STATS_LEN; + if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) { + dump = priv->hw->mac->safety_feat_dump; + + for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { + if (dump(&priv->sstats, i, NULL)) + safety_len++; + } + + len += safety_len; + } return len; default: @@ -589,9 +613,22 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) int i; u8 *p = data; struct stmmac_priv *priv = netdev_priv(dev); + const char *(*dump)(struct stmmac_safety_stats *stats, int index, + unsigned long *count); switch (stringset) { case ETH_SS_STATS: + if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) { + dump = priv->hw->mac->safety_feat_dump; + for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { + const char *desc = dump(&priv->sstats, i, NULL); + + if (desc) { + memcpy(p, desc, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } + } if (priv->dma_cap.rmon) for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { memcpy(p, stmmac_mmc[i].stat_string, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7ad841434ec8..9a16931ce39d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -57,36 +57,36 @@ /* Module parameters */ #define TX_TIMEO 5000 static int watchdog = TX_TIMEO; -module_param(watchdog, int, S_IRUGO | S_IWUSR); +module_param(watchdog, int, 0644); MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)"); static int debug = -1; -module_param(debug, int, S_IRUGO | S_IWUSR); +module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); static int phyaddr = -1; -module_param(phyaddr, int, S_IRUGO); +module_param(phyaddr, int, 0444); MODULE_PARM_DESC(phyaddr, "Physical device address"); #define STMMAC_TX_THRESH (DMA_TX_SIZE / 4) #define STMMAC_RX_THRESH (DMA_RX_SIZE / 4) static int flow_ctrl = FLOW_OFF; -module_param(flow_ctrl, int, S_IRUGO | S_IWUSR); +module_param(flow_ctrl, int, 0644); MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); static int pause = PAUSE_TIME; -module_param(pause, int, S_IRUGO | S_IWUSR); +module_param(pause, int, 0644); MODULE_PARM_DESC(pause, "Flow Control Pause Time"); #define TC_DEFAULT 64 static int tc = TC_DEFAULT; -module_param(tc, int, S_IRUGO | S_IWUSR); +module_param(tc, int, 0644); MODULE_PARM_DESC(tc, "DMA threshold control value"); #define DEFAULT_BUFSIZE 1536 static int buf_sz = DEFAULT_BUFSIZE; -module_param(buf_sz, int, S_IRUGO | S_IWUSR); +module_param(buf_sz, int, 0644); MODULE_PARM_DESC(buf_sz, "DMA buffer size"); #define STMMAC_RX_COPYBREAK 256 @@ -97,7 +97,7 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | #define STMMAC_DEFAULT_LPI_TIMER 1000 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; -module_param(eee_timer, int, S_IRUGO | S_IWUSR); +module_param(eee_timer, int, 0644); MODULE_PARM_DESC(eee_timer, "LPI tx expiration 
time in msec"); #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x)) @@ -105,7 +105,7 @@ MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); * but allow user to force to use the chain instead of the ring */ static unsigned int chain_mode; -module_param(chain_mode, int, S_IRUGO); +module_param(chain_mode, int, 0444); MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); static irqreturn_t stmmac_interrupt(int irq, void *dev_id); @@ -196,6 +196,20 @@ static void stmmac_start_all_queues(struct stmmac_priv *priv) netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue)); } +static void stmmac_service_event_schedule(struct stmmac_priv *priv) +{ + if (!test_bit(STMMAC_DOWN, &priv->state) && + !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) + queue_work(priv->wq, &priv->service_task); +} + +static void stmmac_global_err(struct stmmac_priv *priv) +{ + netif_carrier_off(priv->dev); + set_bit(STMMAC_RESET_REQUESTED, &priv->state); + stmmac_service_event_schedule(priv); +} + /** * stmmac_clk_csr_set - dynamically set the MDC clock * @priv: driver private structure @@ -1355,6 +1369,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev) tx_q->dirty_tx = 0; tx_q->cur_tx = 0; + tx_q->mss = 0; netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); } @@ -1843,6 +1858,11 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) if (unlikely(status & tx_dma_own)) break; + /* Make sure descriptor fields are read after reading + * the own bit. + */ + dma_rmb(); + /* Just consider the last segment and ...*/ if (likely(!(status & tx_not_ls))) { /* ... verify the status error condition */ @@ -1946,6 +1966,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) (i == DMA_TX_SIZE - 1)); tx_q->dirty_tx = 0; tx_q->cur_tx = 0; + tx_q->mss = 0; netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); stmmac_start_tx_dma(priv, chan); @@ -1993,6 +2014,22 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, } } +static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) +{ + bool ret = false; + + /* Safety features are only available in cores >= 5.10 */ + if (priv->synopsys_id < DWMAC_CORE_5_10) + return ret; + if (priv->hw->mac->safety_feat_irq_status) + ret = priv->hw->mac->safety_feat_irq_status(priv->dev, + priv->ioaddr, priv->dma_cap.asp, &priv->sstats); + + if (ret) + stmmac_global_err(priv); + return ret; +} + /** * stmmac_dma_interrupt - DMA ISR * @priv: driver private structure @@ -2430,7 +2467,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) continue; packet = priv->plat->rx_queues_cfg[queue].pkt_route; - priv->hw->mac->rx_queue_prio(priv->hw, packet, queue); + priv->hw->mac->rx_queue_routing(priv->hw, packet, queue); } } @@ -2482,6 +2519,17 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv) stmmac_mac_config_rx_queues_routing(priv); } +static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) +{ + if (priv->hw->mac->safety_feat_config && priv->dma_cap.asp) { + netdev_info(priv->dev, "Enabling Safety Features\n"); + priv->hw->mac->safety_feat_config(priv->ioaddr, + priv->dma_cap.asp); + } else { + netdev_info(priv->dev, "No Safety Features support found\n"); + } +} + /** * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. 
@@ -2533,6 +2581,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) if (priv->synopsys_id >= DWMAC_CORE_4_00) stmmac_mtl_configuration(priv); + /* Initialize Safety Features */ + if (priv->synopsys_id >= DWMAC_CORE_5_10) + stmmac_safety_feat_configuration(priv); + ret = priv->hw->mac->rx_ipc(priv->hw); if (!ret) { netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); @@ -2632,7 +2684,6 @@ static int stmmac_open(struct net_device *dev) priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); priv->rx_copybreak = STMMAC_RX_COPYBREAK; - priv->mss = 0; ret = alloc_dma_desc_resources(priv); if (ret < 0) { @@ -2793,6 +2844,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, while (tmp_len > 0) { tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); + WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); desc = tx_q->dma_tx + tx_q->cur_tx; desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); @@ -2872,11 +2924,12 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) mss = skb_shinfo(skb)->gso_size; /* set new MSS value if needed */ - if (mss != priv->mss) { + if (mss != tx_q->mss) { mss_desc = tx_q->dma_tx + tx_q->cur_tx; priv->hw->desc->set_mss(mss_desc, mss); - priv->mss = mss; + tx_q->mss = mss; tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); + WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); } if (netif_msg_tx_queued(priv)) { @@ -2887,6 +2940,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) } first_entry = tx_q->cur_tx; + WARN_ON(tx_q->tx_skbuff[first_entry]); desc = tx_q->dma_tx + first_entry; first = desc; @@ -2926,7 +2980,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); - tx_q->tx_skbuff[tx_q->cur_tx] = NULL; tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; } @@ -2980,14 +3033,21 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); /* If context desc is used to change MSS */ - if (mss_desc) + if (mss_desc) { + /* Make sure that first descriptor has been completely + * written, including its own bit. This is because MSS is + * actually before first descriptor, so we need to make + * sure that MSS's own bit is the last thing written. + */ + dma_wmb(); priv->hw->desc->set_tx_owner(mss_desc); + } /* The own bit must be the latest setting done when prepare the * descriptor and then barrier is needed to make sure that * all is coherent before granting the DMA engine. 
*/ - dma_wmb(); + wmb(); if (netif_msg_pktdata(priv)) { pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", @@ -3062,6 +3122,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) entry = tx_q->cur_tx; first_entry = entry; + WARN_ON(tx_q->tx_skbuff[first_entry]); csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); @@ -3090,6 +3151,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) bool last_segment = (i == (nfrags - 1)); entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); + WARN_ON(tx_q->tx_skbuff[entry]); if (likely(priv->extend_desc)) desc = (struct dma_desc *)(tx_q->dma_etx + entry); @@ -3101,8 +3163,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, des)) goto dma_map_err; /* should reuse desc w/o issues */ - tx_q->tx_skbuff[entry] = NULL; - tx_q->tx_skbuff_dma[entry].buf = des; if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) desc->des0 = cpu_to_le32(des); @@ -3211,7 +3271,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) * descriptor and then barrier is needed to make sure that * all is coherent before granting the DMA engine. */ - dma_wmb(); + wmb(); } netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); @@ -3572,12 +3632,8 @@ static int stmmac_poll(struct napi_struct *napi, int budget) static void stmmac_tx_timeout(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - u32 tx_count = priv->plat->tx_queues_to_use; - u32 chan; - /* Clear Tx resources and restart transmitting again */ - for (chan = 0; chan < tx_count; chan++) - stmmac_tx_err(priv, chan); + stmmac_global_err(priv); } /** @@ -3701,6 +3757,13 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) return IRQ_NONE; } + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + /* Check if a fatal error happened */ + if (stmmac_safety_feat_interrupt(priv)) + return IRQ_HANDLED; + /* To handle GMAC own interrupts */ if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { int status = priv->hw->mac->host_irq_status(priv->hw, @@ -3986,7 +4049,7 @@ static int stmmac_init_fs(struct net_device *dev) /* Entry to report DMA RX/TX rings */ priv->dbgfs_rings_status = - debugfs_create_file("descriptors_status", S_IRUGO, + debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, &stmmac_rings_status_fops); @@ -3998,9 +4061,9 @@ static int stmmac_init_fs(struct net_device *dev) } /* Entry to report the DMA HW features */ - priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, - priv->dbgfs_dir, - dev, &stmmac_dma_cap_fops); + priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444, + priv->dbgfs_dir, + dev, &stmmac_dma_cap_fops); if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n"); @@ -4036,6 +4099,37 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_set_mac_address = stmmac_set_mac_address, }; +static void stmmac_reset_subtask(struct stmmac_priv *priv) +{ + if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) + return; + if (test_bit(STMMAC_DOWN, &priv->state)) + return; + + netdev_err(priv->dev, "Reset adapter.\n"); + + rtnl_lock(); + netif_trans_update(priv->dev); + while (test_and_set_bit(STMMAC_RESETING, &priv->state)) + usleep_range(1000, 2000); + + set_bit(STMMAC_DOWN, &priv->state); + dev_close(priv->dev); + dev_open(priv->dev); + clear_bit(STMMAC_DOWN, &priv->state); + 
clear_bit(STMMAC_RESETING, &priv->state); + rtnl_unlock(); +} + +static void stmmac_service_task(struct work_struct *work) +{ + struct stmmac_priv *priv = container_of(work, struct stmmac_priv, + service_task); + + stmmac_reset_subtask(priv); + clear_bit(STMMAC_SERVICE_SCHED, &priv->state); +} + /** * stmmac_hw_init - Init the MAC device * @priv: driver private structure @@ -4197,6 +4291,15 @@ int stmmac_dvr_probe(struct device *device, /* Verify driver arguments */ stmmac_verify_args(); + /* Allocate workqueue */ + priv->wq = create_singlethread_workqueue("stmmac_wq"); + if (!priv->wq) { + dev_err(priv->device, "failed to create workqueue\n"); + goto error_wq; + } + + INIT_WORK(&priv->service_task, stmmac_service_task); + /* Override with kernel parameters if supplied XXX CRS XXX * this needs to have multiple instances */ @@ -4327,6 +4430,8 @@ error_mdio_register: netif_napi_del(&rx_q->napi); } error_hw_init: + destroy_workqueue(priv->wq); +error_wq: free_netdev(ndev); return ret; @@ -4359,6 +4464,7 @@ int stmmac_dvr_remove(struct device *dev) priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); + destroy_workqueue(priv->wq); free_netdev(ndev); return 0; @@ -4436,6 +4542,7 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv) tx_q->cur_tx = 0; tx_q->dirty_tx = 0; + tx_q->mss = 0; } } @@ -4481,11 +4588,6 @@ int stmmac_resume(struct device *dev) stmmac_reset_queues_param(priv); - /* reset private mss value to force mss context settings at - * next tso xmit (only used for gmac4). - */ - priv->mss = 0; - stmmac_clear_descriptors(priv); stmmac_hw_setup(ndev, false); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 05f122b8424a..ebd3e5ffa73c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -135,13 +135,14 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) * stmmac_mtl_setup - parse DT parameters for multiple queues configuration * @pdev: platform device */ -static void stmmac_mtl_setup(struct platform_device *pdev, - struct plat_stmmacenet_data *plat) +static int stmmac_mtl_setup(struct platform_device *pdev, + struct plat_stmmacenet_data *plat) { struct device_node *q_node; struct device_node *rx_node; struct device_node *tx_node; u8 queue = 0; + int ret = 0; /* For backwards-compatibility with device trees that don't have any * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back @@ -159,12 +160,12 @@ static void stmmac_mtl_setup(struct platform_device *pdev, rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0); if (!rx_node) - return; + return ret; tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0); if (!tx_node) { of_node_put(rx_node); - return; + return ret; } /* Processing RX queues common config */ @@ -220,6 +221,11 @@ static void stmmac_mtl_setup(struct platform_device *pdev, queue++; } + if (queue != plat->rx_queues_to_use) { + ret = -EINVAL; + dev_err(&pdev->dev, "Not all RX queues were configured\n"); + goto out; + } /* Processing TX queues common config */ if (of_property_read_u32(tx_node, "snps,tx-queues-to-use", @@ -281,10 +287,18 @@ static void stmmac_mtl_setup(struct platform_device *pdev, queue++; } + if (queue != plat->tx_queues_to_use) { + ret = -EINVAL; + dev_err(&pdev->dev, "Not all TX queues were configured\n"); + goto out; + } +out: of_node_put(rx_node); of_node_put(tx_node); 
of_node_put(q_node); + + return ret; } /** @@ -376,6 +390,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat; struct stmmac_dma_cfg *dma_cfg; + int rc; plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); if (!plat) @@ -402,8 +417,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); /* To Configure PHY by using all device-tree supported properties */ - if (stmmac_dt_phy(plat, np, &pdev->dev)) - return ERR_PTR(-ENODEV); + rc = stmmac_dt_phy(plat, np, &pdev->dev); + if (rc) + return ERR_PTR(rc); of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); @@ -499,7 +515,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->axi = stmmac_axi_setup(pdev); - stmmac_mtl_setup(pdev, plat); + rc = stmmac_mtl_setup(pdev, plat); + if (rc) { + stmmac_remove_config_dt(pdev, plat); + return ERR_PTR(rc); + } /* clock setup */ plat->stmmac_clk = devm_clk_get(&pdev->dev, diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 8dd545fed30d..f081de4f38d7 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9437,11 +9437,11 @@ static ssize_t show_num_ports(struct device *dev, } static struct device_attribute niu_parent_attributes[] = { - __ATTR(port_phy, S_IRUGO, show_port_phy, NULL), - __ATTR(plat_type, S_IRUGO, show_plat_type, NULL), - __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL), - __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL), - __ATTR(num_ports, S_IRUGO, show_num_ports, NULL), + __ATTR(port_phy, 0444, show_port_phy, NULL), + __ATTR(plat_type, 0444, show_plat_type, NULL), + __ATTR(rxchan_per_port, 0444, show_rxchan_per_port, NULL), + __ATTR(txchan_per_port, 0444, show_txchan_per_port, NULL), + __ATTR(num_ports, 0444, show_num_ports, NULL), {} }; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 63d3d6b215f3..a94f50442613 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -312,7 +312,7 @@ static struct vnet *vnet_new(const u64 *local_mac, dev->ethtool_ops = &vnet_ethtool_ops; dev->watchdog_timeo = VNET_TX_TIMEOUT; - dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | + dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 1b1b78fdc138..30371274409d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -35,6 +35,7 @@ #include <linux/of_net.h> #include <linux/of_device.h> #include <linux/if_vlan.h> +#include <linux/kmemleak.h> #include <linux/pinctrl/consumer.h> @@ -120,14 +121,18 @@ do { \ #define CPDMA_RXCP 0x60 #define CPSW_POLL_WEIGHT 64 +#define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4 #define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN) -#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN +\ + ETH_FCS_LEN +\ + CPSW_RX_VLAN_ENCAP_HDR_SIZE) #define RX_PRIORITY_MAPPING 0x76543210 #define TX_PRIORITY_MAPPING 0x33221100 #define CPDMA_TX_PRIORITY_MAP 0x01234567 #define CPSW_VLAN_AWARE BIT(1) +#define CPSW_RX_VLAN_ENCAP BIT(2) #define CPSW_ALE_VLAN_AWARE 1 #define CPSW_FIFO_NORMAL_MODE (0 << 16) @@ -148,6 +153,18 @@ do { \ #define CPSW_MAX_QUEUES 8 #define 
CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256 +#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29 +#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0) +#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT 16 +#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT 8 +#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK GENMASK(1, 0) +enum { + CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0, + CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV, + CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG, + CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG, +}; + static int debug_level; module_param(debug_level, int, 0); MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)"); @@ -718,6 +735,49 @@ static void cpsw_tx_handler(void *token, int len, int status) dev_kfree_skb_any(skb); } +static void cpsw_rx_vlan_encap(struct sk_buff *skb) +{ + struct cpsw_priv *priv = netdev_priv(skb->dev); + struct cpsw_common *cpsw = priv->cpsw; + u32 rx_vlan_encap_hdr = *((u32 *)skb->data); + u16 vtag, vid, prio, pkt_type; + + /* Remove VLAN header encapsulation word */ + skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE); + + pkt_type = (rx_vlan_encap_hdr >> + CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) & + CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK; + /* Ignore unknown & Priority-tagged packets*/ + if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV || + pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG) + return; + + vid = (rx_vlan_encap_hdr >> + CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) & + VLAN_VID_MASK; + /* Ignore vid 0 and pass packet as is */ + if (!vid) + return; + /* Ignore default vlans in dual mac mode */ + if (cpsw->data.dual_emac && + vid == cpsw->slaves[priv->emac_port].port_vlan) + return; + + prio = (rx_vlan_encap_hdr >> + CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) & + CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK; + + vtag = (prio << VLAN_PRIO_SHIFT) | vid; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); + + /* strip vlan tag for VLAN-tagged packet */ + if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) { + memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); + skb_pull(skb, VLAN_HLEN); + } +} + static void cpsw_rx_handler(void *token, int len, int status) { struct cpdma_chan *ch; @@ -752,6 +812,8 @@ static void cpsw_rx_handler(void *token, int len, int status) if (new_skb) { skb_copy_queue_mapping(new_skb, skb); skb_put(skb, len); + if (status & CPDMA_RX_VLAN_ENCAP) + cpsw_rx_vlan_encap(skb); cpts_rx_timestamp(cpsw->cpts, skb); skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb(skb); @@ -1014,7 +1076,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, /* set speed_in input in case RMII mode is used in 100Mbps */ if (phy->speed == 100) mac_control |= BIT(15); - else if (phy->speed == 10) + /* in band mode only works in 10Mbps RGMII mode */ + else if ((phy->speed == 10) && phy_interface_is_rgmii(phy)) mac_control |= BIT(18); /* In Band mode */ if (priv->rx_pause) @@ -1406,7 +1469,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, CPSW_ALE_VLAN_AWARE); control_reg = readl(&cpsw->regs->control); - control_reg |= CPSW_VLAN_AWARE; + control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP; writel(control_reg, &cpsw->regs->control); fifo_mode = (cpsw->data.dual_emac) ? 
CPSW_FIFO_DUAL_MAC_MODE : CPSW_FIFO_NORMAL_MODE; @@ -3122,7 +3185,7 @@ static int cpsw_probe(struct platform_device *pdev) cpsw->quirk_irq = true; } - ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 6f9173ff9414..31ae04117f0a 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -1164,7 +1164,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) outlen -= CPDMA_DESC_CRC_LEN; status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE | - CPDMA_DESC_PORT_MASK); + CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP); chan->head = desc_from_phys(pool, desc_read(desc, hw_next)); chan_write(chan, cp, desc_dma); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index fd65ce2b83de..d399af5389b8 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -19,6 +19,8 @@ #define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7) +#define CPDMA_RX_VLAN_ENCAP BIT(19) + #define CPDMA_EOI_RX_THRESH 0x0 #define CPDMA_EOI_RX 0x1 #define CPDMA_EOI_TX 0x2 diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 5a4e78fde530..c769cd9d11e7 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -1901,7 +1901,7 @@ ThunderLAN driver adapter related routines * Nothing * Parms: * dev The device structure with the list - * stuctures to be reset. + * structures to be reset. * * This routine sets the variables associated with managing * the TLAN lists to their initial values. diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig deleted file mode 100644 index bdfeaf3d4fce..000000000000 --- a/drivers/net/ethernet/tile/Kconfig +++ /dev/null @@ -1,18 +0,0 @@ -# -# Tilera network device configuration -# - -config TILE_NET - tristate "Tilera GBE/XGBE network driver support" - depends on TILE - default y - select CRC32 - select TILE_GXIO_MPIPE if TILEGX - select HIGH_RES_TIMERS if TILEGX - imply PTP_1588_CLOCK if TILEGX - ---help--- - This is a standard Linux network device driver for the - on-chip Tilera Gigabit Ethernet and XAUI interfaces. - - To compile this driver as a module, choose M here: the module - will be called tile_net. diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile deleted file mode 100644 index 3d0ae1f07fc9..000000000000 --- a/drivers/net/ethernet/tile/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for the TILE on-chip networking support. -# - -obj-$(CONFIG_TILE_NET) += tile_net.o -ifdef CONFIG_TILEGX -tile_net-y := tilegx.o -else -tile_net-y := tilepro.o -endif diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c deleted file mode 100644 index b3e5816a4678..000000000000 --- a/drivers/net/ethernet/tile/tilegx.c +++ /dev/null @@ -1,2279 +0,0 @@ -/* - * Copyright 2012 Tilera Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation, version 2. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for - * more details. - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/moduleparam.h> -#include <linux/sched.h> -#include <linux/kernel.h> /* printk() */ -#include <linux/slab.h> /* kmalloc() */ -#include <linux/errno.h> /* error codes */ -#include <linux/types.h> /* size_t */ -#include <linux/interrupt.h> -#include <linux/in.h> -#include <linux/irq.h> -#include <linux/netdevice.h> /* struct device, and other headers */ -#include <linux/etherdevice.h> /* eth_type_trans */ -#include <linux/skbuff.h> -#include <linux/ioctl.h> -#include <linux/cdev.h> -#include <linux/hugetlb.h> -#include <linux/in6.h> -#include <linux/timer.h> -#include <linux/hrtimer.h> -#include <linux/ktime.h> -#include <linux/io.h> -#include <linux/ctype.h> -#include <linux/ip.h> -#include <linux/ipv6.h> -#include <linux/tcp.h> -#include <linux/net_tstamp.h> -#include <linux/ptp_clock_kernel.h> -#include <linux/sched/isolation.h> - -#include <asm/checksum.h> -#include <asm/homecache.h> -#include <gxio/mpipe.h> -#include <arch/sim.h> - -/* Default transmit lockup timeout period, in jiffies. */ -#define TILE_NET_TIMEOUT (5 * HZ) - -/* The maximum number of distinct channels (idesc.channel is 5 bits). */ -#define TILE_NET_CHANNELS 32 - -/* Maximum number of idescs to handle per "poll". */ -#define TILE_NET_BATCH 128 - -/* Maximum number of packets to handle per "poll". */ -#define TILE_NET_WEIGHT 64 - -/* Maximum Jumbo Packet MTU */ -#define TILE_JUMBO_MAX_MTU 9000 - -/* Number of entries in each iqueue. */ -#define IQUEUE_ENTRIES 512 - -/* Number of entries in each equeue. */ -#define EQUEUE_ENTRIES 2048 - -/* Total header bytes per equeue slot. Must be big enough for 2 bytes - * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to - * 60 bytes of actual TCP header. We round up to align to cache lines. - */ -#define HEADER_BYTES 128 - -/* Maximum completions per cpu per device (must be a power of two). - * ISSUE: What is the right number here? If this is too small, then - * egress might block waiting for free space in a completions array. - * ISSUE: At the least, allocate these only for initialized echannels. - */ -#define TILE_NET_MAX_COMPS 64 - -#define MAX_FRAGS (MAX_SKB_FRAGS + 1) - -/* The "kinds" of buffer stacks (small/large/jumbo). */ -#define MAX_KINDS 3 - -/* Size of completions data to allocate. - * ISSUE: Probably more than needed since we don't use all the channels. - */ -#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps)) - -/* Size of NotifRing data to allocate. */ -#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t)) - -/* Timeout to wake the per-device TX timer after we stop the queue. - * We don't want the timeout too short (adds overhead, and might end - * up causing stop/wake/stop/wake cycles) or too long (affects performance). - * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets. - */ -#define TX_TIMER_DELAY_USEC 30 - -/* Timeout to wake the per-cpu egress timer to free completions. */ -#define EGRESS_TIMER_DELAY_USEC 1000 - -MODULE_AUTHOR("Tilera Corporation"); -MODULE_LICENSE("GPL"); - -/* A "packet fragment" (a chunk of memory). */ -struct frag { - void *buf; - size_t length; -}; - -/* A single completion. 
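It records the equeue "complete_count" at which the transmit will have finished, together with the skb to free at that point.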
*/ -struct tile_net_comp { - /* The "complete_count" when the completion will be complete. */ - s64 when; - /* The buffer to be freed when the completion is complete. */ - struct sk_buff *skb; -}; - -/* The completions for a given cpu and echannel. */ -struct tile_net_comps { - /* The completions. */ - struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS]; - /* The number of completions used. */ - unsigned long comp_next; - /* The number of completions freed. */ - unsigned long comp_last; -}; - -/* The transmit wake timer for a given cpu and echannel. */ -struct tile_net_tx_wake { - int tx_queue_idx; - struct hrtimer timer; - struct net_device *dev; -}; - -/* Info for a specific cpu. */ -struct tile_net_info { - /* Our cpu. */ - int my_cpu; - /* A timer for handling egress completions. */ - struct hrtimer egress_timer; - /* True if "egress_timer" is scheduled. */ - bool egress_timer_scheduled; - struct info_mpipe { - /* Packet queue. */ - gxio_mpipe_iqueue_t iqueue; - /* The NAPI struct. */ - struct napi_struct napi; - /* Number of buffers (by kind) which must still be provided. */ - unsigned int num_needed_buffers[MAX_KINDS]; - /* instance id. */ - int instance; - /* True if iqueue is valid. */ - bool has_iqueue; - /* NAPI flags. */ - bool napi_added; - bool napi_enabled; - /* Comps for each egress channel. */ - struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; - /* Transmit wake timer for each egress channel. */ - struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; - } mpipe[NR_MPIPE_MAX]; -}; - -/* Info for egress on a particular egress channel. */ -struct tile_net_egress { - /* The "equeue". */ - gxio_mpipe_equeue_t *equeue; - /* The headers for TSO. */ - unsigned char *headers; -}; - -/* Info for a specific device. */ -struct tile_net_priv { - /* Our network device. */ - struct net_device *dev; - /* The primary link. */ - gxio_mpipe_link_t link; - /* The primary channel, if open, else -1. */ - int channel; - /* The "loopify" egress link, if needed. */ - gxio_mpipe_link_t loopify_link; - /* The "loopify" egress channel, if open, else -1. */ - int loopify_channel; - /* The egress channel (channel or loopify_channel). */ - int echannel; - /* mPIPE instance, 0 or 1. */ - int instance; - /* The timestamp config. */ - struct hwtstamp_config stamp_cfg; -}; - -static struct mpipe_data { - /* The ingress irq. */ - int ingress_irq; - - /* The "context" for all devices. */ - gxio_mpipe_context_t context; - - /* Egress info, indexed by "priv->echannel" - * (lazily created as needed). - */ - struct tile_net_egress - egress_for_echannel[TILE_NET_CHANNELS]; - - /* Devices currently associated with each channel. - * NOTE: The array entry can become NULL after ifconfig down, but - * we do not free the underlying net_device structures, so it is - * safe to use a pointer after reading it from this array. - */ - struct net_device - *tile_net_devs_for_channel[TILE_NET_CHANNELS]; - - /* The actual memory allocated for the buffer stacks. */ - void *buffer_stack_vas[MAX_KINDS]; - - /* The amount of memory allocated for each buffer stack. */ - size_t buffer_stack_bytes[MAX_KINDS]; - - /* The first buffer stack index - * (small = +0, large = +1, jumbo = +2). - */ - int first_buffer_stack; - - /* The buckets. */ - int first_bucket; - int num_buckets; - - /* PTP-specific data. */ - struct ptp_clock *ptp_clock; - struct ptp_clock_info caps; - - /* Lock for ptp accessors. */ - struct mutex ptp_lock; - -} mpipe_data[NR_MPIPE_MAX] = { - [0 ... 
(NR_MPIPE_MAX - 1)] { - .ingress_irq = -1, - .first_buffer_stack = -1, - .first_bucket = -1, - .num_buckets = 1 - } -}; - -/* A mutex for "tile_net_devs_for_channel". */ -static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); - -/* The per-cpu info. */ -static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info); - - -/* The buffer size enums for each buffer stack. - * See arch/tile/include/gxio/mpipe.h for the set of possible values. - * We avoid the "10384" size because it can induce "false chaining" - * on "cut-through" jumbo packets. - */ -static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = { - GXIO_MPIPE_BUFFER_SIZE_128, - GXIO_MPIPE_BUFFER_SIZE_1664, - GXIO_MPIPE_BUFFER_SIZE_16384 -}; - -/* Text value of tile_net.cpus if passed as a module parameter. */ -static char *network_cpus_string; - -/* The actual cpus in "network_cpus". */ -static struct cpumask network_cpus_map; - -/* If "tile_net.loopify=LINK" was specified, this is "LINK". */ -static char *loopify_link_name; - -/* If "tile_net.custom" was specified, this is true. */ -static bool custom_flag; - -/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */ -static uint jumbo_num; - -/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */ -static inline int mpipe_instance(struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - return priv->instance; -} - -/* The "tile_net.cpus" argument specifies the cpus that are dedicated - * to handle ingress packets. - * - * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where - * m, n, x, y are integer numbers that represent the cpus that can be - * neither a dedicated cpu nor a dataplane cpu. - */ -static bool network_cpus_init(void) -{ - int rc; - - if (network_cpus_string == NULL) - return false; - - rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map); - if (rc != 0) { - pr_warn("tile_net.cpus=%s: malformed cpu list\n", - network_cpus_string); - return false; - } - - /* Remove dedicated cpus. */ - cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask); - - if (cpumask_empty(&network_cpus_map)) { - pr_warn("Ignoring empty tile_net.cpus='%s'.\n", - network_cpus_string); - return false; - } - - pr_info("Linux network CPUs: %*pbl\n", - cpumask_pr_args(&network_cpus_map)); - return true; -} - -module_param_named(cpus, network_cpus_string, charp, 0444); -MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts"); - -/* The "tile_net.loopify=LINK" argument causes the named device to - * actually use "loop0" for ingress, and "loop1" for egress. This - * allows an app to sit between the actual link and linux, passing - * (some) packets along to linux, and forwarding (some) packets sent - * out by linux. - */ -module_param_named(loopify, loopify_link_name, charp, 0444); -MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress"); - -/* The "tile_net.custom" argument causes us to ignore the "conventional" - * classifier metadata, in particular, the "l2_offset". - */ -module_param_named(custom, custom_flag, bool, 0444); -MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); - -/* The "tile_net.jumbo" argument causes us to support "jumbo" packets, - * and to allocate the given number of "jumbo" buffers. - */ -module_param_named(jumbo, jumbo_num, uint, 0444); -MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets"); - -/* Atomically update a statistics field. 
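The stats fields are plain unsigned longs shared by all cpus, so the update is done with atomic_long_add() on a cast pointer; the BUILD_BUG_ON below guarantees the cast is size-safe.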
- * Note that on TILE-Gx, this operation is fire-and-forget on the - * issuing core (single-cycle dispatch) and takes only a few cycles - * longer than a regular store when the request reaches the home cache. - * No expensive bus management overhead is required. - */ -static void tile_net_stats_add(unsigned long value, unsigned long *field) -{ - BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long)); - atomic_long_add(value, (atomic_long_t *)field); -} - -/* Allocate and push a buffer. */ -static bool tile_net_provide_buffer(int instance, int kind) -{ - struct mpipe_data *md = &mpipe_data[instance]; - gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind]; - size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse); - const unsigned long buffer_alignment = 128; - struct sk_buff *skb; - int len; - - len = sizeof(struct sk_buff **) + buffer_alignment + bs; - skb = dev_alloc_skb(len); - if (skb == NULL) - return false; - - /* Make room for a back-pointer to 'skb' and guarantee alignment. */ - skb_reserve(skb, sizeof(struct sk_buff **)); - skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1)); - - /* Save a back-pointer to 'skb'. */ - *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb; - - /* Make sure "skb" and the back-pointer have been flushed. */ - wmb(); - - gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind, - (void *)va_to_tile_io_addr(skb->data)); - - return true; -} - -/* Convert a raw mpipe buffer to its matching skb pointer. */ -static struct sk_buff *mpipe_buf_to_skb(void *va) -{ - /* Acquire the associated "skb". */ - struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); - struct sk_buff *skb = *skb_ptr; - - /* Paranoia. */ - if (skb->data != va) { - /* Panic here since there's a reasonable chance - * that corrupt buffers means generic memory - * corruption, with unpredictable system effects. - */ - panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p", - va, skb, skb->data); - } - - return skb; -} - -static void tile_net_pop_all_buffers(int instance, int stack) -{ - struct mpipe_data *md = &mpipe_data[instance]; - - for (;;) { - tile_io_addr_t addr = - (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context, - stack); - if (addr == 0) - break; - dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr))); - } -} - -/* Provide linux buffers to mPIPE. */ -static void tile_net_provide_needed_buffers(void) -{ - struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); - int instance, kind; - for (instance = 0; instance < NR_MPIPE_MAX && - info->mpipe[instance].has_iqueue; instance++) { - for (kind = 0; kind < MAX_KINDS; kind++) { - while (info->mpipe[instance].num_needed_buffers[kind] - != 0) { - if (!tile_net_provide_buffer(instance, kind)) { - pr_notice("Tile %d still needs" - " some buffers\n", - info->my_cpu); - return; - } - info->mpipe[instance]. - num_needed_buffers[kind]--; - } - } - } -} - -/* Get RX timestamp, and store it in the skb. */ -static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb, - gxio_mpipe_idesc_t *idesc) -{ - if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) { - struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); - memset(shhwtstamps, 0, sizeof(*shhwtstamps)); - shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec, - idesc->time_stamp_ns); - } -} - -/* Get TX timestamp, and store it in the skb. 
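Done only when SKBTX_HW_TSTAMP is set on the skb; the time is read from the mPIPE global clock with gxio_mpipe_get_timestamp() and handed to the stack via skb_tstamp_tx().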
*/ -static void tile_tx_timestamp(struct sk_buff *skb, int instance) -{ - struct skb_shared_info *shtx = skb_shinfo(skb); - if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) { - struct mpipe_data *md = &mpipe_data[instance]; - struct skb_shared_hwtstamps shhwtstamps; - struct timespec64 ts; - - shtx->tx_flags |= SKBTX_IN_PROGRESS; - gxio_mpipe_get_timestamp(&md->context, &ts); - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); - skb_tstamp_tx(skb, &shhwtstamps); - } -} - -/* Use ioctl() to enable or disable TX or RX timestamping. */ -static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq) -{ - struct hwtstamp_config config; - struct tile_net_priv *priv = netdev_priv(dev); - - if (copy_from_user(&config, rq->ifr_data, sizeof(config))) - return -EFAULT; - - if (config.flags) /* reserved for future extensions */ - return -EINVAL; - - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - case HWTSTAMP_TX_ON: - break; - default: - return -ERANGE; - } - - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_SOME: - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - case HWTSTAMP_FILTER_NTP_ALL: - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - return -ERANGE; - } - - if (copy_to_user(rq->ifr_data, &config, sizeof(config))) - return -EFAULT; - - priv->stamp_cfg = config; - return 0; -} - -static int tile_hwtstamp_get(struct net_device *dev, struct ifreq *rq) -{ - struct tile_net_priv *priv = netdev_priv(dev); - - if (copy_to_user(rq->ifr_data, &priv->stamp_cfg, - sizeof(priv->stamp_cfg))) - return -EFAULT; - - return 0; -} - -static inline bool filter_packet(struct net_device *dev, void *buf) -{ - /* Filter packets received before we're up. */ - if (dev == NULL || !(dev->flags & IFF_UP)) - return true; - - /* Filter out packets that aren't for us. */ - if (!(dev->flags & IFF_PROMISC) && - !is_multicast_ether_addr(buf) && - !ether_addr_equal(dev->dev_addr, buf)) - return true; - - return false; -} - -static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, - gxio_mpipe_idesc_t *idesc, unsigned long len) -{ - struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); - struct tile_net_priv *priv = netdev_priv(dev); - int instance = priv->instance; - - /* Encode the actual packet length. */ - skb_put(skb, len); - - skb->protocol = eth_type_trans(skb, dev); - - /* Acknowledge "good" hardware checksums. */ - if (idesc->cs && idesc->csum_seed_val == 0xFFFF) - skb->ip_summed = CHECKSUM_UNNECESSARY; - - /* Get RX timestamp from idesc. */ - tile_rx_timestamp(priv, skb, idesc); - - napi_gro_receive(&info->mpipe[instance].napi, skb); - - /* Update stats. */ - tile_net_stats_add(1, &dev->stats.rx_packets); - tile_net_stats_add(len, &dev->stats.rx_bytes); - - /* Need a new buffer. 
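Charge the consumed buffer to its small, large, or jumbo stack, matched by idesc->size, so tile_net_provide_needed_buffers() can replenish the right kind.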
*/ - if (idesc->size == buffer_size_enums[0]) - info->mpipe[instance].num_needed_buffers[0]++; - else if (idesc->size == buffer_size_enums[1]) - info->mpipe[instance].num_needed_buffers[1]++; - else - info->mpipe[instance].num_needed_buffers[2]++; -} - -/* Handle a packet. Return true if "processed", false if "filtered". */ -static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc) -{ - struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); - struct mpipe_data *md = &mpipe_data[instance]; - struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel]; - uint8_t l2_offset; - void *va; - void *buf; - unsigned long len; - bool filter; - - /* Drop packets for which no buffer was available (which can - * happen under heavy load), or for which the me/tr/ce flags - * are set (which can happen for jumbo cut-through packets, - * or with a customized classifier). - */ - if (idesc->be || idesc->me || idesc->tr || idesc->ce) { - if (dev) - tile_net_stats_add(1, &dev->stats.rx_errors); - goto drop; - } - - /* Get the "l2_offset", if allowed. */ - l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); - - /* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */ - va = tile_io_addr_to_va((unsigned long)idesc->va); - - /* Get the actual packet start/length. */ - buf = va + l2_offset; - len = idesc->l2_size - l2_offset; - - /* Point "va" at the raw buffer. */ - va -= NET_IP_ALIGN; - - filter = filter_packet(dev, buf); - if (filter) { - if (dev) - tile_net_stats_add(1, &dev->stats.rx_dropped); -drop: - gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc); - } else { - struct sk_buff *skb = mpipe_buf_to_skb(va); - - /* Skip headroom, and any custom header. */ - skb_reserve(skb, NET_IP_ALIGN + l2_offset); - - tile_net_receive_skb(dev, skb, idesc, len); - } - - gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc); - return !filter; -} - -/* Handle some packets for the current CPU. - * - * This function handles up to TILE_NET_BATCH idescs per call. - * - * ISSUE: Since we do not provide new buffers until this function is - * complete, we must initially provide enough buffers for each network - * cpu to fill its iqueue and also its batched idescs. - * - * ISSUE: The "rotting packet" race condition occurs if a packet - * arrives after the queue appears to be empty, and before the - * hypervisor interrupt is re-enabled. - */ -static int tile_net_poll(struct napi_struct *napi, int budget) -{ - struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); - unsigned int work = 0; - gxio_mpipe_idesc_t *idesc; - int instance, i, n; - struct mpipe_data *md; - struct info_mpipe *info_mpipe = - container_of(napi, struct info_mpipe, napi); - - if (budget <= 0) - goto done; - - instance = info_mpipe->instance; - while ((n = gxio_mpipe_iqueue_try_peek( - &info_mpipe->iqueue, - &idesc)) > 0) { - for (i = 0; i < n; i++) { - if (i == TILE_NET_BATCH) - goto done; - if (tile_net_handle_packet(instance, - idesc + i)) { - if (++work >= budget) - goto done; - } - } - } - - /* There are no packets left. */ - napi_complete_done(&info_mpipe->napi, work); - - md = &mpipe_data[instance]; - /* Re-enable hypervisor interrupts. */ - gxio_mpipe_enable_notif_ring_interrupt( - &md->context, info->mpipe[instance].iqueue.ring); - - /* HACK: Avoid the "rotting packet" problem. */ - if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0) - napi_schedule(&info_mpipe->napi); - - /* ISSUE: Handle completions? 
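(In practice they are left to the per-cpu egress timer, tile_net_handle_egress_timer(), which frees them outside the poll loop.)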
*/ - -done: - tile_net_provide_needed_buffers(); - - return work; -} - -/* Handle an ingress interrupt from an instance on the current cpu. */ -static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id) -{ - struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); - napi_schedule(&info->mpipe[(uint64_t)id].napi); - return IRQ_HANDLED; -} - -/* Free some completions. This must be called with interrupts blocked. */ -static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue, - struct tile_net_comps *comps, - int limit, bool force_update) -{ - int n = 0; - while (comps->comp_last < comps->comp_next) { - unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS; - struct tile_net_comp *comp = &comps->comp_queue[cid]; - if (!gxio_mpipe_equeue_is_complete(equeue, comp->when, - force_update || n == 0)) - break; - dev_kfree_skb_irq(comp->skb); - comps->comp_last++; - if (++n == limit) - break; - } - return n; -} - -/* Add a completion. This must be called with interrupts blocked. - * tile_net_equeue_try_reserve() will have ensured a free completion entry. - */ -static void add_comp(gxio_mpipe_equeue_t *equeue, - struct tile_net_comps *comps, - uint64_t when, struct sk_buff *skb) -{ - int cid = comps->comp_next % TILE_NET_MAX_COMPS; - comps->comp_queue[cid].when = when; - comps->comp_queue[cid].skb = skb; - comps->comp_next++; -} - -static void tile_net_schedule_tx_wake_timer(struct net_device *dev, - int tx_queue_idx) -{ - struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx); - struct tile_net_priv *priv = netdev_priv(dev); - int instance = priv->instance; - struct tile_net_tx_wake *tx_wake = - &info->mpipe[instance].tx_wake[priv->echannel]; - - hrtimer_start(&tx_wake->timer, - TX_TIMER_DELAY_USEC * 1000UL, - HRTIMER_MODE_REL_PINNED); -} - -static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t) -{ - struct tile_net_tx_wake *tx_wake = - container_of(t, struct tile_net_tx_wake, timer); - netif_wake_subqueue(tx_wake->dev, tx_wake->tx_queue_idx); - return HRTIMER_NORESTART; -} - -/* Make sure the egress timer is scheduled. */ -static void tile_net_schedule_egress_timer(void) -{ - struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); - - if (!info->egress_timer_scheduled) { - hrtimer_start(&info->egress_timer, - EGRESS_TIMER_DELAY_USEC * 1000UL, - HRTIMER_MODE_REL_PINNED); - info->egress_timer_scheduled = true; - } -} - -/* The "function" for "info->egress_timer". - * - * This timer will reschedule itself as long as there are any pending - * completions expected for this tile. - */ -static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) -{ - struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); - unsigned long irqflags; - bool pending = false; - int i, instance; - - local_irq_save(irqflags); - - /* The timer is no longer scheduled. */ - info->egress_timer_scheduled = false; - - /* Free all possible comps for this tile. */ - for (instance = 0; instance < NR_MPIPE_MAX && - info->mpipe[instance].has_iqueue; instance++) { - for (i = 0; i < TILE_NET_CHANNELS; i++) { - struct tile_net_egress *egress = - &mpipe_data[instance].egress_for_echannel[i]; - struct tile_net_comps *comps = - info->mpipe[instance].comps_for_echannel[i]; - if (!egress || comps->comp_last >= comps->comp_next) - continue; - tile_net_free_comps(egress->equeue, comps, -1, true); - pending = pending || - (comps->comp_last < comps->comp_next); - } - } - - /* Reschedule timer if needed. 
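That is, re-arm whenever any egress channel on this tile still has completions outstanding.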
*/ - if (pending) - tile_net_schedule_egress_timer(); - - local_irq_restore(irqflags); - - return HRTIMER_NORESTART; -} - -/* PTP clock operations. */ - -static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb) -{ - int ret = 0; - struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); - mutex_lock(&md->ptp_lock); - if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb)) - ret = -EINVAL; - mutex_unlock(&md->ptp_lock); - return ret; -} - -static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta) -{ - int ret = 0; - struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); - mutex_lock(&md->ptp_lock); - if (gxio_mpipe_adjust_timestamp(&md->context, delta)) - ret = -EBUSY; - mutex_unlock(&md->ptp_lock); - return ret; -} - -static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, - struct timespec64 *ts) -{ - int ret = 0; - struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); - mutex_lock(&md->ptp_lock); - if (gxio_mpipe_get_timestamp(&md->context, ts)) - ret = -EBUSY; - mutex_unlock(&md->ptp_lock); - return ret; -} - -static int ptp_mpipe_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) -{ - int ret = 0; - struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); - mutex_lock(&md->ptp_lock); - if (gxio_mpipe_set_timestamp(&md->context, ts)) - ret = -EBUSY; - mutex_unlock(&md->ptp_lock); - return ret; -} - -static int ptp_mpipe_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *request, int on) -{ - return -EOPNOTSUPP; -} - -static const struct ptp_clock_info ptp_mpipe_caps = { - .owner = THIS_MODULE, - .name = "mPIPE clock", - .max_adj = 999999999, - .n_ext_ts = 0, - .n_pins = 0, - .pps = 0, - .adjfreq = ptp_mpipe_adjfreq, - .adjtime = ptp_mpipe_adjtime, - .gettime64 = ptp_mpipe_gettime, - .settime64 = ptp_mpipe_settime, - .enable = ptp_mpipe_enable, -}; - -/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */ -static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md) -{ - struct timespec64 ts; - - ktime_get_ts64(&ts); - gxio_mpipe_set_timestamp(&md->context, &ts); - - mutex_init(&md->ptp_lock); - md->caps = ptp_mpipe_caps; - md->ptp_clock = ptp_clock_register(&md->caps, NULL); - if (IS_ERR(md->ptp_clock)) - netdev_err(dev, "ptp_clock_register failed %ld\n", - PTR_ERR(md->ptp_clock)); -} - -/* Initialize PTP fields in a new device. */ -static void init_ptp_dev(struct tile_net_priv *priv) -{ - priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE; - priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF; -} - -/* Helper functions for "tile_net_update()". */ -static void enable_ingress_irq(void *irq) -{ - enable_percpu_irq((long)irq, 0); -} - -static void disable_ingress_irq(void *irq) -{ - disable_percpu_irq((long)irq); -} - -/* Helper function for tile_net_open() and tile_net_stop(). - * Always called under tile_net_devs_for_channel_mutex. 
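- * The mutex keeps the channel-to-device table consistent with the classifier rules that tile_net_update() rebuilds from it.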
- */ -static int tile_net_update(struct net_device *dev) -{ - static gxio_mpipe_rules_t rules; /* too big to fit on the stack */ - bool saw_channel = false; - int instance = mpipe_instance(dev); - struct mpipe_data *md = &mpipe_data[instance]; - int channel; - int rc; - int cpu; - - saw_channel = false; - gxio_mpipe_rules_init(&rules, &md->context); - - for (channel = 0; channel < TILE_NET_CHANNELS; channel++) { - if (md->tile_net_devs_for_channel[channel] == NULL) - continue; - if (!saw_channel) { - saw_channel = true; - gxio_mpipe_rules_begin(&rules, md->first_bucket, - md->num_buckets, NULL); - gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN); - } - gxio_mpipe_rules_add_channel(&rules, channel); - } - - /* NOTE: This can fail if there is no classifier. - * ISSUE: Can anything else cause it to fail? - */ - rc = gxio_mpipe_rules_commit(&rules); - if (rc != 0) { - netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n", - instance, rc); - return -EIO; - } - - /* Update all cpus, sequentially (to protect "netif_napi_add()"). - * We use on_each_cpu to handle the IPI mask or unmask. - */ - if (!saw_channel) - on_each_cpu(disable_ingress_irq, - (void *)(long)(md->ingress_irq), 1); - for_each_online_cpu(cpu) { - struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); - - if (!info->mpipe[instance].has_iqueue) - continue; - if (saw_channel) { - if (!info->mpipe[instance].napi_added) { - netif_napi_add(dev, &info->mpipe[instance].napi, - tile_net_poll, TILE_NET_WEIGHT); - info->mpipe[instance].napi_added = true; - } - if (!info->mpipe[instance].napi_enabled) { - napi_enable(&info->mpipe[instance].napi); - info->mpipe[instance].napi_enabled = true; - } - } else { - if (info->mpipe[instance].napi_enabled) { - napi_disable(&info->mpipe[instance].napi); - info->mpipe[instance].napi_enabled = false; - } - /* FIXME: Drain the iqueue. */ - } - } - if (saw_channel) - on_each_cpu(enable_ingress_irq, - (void *)(long)(md->ingress_irq), 1); - - /* HACK: Allow packets to flow in the simulator. */ - if (saw_channel) - sim_enable_mpipe_links(instance, -1); - - return 0; -} - -/* Initialize a buffer stack. */ -static int create_buffer_stack(struct net_device *dev, - int kind, size_t num_buffers) -{ - pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH); - int instance = mpipe_instance(dev); - struct mpipe_data *md = &mpipe_data[instance]; - size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers); - int stack_idx = md->first_buffer_stack + kind; - void *va; - int i, rc; - - /* Round up to 64KB and then use alloc_pages() so we get the - * required 64KB alignment. - */ - md->buffer_stack_bytes[kind] = - ALIGN(needed, 64 * 1024); - - va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL); - if (va == NULL) { - netdev_err(dev, - "Could not alloc %zd bytes for buffer stack %d\n", - md->buffer_stack_bytes[kind], kind); - return -ENOMEM; - } - - /* Initialize the buffer stack. */ - rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx, - buffer_size_enums[kind], va, - md->buffer_stack_bytes[kind], 0); - if (rc != 0) { - netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n", - instance, rc); - free_pages_exact(va, md->buffer_stack_bytes[kind]); - return rc; - } - - md->buffer_stack_vas[kind] = va; - - rc = gxio_mpipe_register_client_memory(&md->context, stack_idx, - hash_pte, 0); - if (rc != 0) { - netdev_err(dev, - "gxio_mpipe_register_client_memory: mpipe[%d] %d\n", - instance, rc); - return rc; - } - - /* Provide initial buffers. 
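Push num_buffers skbs onto the new stack up front; failure to allocate any of them aborts stack creation with -ENOMEM.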
*/ - for (i = 0; i < num_buffers; i++) { - if (!tile_net_provide_buffer(instance, kind)) { - netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); - return -ENOMEM; - } - } - - return 0; -} - -/* Allocate and initialize mpipe buffer stacks, and register them in - * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes. - * This routine supports tile_net_init_mpipe(), below. - */ -static int init_buffer_stacks(struct net_device *dev, - int network_cpus_count) -{ - int num_kinds = MAX_KINDS - (jumbo_num == 0); - size_t num_buffers; - int rc; - int instance = mpipe_instance(dev); - struct mpipe_data *md = &mpipe_data[instance]; - - /* Allocate the buffer stacks. */ - rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0); - if (rc < 0) { - netdev_err(dev, - "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n", - instance, rc); - return rc; - } - md->first_buffer_stack = rc; - - /* Enough small/large buffers to (normally) avoid buffer errors. */ - num_buffers = - network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH); - - /* Allocate the small memory stack. */ - if (rc >= 0) - rc = create_buffer_stack(dev, 0, num_buffers); - - /* Allocate the large buffer stack. */ - if (rc >= 0) - rc = create_buffer_stack(dev, 1, num_buffers); - - /* Allocate the jumbo buffer stack if needed. */ - if (rc >= 0 && jumbo_num != 0) - rc = create_buffer_stack(dev, 2, jumbo_num); - - return rc; -} - -/* Allocate per-cpu resources (memory for completions and idescs). - * This routine supports tile_net_init_mpipe(), below. - */ -static int alloc_percpu_mpipe_resources(struct net_device *dev, - int cpu, int ring) -{ - struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); - int order, i, rc; - int instance = mpipe_instance(dev); - struct mpipe_data *md = &mpipe_data[instance]; - struct page *page; - void *addr; - - /* Allocate the "comps". */ - order = get_order(COMPS_SIZE); - page = homecache_alloc_pages(GFP_KERNEL, order, cpu); - if (page == NULL) { - netdev_err(dev, "Failed to alloc %zd bytes comps memory\n", - COMPS_SIZE); - return -ENOMEM; - } - addr = pfn_to_kaddr(page_to_pfn(page)); - memset(addr, 0, COMPS_SIZE); - for (i = 0; i < TILE_NET_CHANNELS; i++) - info->mpipe[instance].comps_for_echannel[i] = - addr + i * sizeof(struct tile_net_comps); - - /* If this is a network cpu, create an iqueue. */ - if (cpumask_test_cpu(cpu, &network_cpus_map)) { - order = get_order(NOTIF_RING_SIZE); - page = homecache_alloc_pages(GFP_KERNEL, order, cpu); - if (page == NULL) { - netdev_err(dev, - "Failed to alloc %zd bytes iqueue memory\n", - NOTIF_RING_SIZE); - return -ENOMEM; - } - addr = pfn_to_kaddr(page_to_pfn(page)); - rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue, - &md->context, ring++, addr, - NOTIF_RING_SIZE, 0); - if (rc < 0) { - netdev_err(dev, - "gxio_mpipe_iqueue_init failed: %d\n", rc); - return rc; - } - info->mpipe[instance].has_iqueue = true; - } - - return ring; -} - -/* Initialize NotifGroup and buckets. - * This routine supports tile_net_init_mpipe(), below. - */ -static int init_notif_group_and_buckets(struct net_device *dev, - int ring, int network_cpus_count) -{ - int group, rc; - int instance = mpipe_instance(dev); - struct mpipe_data *md = &mpipe_data[instance]; - - /* Allocate one NotifGroup. */ - rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0); - if (rc < 0) { - netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n", - instance, rc); - return rc; - } - group = rc; - - /* Initialize global num_buckets value. 
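More network cpus get more load-balancer buckets: 256 above four cpus, 16 above one, otherwise the default of 1 set in the mpipe_data initializer.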
*/ - if (network_cpus_count > 4) - md->num_buckets = 256; - else if (network_cpus_count > 1) - md->num_buckets = 16; - - /* Allocate some buckets, and set global first_bucket value. */ - rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0); - if (rc < 0) { - netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n", - instance, rc); - return rc; - } - md->first_bucket = rc; - - /* Init group and buckets. */ - rc = gxio_mpipe_init_notif_group_and_buckets( - &md->context, group, ring, network_cpus_count, - md->first_bucket, md->num_buckets, - GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY); - if (rc != 0) { - netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: " - "mpipe[%d] %d\n", instance, rc); - return rc; - } - - return 0; -} - -/* Create an irq and register it, then activate the irq and request - * interrupts on all cores. Note that "ingress_irq" being initialized - * is how we know not to call tile_net_init_mpipe() again. - * This routine supports tile_net_init_mpipe(), below. - */ -static int tile_net_setup_interrupts(struct net_device *dev) -{ - int cpu, rc, irq; - int instance = mpipe_instance(dev); - struct mpipe_data *md = &mpipe_data[instance]; - - irq = md->ingress_irq; - if (irq < 0) { - irq = irq_alloc_hwirq(-1); - if (!irq) { - netdev_err(dev, - "create_irq failed: mpipe[%d] %d\n", - instance, irq); - return irq; - } - tile_irq_activate(irq, TILE_IRQ_PERCPU); - - rc = request_irq(irq, tile_net_handle_ingress_irq, - 0, "tile_net", (void *)((uint64_t)instance)); - - if (rc != 0) { - netdev_err(dev, "request_irq failed: mpipe[%d] %d\n", - instance, rc); - irq_free_hwirq(irq); - return rc; - } - md->ingress_irq = irq; - } - - for_each_online_cpu(cpu) { - struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); - if (info->mpipe[instance].has_iqueue) { - gxio_mpipe_request_notif_ring_interrupt(&md->context, - cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq, - info->mpipe[instance].iqueue.ring); - } - } - - return 0; -} - -/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */ -static void tile_net_init_mpipe_fail(int instance) -{ - int kind, cpu; - struct mpipe_data *md = &mpipe_data[instance]; - - /* Do cleanups that require the mpipe context first. */ - for (kind = 0; kind < MAX_KINDS; kind++) { - if (md->buffer_stack_vas[kind] != NULL) { - tile_net_pop_all_buffers(instance, - md->first_buffer_stack + - kind); - } - } - - /* Destroy mpipe context so the hardware no longer owns any memory. */ - gxio_mpipe_destroy(&md->context); - - for_each_online_cpu(cpu) { - struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); - free_pages( - (unsigned long)( - info->mpipe[instance].comps_for_echannel[0]), - get_order(COMPS_SIZE)); - info->mpipe[instance].comps_for_echannel[0] = NULL; - free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs), - get_order(NOTIF_RING_SIZE)); - info->mpipe[instance].iqueue.idescs = NULL; - } - - for (kind = 0; kind < MAX_KINDS; kind++) { - if (md->buffer_stack_vas[kind] != NULL) { - free_pages_exact(md->buffer_stack_vas[kind], - md->buffer_stack_bytes[kind]); - md->buffer_stack_vas[kind] = NULL; - } - } - - md->first_buffer_stack = -1; - md->first_bucket = -1; -} - -/* The first time any tilegx network device is opened, we initialize - * the global mpipe state. If this step fails, we fail to open the - * device, but if it succeeds, we never need to do it again, and since - * tile_net can't be unloaded, we never undo it. 
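- * There is no module unload path, so this global state simply lives for the rest of the kernel's lifetime.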
- * - * Note that some resources in this path (buffer stack indices, - * bindings from init_buffer_stack, etc.) are hypervisor resources - * that are freed implicitly by gxio_mpipe_destroy(). - */ -static int tile_net_init_mpipe(struct net_device *dev) -{ - int rc; - int cpu; - int first_ring, ring; - int instance = mpipe_instance(dev); - struct mpipe_data *md = &mpipe_data[instance]; - int network_cpus_count = cpumask_weight(&network_cpus_map); - - if (!hash_default) { - netdev_err(dev, "Networking requires hash_default!\n"); - return -EIO; - } - - rc = gxio_mpipe_init(&md->context, instance); - if (rc != 0) { - netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n", - instance, rc); - return -EIO; - } - - /* Set up the buffer stacks. */ - rc = init_buffer_stacks(dev, network_cpus_count); - if (rc != 0) - goto fail; - - /* Allocate one NotifRing for each network cpu. */ - rc = gxio_mpipe_alloc_notif_rings(&md->context, - network_cpus_count, 0, 0); - if (rc < 0) { - netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n", - rc); - goto fail; - } - - /* Init NotifRings per-cpu. */ - first_ring = rc; - ring = first_ring; - for_each_online_cpu(cpu) { - rc = alloc_percpu_mpipe_resources(dev, cpu, ring); - if (rc < 0) - goto fail; - ring = rc; - } - - /* Initialize NotifGroup and buckets. */ - rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count); - if (rc != 0) - goto fail; - - /* Create and enable interrupts. */ - rc = tile_net_setup_interrupts(dev); - if (rc != 0) - goto fail; - - /* Register PTP clock and set mPIPE timestamp, if configured. */ - register_ptp_clock(dev, md); - - return 0; - -fail: - tile_net_init_mpipe_fail(instance); - return rc; -} - -/* Create persistent egress info for a given egress channel. - * Note that this may be shared between, say, "gbe0" and "xgbe0". - * ISSUE: Defer header allocation until TSO is actually needed? - */ -static int tile_net_init_egress(struct net_device *dev, int echannel) -{ - static int ering = -1; - struct page *headers_page, *edescs_page, *equeue_page; - gxio_mpipe_edesc_t *edescs; - gxio_mpipe_equeue_t *equeue; - unsigned char *headers; - int headers_order, edescs_order, equeue_order; - size_t edescs_size; - int rc = -ENOMEM; - int instance = mpipe_instance(dev); - struct mpipe_data *md = &mpipe_data[instance]; - - /* Only initialize once. */ - if (md->egress_for_echannel[echannel].equeue != NULL) - return 0; - - /* Allocate memory for the "headers". */ - headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES); - headers_page = alloc_pages(GFP_KERNEL, headers_order); - if (headers_page == NULL) { - netdev_warn(dev, - "Could not alloc %zd bytes for TSO headers.\n", - PAGE_SIZE << headers_order); - goto fail; - } - headers = pfn_to_kaddr(page_to_pfn(headers_page)); - - /* Allocate memory for the "edescs". */ - edescs_size = EQUEUE_ENTRIES * sizeof(*edescs); - edescs_order = get_order(edescs_size); - edescs_page = alloc_pages(GFP_KERNEL, edescs_order); - if (edescs_page == NULL) { - netdev_warn(dev, - "Could not alloc %zd bytes for eDMA ring.\n", - edescs_size); - goto fail_headers; - } - edescs = pfn_to_kaddr(page_to_pfn(edescs_page)); - - /* Allocate memory for the "equeue". 
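A single page allocation holds the gxio_mpipe_equeue_t state; like the headers and edescs above, it persists for this echannel once initialized.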
*/ - equeue_order = get_order(sizeof(*equeue)); - equeue_page = alloc_pages(GFP_KERNEL, equeue_order); - if (equeue_page == NULL) { - netdev_warn(dev, - "Could not alloc %zd bytes for equeue info.\n", - PAGE_SIZE << equeue_order); - goto fail_edescs; - } - equeue = pfn_to_kaddr(page_to_pfn(equeue_page)); - - /* Allocate an edma ring (using a one entry "free list"). */ - if (ering < 0) { - rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0); - if (rc < 0) { - netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: " - "mpipe[%d] %d\n", instance, rc); - goto fail_equeue; - } - ering = rc; - } - - /* Initialize the equeue. */ - rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel, - edescs, edescs_size, 0); - if (rc != 0) { - netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n", - instance, rc); - goto fail_equeue; - } - - /* Don't reuse the ering later. */ - ering = -1; - - if (jumbo_num != 0) { - /* Make sure "jumbo" packets can be egressed safely. */ - if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) { - /* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */ - netdev_warn(dev, "Jumbo packets may not be egressed" - " properly on channel %d\n", echannel); - } - } - - /* Done. */ - md->egress_for_echannel[echannel].equeue = equeue; - md->egress_for_echannel[echannel].headers = headers; - return 0; - -fail_equeue: - __free_pages(equeue_page, equeue_order); - -fail_edescs: - __free_pages(edescs_page, edescs_order); - -fail_headers: - __free_pages(headers_page, headers_order); - -fail: - return rc; -} - -/* Return channel number for a newly-opened link. */ -static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, - const char *link_name) -{ - int instance = mpipe_instance(dev); - struct mpipe_data *md = &mpipe_data[instance]; - int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0); - if (rc < 0) { - netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n", - link_name, instance, rc); - return rc; - } - if (jumbo_num != 0) { - u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO; - rc = gxio_mpipe_link_set_attr(link, attr, 1); - if (rc != 0) { - netdev_err(dev, - "Cannot receive jumbo packets on '%s'\n", - link_name); - gxio_mpipe_link_close(link); - return rc; - } - } - rc = gxio_mpipe_link_channel(link); - if (rc < 0 || rc >= TILE_NET_CHANNELS) { - netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc); - gxio_mpipe_link_close(link); - return -EINVAL; - } - return rc; -} - -/* Help the kernel activate the given network interface. */ -static int tile_net_open(struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - int cpu, rc, instance; - - mutex_lock(&tile_net_devs_for_channel_mutex); - - /* Get the instance info. */ - rc = gxio_mpipe_link_instance(dev->name); - if (rc < 0 || rc >= NR_MPIPE_MAX) { - mutex_unlock(&tile_net_devs_for_channel_mutex); - return -EIO; - } - - priv->instance = rc; - instance = rc; - if (!mpipe_data[rc].context.mmio_fast_base) { - /* Do one-time initialization per instance the first time - * any device is opened. - */ - rc = tile_net_init_mpipe(dev); - if (rc != 0) - goto fail; - } - - /* Determine if this is the "loopify" device. 
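If it is, ingress comes from "loop0" and egress goes to "loop1" instead of the named link, as described for the tile_net.loopify module parameter above.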
*/ - if (unlikely((loopify_link_name != NULL) && - !strcmp(dev->name, loopify_link_name))) { - rc = tile_net_link_open(dev, &priv->link, "loop0"); - if (rc < 0) - goto fail; - priv->channel = rc; - rc = tile_net_link_open(dev, &priv->loopify_link, "loop1"); - if (rc < 0) - goto fail; - priv->loopify_channel = rc; - priv->echannel = rc; - } else { - rc = tile_net_link_open(dev, &priv->link, dev->name); - if (rc < 0) - goto fail; - priv->channel = rc; - priv->echannel = rc; - } - - /* Initialize egress info (if needed). Once ever, per echannel. */ - rc = tile_net_init_egress(dev, priv->echannel); - if (rc != 0) - goto fail; - - mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev; - - rc = tile_net_update(dev); - if (rc != 0) - goto fail; - - mutex_unlock(&tile_net_devs_for_channel_mutex); - - /* Initialize the transmit wake timer for this device for each cpu. */ - for_each_online_cpu(cpu) { - struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); - struct tile_net_tx_wake *tx_wake = - &info->mpipe[instance].tx_wake[priv->echannel]; - - hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - tx_wake->tx_queue_idx = cpu; - tx_wake->timer.function = tile_net_handle_tx_wake_timer; - tx_wake->dev = dev; - } - - for_each_online_cpu(cpu) - netif_start_subqueue(dev, cpu); - netif_carrier_on(dev); - return 0; - -fail: - if (priv->loopify_channel >= 0) { - if (gxio_mpipe_link_close(&priv->loopify_link) != 0) - netdev_warn(dev, "Failed to close loopify link!\n"); - priv->loopify_channel = -1; - } - if (priv->channel >= 0) { - if (gxio_mpipe_link_close(&priv->link) != 0) - netdev_warn(dev, "Failed to close link!\n"); - priv->channel = -1; - } - priv->echannel = -1; - mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL; - mutex_unlock(&tile_net_devs_for_channel_mutex); - - /* Don't return raw gxio error codes to generic Linux. */ - return (rc > -512) ? rc : -EIO; -} - -/* Help the kernel deactivate the given network interface. */ -static int tile_net_stop(struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - int cpu; - int instance = priv->instance; - struct mpipe_data *md = &mpipe_data[instance]; - - for_each_online_cpu(cpu) { - struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); - struct tile_net_tx_wake *tx_wake = - &info->mpipe[instance].tx_wake[priv->echannel]; - - hrtimer_cancel(&tx_wake->timer); - netif_stop_subqueue(dev, cpu); - } - - mutex_lock(&tile_net_devs_for_channel_mutex); - md->tile_net_devs_for_channel[priv->channel] = NULL; - (void)tile_net_update(dev); - if (priv->loopify_channel >= 0) { - if (gxio_mpipe_link_close(&priv->loopify_link) != 0) - netdev_warn(dev, "Failed to close loopify link!\n"); - priv->loopify_channel = -1; - } - if (priv->channel >= 0) { - if (gxio_mpipe_link_close(&priv->link) != 0) - netdev_warn(dev, "Failed to close link!\n"); - priv->channel = -1; - } - priv->echannel = -1; - mutex_unlock(&tile_net_devs_for_channel_mutex); - - return 0; -} - -/* Determine the VA for a fragment. */ -static inline void *tile_net_frag_buf(skb_frag_t *f) -{ - unsigned long pfn = page_to_pfn(skb_frag_page(f)); - return pfn_to_kaddr(pfn) + f->page_offset; -} - -/* Acquire a completion entry and an egress slot, or if we can't, - * stop the queue and schedule the tx_wake timer. - */ -static s64 tile_net_equeue_try_reserve(struct net_device *dev, - int tx_queue_idx, - struct tile_net_comps *comps, - gxio_mpipe_equeue_t *equeue, - int num_edescs) -{ - /* Try to acquire a completion entry. 
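There is room when fewer than TILE_NET_MAX_COMPS - 1 completions are outstanding, or when freeing up to 32 old ones succeeds.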
*/ - if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 || - tile_net_free_comps(equeue, comps, 32, false) != 0) { - - /* Try to acquire an egress slot. */ - s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); - if (slot >= 0) - return slot; - - /* Freeing some completions gives the equeue time to drain. */ - tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false); - - slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); - if (slot >= 0) - return slot; - } - - /* Still nothing; give up and stop the queue for a short while. */ - netif_stop_subqueue(dev, tx_queue_idx); - tile_net_schedule_tx_wake_timer(dev, tx_queue_idx); - return -1; -} - -/* Determine how many edesc's are needed for TSO. - * - * Sometimes, if "sendfile()" requires copying, we will be called with - * "data" containing the header and payload, with "frags" being empty. - * Sometimes, for example when using NFS over TCP, a single segment can - * span 3 fragments. This requires special care. - */ -static int tso_count_edescs(struct sk_buff *skb) -{ - struct skb_shared_info *sh = skb_shinfo(skb); - unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - unsigned int data_len = skb->len - sh_len; - unsigned int p_len = sh->gso_size; - long f_id = -1; /* id of the current fragment */ - long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ - long f_used = 0; /* bytes used from the current fragment */ - long n; /* size of the current piece of payload */ - int num_edescs = 0; - int segment; - - for (segment = 0; segment < sh->gso_segs; segment++) { - - unsigned int p_used = 0; - - /* One edesc for header and for each piece of the payload. */ - for (num_edescs++; p_used < p_len; num_edescs++) { - - /* Advance as needed. */ - while (f_used >= f_size) { - f_id++; - f_size = skb_frag_size(&sh->frags[f_id]); - f_used = 0; - } - - /* Use bytes from the current fragment. */ - n = p_len - p_used; - if (n > f_size - f_used) - n = f_size - f_used; - f_used += n; - p_used += n; - } - - /* The last segment may be less than gso_size. */ - data_len -= p_len; - if (data_len < p_len) - p_len = data_len; - } - - return num_edescs; -} - -/* Prepare modified copies of the skbuff headers. */ -static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, - s64 slot) -{ - struct skb_shared_info *sh = skb_shinfo(skb); - struct iphdr *ih; - struct ipv6hdr *ih6; - struct tcphdr *th; - unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - unsigned int data_len = skb->len - sh_len; - unsigned char *data = skb->data; - unsigned int ih_off, th_off, p_len; - unsigned int isum_seed, tsum_seed, seq; - unsigned int uninitialized_var(id); - int is_ipv6; - long f_id = -1; /* id of the current fragment */ - long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ - long f_used = 0; /* bytes used from the current fragment */ - long n; /* size of the current piece of payload */ - int segment; - - /* Locate original headers and compute various lengths. */ - is_ipv6 = skb_is_gso_v6(skb); - if (is_ipv6) { - ih6 = ipv6_hdr(skb); - ih_off = skb_network_offset(skb); - } else { - ih = ip_hdr(skb); - ih_off = skb_network_offset(skb); - isum_seed = ((0xFFFF - ih->check) + - (0xFFFF - ih->tot_len) + - (0xFFFF - ih->id)); - id = ntohs(ih->id); - } - - th = tcp_hdr(skb); - th_off = skb_transport_offset(skb); - p_len = sh->gso_size; - - tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); - seq = ntohl(th->seq); - - /* Prepare all the headers. 
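One patched header copy per GSO segment: the IP tot_len/payload_len, the IP id, the TCP sequence number, and both checksums are updated incrementally from the seeds computed above, and FIN/PSH are cleared on all but the last segment.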
*/ - for (segment = 0; segment < sh->gso_segs; segment++) { - unsigned char *buf; - unsigned int p_used = 0; - - /* Copy to the header memory for this segment. */ - buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + - NET_IP_ALIGN; - memcpy(buf, data, sh_len); - - /* Update copied ip header. */ - if (is_ipv6) { - ih6 = (struct ipv6hdr *)(buf + ih_off); - ih6->payload_len = htons(sh_len + p_len - ih_off - - sizeof(*ih6)); - } else { - ih = (struct iphdr *)(buf + ih_off); - ih->tot_len = htons(sh_len + p_len - ih_off); - ih->id = htons(id++); - ih->check = csum_long(isum_seed + ih->tot_len + - ih->id) ^ 0xffff; - } - - /* Update copied tcp header. */ - th = (struct tcphdr *)(buf + th_off); - th->seq = htonl(seq); - th->check = csum_long(tsum_seed + htons(sh_len + p_len)); - if (segment != sh->gso_segs - 1) { - th->fin = 0; - th->psh = 0; - } - - /* Skip past the header. */ - slot++; - - /* Skip past the payload. */ - while (p_used < p_len) { - - /* Advance as needed. */ - while (f_used >= f_size) { - f_id++; - f_size = skb_frag_size(&sh->frags[f_id]); - f_used = 0; - } - - /* Use bytes from the current fragment. */ - n = p_len - p_used; - if (n > f_size - f_used) - n = f_size - f_used; - f_used += n; - p_used += n; - - slot++; - } - - seq += p_len; - - /* The last segment may be less than gso_size. */ - data_len -= p_len; - if (data_len < p_len) - p_len = data_len; - } - - /* Flush the headers so they are ready for hardware DMA. */ - wmb(); -} - -/* Pass all the data to mpipe for egress. */ -static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, - struct sk_buff *skb, unsigned char *headers, s64 slot) -{ - struct skb_shared_info *sh = skb_shinfo(skb); - int instance = mpipe_instance(dev); - struct mpipe_data *md = &mpipe_data[instance]; - unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - unsigned int data_len = skb->len - sh_len; - unsigned int p_len = sh->gso_size; - gxio_mpipe_edesc_t edesc_head = { { 0 } }; - gxio_mpipe_edesc_t edesc_body = { { 0 } }; - long f_id = -1; /* id of the current fragment */ - long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ - long f_used = 0; /* bytes used from the current fragment */ - void *f_data = skb->data + sh_len; - long n; /* size of the current piece of payload */ - unsigned long tx_packets = 0, tx_bytes = 0; - unsigned int csum_start; - int segment; - - /* Prepare to egress the headers: set up header edesc. */ - csum_start = skb_checksum_start_offset(skb); - edesc_head.csum = 1; - edesc_head.csum_start = csum_start; - edesc_head.csum_dest = csum_start + skb->csum_offset; - edesc_head.xfer_size = sh_len; - - /* This is only used to specify the TLB. */ - edesc_head.stack_idx = md->first_buffer_stack; - edesc_body.stack_idx = md->first_buffer_stack; - - /* Egress all the edescs. */ - for (segment = 0; segment < sh->gso_segs; segment++) { - unsigned char *buf; - unsigned int p_used = 0; - - /* Egress the header. */ - buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + - NET_IP_ALIGN; - edesc_head.va = va_to_tile_io_addr(buf); - gxio_mpipe_equeue_put_at(equeue, edesc_head, slot); - slot++; - - /* Egress the payload. */ - while (p_used < p_len) { - void *va; - - /* Advance as needed. */ - while (f_used >= f_size) { - f_id++; - f_size = skb_frag_size(&sh->frags[f_id]); - f_data = tile_net_frag_buf(&sh->frags[f_id]); - f_used = 0; - } - - va = f_data + f_used; - - /* Use bytes from the current fragment. 
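Take min(p_len - p_used, f_size - f_used) bytes, i.e. as much of this segment as the current fragment can still supply.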
*/ - n = p_len - p_used; - if (n > f_size - f_used) - n = f_size - f_used; - f_used += n; - p_used += n; - - /* Egress a piece of the payload. */ - edesc_body.va = va_to_tile_io_addr(va); - edesc_body.xfer_size = n; - edesc_body.bound = !(p_used < p_len); - gxio_mpipe_equeue_put_at(equeue, edesc_body, slot); - slot++; - } - - tx_packets++; - tx_bytes += sh_len + p_len; - - /* The last segment may be less than gso_size. */ - data_len -= p_len; - if (data_len < p_len) - p_len = data_len; - } - - /* Update stats. */ - tile_net_stats_add(tx_packets, &dev->stats.tx_packets); - tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes); -} - -/* Do "TSO" handling for egress. - * - * Normally drivers set NETIF_F_TSO only to support hardware TSO; - * otherwise the stack uses scatter-gather to implement GSO in software. - * On our testing, enabling GSO support (via NETIF_F_SG) drops network - * performance down to around 7.5 Gbps on the 10G interfaces, although - * also dropping cpu utilization way down, to under 8%. But - * implementing "TSO" in the driver brings performance back up to line - * rate, while dropping cpu usage even further, to less than 4%. In - * practice, profiling of GSO shows that skb_segment() is what causes - * the performance overheads; we benefit in the driver from using - * preallocated memory to duplicate the TCP/IP headers. - */ -static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) -{ - struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); - struct tile_net_priv *priv = netdev_priv(dev); - int channel = priv->echannel; - int instance = priv->instance; - struct mpipe_data *md = &mpipe_data[instance]; - struct tile_net_egress *egress = &md->egress_for_echannel[channel]; - struct tile_net_comps *comps = - info->mpipe[instance].comps_for_echannel[channel]; - gxio_mpipe_equeue_t *equeue = egress->equeue; - unsigned long irqflags; - int num_edescs; - s64 slot; - - /* Determine how many mpipe edesc's are needed. */ - num_edescs = tso_count_edescs(skb); - - local_irq_save(irqflags); - - /* Try to acquire a completion entry and an egress slot. */ - slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps, - equeue, num_edescs); - if (slot < 0) { - local_irq_restore(irqflags); - return NETDEV_TX_BUSY; - } - - /* Set up copies of header data properly. */ - tso_headers_prepare(skb, egress->headers, slot); - - /* Actually pass the data to the network hardware. */ - tso_egress(dev, equeue, skb, egress->headers, slot); - - /* Add a completion record. */ - add_comp(equeue, comps, slot + num_edescs - 1, skb); - - local_irq_restore(irqflags); - - /* Make sure the egress timer is scheduled. */ - tile_net_schedule_egress_timer(); - - return NETDEV_TX_OK; -} - -/* Analyze the body and frags for a transmit request. */ -static unsigned int tile_net_tx_frags(struct frag *frags, - struct sk_buff *skb, - void *b_data, unsigned int b_len) -{ - unsigned int i, n = 0; - - struct skb_shared_info *sh = skb_shinfo(skb); - - if (b_len != 0) { - frags[n].buf = b_data; - frags[n++].length = b_len; - } - - for (i = 0; i < sh->nr_frags; i++) { - skb_frag_t *f = &sh->frags[i]; - frags[n].buf = tile_net_frag_buf(f); - frags[n++].length = skb_frag_size(f); - } - - return n; -} - -/* Help the kernel transmit a packet. 
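
Before the simple (non-TSO) transmit path below, note that tile_net_tx_frags() just flattens the buffer into a (pointer, length) list: the linear head first if non-empty, then each page fragment. The same gathering step in a user-space sketch, with an invented fake_frag type standing in for skb_frag_t:

    #include <stddef.h>

    struct frag {                   /* (buffer, length) pair for the NIC */
        void *buf;
        unsigned int length;
    };

    struct fake_frag {              /* stand-in for skb_frag_t */
        void *va;
        unsigned int size;
    };

    static unsigned int gather_frags(struct frag *out,
                                     void *head, unsigned int head_len,
                                     const struct fake_frag *frags,
                                     unsigned int nr_frags)
    {
        unsigned int i, n = 0;

        if (head_len != 0) {        /* linear area first */
            out[n].buf = head;
            out[n++].length = head_len;
        }
        for (i = 0; i < nr_frags; i++) {
            out[n].buf = frags[i].va;
            out[n++].length = frags[i].size;
        }
        return n;
    }

    int main(void)
    {
        char head[64], pg0[100], pg1[200];
        struct fake_frag pf[2] = { { pg0, sizeof(pg0) }, { pg1, sizeof(pg1) } };
        struct frag out[3];

        return gather_frags(out, head, sizeof(head), pf, 2) == 3 ? 0 : 1;
    }
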
*/ -static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) -{ - struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); - struct tile_net_priv *priv = netdev_priv(dev); - int instance = priv->instance; - struct mpipe_data *md = &mpipe_data[instance]; - struct tile_net_egress *egress = - &md->egress_for_echannel[priv->echannel]; - gxio_mpipe_equeue_t *equeue = egress->equeue; - struct tile_net_comps *comps = - info->mpipe[instance].comps_for_echannel[priv->echannel]; - unsigned int len = skb->len; - unsigned char *data = skb->data; - unsigned int num_edescs; - struct frag frags[MAX_FRAGS]; - gxio_mpipe_edesc_t edescs[MAX_FRAGS]; - unsigned long irqflags; - gxio_mpipe_edesc_t edesc = { { 0 } }; - unsigned int i; - s64 slot; - - if (skb_is_gso(skb)) - return tile_net_tx_tso(skb, dev); - - num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); - - /* This is only used to specify the TLB. */ - edesc.stack_idx = md->first_buffer_stack; - - /* Prepare the edescs. */ - for (i = 0; i < num_edescs; i++) { - edesc.xfer_size = frags[i].length; - edesc.va = va_to_tile_io_addr(frags[i].buf); - edescs[i] = edesc; - } - - /* Mark the final edesc. */ - edescs[num_edescs - 1].bound = 1; - - /* Add checksum info to the initial edesc, if needed. */ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - unsigned int csum_start = skb_checksum_start_offset(skb); - edescs[0].csum = 1; - edescs[0].csum_start = csum_start; - edescs[0].csum_dest = csum_start + skb->csum_offset; - } - - local_irq_save(irqflags); - - /* Try to acquire a completion entry and an egress slot. */ - slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps, - equeue, num_edescs); - if (slot < 0) { - local_irq_restore(irqflags); - return NETDEV_TX_BUSY; - } - - for (i = 0; i < num_edescs; i++) - gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++); - - /* Store TX timestamp if needed. */ - tile_tx_timestamp(skb, instance); - - /* Add a completion record. */ - add_comp(equeue, comps, slot - 1, skb); - - /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */ - tile_net_stats_add(1, &dev->stats.tx_packets); - tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN), - &dev->stats.tx_bytes); - - local_irq_restore(irqflags); - - /* Make sure the egress timer is scheduled. */ - tile_net_schedule_egress_timer(); - - return NETDEV_TX_OK; -} - -/* Return subqueue id on this core (one per core). */ -static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) -{ - return smp_processor_id(); -} - -/* Deal with a transmit timeout. */ -static void tile_net_tx_timeout(struct net_device *dev) -{ - int cpu; - - for_each_online_cpu(cpu) - netif_wake_subqueue(dev, cpu); -} - -/* Ioctl commands. */ -static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - if (cmd == SIOCSHWTSTAMP) - return tile_hwtstamp_set(dev, rq); - if (cmd == SIOCGHWTSTAMP) - return tile_hwtstamp_get(dev, rq); - - return -EOPNOTSUPP; -} - -/* Change the Ethernet address of the NIC. - * - * The hypervisor driver does not support changing MAC address. However, - * the hardware does not do anything with the MAC address, so the address - * which gets used on outgoing packets, and which is accepted on incoming - * packets, is completely up to us. - * - * Returns 0 on success, negative on failure. 
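
The CHECKSUM_PARTIAL handling above only has to tell the hardware where the checksum region starts and where the 16-bit result is written; both offsets come straight from the skb. For a plain untagged TCP/IPv4 frame the arithmetic works out as below (the 14/20/16-byte offsets are the usual layout, assumed here purely for illustration):

    #include <stdio.h>

    int main(void)
    {
        unsigned int eth_hlen = 14, ip_hlen = 20;     /* assumed layout */
        unsigned int csum_start = eth_hlen + ip_hlen; /* checksum from L4 */
        unsigned int check_off = 16;                  /* tcphdr.check offset */

        /* The two fields tile_net_tx() loads into the first edesc. */
        printf("csum_start=%u csum_dest=%u\n",
               csum_start, csum_start + check_off);
        return 0;
    }
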
- */ -static int tile_net_set_mac_address(struct net_device *dev, void *p) -{ - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - return 0; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ -static void tile_net_netpoll(struct net_device *dev) -{ - int instance = mpipe_instance(dev); - struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); - struct mpipe_data *md = &mpipe_data[instance]; - - disable_percpu_irq(md->ingress_irq); - napi_schedule(&info->mpipe[instance].napi); - enable_percpu_irq(md->ingress_irq, 0); -} -#endif - -static const struct net_device_ops tile_net_ops = { - .ndo_open = tile_net_open, - .ndo_stop = tile_net_stop, - .ndo_start_xmit = tile_net_tx, - .ndo_select_queue = tile_net_select_queue, - .ndo_do_ioctl = tile_net_ioctl, - .ndo_tx_timeout = tile_net_tx_timeout, - .ndo_set_mac_address = tile_net_set_mac_address, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = tile_net_netpoll, -#endif -}; - -/* The setup function. - * - * This uses ether_setup() to assign various fields in dev, including - * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. - */ -static void tile_net_setup(struct net_device *dev) -{ - netdev_features_t features = 0; - - ether_setup(dev); - dev->netdev_ops = &tile_net_ops; - dev->watchdog_timeo = TILE_NET_TIMEOUT; - - /* MTU range: 68 - 1500 or 9000 */ - dev->mtu = ETH_DATA_LEN; - dev->min_mtu = ETH_MIN_MTU; - dev->max_mtu = jumbo_num ? TILE_JUMBO_MAX_MTU : ETH_DATA_LEN; - - features |= NETIF_F_HW_CSUM; - features |= NETIF_F_SG; - features |= NETIF_F_TSO; - features |= NETIF_F_TSO6; - - dev->hw_features |= features; - dev->vlan_features |= features; - dev->features |= features; -} - -/* Allocate the device structure, register the device, and obtain the - * MAC address from the hypervisor. - */ -static void tile_net_dev_init(const char *name, const uint8_t *mac) -{ - int ret; - struct net_device *dev; - struct tile_net_priv *priv; - - /* HACK: Ignore "loop" links. */ - if (strncmp(name, "loop", 4) == 0) - return; - - /* Allocate the device structure. Normally, "name" is a - * template, instantiated by register_netdev(), but not for us. - */ - dev = alloc_netdev_mqs(sizeof(*priv), name, NET_NAME_UNKNOWN, - tile_net_setup, NR_CPUS, 1); - if (!dev) { - pr_err("alloc_netdev_mqs(%s) failed\n", name); - return; - } - - /* Initialize "priv". */ - priv = netdev_priv(dev); - priv->dev = dev; - priv->channel = -1; - priv->loopify_channel = -1; - priv->echannel = -1; - init_ptp_dev(priv); - - /* Get the MAC address and set it in the device struct; this must - * be done before the device is opened. If the MAC is all zeroes, - * we use a random address, since we're probably on the simulator. - */ - if (!is_zero_ether_addr(mac)) - ether_addr_copy(dev->dev_addr, mac); - else - eth_hw_addr_random(dev); - - /* Register the network device. */ - ret = register_netdev(dev); - if (ret) { - netdev_err(dev, "register_netdev failed %d\n", ret); - free_netdev(dev); - return; - } -} - -/* Per-cpu module initialization. 
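
The MAC policy in tile_net_dev_init() above amounts to "take the hypervisor's address unless it is all zeroes, else invent a random locally-administered one" (the all-zero case is what the simulator reports). A plain-C model of that fallback; the random generation here is only an approximation of what eth_hw_addr_random() does:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void choose_mac(unsigned char *dst, const unsigned char *hv_mac)
    {
        static const unsigned char zero[6];

        if (memcmp(hv_mac, zero, 6) != 0) {
            memcpy(dst, hv_mac, 6);
            return;
        }
        for (int i = 0; i < 6; i++)     /* random fallback */
            dst[i] = rand() & 0xff;
        dst[0] &= 0xfe;                 /* clear multicast bit */
        dst[0] |= 0x02;                 /* locally administered */
    }

    int main(void)
    {
        unsigned char hv_mac[6] = { 0 };        /* simulator case */
        unsigned char mac[6];

        choose_mac(mac, hv_mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }
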
*/ -static void tile_net_init_module_percpu(void *unused) -{ - struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); - int my_cpu = smp_processor_id(); - int instance; - - for (instance = 0; instance < NR_MPIPE_MAX; instance++) { - info->mpipe[instance].has_iqueue = false; - info->mpipe[instance].instance = instance; - } - info->my_cpu = my_cpu; - - /* Initialize the egress timer. */ - hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - info->egress_timer.function = tile_net_handle_egress_timer; -} - -/* Module initialization. */ -static int __init tile_net_init_module(void) -{ - int i; - char name[GXIO_MPIPE_LINK_NAME_LEN]; - uint8_t mac[6]; - - pr_info("Tilera Network Driver\n"); - - BUILD_BUG_ON(NR_MPIPE_MAX != 2); - - mutex_init(&tile_net_devs_for_channel_mutex); - - /* Initialize each CPU. */ - on_each_cpu(tile_net_init_module_percpu, NULL, 1); - - /* Find out what devices we have, and initialize them. */ - for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++) - tile_net_dev_init(name, mac); - - if (!network_cpus_init()) - cpumask_and(&network_cpus_map, - housekeeping_cpumask(HK_FLAG_MISC), cpu_online_mask); - - return 0; -} - -module_init(tile_net_init_module); diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c deleted file mode 100644 index 56d06282fbde..000000000000 --- a/drivers/net/ethernet/tile/tilepro.c +++ /dev/null @@ -1,2397 +0,0 @@ -/* - * Copyright 2011 Tilera Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation, version 2. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for - * more details. - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/moduleparam.h> -#include <linux/sched.h> -#include <linux/kernel.h> /* printk() */ -#include <linux/slab.h> /* kmalloc() */ -#include <linux/errno.h> /* error codes */ -#include <linux/types.h> /* size_t */ -#include <linux/interrupt.h> -#include <linux/in.h> -#include <linux/netdevice.h> /* struct device, and other headers */ -#include <linux/etherdevice.h> /* eth_type_trans */ -#include <linux/skbuff.h> -#include <linux/ioctl.h> -#include <linux/cdev.h> -#include <linux/hugetlb.h> -#include <linux/in6.h> -#include <linux/timer.h> -#include <linux/io.h> -#include <linux/u64_stats_sync.h> -#include <asm/checksum.h> -#include <asm/homecache.h> - -#include <hv/drv_xgbe_intf.h> -#include <hv/drv_xgbe_impl.h> -#include <hv/hypervisor.h> -#include <hv/netio_intf.h> - -/* For TSO */ -#include <linux/ip.h> -#include <linux/tcp.h> - - -/* - * First, "tile_net_init_module()" initializes all four "devices" which - * can be used by linux. - * - * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes - * the network cpus, then uses "tile_net_open_aux()" to initialize - * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all - * the tiles, provide buffers to LIPP, allow ingress to start, and - * turn on hypervisor interrupt handling (and NAPI) on all tiles. - * - * If registration fails due to the link being down, then "retry_work" - * is used to keep calling "tile_net_open_inner()" until it succeeds. 
- * - * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to - * stop egress, drain the LIPP buffers, unregister all the tiles, stop - * LIPP/LEPP, and wipe the LEPP queue. - * - * We start out with the ingress interrupt enabled on each CPU. When - * this interrupt fires, we disable it, and call "napi_schedule()". - * This will cause "tile_net_poll()" to be called, which will pull - * packets from the netio queue, filtering them out, or passing them - * to "netif_receive_skb()". If our budget is exhausted, we will - * return, knowing we will be called again later. Otherwise, we - * reenable the ingress interrupt, and call "napi_complete()". - * - * HACK: Since disabling the ingress interrupt is not reliable, we - * ignore the interrupt if the global "active" flag is false. - * - * - * NOTE: The use of "native_driver" ensures that EPP exists, and that - * we are using "LIPP" and "LEPP". - * - * NOTE: Failing to free completions for an arbitrarily long time - * (which is defined to be illegal) does in fact cause bizarre - * problems. The "egress_timer" helps prevent this from happening. - */ - - -/* HACK: Allow use of "jumbo" packets. */ -/* This should be 1500 if "jumbo" is not set in LIPP. */ -/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */ -/* ISSUE: This has not been thoroughly tested (except at 1500). */ -#define TILE_NET_MTU ETH_DATA_LEN - -/* HACK: Define this to verify incoming packets. */ -/* #define TILE_NET_VERIFY_INGRESS */ - -/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */ -#define TILE_NET_TX_QUEUE_LEN 0 - -/* Define to dump packets (prints out the whole packet on tx and rx). */ -/* #define TILE_NET_DUMP_PACKETS */ - -/* Define to enable debug spew (all PDEBUG's are enabled). */ -/* #define TILE_NET_DEBUG */ - - -/* Define to activate paranoia checks. */ -/* #define TILE_NET_PARANOIA */ - -/* Default transmit lockup timeout period, in jiffies. */ -#define TILE_NET_TIMEOUT (5 * HZ) - -/* Default retry interval for bringing up the NetIO interface, in jiffies. */ -#define TILE_NET_RETRY_INTERVAL (5 * HZ) - -/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */ -#define TILE_NET_DEVS 4 - - - -/* Paranoia. */ -#if NET_IP_ALIGN != LIPP_PACKET_PADDING -#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING." -#endif - - -/* Debug print. */ -#ifdef TILE_NET_DEBUG -#define PDEBUG(fmt, args...) net_printk(fmt, ## args) -#else -#define PDEBUG(fmt, args...) -#endif - - -MODULE_AUTHOR("Tilera"); -MODULE_LICENSE("GPL"); - - -/* - * Queue of incoming packets for a specific cpu and device. - * - * Includes a pointer to the "system" data, and the actual "user" data. - */ -struct tile_netio_queue { - netio_queue_impl_t *__system_part; - netio_queue_user_impl_t __user_part; - -}; - - -/* - * Statistics counters for a specific cpu and device. - */ -struct tile_net_stats_t { - struct u64_stats_sync syncp; - u64 rx_packets; /* total packets received */ - u64 tx_packets; /* total packets transmitted */ - u64 rx_bytes; /* total bytes received */ - u64 tx_bytes; /* total bytes transmitted */ - u64 rx_errors; /* packets truncated or marked bad by hw */ - u64 rx_dropped; /* packets not for us or intf not up */ -}; - - -/* - * Info for a specific cpu and device. - * - * ISSUE: There is a "dev" pointer in "napi" as well. - */ -struct tile_net_cpu { - /* The NAPI struct. */ - struct napi_struct napi; - /* Packet queue. */ - struct tile_netio_queue queue; - /* Statistics. */ - struct tile_net_stats_t stats; - /* True iff NAPI is enabled. 
*/ - bool napi_enabled; - /* True if this tile has successfully registered with the IPP. */ - bool registered; - /* True if the link was down last time we tried to register. */ - bool link_down; - /* True if "egress_timer" is scheduled. */ - bool egress_timer_scheduled; - /* Number of small sk_buffs which must still be provided. */ - unsigned int num_needed_small_buffers; - /* Number of large sk_buffs which must still be provided. */ - unsigned int num_needed_large_buffers; - /* A timer for handling egress completions. */ - struct timer_list egress_timer; -}; - - -/* - * Info for a specific device. - */ -struct tile_net_priv { - /* Our network device. */ - struct net_device *dev; - /* Pages making up the egress queue. */ - struct page *eq_pages; - /* Address of the actual egress queue. */ - lepp_queue_t *eq; - /* Protects "eq". */ - spinlock_t eq_lock; - /* The hypervisor handle for this interface. */ - int hv_devhdl; - /* The intr bit mask that IDs this device. */ - u32 intr_id; - /* True iff "tile_net_open_aux()" has succeeded. */ - bool partly_opened; - /* True iff the device is "active". */ - bool active; - /* Effective network cpus. */ - struct cpumask network_cpus_map; - /* Number of network cpus. */ - int network_cpus_count; - /* Credits per network cpu. */ - int network_cpus_credits; - /* For NetIO bringup retries. */ - struct delayed_work retry_work; - /* Quick access to per cpu data. */ - struct tile_net_cpu *cpu[NR_CPUS]; -}; - -/* Log2 of the number of small pages needed for the egress queue. */ -#define EQ_ORDER get_order(sizeof(lepp_queue_t)) -/* Size of the egress queue's pages. */ -#define EQ_SIZE (1 << (PAGE_SHIFT + EQ_ORDER)) - -/* - * The actual devices (xgbe0, xgbe1, gbe0, gbe1). - */ -static struct net_device *tile_net_devs[TILE_NET_DEVS]; - -/* - * The "tile_net_cpu" structures for each device. - */ -static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0); -static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1); -static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0); -static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1); - - -/* - * True if "network_cpus" was specified. - */ -static bool network_cpus_used; - -/* - * The actual cpus in "network_cpus". - */ -static struct cpumask network_cpus_map; - - - -#ifdef TILE_NET_DEBUG -/* - * printk with extra stuff. - * - * We print the CPU we're running in brackets. - */ -static void net_printk(char *fmt, ...) -{ - int i; - int len; - va_list args; - static char buf[256]; - - len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id()); - va_start(args, fmt); - i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args); - va_end(args); - buf[255] = '\0'; - pr_notice(buf); -} -#endif - - -#ifdef TILE_NET_DUMP_PACKETS -/* - * Dump a packet. - */ -static void dump_packet(unsigned char *data, unsigned long length, char *s) -{ - int my_cpu = smp_processor_id(); - - unsigned long i; - char buf[128]; - - static unsigned int count; - - pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n", - data, length, s, count++); - - pr_info("\n"); - - for (i = 0; i < length; i++) { - if ((i & 0xf) == 0) - sprintf(buf, "[%02d] %8.8lx:", my_cpu, i); - sprintf(buf + strlen(buf), " %2.2x", data[i]); - if ((i & 0xf) == 0xf || i == length - 1) { - strcat(buf, "\n"); - pr_info("%s", buf); - } - } -} -#endif - - -/* - * Provide support for the __netio_fastio1() swint - * (see <hv/drv_xgbe_intf.h> for how it is used). - * - * The fastio swint2 call may clobber all the caller-saved registers. 
- * It rarely clobbers memory, but we allow for the possibility in - * the signature just to be on the safe side. - * - * Also, gcc doesn't seem to allow an input operand to be - * clobbered, so we fake it with dummy outputs. - * - * This function can't be static because of the way it is declared - * in the netio header. - */ -inline int __netio_fastio1(u32 fastio_index, u32 arg0) -{ - long result, clobber_r1, clobber_r10; - asm volatile("swint2" - : "=R00" (result), - "=R01" (clobber_r1), "=R10" (clobber_r10) - : "R10" (fastio_index), "R01" (arg0) - : "memory", "r2", "r3", "r4", - "r5", "r6", "r7", "r8", "r9", - "r11", "r12", "r13", "r14", - "r15", "r16", "r17", "r18", "r19", - "r20", "r21", "r22", "r23", "r24", - "r25", "r26", "r27", "r28", "r29"); - return result; -} - - -static void tile_net_return_credit(struct tile_net_cpu *info) -{ - struct tile_netio_queue *queue = &info->queue; - netio_queue_user_impl_t *qup = &queue->__user_part; - - /* Return four credits after every fourth packet. */ - if (--qup->__receive_credit_remaining == 0) { - u32 interval = qup->__receive_credit_interval; - qup->__receive_credit_remaining = interval; - __netio_fastio_return_credits(qup->__fastio_index, interval); - } -} - - - -/* - * Provide a linux buffer to LIPP. - */ -static void tile_net_provide_linux_buffer(struct tile_net_cpu *info, - void *va, bool small) -{ - struct tile_netio_queue *queue = &info->queue; - - /* Convert "va" and "small" to "linux_buffer_t". */ - unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small; - - __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer); -} - - -/* - * Provide a linux buffer for LIPP. - * - * Note that the ACTUAL allocation for each buffer is a "struct sk_buff", - * plus a chunk of memory that includes not only the requested bytes, but - * also NET_SKB_PAD bytes of initial padding, and a "struct skb_shared_info". - * - * Note that "struct skb_shared_info" is 88 bytes with 64K pages and - * 268 bytes with 4K pages (since the frags[] array needs 18 entries). - * - * Without jumbo packets, the maximum packet size will be 1536 bytes, - * and we use 2 bytes (NET_IP_ALIGN) of padding. ISSUE: If we told - * the hardware to clip at 1518 bytes instead of 1536 bytes, then we - * could save an entire cache line, but in practice, we don't need it. - * - * Since CPAs are 38 bits, and we can only encode the high 31 bits in - * a "linux_buffer_t", the low 7 bits must be zero, and thus, we must - * align the actual "va" mod 128. - * - * We assume that the underlying "head" will be aligned mod 64. Note - * that in practice, we have seen "head" NOT aligned mod 128 even when - * using 2048 byte allocations, which is surprising. - * - * If "head" WAS always aligned mod 128, we could change LIPP to - * assume that the low SIX bits are zero, and the 7th bit is one, that - * is, align the actual "va" mod 128 plus 64, which would be "free". - * - * For now, the actual "head" pointer points at NET_SKB_PAD bytes of - * padding, plus 28 or 92 bytes of extra padding, plus the sk_buff - * pointer, plus the NET_IP_ALIGN padding, plus 126 or 1536 bytes for - * the actual packet, plus 62 bytes of empty padding, plus some - * padding and the "struct skb_shared_info". - * - * With 64K pages, a large buffer thus needs 32+92+4+2+1536+62+88 - * bytes, or 1816 bytes, which fits comfortably into 2048 bytes. 
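
The "linux_buffer_t" encoding used just above packs a 38-bit client physical address plus a small/large flag into 32 bits: the low 7 address bits must be zero (hence the mod-128 buffer alignment), so the word carries pa >> 7 with the flag in bit 0. An encode/decode round-trip under those invariants:

    #include <assert.h>
    #include <stdint.h>

    /* pa must be 128-byte aligned and below 2^38. */
    static uint32_t encode_buffer(uint64_t pa, int small)
    {
        assert((pa & 127) == 0 && pa < (1ULL << 38));
        return ((uint32_t)(pa >> 7) << 1) | small;
    }

    static uint64_t decode_pa(uint32_t word)
    {
        return ((uint64_t)(word >> 1)) << 7;
    }

    int main(void)
    {
        uint64_t pa = 0x3f00012380ULL;  /* example aligned CPA */
        uint32_t w = encode_buffer(pa, 1);

        assert(decode_pa(w) == pa && (w & 1) == 1);
        return 0;
    }

The decode side is exactly what the ingress path does when it turns a returned buffer word back into a virtual address with __va().
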
- * - * With 64K pages, a small buffer thus needs 32+92+4+2+126+88 - * bytes, or 344 bytes, which means we are wasting 64+ bytes, and - * could presumably increase the size of small buffers. - * - * With 4K pages, a large buffer thus needs 32+92+4+2+1536+62+268 - * bytes, or 1996 bytes, which fits comfortably into 2048 bytes. - * - * With 4K pages, a small buffer thus needs 32+92+4+2+126+268 - * bytes, or 524 bytes, which is annoyingly wasteful. - * - * Maybe we should increase LIPP_SMALL_PACKET_SIZE to 192? - * - * ISSUE: Maybe we should increase "NET_SKB_PAD" to 64? - */ -static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info, - bool small) -{ -#if TILE_NET_MTU <= 1536 - /* Without "jumbo", 2 + 1536 should be sufficient. */ - unsigned int large_size = NET_IP_ALIGN + 1536; -#else - /* ISSUE: This has not been tested. */ - unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100; -#endif - - /* Avoid "false sharing" with last cache line. */ - /* ISSUE: This is already done by "netdev_alloc_skb()". */ - unsigned int len = - (((small ? LIPP_SMALL_PACKET_SIZE : large_size) + - CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE()); - - unsigned int padding = 128 - NET_SKB_PAD; - unsigned int align; - - struct sk_buff *skb; - void *va; - - struct sk_buff **skb_ptr; - - /* Request 96 extra bytes for alignment purposes. */ - skb = netdev_alloc_skb(info->napi.dev, len + padding); - if (skb == NULL) - return false; - - /* Skip 32 or 96 bytes to align "data" mod 128. */ - align = -(long)skb->data & (128 - 1); - BUG_ON(align > padding); - skb_reserve(skb, align); - - /* This address is given to IPP. */ - va = skb->data; - - /* Buffers must not span a huge page. */ - BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0); - -#ifdef TILE_NET_PARANOIA -#if CHIP_HAS_CBOX_HOME_MAP() - if (hash_default) { - HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va); - if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) - panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx", - va, hv_pte_get_mode(pte), hv_pte_val(pte)); - } -#endif -#endif - - /* Invalidate the packet buffer. */ - if (!hash_default) - __inv_buffer(va, len); - - /* Skip two bytes to satisfy LIPP assumptions. */ - /* Note that this aligns IP on a 16 byte boundary. */ - /* ISSUE: Do this when the packet arrives? */ - skb_reserve(skb, NET_IP_ALIGN); - - /* Save a back-pointer to 'skb'. */ - skb_ptr = va - sizeof(*skb_ptr); - *skb_ptr = skb; - - /* Make sure "skb_ptr" has been flushed. */ - __insn_mf(); - - /* Provide the new buffer. */ - tile_net_provide_linux_buffer(info, va, small); - - return true; -} - - -/* - * Provide linux buffers for LIPP. - */ -static void tile_net_provide_needed_buffers(struct tile_net_cpu *info) -{ - while (info->num_needed_small_buffers != 0) { - if (!tile_net_provide_needed_buffer(info, true)) - goto oops; - info->num_needed_small_buffers--; - } - - while (info->num_needed_large_buffers != 0) { - if (!tile_net_provide_needed_buffer(info, false)) - goto oops; - info->num_needed_large_buffers--; - } - - return; - -oops: - - /* Add a description to the page allocation failure dump. */ - pr_notice("Could not provide a linux buffer to LIPP.\n"); -} - - -/* - * Grab some LEPP completions, and store them in "comps", of size - * "comps_size", and return the number of completions which were - * stored, so the caller can free them. 
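
The back-pointer trick above (save the skb address in the bytes just below "va") is what lets the ingress and drain paths recover the owning skb from nothing but the buffer address LIPP hands back. The pattern in isolation, with malloc standing in for netdev_alloc_skb():

    #include <stdio.h>
    #include <stdlib.h>

    struct packet {                 /* stand-in for struct sk_buff */
        char *data;
    };

    static void *publish_buffer(struct packet *pkt)
    {
        void *va = pkt->data;
        struct packet **back = (struct packet **)va - 1;

        *back = pkt;            /* stash back-pointer below the buffer */
        return va;              /* only this address crosses to the hw */
    }

    static struct packet *recover_packet(void *va)
    {
        return *((struct packet **)va - 1);
    }

    int main(void)
    {
        struct packet pkt;
        char *mem = malloc(4096);

        pkt.data = mem + sizeof(struct packet *);   /* room for back-ptr */
        printf("%s\n", recover_packet(publish_buffer(&pkt)) == &pkt ?
               "recovered" : "lost");
        free(mem);
        return 0;
    }
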
- */ -static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq, - struct sk_buff *comps[], - unsigned int comps_size, - unsigned int min_size) -{ - unsigned int n = 0; - - unsigned int comp_head = eq->comp_head; - unsigned int comp_busy = eq->comp_busy; - - while (comp_head != comp_busy && n < comps_size) { - comps[n++] = eq->comps[comp_head]; - LEPP_QINC(comp_head); - } - - if (n < min_size) - return 0; - - eq->comp_head = comp_head; - - return n; -} - - -/* - * Free some comps, and return true iff there are still some pending. - */ -static bool tile_net_lepp_free_comps(struct net_device *dev, bool all) -{ - struct tile_net_priv *priv = netdev_priv(dev); - - lepp_queue_t *eq = priv->eq; - - struct sk_buff *olds[64]; - unsigned int wanted = 64; - unsigned int i, n; - bool pending; - - spin_lock(&priv->eq_lock); - - if (all) - eq->comp_busy = eq->comp_tail; - - n = tile_net_lepp_grab_comps(eq, olds, wanted, 0); - - pending = (eq->comp_head != eq->comp_tail); - - spin_unlock(&priv->eq_lock); - - for (i = 0; i < n; i++) - kfree_skb(olds[i]); - - return pending; -} - - -/* - * Make sure the egress timer is scheduled. - * - * Note that we use "schedule if not scheduled" logic instead of the more - * obvious "reschedule" logic, because "reschedule" is fairly expensive. - */ -static void tile_net_schedule_egress_timer(struct tile_net_cpu *info) -{ - if (!info->egress_timer_scheduled) { - mod_timer(&info->egress_timer, jiffies + 1); - info->egress_timer_scheduled = true; - } -} - - -/* - * The "function" for "info->egress_timer". - * - * This timer will reschedule itself as long as there are any pending - * completions expected (on behalf of any tile). - * - * ISSUE: Realistically, will the timer ever stop scheduling itself? - * - * ISSUE: This timer is almost never actually needed, so just use a global - * timer that can run on any tile. - * - * ISSUE: Maybe instead track number of expected completions, and free - * only that many, resetting to zero if "pending" is ever false. - */ -static void tile_net_handle_egress_timer(struct timer_list *t) -{ - struct tile_net_cpu *info = from_timer(info, t, egress_timer); - struct net_device *dev = info->napi.dev; - - /* The timer is no longer scheduled. */ - info->egress_timer_scheduled = false; - - /* Free comps, and reschedule timer if more are pending. */ - if (tile_net_lepp_free_comps(dev, false)) - tile_net_schedule_egress_timer(info); -} - - -static void tile_net_discard_aux(struct tile_net_cpu *info, int index) -{ - struct tile_netio_queue *queue = &info->queue; - netio_queue_impl_t *qsp = queue->__system_part; - netio_queue_user_impl_t *qup = &queue->__user_part; - - int index2_aux = index + sizeof(netio_pkt_t); - int index2 = - ((index2_aux == - qsp->__packet_receive_queue.__last_packet_plus_one) ? - 0 : index2_aux); - - netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); - - /* Extract the "linux_buffer_t". */ - unsigned int buffer = pkt->__packet.word; - - /* Convert "linux_buffer_t" to "va". */ - void *va = __va((phys_addr_t)(buffer >> 1) << 7); - - /* Acquire the associated "skb". */ - struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); - struct sk_buff *skb = *skb_ptr; - - kfree_skb(skb); - - /* Consume this packet. */ - qup->__packet_receive_read = index2; -} - - -/* - * Like "tile_net_poll()", but just discard packets. 
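
Completion reaping in tile_net_lepp_grab_comps() walks the ring from comp_head to comp_busy, copies out at most comps_size entries, and commits the new head only if at least min_size were found. The ring arithmetic on its own, using a power-of-two ring and a masked increment where the driver uses LEPP_QINC():

    #define QSIZE 128               /* assumed power-of-two ring size */
    #define QINC(i) ((i) = ((i) + 1) & (QSIZE - 1))

    struct ring {
        void *slots[QSIZE];
        unsigned int head;          /* next entry to reap */
        unsigned int busy;          /* first entry still owned by hw */
    };

    static unsigned int grab(struct ring *r, void **out,
                             unsigned int max, unsigned int min)
    {
        unsigned int head = r->head, n = 0;

        while (head != r->busy && n < max) {
            out[n++] = r->slots[head];
            QINC(head);
        }
        if (n < min)
            return 0;               /* not worth committing yet */
        r->head = head;
        return n;
    }

    int main(void)
    {
        struct ring r = { .head = 120, .busy = 3 };     /* wrapped window */
        void *out[QSIZE];

        return grab(&r, out, 16, 1) == 11 ? 0 : 1;      /* 8 + 3 entries */
    }
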
- */ -static void tile_net_discard_packets(struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - int my_cpu = smp_processor_id(); - struct tile_net_cpu *info = priv->cpu[my_cpu]; - struct tile_netio_queue *queue = &info->queue; - netio_queue_impl_t *qsp = queue->__system_part; - netio_queue_user_impl_t *qup = &queue->__user_part; - - while (qup->__packet_receive_read != - qsp->__packet_receive_queue.__packet_write) { - int index = qup->__packet_receive_read; - tile_net_discard_aux(info, index); - } -} - - -/* - * Handle the next packet. Return true if "processed", false if "filtered". - */ -static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) -{ - struct net_device *dev = info->napi.dev; - - struct tile_netio_queue *queue = &info->queue; - netio_queue_impl_t *qsp = queue->__system_part; - netio_queue_user_impl_t *qup = &queue->__user_part; - struct tile_net_stats_t *stats = &info->stats; - - int filter; - - int index2_aux = index + sizeof(netio_pkt_t); - int index2 = - ((index2_aux == - qsp->__packet_receive_queue.__last_packet_plus_one) ? - 0 : index2_aux); - - netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); - - netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt); - netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt); - - /* Extract the packet size. FIXME: Shouldn't the second line */ - /* get subtracted? Mostly moot, since it should be "zero". */ - unsigned long len = - (NETIO_PKT_CUSTOM_LENGTH(pkt) + - NET_IP_ALIGN - NETIO_PACKET_PADDING); - - /* Extract the "linux_buffer_t". */ - unsigned int buffer = pkt->__packet.word; - - /* Extract "small" (vs "large"). */ - bool small = ((buffer & 1) != 0); - - /* Convert "linux_buffer_t" to "va". */ - void *va = __va((phys_addr_t)(buffer >> 1) << 7); - - /* Extract the packet data pointer. */ - /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ - unsigned char *buf = va + NET_IP_ALIGN; - - /* Invalidate the packet buffer. */ - if (!hash_default) - __inv_buffer(buf, len); - -#ifdef TILE_NET_DUMP_PACKETS - dump_packet(buf, len, "rx"); -#endif /* TILE_NET_DUMP_PACKETS */ - -#ifdef TILE_NET_VERIFY_INGRESS - if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) { - dump_packet(buf, len, "rx"); - panic("Unexpected OVERSIZE."); - } -#endif - - filter = 0; - - if (pkt_status == NETIO_PKT_STATUS_BAD) { - /* Handle CRC error and hardware truncation. */ - filter = 2; - } else if (!(dev->flags & IFF_UP)) { - /* Filter packets received before we're up. */ - filter = 1; - } else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) && - pkt_status == NETIO_PKT_STATUS_UNDERSIZE) { - /* Filter "truncated" packets. */ - filter = 2; - } else if (!(dev->flags & IFF_PROMISC)) { - if (!is_multicast_ether_addr(buf)) { - /* Filter packets not for our address. */ - const u8 *mine = dev->dev_addr; - filter = !ether_addr_equal(mine, buf); - } - } - - u64_stats_update_begin(&stats->syncp); - - if (filter != 0) { - - if (filter == 1) - stats->rx_dropped++; - else - stats->rx_errors++; - - tile_net_provide_linux_buffer(info, va, small); - - } else { - - /* Acquire the associated "skb". */ - struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); - struct sk_buff *skb = *skb_ptr; - - /* Paranoia. */ - if (skb->data != buf) - panic("Corrupt linux buffer from LIPP! " - "VA=%p, skb=%p, skb->data=%p\n", - va, skb, skb->data); - - /* Encode the actual packet length. */ - skb_put(skb, len); - - /* NOTE: This call also sets "skb->dev = dev". 
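
The filter cascade above reduces to a small decision table: hardware-flagged frames count as rx_errors, frames that arrive before the interface is up count as rx_dropped, and unicast frames for someone else are dropped unless the interface is promiscuous. The same logic restated compactly (simplified: the real code also ties the undersize test to a recognized ethertype):

    #include <stdbool.h>
    #include <string.h>

    enum verdict { ACCEPT, DROP, ERROR };  /* DROP->rx_dropped, ERROR->rx_errors */

    struct frame {
        bool hw_bad;                /* CRC error or hardware truncation */
        bool undersize;             /* truncated packet */
        unsigned char dst[6];
    };

    static enum verdict classify(const struct frame *f, bool if_up,
                                 bool promisc, const unsigned char *my_mac)
    {
        if (f->hw_bad)
            return ERROR;
        if (!if_up)
            return DROP;
        if (f->undersize)
            return ERROR;
        if (!promisc && !(f->dst[0] & 1) &&     /* unicast, not ours? */
            memcmp(f->dst, my_mac, 6) != 0)
            return DROP;
        return ACCEPT;
    }

    int main(void)
    {
        const unsigned char me[6] = { 2, 0, 0, 0, 0, 1 };
        struct frame f = { .dst = { 2, 0, 0, 0, 0, 2 } };

        return classify(&f, true, false, me) == DROP ? 0 : 1;
    }
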
*/ - skb->protocol = eth_type_trans(skb, dev); - - /* Avoid recomputing "good" TCP/UDP checksums. */ - if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt)) - skb->ip_summed = CHECKSUM_UNNECESSARY; - - netif_receive_skb(skb); - - stats->rx_packets++; - stats->rx_bytes += len; - } - - u64_stats_update_end(&stats->syncp); - - /* ISSUE: It would be nice to defer this until the packet has */ - /* actually been processed. */ - tile_net_return_credit(info); - - /* Consume this packet. */ - qup->__packet_receive_read = index2; - - return !filter; -} - - -/* - * Handle some packets for the given device on the current CPU. - * - * If "tile_net_stop()" is called on some other tile while this - * function is running, we will return, hopefully before that - * other tile asks us to call "napi_disable()". - * - * The "rotting packet" race condition occurs if a packet arrives - * during the extremely narrow window between the queue appearing to - * be empty, and the ingress interrupt being re-enabled. This happens - * a LOT under heavy network load. - */ -static int tile_net_poll(struct napi_struct *napi, int budget) -{ - struct net_device *dev = napi->dev; - struct tile_net_priv *priv = netdev_priv(dev); - int my_cpu = smp_processor_id(); - struct tile_net_cpu *info = priv->cpu[my_cpu]; - struct tile_netio_queue *queue = &info->queue; - netio_queue_impl_t *qsp = queue->__system_part; - netio_queue_user_impl_t *qup = &queue->__user_part; - - unsigned int work = 0; - - if (budget <= 0) - goto done; - - while (priv->active) { - int index = qup->__packet_receive_read; - if (index == qsp->__packet_receive_queue.__packet_write) - break; - - if (tile_net_poll_aux(info, index)) { - if (++work >= budget) - goto done; - } - } - - napi_complete_done(&info->napi, work); - - if (!priv->active) - goto done; - - /* Re-enable the ingress interrupt. */ - enable_percpu_irq(priv->intr_id, 0); - - /* HACK: Avoid the "rotting packet" problem (see above). */ - if (qup->__packet_receive_read != - qsp->__packet_receive_queue.__packet_write) { - /* ISSUE: Sometimes this returns zero, presumably */ - /* because an interrupt was handled for this tile. */ - (void)napi_reschedule(&info->napi); - } - -done: - - if (priv->active) - tile_net_provide_needed_buffers(info); - - return work; -} - - -/* - * Handle an ingress interrupt for the given device on the current cpu. - * - * ISSUE: Sometimes this gets called after "disable_percpu_irq()" has - * been called! This is probably due to "pending hypervisor downcalls". - * - * ISSUE: Is there any race condition between the "napi_schedule()" here - * and the "napi_complete()" call above? - */ -static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr) -{ - struct net_device *dev = (struct net_device *)dev_ptr; - struct tile_net_priv *priv = netdev_priv(dev); - int my_cpu = smp_processor_id(); - struct tile_net_cpu *info = priv->cpu[my_cpu]; - - /* Disable the ingress interrupt. */ - disable_percpu_irq(priv->intr_id); - - /* Ignore unwanted interrupts. */ - if (!priv->active) - return IRQ_HANDLED; - - /* ISSUE: Sometimes "info->napi_enabled" is false here. */ - - napi_schedule(&info->napi); - - return IRQ_HANDLED; -} - - -/* - * One time initialization per interface. - */ -static int tile_net_open_aux(struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - - int ret; - int dummy; - unsigned int epp_lotar; - - /* - * Find out where EPP memory should be homed. 
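
The "rotting packet" defence in tile_net_poll() is a general NAPI idiom: finish, re-enable the interrupt, then look at the queue once more and reschedule if anything slipped in during that window. The shape of it, with toy stand-ins for the real queue/NAPI/irq primitives:

    #include <stdbool.h>
    #include <stdio.h>

    static int backlog = 5;                 /* pretend 5 packets queued */
    static bool queue_empty(void) { return backlog == 0; }
    static void process_one(void) { backlog--; }
    static void napi_done(int work) { printf("complete after %d\n", work); }
    static void irq_enable(void) { }
    static void napi_resched(void) { printf("rescheduled\n"); }

    static int poll(int budget)
    {
        int work = 0;

        while (!queue_empty() && work < budget) {
            process_one();
            work++;
        }
        if (work < budget) {
            napi_done(work);
            irq_enable();
            /* A packet may have landed between the emptiness test
             * and the interrupt being re-enabled; catch it here.
             */
            if (!queue_empty())
                napi_resched();
        }
        return work;
    }

    int main(void)
    {
        while (poll(4) == 4)    /* drain in budget-sized chunks */
            ;
        return 0;
    }
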
- */ - ret = hv_dev_pread(priv->hv_devhdl, 0, - (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar), - NETIO_EPP_SHM_OFF); - if (ret < 0) { - pr_err("could not read epp_shm_queue lotar.\n"); - return -EIO; - } - - /* - * Home the page on the EPP. - */ - { - int epp_home = hv_lotar_to_cpu(epp_lotar); - homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home); - } - - /* - * Register the EPP shared memory queue. - */ - { - netio_ipp_address_t ea = { - .va = 0, - .pa = __pa(priv->eq), - .pte = hv_pte(0), - .size = EQ_SIZE, - }; - ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar); - ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3); - ret = hv_dev_pwrite(priv->hv_devhdl, 0, - (HV_VirtAddr)&ea, - sizeof(ea), - NETIO_EPP_SHM_OFF); - if (ret < 0) - return -EIO; - } - - /* - * Start LIPP/LEPP. - */ - if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, - sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) { - pr_warn("Failed to start LIPP/LEPP\n"); - return -EIO; - } - - return 0; -} - - -/* - * Register with hypervisor on the current CPU. - * - * Strangely, this function does important things even if it "fails", - * which is especially common if the link is not up yet. Hopefully - * these things are all "harmless" if done twice! - */ -static void tile_net_register(void *dev_ptr) -{ - struct net_device *dev = (struct net_device *)dev_ptr; - struct tile_net_priv *priv = netdev_priv(dev); - int my_cpu = smp_processor_id(); - struct tile_net_cpu *info; - - struct tile_netio_queue *queue; - - /* Only network cpus can receive packets. */ - int queue_id = - cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255; - - netio_input_config_t config = { - .flags = 0, - .num_receive_packets = priv->network_cpus_credits, - .queue_id = queue_id - }; - - int ret = 0; - netio_queue_impl_t *queuep; - - PDEBUG("tile_net_register(queue_id %d)\n", queue_id); - - if (!strcmp(dev->name, "xgbe0")) - info = this_cpu_ptr(&hv_xgbe0); - else if (!strcmp(dev->name, "xgbe1")) - info = this_cpu_ptr(&hv_xgbe1); - else if (!strcmp(dev->name, "gbe0")) - info = this_cpu_ptr(&hv_gbe0); - else if (!strcmp(dev->name, "gbe1")) - info = this_cpu_ptr(&hv_gbe1); - else - BUG(); - - /* Initialize the egress timer. */ - timer_setup(&info->egress_timer, tile_net_handle_egress_timer, - TIMER_PINNED); - - u64_stats_init(&info->stats.syncp); - - priv->cpu[my_cpu] = info; - - /* - * Register ourselves with LIPP. This does a lot of stuff, - * including invoking the LIPP registration code. - */ - ret = hv_dev_pwrite(priv->hv_devhdl, 0, - (HV_VirtAddr)&config, - sizeof(netio_input_config_t), - NETIO_IPP_INPUT_REGISTER_OFF); - PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", - ret); - if (ret < 0) { - if (ret != NETIO_LINK_DOWN) { - printk(KERN_DEBUG "hv_dev_pwrite " - "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n", - ret); - } - info->link_down = (ret == NETIO_LINK_DOWN); - return; - } - - /* - * Get the pointer to our queue's system part. - */ - - ret = hv_dev_pread(priv->hv_devhdl, 0, - (HV_VirtAddr)&queuep, - sizeof(netio_queue_impl_t *), - NETIO_IPP_INPUT_REGISTER_OFF); - PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", - ret); - PDEBUG("queuep %p\n", queuep); - if (ret <= 0) { - /* ISSUE: Shouldn't this be a fatal error? */ - pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n"); - return; - } - - queue = &info->queue; - - queue->__system_part = queuep; - - memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t)); - - /* This is traditionally "config.num_receive_packets / 2". 
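
Credit return (see tile_net_return_credit() earlier) is batched around the interval initialized here: the receiver spends one credit per packet but only tells the shim every "interval" packets, returning the whole batch at once. The cadence in isolation, with an interval of 4 as in this driver:

    #include <stdio.h>

    static unsigned int interval = 4, remaining = 4;

    static void return_credit(void)         /* called once per packet */
    {
        if (--remaining == 0) {
            remaining = interval;
            printf("return %u credits to the shim\n", interval);
        }
    }

    int main(void)
    {
        for (int pkt = 1; pkt <= 10; pkt++)
            return_credit();        /* fires after packets 4 and 8 */
        return 0;
    }
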
*/ - queue->__user_part.__receive_credit_interval = 4; - queue->__user_part.__receive_credit_remaining = - queue->__user_part.__receive_credit_interval; - - /* - * Get a fastio index from the hypervisor. - * ISSUE: Shouldn't this check the result? - */ - ret = hv_dev_pread(priv->hv_devhdl, 0, - (HV_VirtAddr)&queue->__user_part.__fastio_index, - sizeof(queue->__user_part.__fastio_index), - NETIO_IPP_GET_FASTIO_OFF); - PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret); - - /* Now we are registered. */ - info->registered = true; -} - - -/* - * Deregister with hypervisor on the current CPU. - * - * This simply discards all our credits, so no more packets will be - * delivered to this tile. There may still be packets in our queue. - * - * Also, disable the ingress interrupt. - */ -static void tile_net_deregister(void *dev_ptr) -{ - struct net_device *dev = (struct net_device *)dev_ptr; - struct tile_net_priv *priv = netdev_priv(dev); - int my_cpu = smp_processor_id(); - struct tile_net_cpu *info = priv->cpu[my_cpu]; - - /* Disable the ingress interrupt. */ - disable_percpu_irq(priv->intr_id); - - /* Do nothing else if not registered. */ - if (info == NULL || !info->registered) - return; - - { - struct tile_netio_queue *queue = &info->queue; - netio_queue_user_impl_t *qup = &queue->__user_part; - - /* Discard all our credits. */ - __netio_fastio_return_credits(qup->__fastio_index, -1); - } -} - - -/* - * Unregister with hypervisor on the current CPU. - * - * Also, disable the ingress interrupt. - */ -static void tile_net_unregister(void *dev_ptr) -{ - struct net_device *dev = (struct net_device *)dev_ptr; - struct tile_net_priv *priv = netdev_priv(dev); - int my_cpu = smp_processor_id(); - struct tile_net_cpu *info = priv->cpu[my_cpu]; - - int ret; - int dummy = 0; - - /* Disable the ingress interrupt. */ - disable_percpu_irq(priv->intr_id); - - /* Do nothing else if not registered. */ - if (info == NULL || !info->registered) - return; - - /* Unregister ourselves with LIPP/LEPP. */ - ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, - sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF); - if (ret < 0) - panic("Failed to unregister with LIPP/LEPP!\n"); - - /* Discard all packets still in our NetIO queue. */ - tile_net_discard_packets(dev); - - /* Reset state. */ - info->num_needed_small_buffers = 0; - info->num_needed_large_buffers = 0; - - /* Cancel egress timer. */ - del_timer(&info->egress_timer); - info->egress_timer_scheduled = false; -} - - -/* - * Helper function for "tile_net_stop()". - * - * Also used to handle registration failure in "tile_net_open_inner()", - * when the various extra steps in "tile_net_stop()" are not necessary. - */ -static void tile_net_stop_aux(struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - int i; - - int dummy = 0; - - /* - * Unregister all tiles, so LIPP will stop delivering packets. - * Also, delete all the "napi" objects (sequentially, to protect - * "dev->napi_list"). - */ - on_each_cpu(tile_net_unregister, (void *)dev, 1); - for_each_online_cpu(i) { - struct tile_net_cpu *info = priv->cpu[i]; - if (info != NULL && info->registered) { - netif_napi_del(&info->napi); - info->registered = false; - } - } - - /* Stop LIPP/LEPP. */ - if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, - sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0) - panic("Failed to stop LIPP/LEPP!\n"); - - priv->partly_opened = false; -} - - -/* - * Disable NAPI for the given device on the current cpu. 
- */ -static void tile_net_stop_disable(void *dev_ptr) -{ - struct net_device *dev = (struct net_device *)dev_ptr; - struct tile_net_priv *priv = netdev_priv(dev); - int my_cpu = smp_processor_id(); - struct tile_net_cpu *info = priv->cpu[my_cpu]; - - /* Disable NAPI if needed. */ - if (info != NULL && info->napi_enabled) { - napi_disable(&info->napi); - info->napi_enabled = false; - } -} - - -/* - * Enable NAPI and the ingress interrupt for the given device - * on the current cpu. - * - * ISSUE: Only do this for "network cpus"? - */ -static void tile_net_open_enable(void *dev_ptr) -{ - struct net_device *dev = (struct net_device *)dev_ptr; - struct tile_net_priv *priv = netdev_priv(dev); - int my_cpu = smp_processor_id(); - struct tile_net_cpu *info = priv->cpu[my_cpu]; - - /* Enable NAPI. */ - napi_enable(&info->napi); - info->napi_enabled = true; - - /* Enable the ingress interrupt. */ - enable_percpu_irq(priv->intr_id, 0); -} - - -/* - * tile_net_open_inner does most of the work of bringing up the interface. - * It's called from tile_net_open(), and also from tile_net_retry_open(). - * The return value is 0 if the interface was brought up, < 0 if - * tile_net_open() should return the return value as an error, and > 0 if - * tile_net_open() should return success and schedule a work item to - * periodically retry the bringup. - */ -static int tile_net_open_inner(struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - int my_cpu = smp_processor_id(); - struct tile_net_cpu *info; - struct tile_netio_queue *queue; - int result = 0; - int i; - int dummy = 0; - - /* - * First try to register just on the local CPU, and handle any - * semi-expected "link down" failure specially. Note that we - * do NOT call "tile_net_stop_aux()", unlike below. - */ - tile_net_register(dev); - info = priv->cpu[my_cpu]; - if (!info->registered) { - if (info->link_down) - return 1; - return -EAGAIN; - } - - /* - * Now register everywhere else. If any registration fails, - * even for "link down" (which might not be possible), we - * clean up using "tile_net_stop_aux()". Also, add all the - * "napi" objects (sequentially, to protect "dev->napi_list"). - * ISSUE: Only use "netif_napi_add()" for "network cpus"? - */ - smp_call_function(tile_net_register, (void *)dev, 1); - for_each_online_cpu(i) { - struct tile_net_cpu *info = priv->cpu[i]; - if (info->registered) - netif_napi_add(dev, &info->napi, tile_net_poll, 64); - else - result = -EAGAIN; - } - if (result != 0) { - tile_net_stop_aux(dev); - return result; - } - - queue = &info->queue; - - if (priv->intr_id == 0) { - unsigned int irq; - - /* - * Acquire the irq allocated by the hypervisor. Every - * queue gets the same irq. The "__intr_id" field is - * "1 << irq", so we use "__ffs()" to extract "irq". - */ - priv->intr_id = queue->__system_part->__intr_id; - BUG_ON(priv->intr_id == 0); - irq = __ffs(priv->intr_id); - - /* - * Register the ingress interrupt handler for this - * device, permanently. - * - * We used to call "free_irq()" in "tile_net_stop()", - * and then re-register the handler here every time, - * but that caused DNP errors in "handle_IRQ_event()" - * because "desc->action" was NULL. See bug 9143. - */ - tile_irq_activate(irq, TILE_IRQ_PERCPU); - BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt, - 0, dev->name, (void *)dev) != 0); - } - - { - /* Allocate initial buffers. 
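
Since the hypervisor reports the queue interrupt as a one-hot mask ("__intr_id" is "1 << irq"), recovering the irq number is a find-first-set: __ffs() in the kernel, or a compiler builtin in the model below:

    #include <assert.h>

    /* Model of: irq = __ffs(intr_id), where intr_id == 1u << irq. */
    static unsigned int irq_from_mask(unsigned int intr_id)
    {
        assert(intr_id != 0 && (intr_id & (intr_id - 1)) == 0);
        return (unsigned int)__builtin_ctz(intr_id);
    }

    int main(void)
    {
        assert(irq_from_mask(1u << 13) == 13);
        return 0;
    }
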
*/ - - int max_buffers = - priv->network_cpus_count * priv->network_cpus_credits; - - info->num_needed_small_buffers = - min(LIPP_SMALL_BUFFERS, max_buffers); - - info->num_needed_large_buffers = - min(LIPP_LARGE_BUFFERS, max_buffers); - - tile_net_provide_needed_buffers(info); - - if (info->num_needed_small_buffers != 0 || - info->num_needed_large_buffers != 0) - panic("Insufficient memory for buffer stack!"); - } - - /* We are about to be active. */ - priv->active = true; - - /* Make sure "active" is visible to all tiles. */ - mb(); - - /* On each tile, enable NAPI and the ingress interrupt. */ - on_each_cpu(tile_net_open_enable, (void *)dev, 1); - - /* Start LIPP/LEPP and activate "ingress" at the shim. */ - if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, - sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0) - panic("Failed to activate the LIPP Shim!\n"); - - /* Start our transmit queue. */ - netif_start_queue(dev); - - return 0; -} - - -/* - * Called periodically to retry bringing up the NetIO interface, - * if it doesn't come up cleanly during tile_net_open(). - */ -static void tile_net_open_retry(struct work_struct *w) -{ - struct delayed_work *dw = to_delayed_work(w); - - struct tile_net_priv *priv = - container_of(dw, struct tile_net_priv, retry_work); - - /* - * Try to bring the NetIO interface up. If it fails, reschedule - * ourselves to try again later; otherwise, tell Linux we now have - * a working link. ISSUE: What if the return value is negative? - */ - if (tile_net_open_inner(priv->dev) != 0) - schedule_delayed_work(&priv->retry_work, - TILE_NET_RETRY_INTERVAL); - else - netif_carrier_on(priv->dev); -} - - -/* - * Called when a network interface is made active. - * - * Returns 0 on success, negative value on failure. - * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP). At this point all resources needed - * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS (if needed), the watchdog timer - * is started, and the stack is notified that the interface is ready. - * - * If the actual link is not available yet, then we tell Linux that - * we have no carrier, and we keep checking until the link comes up. - */ -static int tile_net_open(struct net_device *dev) -{ - int ret = 0; - struct tile_net_priv *priv = netdev_priv(dev); - - /* - * We rely on priv->partly_opened to tell us if this is the - * first time this interface is being brought up. If it is - * set, the IPP was already initialized and should not be - * initialized again. - */ - if (!priv->partly_opened) { - - int count; - int credits; - - /* Initialize LIPP/LEPP, and start the Shim. */ - ret = tile_net_open_aux(dev); - if (ret < 0) { - pr_err("tile_net_open_aux failed: %d\n", ret); - return ret; - } - - /* Analyze the network cpus. */ - - if (network_cpus_used) - cpumask_copy(&priv->network_cpus_map, - &network_cpus_map); - else - cpumask_copy(&priv->network_cpus_map, cpu_online_mask); - - - count = cpumask_weight(&priv->network_cpus_map); - - /* Limit credits to available buffers, and apply min. */ - credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1); - - /* Apply "GBE" max limit. */ - /* ISSUE: Use higher limit for XGBE? 
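
The credit sizing above divides the large-buffer pool evenly across the network cpus, rounds down to an even count, applies a floor of 16, and (just below) clamps to the NETIO maximum. As arithmetic, with invented pool and ceiling values in place of LIPP_LARGE_BUFFERS and NETIO_MAX_RECEIVE_PKTS:

    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        int pool = 4096, ceiling = 128;     /* illustrative stand-ins */

        for (int cpus = 1; cpus <= 64; cpus *= 4) {
            int credits = MAX(16, (pool / cpus) & ~1);

            credits = MIN(ceiling, credits);
            printf("%2d cpus -> %d credits each\n", cpus, credits);
        }
        return 0;
    }
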
*/ - credits = min(NETIO_MAX_RECEIVE_PKTS, credits); - - priv->network_cpus_count = count; - priv->network_cpus_credits = credits; - -#ifdef TILE_NET_DEBUG - pr_info("Using %d network cpus, with %d credits each\n", - priv->network_cpus_count, priv->network_cpus_credits); -#endif - - priv->partly_opened = true; - - } else { - /* FIXME: Is this possible? */ - /* printk("Already partly opened.\n"); */ - } - - /* - * Attempt to bring up the link. - */ - ret = tile_net_open_inner(dev); - if (ret <= 0) { - if (ret == 0) - netif_carrier_on(dev); - return ret; - } - - /* - * We were unable to bring up the NetIO interface, but we want to - * try again in a little bit. Tell Linux that we have no carrier - * so it doesn't try to use the interface before the link comes up - * and then remember to try again later. - */ - netif_carrier_off(dev); - schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL); - - return 0; -} - - -static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv) -{ - int n = 0; - - /* Drain all the LIPP buffers. */ - while (true) { - unsigned int buffer; - - /* NOTE: This should never fail. */ - if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer, - sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0) - break; - - /* Stop when done. */ - if (buffer == 0) - break; - - { - /* Convert "linux_buffer_t" to "va". */ - void *va = __va((phys_addr_t)(buffer >> 1) << 7); - - /* Acquire the associated "skb". */ - struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); - struct sk_buff *skb = *skb_ptr; - - kfree_skb(skb); - } - - n++; - } - - return n; -} - - -/* - * Disables a network interface. - * - * Returns 0, this is not allowed to fail. - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. - * - * ISSUE: How closely does "netif_running(dev)" mirror "priv->active"? - * - * Before we are called by "__dev_close()", "netif_running()" will - * have been cleared, so no NEW calls to "tile_net_poll()" will be - * made by "netpoll_poll_dev()". - * - * Often, this can cause some tiles to still have packets in their - * queues, so we must call "tile_net_discard_packets()" later. - * - * Note that some other tile may still be INSIDE "tile_net_poll()", - * and in fact, many will be, if there is heavy network load. - * - * Calling "on_each_cpu(tile_net_stop_disable, (void *)dev, 1)" when - * any tile is still "napi_schedule()"'d will induce a horrible crash - * when "msleep()" is called. This includes tiles which are inside - * "tile_net_poll()" which have not yet called "napi_complete()". - * - * So, we must first try to wait long enough for other tiles to finish - * with any current "tile_net_poll()" call, and, hopefully, to clear - * the "scheduled" flag. ISSUE: It is unclear what happens to tiles - * which have called "napi_schedule()" but which had not yet tried to - * call "tile_net_poll()", or which exhausted their budget inside - * "tile_net_poll()" just before this function was called. - */ -static int tile_net_stop(struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - - PDEBUG("tile_net_stop()\n"); - - /* Start discarding packets. */ - priv->active = false; - - /* Make sure "active" is visible to all tiles. */ - mb(); - - /* - * On each tile, make sure no NEW packets get delivered, and - * disable the ingress interrupt. 
- * - * Note that the ingress interrupt can fire AFTER this, - * presumably due to packets which were recently delivered, - * but it will have no effect. - */ - on_each_cpu(tile_net_deregister, (void *)dev, 1); - - /* Optimistically drain LIPP buffers. */ - (void)tile_net_drain_lipp_buffers(priv); - - /* ISSUE: Only needed if not yet fully open. */ - cancel_delayed_work_sync(&priv->retry_work); - - /* Can't transmit any more. */ - netif_stop_queue(dev); - - /* Disable NAPI on each tile. */ - on_each_cpu(tile_net_stop_disable, (void *)dev, 1); - - /* - * Drain any remaining LIPP buffers. NOTE: This "printk()" - * has never been observed, but in theory it could happen. - */ - if (tile_net_drain_lipp_buffers(priv) != 0) - printk("Had to drain some extra LIPP buffers!\n"); - - /* Stop LIPP/LEPP. */ - tile_net_stop_aux(dev); - - /* - * ISSUE: It appears that, in practice anyway, by the time we - * get here, there are no pending completions, but just in case, - * we free (all of) them anyway. - */ - while (tile_net_lepp_free_comps(dev, true)) - /* loop */; - - /* Wipe the EPP queue, and wait till the stores hit the EPP. */ - memset(priv->eq, 0, sizeof(lepp_queue_t)); - mb(); - - return 0; -} - - -/* - * Prepare the "frags" info for the resulting LEPP command. - * - * If needed, flush the memory used by the frags. - */ -static unsigned int tile_net_tx_frags(lepp_frag_t *frags, - struct sk_buff *skb, - void *b_data, unsigned int b_len) -{ - unsigned int i, n = 0; - - struct skb_shared_info *sh = skb_shinfo(skb); - - phys_addr_t cpa; - - if (b_len != 0) { - - if (!hash_default) - finv_buffer_remote(b_data, b_len, 0); - - cpa = __pa(b_data); - frags[n].cpa_lo = cpa; - frags[n].cpa_hi = cpa >> 32; - frags[n].length = b_len; - frags[n].hash_for_home = hash_default; - n++; - } - - for (i = 0; i < sh->nr_frags; i++) { - - skb_frag_t *f = &sh->frags[i]; - unsigned long pfn = page_to_pfn(skb_frag_page(f)); - - /* FIXME: Compute "hash_for_home" properly. */ - /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ - int hash_for_home = hash_default; - - /* FIXME: Hmmm. */ - if (!hash_default) { - void *va = pfn_to_kaddr(pfn) + f->page_offset; - BUG_ON(PageHighMem(skb_frag_page(f))); - finv_buffer_remote(va, skb_frag_size(f), 0); - } - - cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; - frags[n].cpa_lo = cpa; - frags[n].cpa_hi = cpa >> 32; - frags[n].length = skb_frag_size(f); - frags[n].hash_for_home = hash_for_home; - n++; - } - - return n; -} - - -/* - * This function takes "skb", consisting of a header template and a - * payload, and hands it to LEPP, to emit as one or more segments, - * each consisting of a possibly modified header, plus a piece of the - * payload, via a process known as "tcp segmentation offload". - * - * Usually, "data" will contain the header template, of size "sh_len", - * and "sh->frags" will contain "skb->data_len" bytes of payload, and - * there will be "sh->gso_segs" segments. - * - * Sometimes, if "sendfile()" requires copying, we will be called with - * "data" containing the header and payload, with "frags" being empty. - * - * Sometimes, for example when using NFS over TCP, a single segment can - * span 3 fragments, which must be handled carefully in LEPP. - * - * See "emulate_large_send_offload()" for some reference code, which - * does not handle checksumming. - * - * ISSUE: How do we make sure that high memory DMA does not migrate? 
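
LEPP fragment descriptors, as filled in by tile_net_tx_frags() above, carry the client physical address as two 32-bit halves. The pack/unpack is a plain shift and mask; a round-trip check with the field layout modelled from the assignments above:

    #include <assert.h>
    #include <stdint.h>

    struct frag_addr {              /* modelled cpa fields of lepp_frag_t */
        uint32_t cpa_lo;
        uint32_t cpa_hi;
    };

    static void set_cpa(struct frag_addr *f, uint64_t cpa)
    {
        f->cpa_lo = (uint32_t)cpa;
        f->cpa_hi = (uint32_t)(cpa >> 32);
    }

    static uint64_t get_cpa(const struct frag_addr *f)
    {
        return ((uint64_t)f->cpa_hi << 32) | f->cpa_lo;
    }

    int main(void)
    {
        struct frag_addr f;

        set_cpa(&f, 0x1234567890ULL);       /* 38-bit example CPA */
        assert(get_cpa(&f) == 0x1234567890ULL);
        return 0;
    }
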
- */ -static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - int my_cpu = smp_processor_id(); - struct tile_net_cpu *info = priv->cpu[my_cpu]; - struct tile_net_stats_t *stats = &info->stats; - - struct skb_shared_info *sh = skb_shinfo(skb); - - unsigned char *data = skb->data; - - /* The ip header follows the ethernet header. */ - struct iphdr *ih = ip_hdr(skb); - unsigned int ih_len = ih->ihl * 4; - - /* Note that "nh == ih", by definition. */ - unsigned char *nh = skb_network_header(skb); - unsigned int eh_len = nh - data; - - /* The tcp header follows the ip header. */ - struct tcphdr *th = (struct tcphdr *)(nh + ih_len); - unsigned int th_len = th->doff * 4; - - /* The total number of header bytes. */ - /* NOTE: This may be less than skb_headlen(skb). */ - unsigned int sh_len = eh_len + ih_len + th_len; - - /* The number of payload bytes at "skb->data + sh_len". */ - /* This is non-zero for sendfile() without HIGHDMA. */ - unsigned int b_len = skb_headlen(skb) - sh_len; - - /* The total number of payload bytes. */ - unsigned int d_len = b_len + skb->data_len; - - /* The maximum payload size. */ - unsigned int p_len = sh->gso_size; - - /* The total number of segments. */ - unsigned int num_segs = sh->gso_segs; - - /* The temporary copy of the command. */ - u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4]; - lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body; - - /* Analyze the "frags". */ - unsigned int num_frags = - tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len); - - /* The size of the command, including frags and header. */ - size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len); - - /* The command header. */ - lepp_tso_cmd_t cmd_init = { - .tso = true, - .header_size = sh_len, - .ip_offset = eh_len, - .tcp_offset = eh_len + ih_len, - .payload_size = p_len, - .num_frags = num_frags, - }; - - unsigned long irqflags; - - lepp_queue_t *eq = priv->eq; - - struct sk_buff *olds[8]; - unsigned int wanted = 8; - unsigned int i, nolds = 0; - - unsigned int cmd_head, cmd_tail, cmd_next; - unsigned int comp_tail; - - - /* Paranoia. */ - BUG_ON(skb->protocol != htons(ETH_P_IP)); - BUG_ON(ih->protocol != IPPROTO_TCP); - BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL); - BUG_ON(num_frags > LEPP_MAX_FRAGS); - /*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */ - BUG_ON(num_segs <= 1); - - - /* Finish preparing the command. */ - - /* Copy the command header. */ - *cmd = cmd_init; - - /* Copy the "header". */ - memcpy(&cmd->frags[num_frags], data, sh_len); - - - /* Prefetch and wait, to minimize time spent holding the spinlock. */ - prefetch_L1(&eq->comp_tail); - prefetch_L1(&eq->cmd_tail); - mb(); - - - /* Enqueue the command. */ - - spin_lock_irqsave(&priv->eq_lock, irqflags); - - /* Handle completions if needed to make room. */ - /* NOTE: Return NETDEV_TX_BUSY if there is still no room. */ - if (lepp_num_free_comp_slots(eq) == 0) { - nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); - if (nolds == 0) { -busy: - spin_unlock_irqrestore(&priv->eq_lock, irqflags); - return NETDEV_TX_BUSY; - } - } - - cmd_head = eq->cmd_head; - cmd_tail = eq->cmd_tail; - - /* Prepare to advance, detecting full queue. */ - /* NOTE: Return NETDEV_TX_BUSY if the queue is full. */ - cmd_next = cmd_tail + cmd_size; - if (cmd_tail < cmd_head && cmd_next >= cmd_head) - goto busy; - if (cmd_next > LEPP_CMD_LIMIT) { - cmd_next = 0; - if (cmd_next == cmd_head) - goto busy; - } - - /* Copy the command. 
*/ - memcpy(&eq->cmds[cmd_tail], cmd, cmd_size); - - /* Advance. */ - cmd_tail = cmd_next; - - /* Record "skb" for eventual freeing. */ - comp_tail = eq->comp_tail; - eq->comps[comp_tail] = skb; - LEPP_QINC(comp_tail); - eq->comp_tail = comp_tail; - - /* Flush before allowing LEPP to handle the command. */ - /* ISSUE: Is this the optimal location for the flush? */ - __insn_mf(); - - eq->cmd_tail = cmd_tail; - - /* NOTE: Using "4" here is more efficient than "0" or "2", */ - /* and, strangely, more efficient than pre-checking the number */ - /* of available completions, and comparing it to 4. */ - if (nolds == 0) - nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4); - - spin_unlock_irqrestore(&priv->eq_lock, irqflags); - - /* Handle completions. */ - for (i = 0; i < nolds; i++) - dev_consume_skb_any(olds[i]); - - /* Update stats. */ - u64_stats_update_begin(&stats->syncp); - stats->tx_packets += num_segs; - stats->tx_bytes += (num_segs * sh_len) + d_len; - u64_stats_update_end(&stats->syncp); - - /* Make sure the egress timer is scheduled. */ - tile_net_schedule_egress_timer(info); - - return NETDEV_TX_OK; -} - - -/* - * Transmit a packet (called by the kernel via "hard_start_xmit" hook). - */ -static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - int my_cpu = smp_processor_id(); - struct tile_net_cpu *info = priv->cpu[my_cpu]; - struct tile_net_stats_t *stats = &info->stats; - - unsigned long irqflags; - - struct skb_shared_info *sh = skb_shinfo(skb); - - unsigned int len = skb->len; - unsigned char *data = skb->data; - - unsigned int csum_start = skb_checksum_start_offset(skb); - - lepp_frag_t frags[1 + MAX_SKB_FRAGS]; - - unsigned int num_frags; - - lepp_queue_t *eq = priv->eq; - - struct sk_buff *olds[8]; - unsigned int wanted = 8; - unsigned int i, nolds = 0; - - unsigned int cmd_size = sizeof(lepp_cmd_t); - - unsigned int cmd_head, cmd_tail, cmd_next; - unsigned int comp_tail; - - lepp_cmd_t cmds[1 + MAX_SKB_FRAGS]; - - - /* - * This is paranoia, since we think that if the link doesn't come - * up, telling Linux we have no carrier will keep it from trying - * to transmit. If it does, though, we can't execute this routine, - * since data structures we depend on aren't set up yet. - */ - if (!info->registered) - return NETDEV_TX_BUSY; - - - /* Save the timestamp. */ - netif_trans_update(dev); - - -#ifdef TILE_NET_PARANOIA -#if CHIP_HAS_CBOX_HOME_MAP() - if (hash_default) { - HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data); - if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) - panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx", - data, hv_pte_get_mode(pte), hv_pte_val(pte)); - } -#endif -#endif - - -#ifdef TILE_NET_DUMP_PACKETS - /* ISSUE: Does not dump the "frags". */ - dump_packet(data, skb_headlen(skb), "tx"); -#endif /* TILE_NET_DUMP_PACKETS */ - - - if (sh->gso_size != 0) - return tile_net_tx_tso(skb, dev); - - - /* Prepare the commands. 
*/ - - num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); - - for (i = 0; i < num_frags; i++) { - - bool final = (i == num_frags - 1); - - lepp_cmd_t cmd = { - .cpa_lo = frags[i].cpa_lo, - .cpa_hi = frags[i].cpa_hi, - .length = frags[i].length, - .hash_for_home = frags[i].hash_for_home, - .send_completion = final, - .end_of_packet = final - }; - - if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) { - cmd.compute_checksum = 1; - cmd.checksum_data.bits.start_byte = csum_start; - cmd.checksum_data.bits.count = len - csum_start; - cmd.checksum_data.bits.destination_byte = - csum_start + skb->csum_offset; - } - - cmds[i] = cmd; - } - - - /* Prefetch and wait, to minimize time spent holding the spinlock. */ - prefetch_L1(&eq->comp_tail); - prefetch_L1(&eq->cmd_tail); - mb(); - - - /* Enqueue the commands. */ - - spin_lock_irqsave(&priv->eq_lock, irqflags); - - /* Handle completions if needed to make room. */ - /* NOTE: Return NETDEV_TX_BUSY if there is still no room. */ - if (lepp_num_free_comp_slots(eq) == 0) { - nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); - if (nolds == 0) { -busy: - spin_unlock_irqrestore(&priv->eq_lock, irqflags); - return NETDEV_TX_BUSY; - } - } - - cmd_head = eq->cmd_head; - cmd_tail = eq->cmd_tail; - - /* Copy the commands, or fail. */ - /* NOTE: Return NETDEV_TX_BUSY if the queue is full. */ - for (i = 0; i < num_frags; i++) { - - /* Prepare to advance, detecting full queue. */ - cmd_next = cmd_tail + cmd_size; - if (cmd_tail < cmd_head && cmd_next >= cmd_head) - goto busy; - if (cmd_next > LEPP_CMD_LIMIT) { - cmd_next = 0; - if (cmd_next == cmd_head) - goto busy; - } - - /* Copy the command. */ - *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i]; - - /* Advance. */ - cmd_tail = cmd_next; - } - - /* Record "skb" for eventual freeing. */ - comp_tail = eq->comp_tail; - eq->comps[comp_tail] = skb; - LEPP_QINC(comp_tail); - eq->comp_tail = comp_tail; - - /* Flush before allowing LEPP to handle the command. */ - /* ISSUE: Is this the optimal location for the flush? */ - __insn_mf(); - - eq->cmd_tail = cmd_tail; - - /* NOTE: Using "4" here is more efficient than "0" or "2", */ - /* and, strangely, more efficient than pre-checking the number */ - /* of available completions, and comparing it to 4. */ - if (nolds == 0) - nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4); - - spin_unlock_irqrestore(&priv->eq_lock, irqflags); - - /* Handle completions. */ - for (i = 0; i < nolds; i++) - dev_consume_skb_any(olds[i]); - - /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ - u64_stats_update_begin(&stats->syncp); - stats->tx_packets++; - stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN); - u64_stats_update_end(&stats->syncp); - - /* Make sure the egress timer is scheduled. */ - tile_net_schedule_egress_timer(info); - - return NETDEV_TX_OK; -} - - -/* - * Deal with a transmit timeout. - */ -static void tile_net_tx_timeout(struct net_device *dev) -{ - PDEBUG("tile_net_tx_timeout()\n"); - PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies, - jiffies - dev_trans_start(dev)); - - /* XXX: ISSUE: This doesn't seem useful for us. */ - netif_wake_queue(dev); -} - - -/* - * Ioctl commands. - */ -static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - return -EOPNOTSUPP; -} - - -/* - * Get System Network Statistics. - * - * Returns the address of the device statistics structure. 
- */ -static void tile_net_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) -{ - struct tile_net_priv *priv = netdev_priv(dev); - u64 rx_packets = 0, tx_packets = 0; - u64 rx_bytes = 0, tx_bytes = 0; - u64 rx_errors = 0, rx_dropped = 0; - int i; - - for_each_online_cpu(i) { - struct tile_net_stats_t *cpu_stats; - u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes; - u64 trx_errors, trx_dropped; - unsigned int start; - - if (priv->cpu[i] == NULL) - continue; - cpu_stats = &priv->cpu[i]->stats; - - do { - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); - trx_packets = cpu_stats->rx_packets; - ttx_packets = cpu_stats->tx_packets; - trx_bytes = cpu_stats->rx_bytes; - ttx_bytes = cpu_stats->tx_bytes; - trx_errors = cpu_stats->rx_errors; - trx_dropped = cpu_stats->rx_dropped; - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); - - rx_packets += trx_packets; - tx_packets += ttx_packets; - rx_bytes += trx_bytes; - tx_bytes += ttx_bytes; - rx_errors += trx_errors; - rx_dropped += trx_dropped; - } - - stats->rx_packets = rx_packets; - stats->tx_packets = tx_packets; - stats->rx_bytes = rx_bytes; - stats->tx_bytes = tx_bytes; - stats->rx_errors = rx_errors; - stats->rx_dropped = rx_dropped; -} - -/* - * Change the Ethernet Address of the NIC. - * - * The hypervisor driver does not support changing MAC address. However, - * the IPP does not do anything with the MAC address, so the address which - * gets used on outgoing packets, and which is accepted on incoming packets, - * is completely up to the NetIO program or kernel driver which is actually - * handling them. - * - * Returns 0 on success, negative on failure. - */ -static int tile_net_set_mac_address(struct net_device *dev, void *p) -{ - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - /* ISSUE: Note that "dev_addr" is now a pointer. */ - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - - return 0; -} - - -/* - * Obtain the MAC address from the hypervisor. - * This must be done before opening the device. - */ -static int tile_net_get_mac(struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - - char hv_dev_name[32]; - int len; - - __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF }; - - int ret; - - /* For example, "xgbe0". */ - strcpy(hv_dev_name, dev->name); - len = strlen(hv_dev_name); - - /* For example, "xgbe/0". */ - hv_dev_name[len] = hv_dev_name[len - 1]; - hv_dev_name[len - 1] = '/'; - len++; - - /* For example, "xgbe/0/native_hash". */ - strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native"); - - /* Get the hypervisor handle for this device. */ - priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0); - PDEBUG("hv_dev_open(%s) returned %d %p\n", - hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl); - if (priv->hv_devhdl < 0) { - if (priv->hv_devhdl == HV_ENODEV) - printk(KERN_DEBUG "Ignoring unconfigured device %s\n", - hv_dev_name); - else - printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n", - hv_dev_name, priv->hv_devhdl); - return -1; - } - - /* - * Read the hardware address from the hypervisor. - * ISSUE: Note that "dev_addr" is now a pointer. 
- */ - offset.bits.class = NETIO_PARAM; - offset.bits.addr = NETIO_PARAM_MAC; - ret = hv_dev_pread(priv->hv_devhdl, 0, - (HV_VirtAddr)dev->dev_addr, dev->addr_len, - offset.word); - PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret); - if (ret <= 0) { - printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n", - dev->name); - /* - * Since the device is configured by the hypervisor but we - * can't get its MAC address, we are most likely running - * the simulator, so let's generate a random MAC address. - */ - eth_hw_addr_random(dev); - } - - return 0; -} - - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ -static void tile_net_netpoll(struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - disable_percpu_irq(priv->intr_id); - tile_net_handle_ingress_interrupt(priv->intr_id, dev); - enable_percpu_irq(priv->intr_id, 0); -} -#endif - - -static const struct net_device_ops tile_net_ops = { - .ndo_open = tile_net_open, - .ndo_stop = tile_net_stop, - .ndo_start_xmit = tile_net_tx, - .ndo_do_ioctl = tile_net_ioctl, - .ndo_get_stats64 = tile_net_get_stats64, - .ndo_tx_timeout = tile_net_tx_timeout, - .ndo_set_mac_address = tile_net_set_mac_address, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = tile_net_netpoll, -#endif -}; - - -/* - * The setup function. - * - * This uses ether_setup() to assign various fields in dev, including - * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. - */ -static void tile_net_setup(struct net_device *dev) -{ - netdev_features_t features = 0; - - ether_setup(dev); - dev->netdev_ops = &tile_net_ops; - dev->watchdog_timeo = TILE_NET_TIMEOUT; - dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; - - /* MTU range: 68 - 1500 */ - dev->mtu = TILE_NET_MTU; - dev->min_mtu = ETH_MIN_MTU; - dev->max_mtu = TILE_NET_MTU; - - features |= NETIF_F_HW_CSUM; - features |= NETIF_F_SG; - - /* We support TSO iff the HV supports sufficient frags. */ - if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS) - features |= NETIF_F_TSO; - - /* We can't support HIGHDMA without hash_default, since we need - * to be able to finv() with a VA if we don't have hash_default. - */ - if (hash_default) - features |= NETIF_F_HIGHDMA; - - dev->hw_features |= features; - dev->vlan_features |= features; - dev->features |= features; -} - - -/* - * Allocate the device structure, register the device, and obtain the - * MAC address from the hypervisor. - */ -static struct net_device *tile_net_dev_init(const char *name) -{ - int ret; - struct net_device *dev; - struct tile_net_priv *priv; - - /* - * Allocate the device structure. This allocates "priv", calls - * tile_net_setup(), and saves "name". Normally, "name" is a - * template, instantiated by register_netdev(), but not for us. - */ - dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN, - tile_net_setup); - if (!dev) { - pr_err("alloc_netdev(%s) failed\n", name); - return NULL; - } - - priv = netdev_priv(dev); - - /* Initialize "priv". */ - - memset(priv, 0, sizeof(*priv)); - - /* Save "dev" for "tile_net_open_retry()". */ - priv->dev = dev; - - INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry); - - spin_lock_init(&priv->eq_lock); - - /* Allocate "eq". 
*/ - priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER); - if (!priv->eq_pages) { - free_netdev(dev); - return NULL; - } - priv->eq = page_address(priv->eq_pages); - - /* Register the network device. */ - ret = register_netdev(dev); - if (ret) { - pr_err("register_netdev %s failed %d\n", dev->name, ret); - __free_pages(priv->eq_pages, EQ_ORDER); - free_netdev(dev); - return NULL; - } - - /* Get the MAC address. */ - ret = tile_net_get_mac(dev); - if (ret < 0) { - unregister_netdev(dev); - __free_pages(priv->eq_pages, EQ_ORDER); - free_netdev(dev); - return NULL; - } - - return dev; -} - - -/* - * Module cleanup. - * - * FIXME: If compiled as a module, this module cannot be "unloaded", - * because the "ingress interrupt handler" is registered permanently. - */ -static void tile_net_cleanup(void) -{ - int i; - - for (i = 0; i < TILE_NET_DEVS; i++) { - if (tile_net_devs[i]) { - struct net_device *dev = tile_net_devs[i]; - struct tile_net_priv *priv = netdev_priv(dev); - unregister_netdev(dev); - finv_buffer_remote(priv->eq, EQ_SIZE, 0); - __free_pages(priv->eq_pages, EQ_ORDER); - free_netdev(dev); - } - } -} - - -/* - * Module initialization. - */ -static int tile_net_init_module(void) -{ - pr_info("Tilera Network Driver\n"); - - tile_net_devs[0] = tile_net_dev_init("xgbe0"); - tile_net_devs[1] = tile_net_dev_init("xgbe1"); - tile_net_devs[2] = tile_net_dev_init("gbe0"); - tile_net_devs[3] = tile_net_dev_init("gbe1"); - - return 0; -} - - -module_init(tile_net_init_module); -module_exit(tile_net_cleanup); - - -#ifndef MODULE - -/* - * The "network_cpus" boot argument specifies the cpus that are dedicated - * to handle ingress packets. - * - * The parameter should be in the form "network_cpus=m-n[,x-y]", where - * m, n, x, y are integer numbers that represent the cpus that can be - * neither a dedicated cpu nor a dataplane cpu. - */ -static int __init network_cpus_setup(char *str) -{ - int rc = cpulist_parse_crop(str, &network_cpus_map); - if (rc != 0) { - pr_warn("network_cpus=%s: malformed cpu list\n", str); - } else { - - /* Remove dedicated cpus. 
*/ - cpumask_and(&network_cpus_map, &network_cpus_map, - cpu_possible_mask); - - - if (cpumask_empty(&network_cpus_map)) { - pr_warn("Ignoring network_cpus='%s'\n", str); - } else { - pr_info("Linux network CPUs: %*pbl\n", - cpumask_pr_args(&network_cpus_map)); - network_cpus_used = true; - } - } - - return 0; -} -__setup("network_cpus=", network_cpus_setup); - -#endif diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 78a6414c5fd9..dfabbae72efd 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -590,8 +590,7 @@ static int bpq_device_event(struct notifier_block *this, static int __init bpq_init_driver(void) { #ifdef CONFIG_PROC_FS - if (!proc_create("bpqether", S_IRUGO, init_net.proc_net, - &bpq_info_fops)) { + if (!proc_create("bpqether", 0444, init_net.proc_net, &bpq_info_fops)) { printk(KERN_ERR "bpq: cannot create /proc/net/bpqether entry.\n"); return -ENOENT; diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 14c3632b8cde..83034eb7ed4f 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -1168,7 +1168,7 @@ static int __init yam_init_driver(void) yam_timer.expires = jiffies + HZ / 100; add_timer(&yam_timer); - proc_create("yam", S_IRUGO, init_net.proc_net, &yam_info_fops); + proc_create("yam", 0444, init_net.proc_net, &yam_info_fops); return 0; error: while (--i >= 0) { diff --git a/drivers/net/hyperv/Makefile b/drivers/net/hyperv/Makefile index c8a66827100c..3f25b9c8ea59 100644 --- a/drivers/net/hyperv/Makefile +++ b/drivers/net/hyperv/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o -hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o +hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o netvsc_trace.o diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 0db3bd1ea06f..960f06141472 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -173,6 +173,7 @@ struct rndis_device { struct list_head req_list; struct work_struct mcast_work; + u32 filter; bool link_state; /* 0 - link up, 1 - link down */ @@ -211,7 +212,6 @@ void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); void rndis_set_subchannel(struct work_struct *w); -bool rndis_filter_opened(const struct netvsc_device *nvdev); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, @@ -793,6 +793,7 @@ struct netvsc_device { /* Receive buffer allocated by us but manages by NetVSP */ void *recv_buf; + u32 recv_buf_size; /* allocated bytes */ u32 recv_buf_gpadl_handle; u32 recv_section_cnt; u32 recv_section_size; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 17e529af79dc..c9910c33e671 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -36,6 +36,7 @@ #include <asm/sync_bitops.h> #include "hyperv_net.h" +#include "netvsc_trace.h" /* * Switch the data path from the synthetic interface to the VF @@ -57,6 +58,8 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) init_pkt->msg.v4_msg.active_dp.active_datapath = NVSP_DATAPATH_SYNTHETIC; + trace_nvsp_send(ndev, init_pkt); + vmbus_sendpacket(dev->channel, init_pkt, sizeof(struct nvsp_message), (unsigned long)init_pkt, @@ -90,6 +93,11 @@ static void free_netvsc_device(struct rcu_head *head) = container_of(head, struct netvsc_device, rcu); int i; + kfree(nvdev->extension); + 
vfree(nvdev->recv_buf); + vfree(nvdev->send_buf); + kfree(nvdev->send_section_map); + for (i = 0; i < VRSS_CHANNEL_MAX; i++) vfree(nvdev->chan_table[i].mrc.slots); @@ -124,6 +132,8 @@ static void netvsc_revoke_buf(struct hv_device *device, revoke_packet->msg.v1_msg. revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID; + trace_nvsp_send(ndev, revoke_packet); + ret = vmbus_sendpacket(device->channel, revoke_packet, sizeof(struct nvsp_message), @@ -164,6 +174,8 @@ static void netvsc_revoke_buf(struct hv_device *device, revoke_packet->msg.v1_msg.revoke_send_buf.id = NETVSC_SEND_BUFFER_ID; + trace_nvsp_send(ndev, revoke_packet); + ret = vmbus_sendpacket(device->channel, revoke_packet, sizeof(struct nvsp_message), @@ -211,12 +223,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device, net_device->recv_buf_gpadl_handle = 0; } - if (net_device->recv_buf) { - /* Free up the receive buffer */ - vfree(net_device->recv_buf); - net_device->recv_buf = NULL; - } - if (net_device->send_buf_gpadl_handle) { ret = vmbus_teardown_gpadl(device->channel, net_device->send_buf_gpadl_handle); @@ -231,12 +237,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device, } net_device->send_buf_gpadl_handle = 0; } - if (net_device->send_buf) { - /* Free up the send buffer */ - vfree(net_device->send_buf); - net_device->send_buf = NULL; - } - kfree(net_device->send_section_map); } int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) @@ -282,6 +282,8 @@ static int netvsc_init_buf(struct hv_device *device, goto cleanup; } + net_device->recv_buf_size = buf_size; + /* * Establish the gpadl handle for this buffer on this * channel. Note: This call uses the vmbus connection rather @@ -305,6 +307,8 @@ static int netvsc_init_buf(struct hv_device *device, init_packet->msg.v1_msg. 
send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID; + trace_nvsp_send(ndev, init_packet); + /* Send the gpadl notification request */ ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), @@ -384,6 +388,8 @@ static int netvsc_init_buf(struct hv_device *device, net_device->send_buf_gpadl_handle; init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID; + trace_nvsp_send(ndev, init_packet); + /* Send the gpadl notification request */ ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), @@ -452,6 +458,8 @@ static int negotiate_nvsp_ver(struct hv_device *device, init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver; init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver; + trace_nvsp_send(ndev, init_packet); + /* Send the init request */ ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), @@ -484,6 +492,8 @@ static int negotiate_nvsp_ver(struct hv_device *device, init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1; } + trace_nvsp_send(ndev, init_packet); + ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, @@ -496,6 +506,7 @@ static int netvsc_connect_vsp(struct hv_device *device, struct netvsc_device *net_device, const struct netvsc_device_info *device_info) { + struct net_device *ndev = hv_get_drvdata(device); static const u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2, NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 @@ -536,6 +547,8 @@ static int netvsc_connect_vsp(struct hv_device *device, send_ndis_ver.ndis_minor_ver = ndis_version & 0xFFFF; + trace_nvsp_send(ndev, init_packet); + /* Send the init request */ ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), @@ -562,26 +575,29 @@ void netvsc_device_remove(struct hv_device *device) = rtnl_dereference(net_device_ctx->nvdev); int i; - cancel_work_sync(&net_device->subchan_work); - netvsc_revoke_buf(device, net_device); RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); + /* And disassociate NAPI context from device */ + for (i = 0; i < net_device->num_chn; i++) + netif_napi_del(&net_device->chan_table[i].napi); + /* * At this point, no one should be accessing net_device * except in here */ netdev_dbg(ndev, "net device safe to remove\n"); + /* older versions require that buffer be revoked before close */ + if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4) + netvsc_teardown_gpadl(device, net_device); + /* Now, we can close the channel safely */ vmbus_close(device->channel); - netvsc_teardown_gpadl(device, net_device); - - /* And dissassociate NAPI context from device */ - for (i = 0; i < net_device->num_chn; i++) - netif_napi_del(&net_device->chan_table[i].napi); + if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4) + netvsc_teardown_gpadl(device, net_device); /* Release all resources */ free_netvsc_device_rcu(net_device); @@ -645,14 +661,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, queue_sends = atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); - if (net_device->destroy && queue_sends == 0) - wake_up(&net_device->wait_drain); + if (unlikely(net_device->destroy)) { + if (queue_sends == 0) + wake_up(&net_device->wait_drain); + } else { + struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); - if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && - (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || - queue_sends < 1)) { - 
netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx)); - ndev_ctx->eth_stats.wake_queue++; + if (netif_tx_queue_stopped(txq) && + (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || + queue_sends < 1)) { + netif_tx_wake_queue(txq); + ndev_ctx->eth_stats.wake_queue++; + } } } @@ -747,7 +767,7 @@ static inline int netvsc_send_pkt( struct sk_buff *skb) { struct nvsp_message nvmsg; - struct nvsp_1_message_send_rndis_packet * const rpkt = + struct nvsp_1_message_send_rndis_packet *rpkt = &nvmsg.msg.v1_msg.send_rndis_pkt; struct netvsc_channel * const nvchan = &net_device->chan_table[packet->q_idx]; @@ -776,6 +796,8 @@ static inline int netvsc_send_pkt( if (out_channel->rescind) return -ENODEV; + trace_nvsp_send_pkt(ndev, out_channel, rpkt); + if (packet->page_buf_cnt) { if (packet->cp_partial) pb += packet->rmsg_pgcnt; @@ -852,13 +874,6 @@ int netvsc_send(struct net_device *ndev, if (unlikely(!net_device || net_device->destroy)) return -ENODEV; - /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get - * here before the negotiation with the host is finished and - * send_section_map may not be allocated yet. - */ - if (unlikely(!net_device->send_section_map)) - return -EAGAIN; - nvchan = &net_device->chan_table[packet->q_idx]; packet->send_buf_index = NETVSC_INVALID_INDEX; packet->cp_partial = false; @@ -866,10 +881,8 @@ int netvsc_send(struct net_device *ndev, /* Send control message directly without accessing msd (Multi-Send * Data) field which may be changed during data packet processing. */ - if (!skb) { - cur_send = packet; - goto send_now; - } + if (!skb) + return netvsc_send_pkt(device, packet, net_device, pb, skb); /* batch packets in send buffer if possible */ msdp = &nvchan->msd; @@ -953,7 +966,6 @@ int netvsc_send(struct net_device *ndev, } } -send_now: if (cur_send) ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb); @@ -1085,13 +1097,30 @@ static int netvsc_receive(struct net_device *ndev, /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */ for (i = 0; i < count; i++) { - void *data = recv_buf - + vmxferpage_packet->ranges[i].byte_offset; + u32 offset = vmxferpage_packet->ranges[i].byte_offset; u32 buflen = vmxferpage_packet->ranges[i].byte_count; + void *data; + int ret; + + if (unlikely(offset + buflen > net_device->recv_buf_size)) { + status = NVSP_STAT_FAIL; + netif_err(net_device_ctx, rx_err, ndev, + "Packet offset:%u + len:%u too big\n", + offset, buflen); + + continue; + } + + data = recv_buf + offset; + + trace_rndis_recv(ndev, q_idx, data); /* Pass it to the upper layer */ - status = rndis_filter_receive(ndev, net_device, - channel, data, buflen); + ret = rndis_filter_receive(ndev, net_device, + channel, data, buflen); + + if (unlikely(ret != NVSP_STAT_SUCCESS)) + status = NVSP_STAT_FAIL; } enq_receive_complete(ndev, net_device, q_idx, @@ -1153,6 +1182,8 @@ static int netvsc_process_raw_pkt(struct hv_device *device, struct net_device_context *net_device_ctx = netdev_priv(ndev); struct nvsp_message *nvmsg = hv_pkt_data(desc); + trace_nvsp_recv(ndev, channel, nvmsg); + switch (desc->type) { case VM_PKT_COMP: netvsc_send_completion(net_device, channel, device, @@ -1217,9 +1248,10 @@ int netvsc_poll(struct napi_struct *napi, int budget) if (send_recv_completions(ndev, net_device, nvchan) == 0 && work_done < budget && napi_complete_done(napi, work_done) && - hv_end_read(&channel->inbound)) { + hv_end_read(&channel->inbound) && + napi_schedule_prep(napi)) { hv_begin_read(&channel->inbound); - 
napi_reschedule(napi); + __napi_schedule(napi); } /* Driver may overshoot since multiple packets per descriptor */ @@ -1242,7 +1274,7 @@ void netvsc_channel_cb(void *context) /* disable interrupts from host */ hv_begin_read(rbi); - __napi_schedule(&nvchan->napi); + __napi_schedule_irqoff(&nvchan->napi); } } @@ -1296,7 +1328,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, netvsc_channel_cb, net_device->chan_table); if (ret != 0) { - netif_napi_del(&net_device->chan_table[0].napi); netdev_err(ndev, "unable to open channel: %d\n", ret); goto cleanup; } @@ -1306,11 +1337,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, napi_enable(&net_device->chan_table[0].napi); - /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is - * populated. - */ - rcu_assign_pointer(net_device_ctx->nvdev, net_device); - /* Connect with the NetVsp */ ret = netvsc_connect_vsp(device, net_device, device_info); if (ret != 0) { @@ -1319,6 +1345,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, goto close; } + /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is + * populated. + */ + rcu_assign_pointer(net_device_ctx->nvdev, net_device); + return net_device; close: @@ -1329,6 +1360,7 @@ close: vmbus_close(device->channel); cleanup: + netif_napi_del(&net_device->chan_table[0].napi); free_netvsc_device(&net_device->rcu); return ERR_PTR(ret); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c5584c2d440e..ecc84954c511 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -46,13 +46,16 @@ #include "hyperv_net.h" -#define RING_SIZE_MIN 64 +#define RING_SIZE_MIN 64 +#define RETRY_US_LO 5000 +#define RETRY_US_HI 10000 +#define RETRY_MAX 2000 /* >10 sec */ #define LINKCHANGE_INT (2 * HZ) #define VF_TAKEOVER_INT (HZ / 10) static unsigned int ring_size __ro_after_init = 128; -module_param(ring_size, uint, S_IRUGO); +module_param(ring_size, uint, 0444); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); unsigned int netvsc_ring_bytes __ro_after_init; struct reciprocal_value netvsc_ring_reciprocal __ro_after_init; @@ -63,15 +66,46 @@ static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_TX_ERR; static int debug = -1; -module_param(debug, int, S_IRUGO); +module_param(debug, int, 0444); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -static void netvsc_set_multicast_list(struct net_device *net) +static void netvsc_change_rx_flags(struct net_device *net, int change) { - struct net_device_context *net_device_ctx = netdev_priv(net); - struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); + struct net_device_context *ndev_ctx = netdev_priv(net); + struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); + int inc; + + if (!vf_netdev) + return; + + if (change & IFF_PROMISC) { + inc = (net->flags & IFF_PROMISC) ? 1 : -1; + dev_set_promiscuity(vf_netdev, inc); + } + + if (change & IFF_ALLMULTI) { + inc = (net->flags & IFF_ALLMULTI) ?
1 : -1; + dev_set_allmulti(vf_netdev, inc); + } +} - rndis_filter_update(nvdev); +static void netvsc_set_rx_mode(struct net_device *net) +{ + struct net_device_context *ndev_ctx = netdev_priv(net); + struct net_device *vf_netdev; + struct netvsc_device *nvdev; + + rcu_read_lock(); + vf_netdev = rcu_dereference(ndev_ctx->vf_netdev); + if (vf_netdev) { + dev_uc_sync(vf_netdev, net); + dev_mc_sync(vf_netdev, net); + } + + nvdev = rcu_dereference(ndev_ctx->nvdev); + if (nvdev) + rndis_filter_update(nvdev); + rcu_read_unlock(); } static int netvsc_open(struct net_device *net) @@ -91,10 +125,7 @@ static int netvsc_open(struct net_device *net) return ret; } - netif_tx_wake_all_queues(net); - rdev = nvdev->extension; - if (!rdev->link_state) netif_carrier_on(net); @@ -112,36 +143,25 @@ static int netvsc_open(struct net_device *net) return 0; } -static int netvsc_close(struct net_device *net) +static int netvsc_wait_until_empty(struct netvsc_device *nvdev) { - struct net_device_context *net_device_ctx = netdev_priv(net); - struct net_device *vf_netdev - = rtnl_dereference(net_device_ctx->vf_netdev); - struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); - int ret = 0; - u32 aread, i, msec = 10, retry = 0, retry_max = 20; - struct vmbus_channel *chn; - - netif_tx_disable(net); - - /* No need to close rndis filter if it is removed already */ - if (!nvdev) - goto out; - - ret = rndis_filter_close(nvdev); - if (ret != 0) { - netdev_err(net, "unable to close device (ret %d).\n", ret); - return ret; - } + unsigned int retry = 0; + int i; /* Ensure pending bytes in ring are read */ - while (true) { - aread = 0; + for (;;) { + u32 aread = 0; + for (i = 0; i < nvdev->num_chn; i++) { - chn = nvdev->chan_table[i].channel; + struct vmbus_channel *chn + = nvdev->chan_table[i].channel; + if (!chn) continue; + /* make sure receive not running now */ + napi_synchronize(&nvdev->chan_table[i].napi); + aread = hv_get_bytes_to_read(&chn->inbound); if (aread) break; @@ -151,22 +171,40 @@ static int netvsc_close(struct net_device *net) break; } - retry++; - if (retry > retry_max || aread == 0) - break; + if (aread == 0) + return 0; - msleep(msec); + if (++retry > RETRY_MAX) + return -ETIMEDOUT; - if (msec < 1000) - msec *= 2; + usleep_range(RETRY_US_LO, RETRY_US_HI); } +} - if (aread) { - netdev_err(net, "Ring buffer not empty after closing rndis\n"); - ret = -ETIMEDOUT; +static int netvsc_close(struct net_device *net) +{ + struct net_device_context *net_device_ctx = netdev_priv(net); + struct net_device *vf_netdev + = rtnl_dereference(net_device_ctx->vf_netdev); + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); + int ret; + + netif_tx_disable(net); + + /* No need to close rndis filter if it is removed already */ + if (!nvdev) + return 0; + + ret = rndis_filter_close(nvdev); + if (ret != 0) { + netdev_err(net, "unable to close device (ret %d).\n", ret); + return ret; } -out: + ret = netvsc_wait_until_empty(nvdev); + if (ret) + netdev_err(net, "Ring buffer not empty after closing rndis\n"); + if (vf_netdev) dev_close(vf_netdev); @@ -299,8 +337,19 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, rcu_read_lock(); vf_netdev = rcu_dereference(ndc->vf_netdev); if (vf_netdev) { - txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : 0; - qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + const struct net_device_ops *vf_ops = vf_netdev->netdev_ops; + + if (vf_ops->ndo_select_queue) + txq = vf_ops->ndo_select_queue(vf_netdev, skb, + accel_priv, fallback); + else + txq = fallback(vf_netdev, skb); + + /* Record the queue selected by VF so that it can be + * used for common case where VF has more queues than + * the synthetic device. + */ + qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq; } else { txq = netvsc_pick_tx(ndev, skb); } @@ -782,7 +831,7 @@ int netvsc_recv_callback(struct net_device *net, u64_stats_update_end(&rx_stats->syncp); napi_gro_receive(&nvchan->napi, skb); - return 0; + return NVSP_STAT_SUCCESS; } static void netvsc_get_drvinfo(struct net_device *net, @@ -804,16 +853,81 @@ static void netvsc_get_channels(struct net_device *net, } } +static int netvsc_detach(struct net_device *ndev, + struct netvsc_device *nvdev) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct hv_device *hdev = ndev_ctx->device_ctx; + int ret; + + /* Don't try continuing to try and setup sub channels */ + if (cancel_work_sync(&nvdev->subchan_work)) + nvdev->num_chn = 1; + + /* If device was up (receiving) then shutdown */ + if (netif_running(ndev)) { + netif_tx_disable(ndev); + + ret = rndis_filter_close(nvdev); + if (ret) { + netdev_err(ndev, + "unable to close device (ret %d).\n", ret); + return ret; + } + + ret = netvsc_wait_until_empty(nvdev); + if (ret) { + netdev_err(ndev, + "Ring buffer not empty after closing rndis\n"); + return ret; + } + } + + netif_device_detach(ndev); + + rndis_filter_device_remove(hdev, nvdev); + + return 0; +} + +static int netvsc_attach(struct net_device *ndev, + struct netvsc_device_info *dev_info) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct hv_device *hdev = ndev_ctx->device_ctx; + struct netvsc_device *nvdev; + struct rndis_device *rdev; + int ret; + + nvdev = rndis_filter_device_add(hdev, dev_info); + if (IS_ERR(nvdev)) + return PTR_ERR(nvdev); + + /* Note: enable and attach happen when sub-channels setup */ + + netif_carrier_off(ndev); + + if (netif_running(ndev)) { + ret = rndis_filter_open(nvdev); + if (ret) + return ret; + + rdev = nvdev->extension; + if (!rdev->link_state) + netif_carrier_on(ndev); + } + + return 0; +} + static int netvsc_set_channels(struct net_device *net, struct ethtool_channels *channels) { struct net_device_context *net_device_ctx = netdev_priv(net); - struct hv_device *dev = net_device_ctx->device_ctx; struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); unsigned int orig, count = channels->combined_count; struct netvsc_device_info device_info; - bool was_opened; - int ret = 0; + int ret; /* We do not support separate count for rx, tx, or other */ if (count == 0 || @@ -830,9 +944,6 @@ static int netvsc_set_channels(struct net_device *net, return -EINVAL; orig = nvdev->num_chn; - was_opened = rndis_filter_opened(nvdev); - if (was_opened) - rndis_filter_close(nvdev); memset(&device_info, 0, sizeof(device_info)); device_info.num_chn = count; @@ -841,28 +952,17 @@ static int netvsc_set_channels(struct net_device *net, device_info.recv_sections = nvdev->recv_section_cnt; device_info.recv_section_size = nvdev->recv_section_size; - rndis_filter_device_remove(dev, nvdev); + ret = netvsc_detach(net, nvdev); + if (ret) + return ret; - nvdev = rndis_filter_device_add(dev, &device_info); - if (IS_ERR(nvdev)) { - ret = PTR_ERR(nvdev); + ret = netvsc_attach(net, &device_info); + if 
(ret) { device_info.num_chn = orig; - nvdev = rndis_filter_device_add(dev, &device_info); - - if (IS_ERR(nvdev)) { - netdev_err(net, "restoring channel setting failed: %ld\n", - PTR_ERR(nvdev)); - return ret; - } + if (netvsc_attach(net, &device_info)) + netdev_err(net, "restoring channel setting failed\n"); } - if (was_opened) - rndis_filter_open(nvdev); - - /* We may have missed link change notifications */ - net_device_ctx->last_reconfig = 0; - schedule_delayed_work(&net_device_ctx->dwork, 0); - return ret; } @@ -928,10 +1028,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct net_device_context *ndevctx = netdev_priv(ndev); struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); - struct hv_device *hdev = ndevctx->device_ctx; int orig_mtu = ndev->mtu; struct netvsc_device_info device_info; - bool was_opened; int ret = 0; if (!nvdev || nvdev->destroy) @@ -944,11 +1042,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) return ret; } - netif_device_detach(ndev); - was_opened = rndis_filter_opened(nvdev); - if (was_opened) - rndis_filter_close(nvdev); - memset(&device_info, 0, sizeof(device_info)); device_info.num_chn = nvdev->num_chn; device_info.send_sections = nvdev->send_section_cnt; @@ -956,35 +1049,27 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) device_info.recv_sections = nvdev->recv_section_cnt; device_info.recv_section_size = nvdev->recv_section_size; - rndis_filter_device_remove(hdev, nvdev); + ret = netvsc_detach(ndev, nvdev); + if (ret) + goto rollback_vf; ndev->mtu = mtu; - nvdev = rndis_filter_device_add(hdev, &device_info); - if (IS_ERR(nvdev)) { - ret = PTR_ERR(nvdev); - - /* Attempt rollback to original MTU */ - ndev->mtu = orig_mtu; - nvdev = rndis_filter_device_add(hdev, &device_info); - - if (vf_netdev) - dev_set_mtu(vf_netdev, orig_mtu); - - if (IS_ERR(nvdev)) { - netdev_err(ndev, "restoring mtu failed: %ld\n", - PTR_ERR(nvdev)); - return ret; - } - } + ret = netvsc_attach(ndev, &device_info); + if (ret) + goto rollback; - if (was_opened) - rndis_filter_open(nvdev); + return 0; - netif_device_attach(ndev); +rollback: + /* Attempt rollback to original MTU */ + ndev->mtu = orig_mtu; - /* We may have missed link change notifications */ - schedule_delayed_work(&ndevctx->dwork, 0); + if (netvsc_attach(ndev, &device_info)) + netdev_err(ndev, "restoring mtu failed\n"); +rollback_vf: + if (vf_netdev) + dev_set_mtu(vf_netdev, orig_mtu); return ret; } @@ -1490,11 +1575,9 @@ static int netvsc_set_ringparam(struct net_device *ndev, { struct net_device_context *ndevctx = netdev_priv(ndev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); - struct hv_device *hdev = ndevctx->device_ctx; struct netvsc_device_info device_info; struct ethtool_ringparam orig; u32 new_tx, new_rx; - bool was_opened; int ret = 0; if (!nvdev || nvdev->destroy) @@ -1519,34 +1602,18 @@ static int netvsc_set_ringparam(struct net_device *ndev, device_info.recv_sections = new_rx; device_info.recv_section_size = nvdev->recv_section_size; - netif_device_detach(ndev); - was_opened = rndis_filter_opened(nvdev); - if (was_opened) - rndis_filter_close(nvdev); - - rndis_filter_device_remove(hdev, nvdev); - - nvdev = rndis_filter_device_add(hdev, &device_info); - if (IS_ERR(nvdev)) { - ret = PTR_ERR(nvdev); + ret = netvsc_detach(ndev, nvdev); + if (ret) + return ret; + ret = netvsc_attach(ndev, &device_info); + if (ret) { device_info.send_sections = orig.tx_pending; 
device_info.recv_sections = orig.rx_pending; - nvdev = rndis_filter_device_add(hdev, &device_info); - if (IS_ERR(nvdev)) { - netdev_err(ndev, "restoring ringparam failed: %ld\n", - PTR_ERR(nvdev)); - return ret; - } - } - if (was_opened) - rndis_filter_open(nvdev); - netif_device_attach(ndev); - - /* We may have missed link change notifications */ - ndevctx->last_reconfig = 0; - schedule_delayed_work(&ndevctx->dwork, 0); + if (netvsc_attach(ndev, &device_info)) + netdev_err(ndev, "restoring ringparam failed"); + } return ret; } @@ -1576,7 +1643,8 @@ static const struct net_device_ops device_ops = { .ndo_open = netvsc_open, .ndo_stop = netvsc_close, .ndo_start_xmit = netvsc_start_xmit, - .ndo_set_rx_mode = netvsc_set_multicast_list, + .ndo_change_rx_flags = netvsc_change_rx_flags, + .ndo_set_rx_mode = netvsc_set_rx_mode, .ndo_change_mtu = netvsc_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = netvsc_set_mac_addr, @@ -1807,6 +1875,15 @@ static void __netvsc_vf_setup(struct net_device *ndev, netdev_warn(vf_netdev, "unable to change mtu to %u\n", ndev->mtu); + /* set multicast etc flags on VF */ + dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); + + /* sync address list from ndev to VF */ + netif_addr_lock_bh(ndev); + dev_uc_sync(vf_netdev, ndev); + dev_mc_sync(vf_netdev, ndev); + netif_addr_unlock_bh(ndev); + if (netif_running(ndev)) { ret = dev_open(vf_netdev); if (ret) @@ -2021,8 +2098,8 @@ no_net: static int netvsc_remove(struct hv_device *dev) { struct net_device_context *ndev_ctx; - struct net_device *vf_netdev; - struct net_device *net; + struct net_device *vf_netdev, *net; + struct netvsc_device *nvdev; net = hv_get_drvdata(dev); if (net == NULL) { @@ -2032,10 +2109,14 @@ static int netvsc_remove(struct hv_device *dev) ndev_ctx = netdev_priv(net); - netif_device_detach(net); - cancel_delayed_work_sync(&ndev_ctx->dwork); + rcu_read_lock(); + nvdev = rcu_dereference(ndev_ctx->nvdev); + + if (nvdev) + cancel_work_sync(&nvdev->subchan_work); + /* * Call to the vsc driver to let it know that the device is being * removed. Also blocks mtu and channel changes. 
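The netvsc reconfiguration paths above (netvsc_set_channels, netvsc_change_mtu, netvsc_set_ringparam) now all share the same detach/attach sequence instead of open-coding rndis_filter_device_remove()/rndis_filter_device_add() with ad-hoc rollback. A minimal sketch of that shared pattern, with the rollback step made explicit; netvsc_reconfigure() here is a hypothetical wrapper for illustration, not a function added by the patch, while netvsc_detach(), netvsc_attach() and struct netvsc_device_info are the ones introduced above:

/* Hypothetical helper showing the detach/attach pattern used by the
 * ethtool reconfiguration paths; illustrative only.
 */
static int netvsc_reconfigure(struct net_device *ndev,
			      struct netvsc_device *nvdev,
			      struct netvsc_device_info *new_info,
			      struct netvsc_device_info *old_info)
{
	int ret;

	/* Quiesce tx/rx, wait for the ring to drain, and remove
	 * the RNDIS device.
	 */
	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		return ret;

	/* Re-add the device with the new settings. */
	ret = netvsc_attach(ndev, new_info);
	if (ret) {
		/* On failure, attempt to restore the old settings. */
		if (netvsc_attach(ndev, old_info))
			netdev_err(ndev, "restoring settings failed\n");
	}

	return ret;
}

Each caller fills old_info from the current nvdev before detaching, so a failed attach can fall back rather than leaving the device dead.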
@@ -2045,11 +2126,13 @@ static int netvsc_remove(struct hv_device *dev) if (vf_netdev) netvsc_unregister_vf(vf_netdev); + if (nvdev) + rndis_filter_device_remove(dev, nvdev); + unregister_netdevice(net); - rndis_filter_device_remove(dev, - rtnl_dereference(ndev_ctx->nvdev)); rtnl_unlock(); + rcu_read_unlock(); hv_set_drvdata(dev, NULL); diff --git a/drivers/net/hyperv/netvsc_trace.c b/drivers/net/hyperv/netvsc_trace.c new file mode 100644 index 000000000000..bb0ce5a2bcd5 --- /dev/null +++ b/drivers/net/hyperv/netvsc_trace.c @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/netdevice.h> + +#include "hyperv_net.h" + +#define CREATE_TRACE_POINTS +#include "netvsc_trace.h" diff --git a/drivers/net/hyperv/netvsc_trace.h b/drivers/net/hyperv/netvsc_trace.h new file mode 100644 index 000000000000..f7585563dea5 --- /dev/null +++ b/drivers/net/hyperv/netvsc_trace.h @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#if !defined(_NETVSC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _NETVSC_TRACE_H + +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM netvsc +#define TRACE_INCLUDE_FILE netvsc_trace + +TRACE_DEFINE_ENUM(RNDIS_MSG_PACKET); +TRACE_DEFINE_ENUM(RNDIS_MSG_INDICATE); +TRACE_DEFINE_ENUM(RNDIS_MSG_INIT); +TRACE_DEFINE_ENUM(RNDIS_MSG_INIT_C); +TRACE_DEFINE_ENUM(RNDIS_MSG_HALT); +TRACE_DEFINE_ENUM(RNDIS_MSG_QUERY); +TRACE_DEFINE_ENUM(RNDIS_MSG_QUERY_C); +TRACE_DEFINE_ENUM(RNDIS_MSG_SET); +TRACE_DEFINE_ENUM(RNDIS_MSG_SET_C); +TRACE_DEFINE_ENUM(RNDIS_MSG_RESET); +TRACE_DEFINE_ENUM(RNDIS_MSG_RESET_C); +TRACE_DEFINE_ENUM(RNDIS_MSG_KEEPALIVE); +TRACE_DEFINE_ENUM(RNDIS_MSG_KEEPALIVE_C); + +#define show_rndis_type(type) \ + __print_symbolic(type, \ + { RNDIS_MSG_PACKET, "PACKET" }, \ + { RNDIS_MSG_INDICATE, "INDICATE", }, \ + { RNDIS_MSG_INIT, "INIT", }, \ + { RNDIS_MSG_INIT_C, "INIT_C", }, \ + { RNDIS_MSG_HALT, "HALT", }, \ + { RNDIS_MSG_QUERY, "QUERY", }, \ + { RNDIS_MSG_QUERY_C, "QUERY_C", }, \ + { RNDIS_MSG_SET, "SET", }, \ + { RNDIS_MSG_SET_C, "SET_C", }, \ + { RNDIS_MSG_RESET, "RESET", }, \ + { RNDIS_MSG_RESET_C, "RESET_C", }, \ + { RNDIS_MSG_KEEPALIVE, "KEEPALIVE", }, \ + { RNDIS_MSG_KEEPALIVE_C, "KEEPALIVE_C", }) + +DECLARE_EVENT_CLASS(rndis_msg_class, + TP_PROTO(const struct net_device *ndev, u16 q, + const struct rndis_message *msg), + TP_ARGS(ndev, q, msg), + TP_STRUCT__entry( + __string( name, ndev->name ) + __field( u16, queue ) + __field( u32, req_id ) + __field( u32, msg_type ) + __field( u32, msg_len ) + ), + TP_fast_assign( + __assign_str(name, ndev->name); + __entry->queue = q; + __entry->req_id = msg->msg.init_req.req_id; + __entry->msg_type = msg->ndis_msg_type; + __entry->msg_len = msg->msg_len; + ), + TP_printk("dev=%s q=%u req=%#x type=%s msg_len=%u", + __get_str(name), __entry->queue, __entry->req_id, + show_rndis_type(__entry->msg_type), __entry->msg_len) +); + +DEFINE_EVENT(rndis_msg_class, rndis_send, + TP_PROTO(const struct net_device *ndev, u16 q, + const struct rndis_message *msg), + TP_ARGS(ndev, q, msg) +); + +DEFINE_EVENT(rndis_msg_class, rndis_recv, + TP_PROTO(const struct net_device *ndev, u16 q, + const struct rndis_message *msg), + TP_ARGS(ndev, q, msg) +); + +TRACE_DEFINE_ENUM(NVSP_MSG_TYPE_INIT); +TRACE_DEFINE_ENUM(NVSP_MSG_TYPE_INIT_COMPLETE); +TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_NDIS_VER); +TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_RECV_BUF); +TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE); +TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_REVOKE_RECV_BUF); 
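The new netvsc_trace.h follows the kernel's standard tracepoint boilerplate: the header is allowed to be included multiple times (the TRACE_HEADER_MULTI_READ guard), and exactly one translation unit, netvsc_trace.c, defines CREATE_TRACE_POINTS before including it so the event bodies are emitted once. Once the module is loaded, the events typically appear under /sys/kernel/tracing/events/netvsc/. A minimal sketch of the same wiring for a hypothetical "foo" driver (all foo_* names are illustrative; the TRACE_* machinery is the real kernel API):

/* foo_trace.h -- minimal tracepoint header, same structure as
 * netvsc_trace.h above.
 */
#if !defined(_FOO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _FOO_TRACE_H

#include <linux/netdevice.h>
#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo
#define TRACE_INCLUDE_FILE foo_trace

TRACE_EVENT(foo_xmit,
	TP_PROTO(const struct net_device *ndev, u32 len),
	TP_ARGS(ndev, len),
	TP_STRUCT__entry(
		__string( name, ndev->name )
		__field( u32, len )
	),
	TP_fast_assign(
		__assign_str(name, ndev->name);
		__entry->len = len;
	),
	TP_printk("dev=%s len=%u", __get_str(name), __entry->len)
);

#endif /* _FOO_TRACE_H */

/* This part must be outside protection; the path is the driver
 * directory relative to include/trace/.
 */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../drivers/net/foo
#include <trace/define_trace.h>

/* foo_trace.c -- emits the event code exactly once. */
#define CREATE_TRACE_POINTS
#include "foo_trace.h"

Call sites then simply invoke trace_foo_xmit(ndev, skb->len); the call compiles to a no-op branch when the event is disabled.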
+TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_SEND_BUF); +TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE); +TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_REVOKE_SEND_BUF); +TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_RNDIS_PKT); +TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE); +TRACE_DEFINE_ENUM(NVSP_MSG2_TYPE_SEND_NDIS_CONFIG); + +TRACE_DEFINE_ENUM(NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION); +TRACE_DEFINE_ENUM(NVSP_MSG4_TYPE_SWITCH_DATA_PATH); + +TRACE_DEFINE_ENUM(NVSP_MSG5_TYPE_SUBCHANNEL); +TRACE_DEFINE_ENUM(NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE); + +#define show_nvsp_type(type) \ + __print_symbolic(type, \ + { NVSP_MSG_TYPE_INIT, "INIT" }, \ + { NVSP_MSG_TYPE_INIT_COMPLETE, "INIT_COMPLETE" }, \ + { NVSP_MSG1_TYPE_SEND_NDIS_VER, "SEND_NDIS_VER" }, \ + { NVSP_MSG1_TYPE_SEND_RECV_BUF, "SEND_RECV_BUF" }, \ + { NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE, "SEND_RECV_BUF_COMPLETE" }, \ + { NVSP_MSG1_TYPE_REVOKE_RECV_BUF, "REVOKE_RECV_BUF" }, \ + { NVSP_MSG1_TYPE_SEND_SEND_BUF, "SEND_SEND_BUF" }, \ + { NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE, "SEND_SEND_BUF_COMPLETE" }, \ + { NVSP_MSG1_TYPE_REVOKE_SEND_BUF, "REVOKE_SEND_BUF" }, \ + { NVSP_MSG1_TYPE_SEND_RNDIS_PKT, "SEND_RNDIS_PKT" }, \ + { NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE, "SEND_RNDIS_PKT_COMPLETE" },\ + { NVSP_MSG2_TYPE_SEND_NDIS_CONFIG, "SEND_NDIS_CONFIG" }, \ + { NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION, "SEND_VF_ASSOCIATION" }, \ + { NVSP_MSG4_TYPE_SWITCH_DATA_PATH, "SWITCH_DATA_PATH" }, \ + { NVSP_MSG5_TYPE_SUBCHANNEL, "SUBCHANNEL" }, \ + { NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE, "SEND_INDIRECTION_TABLE" }) + +TRACE_EVENT(nvsp_send, + TP_PROTO(const struct net_device *ndev, + const struct nvsp_message *msg), + TP_ARGS(ndev, msg), + TP_STRUCT__entry( + __string( name, ndev->name ) + __field( u32, msg_type ) + ), + TP_fast_assign( + __assign_str(name, ndev->name); + __entry->msg_type = msg->hdr.msg_type; + ), + TP_printk("dev=%s type=%s", + __get_str(name), + show_nvsp_type(__entry->msg_type)) +); + +TRACE_EVENT(nvsp_send_pkt, + TP_PROTO(const struct net_device *ndev, + const struct vmbus_channel *chan, + const struct nvsp_1_message_send_rndis_packet *rpkt), + TP_ARGS(ndev, chan, rpkt), + TP_STRUCT__entry( + __string( name, ndev->name ) + __field( u16, qid ) + __field( u32, channel_type ) + __field( u32, section_index ) + __field( u32, section_size ) + ), + TP_fast_assign( + __assign_str(name, ndev->name); + __entry->qid = chan->offermsg.offer.sub_channel_index; + __entry->channel_type = rpkt->channel_type; + __entry->section_index = rpkt->send_buf_section_index; + __entry->section_size = rpkt->send_buf_section_size; + ), + TP_printk("dev=%s qid=%u type=%s section=%u size=%d", + __get_str(name), __entry->qid, + __entry->channel_type ? 
"CONTROL" : "DATA", + __entry->section_index, __entry->section_size) +); + +TRACE_EVENT(nvsp_recv, + TP_PROTO(const struct net_device *ndev, + const struct vmbus_channel *chan, + const struct nvsp_message *msg), + TP_ARGS(ndev, chan, msg), + TP_STRUCT__entry( + __string( name, ndev->name ) + __field( u16, qid ) + __field( u32, msg_type ) + ), + TP_fast_assign( + __assign_str(name, ndev->name); + __entry->qid = chan->offermsg.offer.sub_channel_index; + __entry->msg_type = msg->hdr.msg_type; + ), + TP_printk("dev=%s qid=%u type=%s", + __get_str(name), __entry->qid, + show_nvsp_type(__entry->msg_type)) +); + +#endif /* _NETVSC_TRACE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../drivers/net/hyperv +#include <trace/define_trace.h> diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index c3ca191fea7f..6b127be781d9 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -31,6 +31,7 @@ #include <linux/rtnetlink.h> #include "hyperv_net.h" +#include "netvsc_trace.h" static void rndis_set_multicast(struct work_struct *w); @@ -241,6 +242,8 @@ static int rndis_filter_send_request(struct rndis_device *dev, pb[0].len; } + trace_rndis_send(dev->ndev, 0, &req->request_msg); + rcu_read_lock_bh(); ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL); rcu_read_unlock_bh(); @@ -264,13 +267,23 @@ static void rndis_set_link_state(struct rndis_device *rdev, } } -static void rndis_filter_receive_response(struct rndis_device *dev, - struct rndis_message *resp) +static void rndis_filter_receive_response(struct net_device *ndev, + struct netvsc_device *nvdev, + const struct rndis_message *resp) { + struct rndis_device *dev = nvdev->extension; struct rndis_request *request = NULL; bool found = false; unsigned long flags; - struct net_device *ndev = dev->ndev; + + /* This should never happen, it means control message + * response received after device removed. 
+ */ + if (dev->state == RNDIS_DEV_UNINITIALIZED) { + netdev_err(ndev, + "got rndis message uninitialized\n"); + return; + } spin_lock_irqsave(&dev->request_lock, flags); list_for_each_entry(request, &dev->req_list, list_ent) { @@ -352,15 +365,15 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type) static int rndis_filter_receive_data(struct net_device *ndev, struct netvsc_device *nvdev, - struct rndis_device *dev, - struct rndis_message *msg, struct vmbus_channel *channel, - void *data, u32 data_buflen) + struct rndis_message *msg, + u32 data_buflen) { struct rndis_packet *rndis_pkt = &msg->msg.pkt; const struct ndis_tcp_ip_checksum_info *csum_info; const struct ndis_pkt_8021q_info *vlan; u32 data_offset; + void *data; /* Remove the rndis header and pass it back up the stack */ data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset; @@ -372,7 +385,7 @@ static int rndis_filter_receive_data(struct net_device *ndev, * should be the data packet size plus the trailer padding size */ if (unlikely(data_buflen < rndis_pkt->data_len)) { - netdev_err(dev->ndev, "rndis message buffer " + netdev_err(ndev, "rndis message buffer " "overflow detected (got %u, min %u)" "...dropping this message!\n", data_buflen, rndis_pkt->data_len); @@ -381,14 +394,15 @@ static int rndis_filter_receive_data(struct net_device *ndev, vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO); + csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO); + + data = (void *)msg + data_offset; + /* * Remove the rndis trailer padding from rndis packet message * rndis_pkt->data_len tell us the real data length, we only copy * the data packet to the stack, without the rndis trailer padding */ - data = (void *)((unsigned long)data + data_offset); - csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO); - return netvsc_recv_callback(ndev, nvdev, channel, data, rndis_pkt->data_len, csum_info, vlan); @@ -400,35 +414,20 @@ int rndis_filter_receive(struct net_device *ndev, void *data, u32 buflen) { struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct rndis_device *rndis_dev = net_dev->extension; struct rndis_message *rndis_msg = data; - /* Make sure the rndis device state is initialized */ - if (unlikely(!rndis_dev)) { - netif_dbg(net_device_ctx, rx_err, ndev, - "got rndis message but no rndis device!\n"); - return NVSP_STAT_FAIL; - } - - if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) { - netif_dbg(net_device_ctx, rx_err, ndev, - "got rndis message uninitialized\n"); - return NVSP_STAT_FAIL; - } - if (netif_msg_rx_status(net_device_ctx)) dump_rndis_message(ndev, rndis_msg); switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: - return rndis_filter_receive_data(ndev, net_dev, - rndis_dev, rndis_msg, - channel, data, buflen); + return rndis_filter_receive_data(ndev, net_dev, channel, + rndis_msg, buflen); case RNDIS_MSG_INIT_C: case RNDIS_MSG_QUERY_C: case RNDIS_MSG_SET_C: /* completion msgs */ - rndis_filter_receive_response(rndis_dev, rndis_msg); + rndis_filter_receive_response(ndev, net_dev, rndis_msg); break; case RNDIS_MSG_INDICATE: @@ -440,10 +439,10 @@ int rndis_filter_receive(struct net_device *ndev, "unhandled rndis message (type %u len %u)\n", rndis_msg->ndis_msg_type, rndis_msg->msg_len); - break; + return NVSP_STAT_FAIL; } - return 0; + return NVSP_STAT_SUCCESS; } static int rndis_filter_query_device(struct rndis_device *dev, @@ -825,13 +824,15 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev, struct rndis_set_request *set; int ret; + if 
(dev->filter == new_filter) + return 0; + request = get_rndis_request(dev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + sizeof(u32)); if (!request) return -ENOMEM; - /* Setup the rndis set */ set = &request->request_msg.msg.set_req; set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER; @@ -842,8 +843,10 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev, &new_filter, sizeof(u32)); ret = rndis_filter_send_request(dev, request); - if (ret == 0) + if (ret == 0) { wait_for_completion(&request->wait_event); + dev->filter = new_filter; + } put_rndis_request(dev, request); @@ -854,15 +857,19 @@ static void rndis_set_multicast(struct work_struct *w) { struct rndis_device *rdev = container_of(w, struct rndis_device, mcast_work); + u32 filter = NDIS_PACKET_TYPE_DIRECTED; + unsigned int flags = rdev->ndev->flags; - if (rdev->ndev->flags & IFF_PROMISC) - rndis_filter_set_packet_filter(rdev, - NDIS_PACKET_TYPE_PROMISCUOUS); - else - rndis_filter_set_packet_filter(rdev, - NDIS_PACKET_TYPE_BROADCAST | - NDIS_PACKET_TYPE_ALL_MULTICAST | - NDIS_PACKET_TYPE_DIRECTED); + if (flags & IFF_PROMISC) { + filter = NDIS_PACKET_TYPE_PROMISCUOUS; + } else { + if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI)) + filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; + if (flags & IFF_BROADCAST) + filter |= NDIS_PACKET_TYPE_BROADCAST; + } + + rndis_filter_set_packet_filter(rdev, filter); } void rndis_filter_update(struct netvsc_device *nvdev) @@ -940,12 +947,11 @@ static bool netvsc_device_idle(const struct netvsc_device *nvdev) return true; } -static void rndis_filter_halt_device(struct rndis_device *dev) +static void rndis_filter_halt_device(struct netvsc_device *nvdev, + struct rndis_device *dev) { struct rndis_request *request; struct rndis_halt_request *halt; - struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); - struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); /* Attempt to do a rndis device halt */ request = get_rndis_request(dev, RNDIS_MSG_HALT, @@ -1084,6 +1090,8 @@ void rndis_set_subchannel(struct work_struct *w) init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE; init_packet->msg.v5_msg.subchn_req.num_subchannels = nvdev->num_chn - 1; + trace_nvsp_send(ndev, init_packet); + ret = vmbus_sendpacket(hv_dev->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, @@ -1116,6 +1124,7 @@ void rndis_set_subchannel(struct work_struct *w) for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) ndev_ctx->tx_table[i] = i % nvdev->num_chn; + netif_device_attach(ndev); rtnl_unlock(); return; @@ -1126,6 +1135,8 @@ failed: nvdev->max_chn = 1; nvdev->num_chn = 1; + + netif_device_attach(ndev); unlock: rtnl_unlock(); } @@ -1328,6 +1339,10 @@ out: net_device->num_chn = 1; } + /* No sub channels, device is ready */ + if (net_device->num_chn == 1) + netif_device_attach(net); + return net_device; err_dev_remv: @@ -1341,12 +1356,11 @@ void rndis_filter_device_remove(struct hv_device *dev, struct rndis_device *rndis_dev = net_dev->extension; /* Halt and release the rndis device */ - rndis_filter_halt_device(rndis_dev); + rndis_filter_halt_device(net_dev, rndis_dev); net_dev->extension = NULL; netvsc_device_remove(dev); - kfree(rndis_dev); } int rndis_filter_open(struct netvsc_device *nvdev) @@ -1364,10 +1378,3 @@ int rndis_filter_close(struct netvsc_device *nvdev) return rndis_filter_close_device(nvdev->extension); } - -bool rndis_filter_opened(const struct netvsc_device *nvdev) -{ - const struct rndis_device *dev = nvdev->extension; - - return 
dev->state == RNDIS_DEV_DATAINITIALIZED; -} diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig index 303ba4133920..8782f5655e3f 100644 --- a/drivers/net/ieee802154/Kconfig +++ b/drivers/net/ieee802154/Kconfig @@ -104,3 +104,14 @@ config IEEE802154_CA8210_DEBUGFS exposes a debugfs node for each CA8210 instance which allows direct use of the Cascoda API, exposing the 802.15.4 MAC management entities. + +config IEEE802154_MCR20A + tristate "MCR20A transceiver driver" + depends on IEEE802154_DRIVERS && MAC802154 + depends on SPI + ---help--- + Say Y here to enable the MCR20A SPI 802.15.4 wireless + controller. + + This driver can also be built as a module. To do so, say M here. + The module will be called 'mcr20a'. diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile index bea1de5e726c..104744d5a668 100644 --- a/drivers/net/ieee802154/Makefile +++ b/drivers/net/ieee802154/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o obj-$(CONFIG_IEEE802154_ADF7242) += adf7242.o obj-$(CONFIG_IEEE802154_CA8210) += ca8210.o +obj-$(CONFIG_IEEE802154_MCR20A) += mcr20a.o diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 548d9d026a85..77abedf0b524 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1661,7 +1661,7 @@ static int at86rf230_debugfs_init(struct at86rf230_local *lp) if (!at86rf230_debugfs_root) return -ENOMEM; - stats = debugfs_create_file("trac_stats", S_IRUGO, + stats = debugfs_create_file("trac_stats", 0444, at86rf230_debugfs_root, lp, &at86rf230_stats_fops); if (!stats) diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 377af43b81b3..58299fb666ed 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2493,13 +2493,14 @@ static ssize_t ca8210_test_int_user_write( struct ca8210_priv *priv = filp->private_data; u8 command[CA8210_SPI_BUF_SIZE]; - if (len > CA8210_SPI_BUF_SIZE) { + memset(command, SPI_IDLE, 6); + if (len > CA8210_SPI_BUF_SIZE || len < 2) { dev_warn( &priv->spi->dev, - "userspace requested erroneously long write (%zu)\n", + "userspace requested erroneous write length (%zu)\n", len ); - return -EMSGSIZE; + return -EBADE; } ret = copy_from_user(command, in_buf, len); @@ -2511,6 +2512,13 @@ ); return -EIO; } + if (len != command[1] + 2) { + dev_err( + &priv->spi->dev, + "write len does not match packet length field\n" + ); + return -EBADE; + } ret = ca8210_test_check_upstream(command, priv->spi); if (ret == 0) { diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c new file mode 100644 index 000000000000..55a22c761808 --- /dev/null +++ b/drivers/net/ieee802154/mcr20a.c @@ -0,0 +1,1411 @@ +/* + * Driver for NXP MCR20A 802.15.4 Wireless-PAN Networking controller + * + * Copyright (C) 2018 Xue Liu <liuxuenetmail@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/gpio.h> +#include <linux/spi/spi.h> +#include <linux/workqueue.h> +#include <linux/interrupt.h> +#include <linux/skbuff.h> +#include <linux/of_gpio.h> +#include <linux/regmap.h> +#include <linux/ieee802154.h> +#include <linux/debugfs.h> + +#include <net/mac802154.h> +#include <net/cfg802154.h> + +#include <linux/device.h> + +#include "mcr20a.h" + +#define SPI_COMMAND_BUFFER 3 + +#define REGISTER_READ BIT(7) +#define REGISTER_WRITE (0 << 7) +#define REGISTER_ACCESS (0 << 6) +#define PACKET_BUFF_BURST_ACCESS BIT(6) +#define PACKET_BUFF_BYTE_ACCESS BIT(5) + +#define MCR20A_WRITE_REG(x) (x) +#define MCR20A_READ_REG(x) (REGISTER_READ | (x)) +#define MCR20A_BURST_READ_PACKET_BUF (0xC0) +#define MCR20A_BURST_WRITE_PACKET_BUF (0x40) + +#define MCR20A_CMD_REG 0x80 +#define MCR20A_CMD_REG_MASK 0x3f +#define MCR20A_CMD_WRITE 0x40 +#define MCR20A_CMD_FB 0x20 + +/* Number of Interrupt Request Status Registers */ +#define MCR20A_IRQSTS_NUM 2 /* only IRQ_STS1 and IRQ_STS2 */ + +/* MCR20A CCA Type */ +enum { + MCR20A_CCA_ED, // energy detect - CCA bit not active, + // not to be used for T and CCCA sequences + MCR20A_CCA_MODE1, // energy detect - CCA bit ACTIVE + MCR20A_CCA_MODE2, // 802.15.4 compliant signal detect - CCA bit ACTIVE + MCR20A_CCA_MODE3 +}; + +enum { + MCR20A_XCVSEQ_IDLE = 0x00, + MCR20A_XCVSEQ_RX = 0x01, + MCR20A_XCVSEQ_TX = 0x02, + MCR20A_XCVSEQ_CCA = 0x03, + MCR20A_XCVSEQ_TR = 0x04, + MCR20A_XCVSEQ_CCCA = 0x05, +}; + +/* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */ +#define MCR20A_MIN_CHANNEL (11) +#define MCR20A_MAX_CHANNEL (26) +#define MCR20A_CHANNEL_SPACING (5) + +/* MCR20A CCA Threshold constants */ +#define MCR20A_MIN_CCA_THRESHOLD (0x6EU) +#define MCR20A_MAX_CCA_THRESHOLD (0x00U) + +/* version 0C */ +#define MCR20A_OVERWRITE_VERSION (0x0C) + +/* MCR20A PLL configurations */ +static const u8 PLL_INT[16] = { + /* 2405 */ 0x0B, /* 2410 */ 0x0B, /* 2415 */ 0x0B, + /* 2420 */ 0x0B, /* 2425 */ 0x0B, /* 2430 */ 0x0B, + /* 2435 */ 0x0C, /* 2440 */ 0x0C, /* 2445 */ 0x0C, + /* 2450 */ 0x0C, /* 2455 */ 0x0C, /* 2460 */ 0x0C, + /* 2465 */ 0x0D, /* 2470 */ 0x0D, /* 2475 */ 0x0D, + /* 2480 */ 0x0D +}; + +static const u8 PLL_FRAC[16] = { + /* 2405 */ 0x28, /* 2410 */ 0x50, /* 2415 */ 0x78, + /* 2420 */ 0xA0, /* 2425 */ 0xC8, /* 2430 */ 0xF0, + /* 2435 */ 0x18, /* 2440 */ 0x40, /* 2445 */ 0x68, + /* 2450 */ 0x90, /* 2455 */ 0xB8, /* 2460 */ 0xE0, + /* 2465 */ 0x08, /* 2470 */ 0x30, /* 2475 */ 0x58, + /* 2480 */ 0x80 +}; + +static const struct reg_sequence mar20a_iar_overwrites[] = { + { IAR_MISC_PAD_CTRL, 0x02 }, + { IAR_VCO_CTRL1, 0xB3 }, + { IAR_VCO_CTRL2, 0x07 }, + { IAR_PA_TUNING, 0x71 }, + { IAR_CHF_IBUF, 0x2F }, + { IAR_CHF_QBUF, 0x2F }, + { IAR_CHF_IRIN, 0x24 }, + { IAR_CHF_QRIN, 0x24 }, + { IAR_CHF_IL, 0x24 }, + { IAR_CHF_QL, 0x24 }, + { IAR_CHF_CC1, 0x32 }, + { IAR_CHF_CCL, 0x1D }, + { IAR_CHF_CC2, 0x2D }, + { IAR_CHF_IROUT, 0x24 }, + { IAR_CHF_QROUT, 0x24 }, + { IAR_PA_CAL, 0x28 }, + { IAR_AGC_THR1, 0x55 }, + { IAR_AGC_THR2, 0x2D }, + { IAR_ATT_RSSI1, 0x5F }, + { IAR_ATT_RSSI2, 0x8F }, + { IAR_RSSI_OFFSET, 0x61 }, + { IAR_CHF_PMA_GAIN, 0x03 }, + { IAR_CCA1_THRESH, 0x50 }, + { IAR_CORR_NVAL, 0x13 }, + { IAR_ACKDELAY, 0x3D }, +}; + +#define MCR20A_VALID_CHANNELS (0x07FFF800) + +struct mcr20a_platform_data { + int rst_gpio; +}; + +#define MCR20A_MAX_BUF (127) + +#define printdev(X) (&X->spi->dev) + +/* regmap information for Direct Access Register (DAR) access */ +#define MCR20A_DAR_WRITE 0x01
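+ +/* A worked example of the channel-to-frequency mapping that + * mcr20a_set_channel() applies further below, using the PLL_INT[] and + * PLL_FRAC[] tables above (frequency = ((PLL_INT + 64) + PLL_FRAC / 65536) + * * 32 MHz, where the 16-bit fraction is PLL_FRAC[channel - 11] << 8): + * + * channel 11: ((0x0B + 64) + 0x2800 / 65536) * 32 MHz = 75.15625 * 32 = 2405 MHz + * channel 26: ((0x0D + 64) + 0x8000 / 65536) * 32 MHz = 77.5 * 32 = 2480 MHz + */ +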
+#define MCR20A_DAR_READ 0x00 +#define MCR20A_DAR_NUMREGS 0x3F + +/* regmap information for Indirect Access Register (IAR) access */ +#define MCR20A_IAR_ACCESS 0x80 +#define MCR20A_IAR_NUMREGS 0xBEFF + +/* Read/Write SPI Commands for DAR and IAR registers. */ +#define MCR20A_READSHORT(reg) ((reg) << 1) +#define MCR20A_WRITESHORT(reg) ((reg) << 1 | 1) +#define MCR20A_READLONG(reg) (1 << 15 | (reg) << 5) +#define MCR20A_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4) + +/* Type definitions for link configuration of instantiable layers */ +#define MCR20A_PHY_INDIRECT_QUEUE_SIZE (12) + +static bool +mcr20a_dar_writeable(struct device *dev, unsigned int reg) +{ + switch (reg) { + case DAR_IRQ_STS1: + case DAR_IRQ_STS2: + case DAR_IRQ_STS3: + case DAR_PHY_CTRL1: + case DAR_PHY_CTRL2: + case DAR_PHY_CTRL3: + case DAR_PHY_CTRL4: + case DAR_SRC_CTRL: + case DAR_SRC_ADDRS_SUM_LSB: + case DAR_SRC_ADDRS_SUM_MSB: + case DAR_T3CMP_LSB: + case DAR_T3CMP_MSB: + case DAR_T3CMP_USB: + case DAR_T2PRIMECMP_LSB: + case DAR_T2PRIMECMP_MSB: + case DAR_T1CMP_LSB: + case DAR_T1CMP_MSB: + case DAR_T1CMP_USB: + case DAR_T2CMP_LSB: + case DAR_T2CMP_MSB: + case DAR_T2CMP_USB: + case DAR_T4CMP_LSB: + case DAR_T4CMP_MSB: + case DAR_T4CMP_USB: + case DAR_PLL_INT0: + case DAR_PLL_FRAC0_LSB: + case DAR_PLL_FRAC0_MSB: + case DAR_PA_PWR: + /* no DAR_ACM */ + case DAR_OVERWRITE_VER: + case DAR_CLK_OUT_CTRL: + case DAR_PWR_MODES: + return true; + default: + return false; + } +} + +static bool +mcr20a_dar_readable(struct device *dev, unsigned int reg) +{ + bool rc; + + /* all writeable are also readable */ + rc = mcr20a_dar_writeable(dev, reg); + if (rc) + return rc; + + /* readonly regs */ + switch (reg) { + case DAR_RX_FRM_LEN: + case DAR_CCA1_ED_FNL: + case DAR_EVENT_TMR_LSB: + case DAR_EVENT_TMR_MSB: + case DAR_EVENT_TMR_USB: + case DAR_TIMESTAMP_LSB: + case DAR_TIMESTAMP_MSB: + case DAR_TIMESTAMP_USB: + case DAR_SEQ_STATE: + case DAR_LQI_VALUE: + case DAR_RSSI_CCA_CONT: + return true; + default: + return false; + } +} + +static bool +mcr20a_dar_volatile(struct device *dev, unsigned int reg) +{ + /* can be changed during runtime */ + switch (reg) { + case DAR_IRQ_STS1: + case DAR_IRQ_STS2: + case DAR_IRQ_STS3: + /* use them in spi_async and regmap so it's volatile */ + return true; + default: + return false; + } +} + +static bool +mcr20a_dar_precious(struct device *dev, unsigned int reg) +{ + /* don't clear irq line on read */ + switch (reg) { + case DAR_IRQ_STS1: + case DAR_IRQ_STS2: + case DAR_IRQ_STS3: + return true; + default: + return false; + } +} + +static const struct regmap_config mcr20a_dar_regmap = { + .name = "mcr20a_dar", + .reg_bits = 8, + .val_bits = 8, + .write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE, + .read_flag_mask = REGISTER_ACCESS | REGISTER_READ, + .cache_type = REGCACHE_RBTREE, + .writeable_reg = mcr20a_dar_writeable, + .readable_reg = mcr20a_dar_readable, + .volatile_reg = mcr20a_dar_volatile, + .precious_reg = mcr20a_dar_precious, + .fast_io = true, + .can_multi_write = true, +}; + +static bool +mcr20a_iar_writeable(struct device *dev, unsigned int reg) +{ + switch (reg) { + case IAR_XTAL_TRIM: + case IAR_PMC_LP_TRIM: + case IAR_MACPANID0_LSB: + case IAR_MACPANID0_MSB: + case IAR_MACSHORTADDRS0_LSB: + case IAR_MACSHORTADDRS0_MSB: + case IAR_MACLONGADDRS0_0: + case IAR_MACLONGADDRS0_8: + case IAR_MACLONGADDRS0_16: + case IAR_MACLONGADDRS0_24: + case IAR_MACLONGADDRS0_32: + case IAR_MACLONGADDRS0_40: + case IAR_MACLONGADDRS0_48: + case IAR_MACLONGADDRS0_56: + case IAR_RX_FRAME_FILTER: + case 
IAR_PLL_INT1: + case IAR_PLL_FRAC1_LSB: + case IAR_PLL_FRAC1_MSB: + case IAR_MACPANID1_LSB: + case IAR_MACPANID1_MSB: + case IAR_MACSHORTADDRS1_LSB: + case IAR_MACSHORTADDRS1_MSB: + case IAR_MACLONGADDRS1_0: + case IAR_MACLONGADDRS1_8: + case IAR_MACLONGADDRS1_16: + case IAR_MACLONGADDRS1_24: + case IAR_MACLONGADDRS1_32: + case IAR_MACLONGADDRS1_40: + case IAR_MACLONGADDRS1_48: + case IAR_MACLONGADDRS1_56: + case IAR_DUAL_PAN_CTRL: + case IAR_DUAL_PAN_DWELL: + case IAR_CCA1_THRESH: + case IAR_CCA1_ED_OFFSET_COMP: + case IAR_LQI_OFFSET_COMP: + case IAR_CCA_CTRL: + case IAR_CCA2_CORR_PEAKS: + case IAR_CCA2_CORR_THRESH: + case IAR_TMR_PRESCALE: + case IAR_ANT_PAD_CTRL: + case IAR_MISC_PAD_CTRL: + case IAR_BSM_CTRL: + case IAR_RNG: + case IAR_RX_WTR_MARK: + case IAR_SOFT_RESET: + case IAR_TXDELAY: + case IAR_ACKDELAY: + case IAR_CORR_NVAL: + case IAR_ANT_AGC_CTRL: + case IAR_AGC_THR1: + case IAR_AGC_THR2: + case IAR_PA_CAL: + case IAR_ATT_RSSI1: + case IAR_ATT_RSSI2: + case IAR_RSSI_OFFSET: + case IAR_XTAL_CTRL: + case IAR_CHF_PMA_GAIN: + case IAR_CHF_IBUF: + case IAR_CHF_QBUF: + case IAR_CHF_IRIN: + case IAR_CHF_QRIN: + case IAR_CHF_IL: + case IAR_CHF_QL: + case IAR_CHF_CC1: + case IAR_CHF_CCL: + case IAR_CHF_CC2: + case IAR_CHF_IROUT: + case IAR_CHF_QROUT: + case IAR_PA_TUNING: + case IAR_VCO_CTRL1: + case IAR_VCO_CTRL2: + return true; + default: + return false; + } +} + +static bool +mcr20a_iar_readable(struct device *dev, unsigned int reg) +{ + bool rc; + + /* all writeable are also readable */ + rc = mcr20a_iar_writeable(dev, reg); + if (rc) + return rc; + + /* readonly regs */ + switch (reg) { + case IAR_PART_ID: + case IAR_DUAL_PAN_STS: + case IAR_RX_BYTE_COUNT: + case IAR_FILTERFAIL_CODE1: + case IAR_FILTERFAIL_CODE2: + case IAR_RSSI: + return true; + default: + return false; + } +} + +static bool +mcr20a_iar_volatile(struct device *dev, unsigned int reg) +{ +/* can be changed during runtime */ + switch (reg) { + case IAR_DUAL_PAN_STS: + case IAR_RX_BYTE_COUNT: + case IAR_FILTERFAIL_CODE1: + case IAR_FILTERFAIL_CODE2: + case IAR_RSSI: + return true; + default: + return false; + } +} + +static const struct regmap_config mcr20a_iar_regmap = { + .name = "mcr20a_iar", + .reg_bits = 16, + .val_bits = 8, + .write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE | IAR_INDEX, + .read_flag_mask = REGISTER_ACCESS | REGISTER_READ | IAR_INDEX, + .cache_type = REGCACHE_RBTREE, + .writeable_reg = mcr20a_iar_writeable, + .readable_reg = mcr20a_iar_readable, + .volatile_reg = mcr20a_iar_volatile, + .fast_io = true, +}; + +struct mcr20a_local { + struct spi_device *spi; + + struct ieee802154_hw *hw; + struct mcr20a_platform_data *pdata; + struct regmap *regmap_dar; + struct regmap *regmap_iar; + + u8 *buf; + + bool is_tx; + + /* for writing tx buffer */ + struct spi_message tx_buf_msg; + u8 tx_header[1]; + /* burst buffer write command */ + struct spi_transfer tx_xfer_header; + u8 tx_len[1]; + /* len of tx packet */ + struct spi_transfer tx_xfer_len; + /* data of tx packet */ + struct spi_transfer tx_xfer_buf; + struct sk_buff *tx_skb; + + /* for read length rxfifo */ + struct spi_message reg_msg; + u8 reg_cmd[1]; + u8 reg_data[MCR20A_IRQSTS_NUM]; + struct spi_transfer reg_xfer_cmd; + struct spi_transfer reg_xfer_data; + + /* receive handling */ + struct spi_message rx_buf_msg; + u8 rx_header[1]; + struct spi_transfer rx_xfer_header; + u8 rx_lqi[1]; + struct spi_transfer rx_xfer_lqi; + u8 rx_buf[MCR20A_MAX_BUF]; + struct spi_transfer rx_xfer_buf; + + /* isr handling for reading intstat */ + struct 
spi_message irq_msg; + u8 irq_header[1]; + u8 irq_data[MCR20A_IRQSTS_NUM]; + struct spi_transfer irq_xfer_data; + struct spi_transfer irq_xfer_header; +}; + +static void +mcr20a_write_tx_buf_complete(void *context) +{ + struct mcr20a_local *lp = context; + int ret; + + dev_dbg(printdev(lp), "%s\n", __func__); + + lp->reg_msg.complete = NULL; + lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1); + lp->reg_data[0] = MCR20A_XCVSEQ_TX; + lp->reg_xfer_data.len = 1; + + ret = spi_async(lp->spi, &lp->reg_msg); + if (ret) + dev_err(printdev(lp), "failed to set SEQ TX\n"); +} + +static int +mcr20a_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) +{ + struct mcr20a_local *lp = hw->priv; + + dev_dbg(printdev(lp), "%s\n", __func__); + + lp->tx_skb = skb; + + print_hex_dump_debug("mcr20a tx: ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, 0); + + lp->is_tx = 1; + + lp->reg_msg.complete = NULL; + lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1); + lp->reg_data[0] = MCR20A_XCVSEQ_IDLE; + lp->reg_xfer_data.len = 1; + + return spi_async(lp->spi, &lp->reg_msg); +} + +static int +mcr20a_ed(struct ieee802154_hw *hw, u8 *level) +{ + WARN_ON(!level); + *level = 0xbe; + return 0; +} + +static int +mcr20a_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) +{ + struct mcr20a_local *lp = hw->priv; + int ret; + + dev_dbg(printdev(lp), "%s\n", __func__); + + /* frequency = ((PLL_INT+64) + (PLL_FRAC/65536)) * 32 MHz */ + ret = regmap_write(lp->regmap_dar, DAR_PLL_INT0, PLL_INT[channel - 11]); + if (ret) + return ret; + ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_LSB, 0x00); + if (ret) + return ret; + ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_MSB, + PLL_FRAC[channel - 11]); + if (ret) + return ret; + + return 0; +} + +static int +mcr20a_start(struct ieee802154_hw *hw) +{ + struct mcr20a_local *lp = hw->priv; + int ret; + + dev_dbg(printdev(lp), "%s\n", __func__); + + /* No slotted operation */ + dev_dbg(printdev(lp), "no slotted operation\n"); + ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, + DAR_PHY_CTRL1_SLOTTED, 0x0); + + /* enable irq */ + enable_irq(lp->spi->irq); + + /* Unmask SEQ interrupt */ + ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2, + DAR_PHY_CTRL2_SEQMSK, 0x0); + + /* Start the RX sequence */ + dev_dbg(printdev(lp), "start the RX sequence\n"); + ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, + DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX); + + return 0; +} + +static void +mcr20a_stop(struct ieee802154_hw *hw) +{ + struct mcr20a_local *lp = hw->priv; + + dev_dbg(printdev(lp), "%s\n", __func__); + + /* stop all running sequence */ + regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, + DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE); + + /* disable irq */ + disable_irq(lp->spi->irq); +} + +static int +mcr20a_set_hw_addr_filt(struct ieee802154_hw *hw, + struct ieee802154_hw_addr_filt *filt, + unsigned long changed) +{ + struct mcr20a_local *lp = hw->priv; + + dev_dbg(printdev(lp), "%s\n", __func__); + + if (changed & IEEE802154_AFILT_SADDR_CHANGED) { + u16 addr = le16_to_cpu(filt->short_addr); + + regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_LSB, addr); + regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_MSB, addr >> 8); + } + + if (changed & IEEE802154_AFILT_PANID_CHANGED) { + u16 pan = le16_to_cpu(filt->pan_id); + + regmap_write(lp->regmap_iar, IAR_MACPANID0_LSB, pan); + regmap_write(lp->regmap_iar, IAR_MACPANID0_MSB, pan >> 8); + } + + if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { + u8 addr[8], i; + + memcpy(addr, &filt->ieee_addr, 8); + for (i = 0;
i < 8; i++) + regmap_write(lp->regmap_iar, + IAR_MACLONGADDRS0_0 + i, addr[i]); + } + + if (changed & IEEE802154_AFILT_PANC_CHANGED) { + if (filt->pan_coord) { + regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4, + DAR_PHY_CTRL4_PANCORDNTR0, 0x10); + } else { + regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4, + DAR_PHY_CTRL4_PANCORDNTR0, 0x00); + } + } + + return 0; +} + +/* -30 dBm to 10 dBm */ +#define MCR20A_MAX_TX_POWERS 0x14 +static const s32 mcr20a_powers[MCR20A_MAX_TX_POWERS + 1] = { + -3000, -2800, -2600, -2400, -2200, -2000, -1800, -1600, -1400, + -1200, -1000, -800, -600, -400, -200, 0, 200, 400, 600, 800, 1000 +}; + +static int +mcr20a_set_txpower(struct ieee802154_hw *hw, s32 mbm) +{ + struct mcr20a_local *lp = hw->priv; + u32 i; + + dev_dbg(printdev(lp), "%s(%d)\n", __func__, mbm); + + for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) { + if (lp->hw->phy->supported.tx_powers[i] == mbm) + return regmap_write(lp->regmap_dar, DAR_PA_PWR, + ((i + 8) & 0x1F)); + } + + return -EINVAL; +} + +#define MCR20A_MAX_ED_LEVELS MCR20A_MIN_CCA_THRESHOLD +static s32 mcr20a_ed_levels[MCR20A_MAX_ED_LEVELS + 1]; + +static int +mcr20a_set_cca_mode(struct ieee802154_hw *hw, + const struct wpan_phy_cca *cca) +{ + struct mcr20a_local *lp = hw->priv; + unsigned int cca_mode = 0xff; + bool cca_mode_and = false; + int ret; + + dev_dbg(printdev(lp), "%s\n", __func__); + + /* mapping 802.15.4 to driver spec */ + switch (cca->mode) { + case NL802154_CCA_ENERGY: + cca_mode = MCR20A_CCA_MODE1; + break; + case NL802154_CCA_CARRIER: + cca_mode = MCR20A_CCA_MODE2; + break; + case NL802154_CCA_ENERGY_CARRIER: + switch (cca->opt) { + case NL802154_CCA_OPT_ENERGY_CARRIER_AND: + cca_mode = MCR20A_CCA_MODE3; + cca_mode_and = true; + break; + case NL802154_CCA_OPT_ENERGY_CARRIER_OR: + cca_mode = MCR20A_CCA_MODE3; + cca_mode_and = false; + break; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4, + DAR_PHY_CTRL4_CCATYPE_MASK, + cca_mode << DAR_PHY_CTRL4_CCATYPE_SHIFT); + if (ret < 0) + return ret; + + if (cca_mode == MCR20A_CCA_MODE3) { + if (cca_mode_and) { + ret = regmap_update_bits(lp->regmap_iar, IAR_CCA_CTRL, + IAR_CCA_CTRL_CCA3_AND_NOT_OR, + 0x08); + } else { + ret = regmap_update_bits(lp->regmap_iar, + IAR_CCA_CTRL, + IAR_CCA_CTRL_CCA3_AND_NOT_OR, + 0x00); + } + if (ret < 0) + return ret; + } + + return ret; +} + +static int +mcr20a_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) +{ + struct mcr20a_local *lp = hw->priv; + u32 i; + + dev_dbg(printdev(lp), "%s\n", __func__); + + for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) { + if (hw->phy->supported.cca_ed_levels[i] == mbm) + return regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, i); + } + + return 0; +} + +static int +mcr20a_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) +{ + struct mcr20a_local *lp = hw->priv; + int ret; + u8 rx_frame_filter_reg = 0x0; + + dev_dbg(printdev(lp), "%s(%d)\n", __func__, on); + + if (on) { + /* All frame types accepted*/ + rx_frame_filter_reg &= ~(IAR_RX_FRAME_FLT_FRM_VER); + rx_frame_filter_reg |= (IAR_RX_FRAME_FLT_ACK_FT | + IAR_RX_FRAME_FLT_NS_FT); + + ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4, + DAR_PHY_CTRL4_PROMISCUOUS, + DAR_PHY_CTRL4_PROMISCUOUS); + if (ret < 0) + return ret; + + ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER, + rx_frame_filter_reg); + if (ret < 0) + return ret; + } else { + ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4, + DAR_PHY_CTRL4_PROMISCUOUS, 0x0); + 
if (ret < 0) + return ret; + + ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER, + IAR_RX_FRAME_FLT_FRM_VER | + IAR_RX_FRAME_FLT_BEACON_FT | + IAR_RX_FRAME_FLT_DATA_FT | + IAR_RX_FRAME_FLT_CMD_FT); + if (ret < 0) + return ret; + } + + return 0; +} + +static const struct ieee802154_ops mcr20a_hw_ops = { + .owner = THIS_MODULE, + .xmit_async = mcr20a_xmit, + .ed = mcr20a_ed, + .set_channel = mcr20a_set_channel, + .start = mcr20a_start, + .stop = mcr20a_stop, + .set_hw_addr_filt = mcr20a_set_hw_addr_filt, + .set_txpower = mcr20a_set_txpower, + .set_cca_mode = mcr20a_set_cca_mode, + .set_cca_ed_level = mcr20a_set_cca_ed_level, + .set_promiscuous_mode = mcr20a_set_promiscuous_mode, +}; + +static int +mcr20a_request_rx(struct mcr20a_local *lp) +{ + dev_dbg(printdev(lp), "%s\n", __func__); + + /* Start the RX sequence */ + regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1, + DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX); + + return 0; +} + +static void +mcr20a_handle_rx_read_buf_complete(void *context) +{ + struct mcr20a_local *lp = context; + u8 len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK; + struct sk_buff *skb; + + dev_dbg(printdev(lp), "%s\n", __func__); + + dev_dbg(printdev(lp), "RX is done\n"); + + if (!ieee802154_is_valid_psdu_len(len)) { + dev_vdbg(&lp->spi->dev, "corrupted frame received\n"); + len = IEEE802154_MTU; + } + + len = len - 2; /* get rid of frame check field */ + + skb = dev_alloc_skb(len); + if (!skb) + return; + + memcpy(skb_put(skb, len), lp->rx_buf, len); + ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]); + + print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1, + lp->rx_buf, len, 0); + pr_debug("mcr20a rx: lqi: %02hhx\n", lp->rx_lqi[0]); + + /* start RX sequence */ + mcr20a_request_rx(lp); +} + +static void +mcr20a_handle_rx_read_len_complete(void *context) +{ + struct mcr20a_local *lp = context; + u8 len; + int ret; + + dev_dbg(printdev(lp), "%s\n", __func__); + + /* get the length of received frame */ + len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK; + dev_dbg(printdev(lp), "frame len : %d\n", len); + + /* prepare to read the rx buf */ + lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete; + lp->rx_header[0] = MCR20A_BURST_READ_PACKET_BUF; + lp->rx_xfer_buf.len = len; + + ret = spi_async(lp->spi, &lp->rx_buf_msg); + if (ret) + dev_err(printdev(lp), "failed to read rx buffer length\n"); +} + +static int +mcr20a_handle_rx(struct mcr20a_local *lp) +{ + dev_dbg(printdev(lp), "%s\n", __func__); + lp->reg_msg.complete = mcr20a_handle_rx_read_len_complete; + lp->reg_cmd[0] = MCR20A_READ_REG(DAR_RX_FRM_LEN); + lp->reg_xfer_data.len = 1; + + return spi_async(lp->spi, &lp->reg_msg); +} + +static int +mcr20a_handle_tx_complete(struct mcr20a_local *lp) +{ + dev_dbg(printdev(lp), "%s\n", __func__); + + ieee802154_xmit_complete(lp->hw, lp->tx_skb, false); + + return mcr20a_request_rx(lp); +} + +static int +mcr20a_handle_tx(struct mcr20a_local *lp) +{ + int ret; + + dev_dbg(printdev(lp), "%s\n", __func__); + + /* write tx buffer */ + lp->tx_header[0] = MCR20A_BURST_WRITE_PACKET_BUF; + /* add 2 bytes of FCS */ + lp->tx_len[0] = lp->tx_skb->len + 2; + lp->tx_xfer_buf.tx_buf = lp->tx_skb->data; + /* add 1 byte psduLength */ + lp->tx_xfer_buf.len = lp->tx_skb->len + 1; + + ret = spi_async(lp->spi, &lp->tx_buf_msg); + if (ret) { + dev_err(printdev(lp), "SPI write Failed for TX buf\n"); + return ret; + } + + return 0; +} + +static void +mcr20a_irq_clean_complete(void *context) +{ + struct mcr20a_local *lp = context; + u8 seq_state = 
lp->irq_data[DAR_IRQ_STS1] & DAR_PHY_CTRL1_XCVSEQ_MASK; + + dev_dbg(printdev(lp), "%s\n", __func__); + + enable_irq(lp->spi->irq); + + dev_dbg(printdev(lp), "IRQ STA1 (%02x) STA2 (%02x)\n", + lp->irq_data[DAR_IRQ_STS1], lp->irq_data[DAR_IRQ_STS2]); + + switch (seq_state) { + /* TX IRQ, RX IRQ and SEQ IRQ */ + case (0x03): + if (lp->is_tx) { + lp->is_tx = 0; + dev_dbg(printdev(lp), "TX is done. No ACK\n"); + mcr20a_handle_tx_complete(lp); + } + break; + case (0x05): + /* rx is starting */ + dev_dbg(printdev(lp), "RX is starting\n"); + mcr20a_handle_rx(lp); + break; + case (0x07): + if (lp->is_tx) { + /* tx is done */ + lp->is_tx = 0; + dev_dbg(printdev(lp), "TX is done. Get ACK\n"); + mcr20a_handle_tx_complete(lp); + } else { + /* rx is starting */ + dev_dbg(printdev(lp), "RX is starting\n"); + mcr20a_handle_rx(lp); + } + break; + case (0x01): + if (lp->is_tx) { + dev_dbg(printdev(lp), "TX is starting\n"); + mcr20a_handle_tx(lp); + } else { + dev_dbg(printdev(lp), "MCR20A is stopped\n"); + } + break; + } +} + +static void mcr20a_irq_status_complete(void *context) +{ + int ret; + struct mcr20a_local *lp = context; + + dev_dbg(printdev(lp), "%s\n", __func__); + regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1, + DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE); + + lp->reg_msg.complete = mcr20a_irq_clean_complete; + lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_IRQ_STS1); + memcpy(lp->reg_data, lp->irq_data, MCR20A_IRQSTS_NUM); + lp->reg_xfer_data.len = MCR20A_IRQSTS_NUM; + + ret = spi_async(lp->spi, &lp->reg_msg); + + if (ret) + dev_err(printdev(lp), "failed to clean irq status\n"); +} + +static irqreturn_t mcr20a_irq_isr(int irq, void *data) +{ + struct mcr20a_local *lp = data; + int ret; + + disable_irq_nosync(irq); + + lp->irq_header[0] = MCR20A_READ_REG(DAR_IRQ_STS1); + /* read IRQSTSx */ + ret = spi_async(lp->spi, &lp->irq_msg); + if (ret) { + enable_irq(irq); + return IRQ_NONE; + } + + return IRQ_HANDLED; +} + +static int mcr20a_get_platform_data(struct spi_device *spi, + struct mcr20a_platform_data *pdata) +{ + int ret = 0; + + if (!spi->dev.of_node) + return -EINVAL; + + pdata->rst_gpio = of_get_named_gpio(spi->dev.of_node, "rst_b-gpio", 0); + dev_dbg(&spi->dev, "rst_b-gpio: %d\n", pdata->rst_gpio); + + return ret; +} + +static void mcr20a_hw_setup(struct mcr20a_local *lp) +{ + u8 i; + struct ieee802154_hw *hw = lp->hw; + struct wpan_phy *phy = lp->hw->phy; + + dev_dbg(printdev(lp), "%s\n", __func__); + + phy->symbol_duration = 16; + phy->lifs_period = 40; + phy->sifs_period = 12; + + hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | + IEEE802154_HW_AFILT | + IEEE802154_HW_PROMISCUOUS; + + phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | + WPAN_PHY_FLAG_CCA_MODE; + + phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) | + BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER); + phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) | + BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR); + + /* initialize cca_ed_levels */ + for (i = MCR20A_MAX_CCA_THRESHOLD; i < MCR20A_MIN_CCA_THRESHOLD + 1; + ++i) { + mcr20a_ed_levels[i] = -i * 100; + } + + phy->supported.cca_ed_levels = mcr20a_ed_levels; + phy->supported.cca_ed_levels_size = ARRAY_SIZE(mcr20a_ed_levels); + + phy->cca.mode = NL802154_CCA_ENERGY; + + phy->supported.channels[0] = MCR20A_VALID_CHANNELS; + phy->current_page = 0; + /* MCR20A default reset value */ + phy->current_channel = 20; + phy->symbol_duration = 16; + phy->supported.tx_powers = mcr20a_powers; + phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers); +
phy->cca_ed_level = phy->supported.cca_ed_levels[75]; + phy->transmit_power = phy->supported.tx_powers[0x0F]; +} + +static void +mcr20a_setup_tx_spi_messages(struct mcr20a_local *lp) +{ + spi_message_init(&lp->tx_buf_msg); + lp->tx_buf_msg.context = lp; + lp->tx_buf_msg.complete = mcr20a_write_tx_buf_complete; + + lp->tx_xfer_header.len = 1; + lp->tx_xfer_header.tx_buf = lp->tx_header; + + lp->tx_xfer_len.len = 1; + lp->tx_xfer_len.tx_buf = lp->tx_len; + + spi_message_add_tail(&lp->tx_xfer_header, &lp->tx_buf_msg); + spi_message_add_tail(&lp->tx_xfer_len, &lp->tx_buf_msg); + spi_message_add_tail(&lp->tx_xfer_buf, &lp->tx_buf_msg); +} + +static void +mcr20a_setup_rx_spi_messages(struct mcr20a_local *lp) +{ + spi_message_init(&lp->reg_msg); + lp->reg_msg.context = lp; + + lp->reg_xfer_cmd.len = 1; + lp->reg_xfer_cmd.tx_buf = lp->reg_cmd; + lp->reg_xfer_cmd.rx_buf = lp->reg_cmd; + + lp->reg_xfer_data.rx_buf = lp->reg_data; + lp->reg_xfer_data.tx_buf = lp->reg_data; + + spi_message_add_tail(&lp->reg_xfer_cmd, &lp->reg_msg); + spi_message_add_tail(&lp->reg_xfer_data, &lp->reg_msg); + + spi_message_init(&lp->rx_buf_msg); + lp->rx_buf_msg.context = lp; + lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete; + lp->rx_xfer_header.len = 1; + lp->rx_xfer_header.tx_buf = lp->rx_header; + lp->rx_xfer_header.rx_buf = lp->rx_header; + + lp->rx_xfer_buf.rx_buf = lp->rx_buf; + + lp->rx_xfer_lqi.len = 1; + lp->rx_xfer_lqi.rx_buf = lp->rx_lqi; + + spi_message_add_tail(&lp->rx_xfer_header, &lp->rx_buf_msg); + spi_message_add_tail(&lp->rx_xfer_buf, &lp->rx_buf_msg); + spi_message_add_tail(&lp->rx_xfer_lqi, &lp->rx_buf_msg); +} + +static void +mcr20a_setup_irq_spi_messages(struct mcr20a_local *lp) +{ + spi_message_init(&lp->irq_msg); + lp->irq_msg.context = lp; + lp->irq_msg.complete = mcr20a_irq_status_complete; + lp->irq_xfer_header.len = 1; + lp->irq_xfer_header.tx_buf = lp->irq_header; + lp->irq_xfer_header.rx_buf = lp->irq_header; + + lp->irq_xfer_data.len = MCR20A_IRQSTS_NUM; + lp->irq_xfer_data.rx_buf = lp->irq_data; + + spi_message_add_tail(&lp->irq_xfer_header, &lp->irq_msg); + spi_message_add_tail(&lp->irq_xfer_data, &lp->irq_msg); +} + +static int +mcr20a_phy_init(struct mcr20a_local *lp) +{ + u8 index; + unsigned int phy_reg = 0; + int ret; + + dev_dbg(printdev(lp), "%s\n", __func__); + + /* Disable Tristate on COCO MISO for SPI reads */ + ret = regmap_write(lp->regmap_iar, IAR_MISC_PAD_CTRL, 0x02); + if (ret) + goto err_ret; + + /* Clear all PP IRQ bits in IRQSTS1 to avoid unexpected interrupts + * immediately after init + */ + ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS1, 0xEF); + if (ret) + goto err_ret; + + /* Clear all PP IRQ bits in IRQSTS2 */ + ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS2, + DAR_IRQSTS2_ASM_IRQ | DAR_IRQSTS2_PB_ERR_IRQ | + DAR_IRQSTS2_WAKE_IRQ); + if (ret) + goto err_ret; + + /* Disable all timer interrupts */ + ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS3, 0xFF); + if (ret) + goto err_ret; + + /* PHY_CTRL1 : default HW settings + AUTOACK enabled */ + ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, + DAR_PHY_CTRL1_AUTOACK, DAR_PHY_CTRL1_AUTOACK); + + /* PHY_CTRL2 : disable all interrupts */ + ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL2, 0xFF); + if (ret) + goto err_ret; + + /* PHY_CTRL3 : disable all timers and remaining interrupts */ + ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL3, + DAR_PHY_CTRL3_ASM_MSK | DAR_PHY_CTRL3_PB_ERR_MSK | + DAR_PHY_CTRL3_WAKE_MSK); + if (ret) + goto err_ret; + + /* SRC_CTRL : enable Acknowledge Frame Pending and + 
* Source Address Matching Enable + */ + ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL, + DAR_SRC_CTRL_ACK_FRM_PND | + (DAR_SRC_CTRL_INDEX << DAR_SRC_CTRL_INDEX_SHIFT)); + if (ret) + goto err_ret; + + /* RX_FRAME_FILTER */ + /* FRM_VER[1:0] = b11. Accept FrameVersion 0 and 1 packets */ + ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER, + IAR_RX_FRAME_FLT_FRM_VER | + IAR_RX_FRAME_FLT_BEACON_FT | + IAR_RX_FRAME_FLT_DATA_FT | + IAR_RX_FRAME_FLT_CMD_FT); + if (ret) + goto err_ret; + + dev_info(printdev(lp), "MCR20A DAR overwrites version: 0x%02x\n", + MCR20A_OVERWRITE_VERSION); + + /* Overwrites direct registers */ + ret = regmap_write(lp->regmap_dar, DAR_OVERWRITE_VER, + MCR20A_OVERWRITE_VERSION); + if (ret) + goto err_ret; + + /* Overwrites indirect registers */ + ret = regmap_multi_reg_write(lp->regmap_iar, mar20a_iar_overwrites, + ARRAY_SIZE(mar20a_iar_overwrites)); + if (ret) + goto err_ret; + + /* Clear HW indirect queue */ + dev_dbg(printdev(lp), "clear HW indirect queue\n"); + for (index = 0; index < MCR20A_PHY_INDIRECT_QUEUE_SIZE; index++) { + phy_reg = (u8)(((index & DAR_SRC_CTRL_INDEX) << + DAR_SRC_CTRL_INDEX_SHIFT) + | (DAR_SRC_CTRL_SRCADDR_EN) + | (DAR_SRC_CTRL_INDEX_DISABLE)); + ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL, phy_reg); + if (ret) + goto err_ret; + phy_reg = 0; + } + + /* Assign HW Indirect hash table to PAN0 */ + ret = regmap_read(lp->regmap_iar, IAR_DUAL_PAN_CTRL, &phy_reg); + if (ret) + goto err_ret; + + /* Clear current lvl */ + phy_reg &= ~IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_MSK; + + /* Set new lvl */ + phy_reg |= MCR20A_PHY_INDIRECT_QUEUE_SIZE << + IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_SHIFT; + ret = regmap_write(lp->regmap_iar, IAR_DUAL_PAN_CTRL, phy_reg); + if (ret) + goto err_ret; + + /* Set CCA threshold to -75 dBm */ + ret = regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, 0x4B); + if (ret) + goto err_ret; + + /* Set prescaler to obtain 1 symbol (16us) timebase */ + ret = regmap_write(lp->regmap_iar, IAR_TMR_PRESCALE, 0x05); + if (ret) + goto err_ret; + + /* Enable autodoze mode.
*/ + ret = regmap_update_bits(lp->regmap_dar, DAR_PWR_MODES, + DAR_PWR_MODES_AUTODOZE, + DAR_PWR_MODES_AUTODOZE); + if (ret) + goto err_ret; + + /* Disable clk_out */ + ret = regmap_update_bits(lp->regmap_dar, DAR_CLK_OUT_CTRL, + DAR_CLK_OUT_CTRL_EN, 0x0); + if (ret) + goto err_ret; + + return 0; + +err_ret: + return ret; +} + +static int +mcr20a_probe(struct spi_device *spi) +{ + struct ieee802154_hw *hw; + struct mcr20a_local *lp; + struct mcr20a_platform_data *pdata; + int irq_type; + int ret = -ENOMEM; + + dev_dbg(&spi->dev, "%s\n", __func__); + + if (!spi->irq) { + dev_err(&spi->dev, "no IRQ specified\n"); + return -EINVAL; + } + + pdata = kmalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + /* set mcr20a platform data */ + ret = mcr20a_get_platform_data(spi, pdata); + if (ret < 0) { + dev_crit(&spi->dev, "mcr20a_get_platform_data failed.\n"); + return ret; + } + + /* init reset gpio */ + if (gpio_is_valid(pdata->rst_gpio)) { + ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio, + GPIOF_OUT_INIT_HIGH, "reset"); + if (ret) + return ret; + } + + /* reset mcr20a */ + if (gpio_is_valid(pdata->rst_gpio)) { + usleep_range(10, 20); + gpio_set_value_cansleep(pdata->rst_gpio, 0); + usleep_range(10, 20); + gpio_set_value_cansleep(pdata->rst_gpio, 1); + usleep_range(120, 240); + } + + /* allocate ieee802154_hw and private data */ + hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops); + if (!hw) { + dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n"); + return -ENOMEM; + } + + /* init mcr20a local data */ + lp = hw->priv; + lp->hw = hw; + lp->spi = spi; + lp->spi->dev.platform_data = pdata; + lp->pdata = pdata; + + /* init ieee802154_hw */ + hw->parent = &spi->dev; + ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); + + /* init buf */ + lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL); + + if (!lp->buf) + return -ENOMEM; + + mcr20a_setup_tx_spi_messages(lp); + mcr20a_setup_rx_spi_messages(lp); + mcr20a_setup_irq_spi_messages(lp); + + /* setup regmap */ + lp->regmap_dar = devm_regmap_init_spi(spi, &mcr20a_dar_regmap); + if (IS_ERR(lp->regmap_dar)) { + ret = PTR_ERR(lp->regmap_dar); + dev_err(&spi->dev, "Failed to allocate dar map: %d\n", + ret); + goto free_dev; + } + + lp->regmap_iar = devm_regmap_init_spi(spi, &mcr20a_iar_regmap); + if (IS_ERR(lp->regmap_iar)) { + ret = PTR_ERR(lp->regmap_iar); + dev_err(&spi->dev, "Failed to allocate iar map: %d\n", ret); + goto free_dev; + } + + mcr20a_hw_setup(lp); + + spi_set_drvdata(spi, lp); + + ret = mcr20a_phy_init(lp); + if (ret < 0) { + dev_crit(&spi->dev, "mcr20a_phy_init failed\n"); + goto free_dev; + } + + irq_type = irq_get_trigger_type(spi->irq); + if (!irq_type) + irq_type = IRQF_TRIGGER_FALLING; + + ret = devm_request_irq(&spi->dev, spi->irq, mcr20a_irq_isr, + irq_type, dev_name(&spi->dev), lp); + if (ret) { + dev_err(&spi->dev, "could not request_irq for mcr20a\n"); + ret = -ENODEV; + goto free_dev; + } + + /* disable_irq by default and wait for starting hardware */ + disable_irq(spi->irq); + + ret = ieee802154_register_hw(hw); + if (ret) { + dev_crit(&spi->dev, "ieee802154_register_hw failed\n"); + goto free_dev; + } + + return ret; + +free_dev: + ieee802154_free_hw(lp->hw); + + return ret; +} + +static int mcr20a_remove(struct spi_device *spi) +{ + struct mcr20a_local *lp = spi_get_drvdata(spi); + + dev_dbg(&spi->dev, "%s\n", __func__); + + ieee802154_unregister_hw(lp->hw); + ieee802154_free_hw(lp->hw); + + return 0; +} + +static const struct of_device_id mcr20a_of_match[] = { + { 
.compatible = "nxp,mcr20a", }, + { }, +}; +MODULE_DEVICE_TABLE(of, mcr20a_of_match); + +static const struct spi_device_id mcr20a_device_id[] = { + { .name = "mcr20a", }, + { }, +}; +MODULE_DEVICE_TABLE(spi, mcr20a_device_id); + +static struct spi_driver mcr20a_driver = { + .id_table = mcr20a_device_id, + .driver = { + .of_match_table = of_match_ptr(mcr20a_of_match), + .name = "mcr20a", + }, + .probe = mcr20a_probe, + .remove = mcr20a_remove, +}; + +module_spi_driver(mcr20a_driver); + +MODULE_DESCRIPTION("MCR20A Transceiver Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Xue Liu <liuxuenetmail@gmail>"); diff --git a/drivers/net/ieee802154/mcr20a.h b/drivers/net/ieee802154/mcr20a.h new file mode 100644 index 000000000000..6da4fd00b3c5 --- /dev/null +++ b/drivers/net/ieee802154/mcr20a.h @@ -0,0 +1,498 @@ +/* + * Driver for NXP MCR20A 802.15.4 Wireless-PAN Networking controller + * + * Copyright (C) 2018 Xue Liu <liuxuenetmail@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef _MCR20A_H +#define _MCR20A_H + +/* Direct Accress Register */ +#define DAR_IRQ_STS1 0x00 +#define DAR_IRQ_STS2 0x01 +#define DAR_IRQ_STS3 0x02 +#define DAR_PHY_CTRL1 0x03 +#define DAR_PHY_CTRL2 0x04 +#define DAR_PHY_CTRL3 0x05 +#define DAR_RX_FRM_LEN 0x06 +#define DAR_PHY_CTRL4 0x07 +#define DAR_SRC_CTRL 0x08 +#define DAR_SRC_ADDRS_SUM_LSB 0x09 +#define DAR_SRC_ADDRS_SUM_MSB 0x0A +#define DAR_CCA1_ED_FNL 0x0B +#define DAR_EVENT_TMR_LSB 0x0C +#define DAR_EVENT_TMR_MSB 0x0D +#define DAR_EVENT_TMR_USB 0x0E +#define DAR_TIMESTAMP_LSB 0x0F +#define DAR_TIMESTAMP_MSB 0x10 +#define DAR_TIMESTAMP_USB 0x11 +#define DAR_T3CMP_LSB 0x12 +#define DAR_T3CMP_MSB 0x13 +#define DAR_T3CMP_USB 0x14 +#define DAR_T2PRIMECMP_LSB 0x15 +#define DAR_T2PRIMECMP_MSB 0x16 +#define DAR_T1CMP_LSB 0x17 +#define DAR_T1CMP_MSB 0x18 +#define DAR_T1CMP_USB 0x19 +#define DAR_T2CMP_LSB 0x1A +#define DAR_T2CMP_MSB 0x1B +#define DAR_T2CMP_USB 0x1C +#define DAR_T4CMP_LSB 0x1D +#define DAR_T4CMP_MSB 0x1E +#define DAR_T4CMP_USB 0x1F +#define DAR_PLL_INT0 0x20 +#define DAR_PLL_FRAC0_LSB 0x21 +#define DAR_PLL_FRAC0_MSB 0x22 +#define DAR_PA_PWR 0x23 +#define DAR_SEQ_STATE 0x24 +#define DAR_LQI_VALUE 0x25 +#define DAR_RSSI_CCA_CONT 0x26 +/*------------------ 0x27 */ +#define DAR_ASM_CTRL1 0x28 +#define DAR_ASM_CTRL2 0x29 +#define DAR_ASM_DATA_0 0x2A +#define DAR_ASM_DATA_1 0x2B +#define DAR_ASM_DATA_2 0x2C +#define DAR_ASM_DATA_3 0x2D +#define DAR_ASM_DATA_4 0x2E +#define DAR_ASM_DATA_5 0x2F +#define DAR_ASM_DATA_6 0x30 +#define DAR_ASM_DATA_7 0x31 +#define DAR_ASM_DATA_8 0x32 +#define DAR_ASM_DATA_9 0x33 +#define DAR_ASM_DATA_A 0x34 +#define DAR_ASM_DATA_B 0x35 +#define DAR_ASM_DATA_C 0x36 +#define DAR_ASM_DATA_D 0x37 +#define DAR_ASM_DATA_E 0x38 +#define DAR_ASM_DATA_F 0x39 +/*----------------------- 0x3A */ +#define DAR_OVERWRITE_VER 0x3B +#define DAR_CLK_OUT_CTRL 0x3C +#define DAR_PWR_MODES 0x3D +#define IAR_INDEX 0x3E +#define IAR_DATA 0x3F + +/* Indirect Resgister Memory */ +#define IAR_PART_ID 0x00 +#define IAR_XTAL_TRIM 0x01 +#define IAR_PMC_LP_TRIM 0x02 +#define IAR_MACPANID0_LSB 0x03 +#define IAR_MACPANID0_MSB 0x04 +#define 
IAR_MACSHORTADDRS0_LSB 0x05 +#define IAR_MACSHORTADDRS0_MSB 0x06 +#define IAR_MACLONGADDRS0_0 0x07 +#define IAR_MACLONGADDRS0_8 0x08 +#define IAR_MACLONGADDRS0_16 0x09 +#define IAR_MACLONGADDRS0_24 0x0A +#define IAR_MACLONGADDRS0_32 0x0B +#define IAR_MACLONGADDRS0_40 0x0C +#define IAR_MACLONGADDRS0_48 0x0D +#define IAR_MACLONGADDRS0_56 0x0E +#define IAR_RX_FRAME_FILTER 0x0F +#define IAR_PLL_INT1 0x10 +#define IAR_PLL_FRAC1_LSB 0x11 +#define IAR_PLL_FRAC1_MSB 0x12 +#define IAR_MACPANID1_LSB 0x13 +#define IAR_MACPANID1_MSB 0x14 +#define IAR_MACSHORTADDRS1_LSB 0x15 +#define IAR_MACSHORTADDRS1_MSB 0x16 +#define IAR_MACLONGADDRS1_0 0x17 +#define IAR_MACLONGADDRS1_8 0x18 +#define IAR_MACLONGADDRS1_16 0x19 +#define IAR_MACLONGADDRS1_24 0x1A +#define IAR_MACLONGADDRS1_32 0x1B +#define IAR_MACLONGADDRS1_40 0x1C +#define IAR_MACLONGADDRS1_48 0x1D +#define IAR_MACLONGADDRS1_56 0x1E +#define IAR_DUAL_PAN_CTRL 0x1F +#define IAR_DUAL_PAN_DWELL 0x20 +#define IAR_DUAL_PAN_STS 0x21 +#define IAR_CCA1_THRESH 0x22 +#define IAR_CCA1_ED_OFFSET_COMP 0x23 +#define IAR_LQI_OFFSET_COMP 0x24 +#define IAR_CCA_CTRL 0x25 +#define IAR_CCA2_CORR_PEAKS 0x26 +#define IAR_CCA2_CORR_THRESH 0x27 +#define IAR_TMR_PRESCALE 0x28 +/*-------------------- 0x29 */ +#define IAR_GPIO_DATA 0x2A +#define IAR_GPIO_DIR 0x2B +#define IAR_GPIO_PUL_EN 0x2C +#define IAR_GPIO_PUL_SEL 0x2D +#define IAR_GPIO_DS 0x2E +/*------------------ 0x2F */ +#define IAR_ANT_PAD_CTRL 0x30 +#define IAR_MISC_PAD_CTRL 0x31 +#define IAR_BSM_CTRL 0x32 +/*------------------- 0x33 */ +#define IAR_RNG 0x34 +#define IAR_RX_BYTE_COUNT 0x35 +#define IAR_RX_WTR_MARK 0x36 +#define IAR_SOFT_RESET 0x37 +#define IAR_TXDELAY 0x38 +#define IAR_ACKDELAY 0x39 +#define IAR_SEQ_MGR_CTRL 0x3A +#define IAR_SEQ_MGR_STS 0x3B +#define IAR_SEQ_T_STS 0x3C +#define IAR_ABORT_STS 0x3D +#define IAR_CCCA_BUSY_CNT 0x3E +#define IAR_SRC_ADDR_CHECKSUM1 0x3F +#define IAR_SRC_ADDR_CHECKSUM2 0x40 +#define IAR_SRC_TBL_VALID1 0x41 +#define IAR_SRC_TBL_VALID2 0x42 +#define IAR_FILTERFAIL_CODE1 0x43 +#define IAR_FILTERFAIL_CODE2 0x44 +#define IAR_SLOT_PRELOAD 0x45 +/*-------------------- 0x46 */ +#define IAR_CORR_VT 0x47 +#define IAR_SYNC_CTRL 0x48 +#define IAR_PN_LSB_0 0x49 +#define IAR_PN_LSB_1 0x4A +#define IAR_PN_MSB_0 0x4B +#define IAR_PN_MSB_1 0x4C +#define IAR_CORR_NVAL 0x4D +#define IAR_TX_MODE_CTRL 0x4E +#define IAR_SNF_THR 0x4F +#define IAR_FAD_THR 0x50 +#define IAR_ANT_AGC_CTRL 0x51 +#define IAR_AGC_THR1 0x52 +#define IAR_AGC_THR2 0x53 +#define IAR_AGC_HYS 0x54 +#define IAR_AFC 0x55 +/*------------------- 0x56 */ +/*------------------- 0x57 */ +#define IAR_PHY_STS 0x58 +#define IAR_RX_MAX_CORR 0x59 +#define IAR_RX_MAX_PREAMBLE 0x5A +#define IAR_RSSI 0x5B +/*------------------- 0x5C */ +/*------------------- 0x5D */ +#define IAR_PLL_DIG_CTRL 0x5E +#define IAR_VCO_CAL 0x5F +#define IAR_VCO_BEST_DIFF 0x60 +#define IAR_VCO_BIAS 0x61 +#define IAR_KMOD_CTRL 0x62 +#define IAR_KMOD_CAL 0x63 +#define IAR_PA_CAL 0x64 +#define IAR_PA_PWRCAL 0x65 +#define IAR_ATT_RSSI1 0x66 +#define IAR_ATT_RSSI2 0x67 +#define IAR_RSSI_OFFSET 0x68 +#define IAR_RSSI_SLOPE 0x69 +#define IAR_RSSI_CAL1 0x6A +#define IAR_RSSI_CAL2 0x6B +/*------------------- 0x6C */ +/*------------------- 0x6D */ +#define IAR_XTAL_CTRL 0x6E +#define IAR_XTAL_COMP_MIN 0x6F +#define IAR_XTAL_COMP_MAX 0x70 +#define IAR_XTAL_GM 0x71 +/*------------------- 0x72 */ +/*------------------- 0x73 */ +#define IAR_LNA_TUNE 0x74 +#define IAR_LNA_AGCGAIN 0x75 +/*------------------- 0x76 */ +/*------------------- 0x77 */ +#define IAR_CHF_PMA_GAIN 0x78 
+#define IAR_CHF_IBUF 0x79 +#define IAR_CHF_QBUF 0x7A +#define IAR_CHF_IRIN 0x7B +#define IAR_CHF_QRIN 0x7C +#define IAR_CHF_IL 0x7D +#define IAR_CHF_QL 0x7E +#define IAR_CHF_CC1 0x7F +#define IAR_CHF_CCL 0x80 +#define IAR_CHF_CC2 0x81 +#define IAR_CHF_IROUT 0x82 +#define IAR_CHF_QROUT 0x83 +/*------------------- 0x84 */ +/*------------------- 0x85 */ +#define IAR_RSSI_CTRL 0x86 +/*------------------- 0x87 */ +/*------------------- 0x88 */ +#define IAR_PA_BIAS 0x89 +#define IAR_PA_TUNING 0x8A +/*------------------- 0x8B */ +/*------------------- 0x8C */ +#define IAR_PMC_HP_TRIM 0x8D +#define IAR_VREGA_TRIM 0x8E +/*------------------- 0x8F */ +/*------------------- 0x90 */ +#define IAR_VCO_CTRL1 0x91 +#define IAR_VCO_CTRL2 0x92 +/*------------------- 0x93 */ +/*------------------- 0x94 */ +#define IAR_ANA_SPARE_OUT1 0x95 +#define IAR_ANA_SPARE_OUT2 0x96 +#define IAR_ANA_SPARE_IN 0x97 +#define IAR_MISCELLANEOUS 0x98 +/*------------------- 0x99 */ +#define IAR_SEQ_MGR_OVRD0 0x9A +#define IAR_SEQ_MGR_OVRD1 0x9B +#define IAR_SEQ_MGR_OVRD2 0x9C +#define IAR_SEQ_MGR_OVRD3 0x9D +#define IAR_SEQ_MGR_OVRD4 0x9E +#define IAR_SEQ_MGR_OVRD5 0x9F +#define IAR_SEQ_MGR_OVRD6 0xA0 +#define IAR_SEQ_MGR_OVRD7 0xA1 +/*------------------- 0xA2 */ +#define IAR_TESTMODE_CTRL 0xA3 +#define IAR_DTM_CTRL1 0xA4 +#define IAR_DTM_CTRL2 0xA5 +#define IAR_ATM_CTRL1 0xA6 +#define IAR_ATM_CTRL2 0xA7 +#define IAR_ATM_CTRL3 0xA8 +/*------------------- 0xA9 */ +#define IAR_LIM_FE_TEST_CTRL 0xAA +#define IAR_CHF_TEST_CTRL 0xAB +#define IAR_VCO_TEST_CTRL 0xAC +#define IAR_PLL_TEST_CTRL 0xAD +#define IAR_PA_TEST_CTRL 0xAE +#define IAR_PMC_TEST_CTRL 0xAF +#define IAR_SCAN_DTM_PROTECT_1 0xFE +#define IAR_SCAN_DTM_PROTECT_0 0xFF + +/* IRQSTS1 bits */ +#define DAR_IRQSTS1_RX_FRM_PEND BIT(7) +#define DAR_IRQSTS1_PLL_UNLOCK_IRQ BIT(6) +#define DAR_IRQSTS1_FILTERFAIL_IRQ BIT(5) +#define DAR_IRQSTS1_RXWTRMRKIRQ BIT(4) +#define DAR_IRQSTS1_CCAIRQ BIT(3) +#define DAR_IRQSTS1_RXIRQ BIT(2) +#define DAR_IRQSTS1_TXIRQ BIT(1) +#define DAR_IRQSTS1_SEQIRQ BIT(0) + +/* IRQSTS2 bits */ +#define DAR_IRQSTS2_CRCVALID BIT(7) +#define DAR_IRQSTS2_CCA BIT(6) +#define DAR_IRQSTS2_SRCADDR BIT(5) +#define DAR_IRQSTS2_PI BIT(4) +#define DAR_IRQSTS2_TMRSTATUS BIT(3) +#define DAR_IRQSTS2_ASM_IRQ BIT(2) +#define DAR_IRQSTS2_PB_ERR_IRQ BIT(1) +#define DAR_IRQSTS2_WAKE_IRQ BIT(0) + +/* IRQSTS3 bits */ +#define DAR_IRQSTS3_TMR4MSK BIT(7) +#define DAR_IRQSTS3_TMR3MSK BIT(6) +#define DAR_IRQSTS3_TMR2MSK BIT(5) +#define DAR_IRQSTS3_TMR1MSK BIT(4) +#define DAR_IRQSTS3_TMR4IRQ BIT(3) +#define DAR_IRQSTS3_TMR3IRQ BIT(2) +#define DAR_IRQSTS3_TMR2IRQ BIT(1) +#define DAR_IRQSTS3_TMR1IRQ BIT(0) + +/* PHY_CTRL1 bits */ +#define DAR_PHY_CTRL1_TMRTRIGEN BIT(7) +#define DAR_PHY_CTRL1_SLOTTED BIT(6) +#define DAR_PHY_CTRL1_CCABFRTX BIT(5) +#define DAR_PHY_CTRL1_CCABFRTX_SHIFT 5 +#define DAR_PHY_CTRL1_RXACKRQD BIT(4) +#define DAR_PHY_CTRL1_AUTOACK BIT(3) +#define DAR_PHY_CTRL1_XCVSEQ_MASK 0x07 + +/* PHY_CTRL2 bits */ +#define DAR_PHY_CTRL2_CRC_MSK BIT(7) +#define DAR_PHY_CTRL2_PLL_UNLOCK_MSK BIT(6) +#define DAR_PHY_CTRL2_FILTERFAIL_MSK BIT(5) +#define DAR_PHY_CTRL2_RX_WMRK_MSK BIT(4) +#define DAR_PHY_CTRL2_CCAMSK BIT(3) +#define DAR_PHY_CTRL2_RXMSK BIT(2) +#define DAR_PHY_CTRL2_TXMSK BIT(1) +#define DAR_PHY_CTRL2_SEQMSK BIT(0) + +/* PHY_CTRL3 bits */ +#define DAR_PHY_CTRL3_TMR4CMP_EN BIT(7) +#define DAR_PHY_CTRL3_TMR3CMP_EN BIT(6) +#define DAR_PHY_CTRL3_TMR2CMP_EN BIT(5) +#define DAR_PHY_CTRL3_TMR1CMP_EN BIT(4) +#define DAR_PHY_CTRL3_ASM_MSK BIT(2) +#define 
DAR_PHY_CTRL3_PB_ERR_MSK BIT(1) +#define DAR_PHY_CTRL3_WAKE_MSK BIT(0) + +/* RX_FRM_LEN bits */ +#define DAR_RX_FRAME_LENGTH_MASK (0x7F) + +/* PHY_CTRL4 bits */ +#define DAR_PHY_CTRL4_TRCV_MSK BIT(7) +#define DAR_PHY_CTRL4_TC3TMOUT BIT(6) +#define DAR_PHY_CTRL4_PANCORDNTR0 BIT(5) +#define DAR_PHY_CTRL4_CCATYPE (3) +#define DAR_PHY_CTRL4_CCATYPE_SHIFT (3) +#define DAR_PHY_CTRL4_CCATYPE_MASK (0x18) +#define DAR_PHY_CTRL4_TMRLOAD BIT(2) +#define DAR_PHY_CTRL4_PROMISCUOUS BIT(1) +#define DAR_PHY_CTRL4_TC2PRIME_EN BIT(0) + +/* SRC_CTRL bits */ +#define DAR_SRC_CTRL_INDEX (0x0F) +#define DAR_SRC_CTRL_INDEX_SHIFT (4) +#define DAR_SRC_CTRL_ACK_FRM_PND BIT(3) +#define DAR_SRC_CTRL_SRCADDR_EN BIT(2) +#define DAR_SRC_CTRL_INDEX_EN BIT(1) +#define DAR_SRC_CTRL_INDEX_DISABLE BIT(0) + +/* DAR_ASM_CTRL1 bits */ +#define DAR_ASM_CTRL1_CLEAR BIT(7) +#define DAR_ASM_CTRL1_START BIT(6) +#define DAR_ASM_CTRL1_SELFTST BIT(5) +#define DAR_ASM_CTRL1_CTR BIT(4) +#define DAR_ASM_CTRL1_CBC BIT(3) +#define DAR_ASM_CTRL1_AES BIT(2) +#define DAR_ASM_CTRL1_LOAD_MAC BIT(1) + +/* DAR_ASM_CTRL2 bits */ +#define DAR_ASM_CTRL2_DATA_REG_TYPE_SEL (7) +#define DAR_ASM_CTRL2_DATA_REG_TYPE_SEL_SHIFT (5) +#define DAR_ASM_CTRL2_TSTPAS BIT(1) + +/* DAR_CLK_OUT_CTRL bits */ +#define DAR_CLK_OUT_CTRL_EXTEND BIT(7) +#define DAR_CLK_OUT_CTRL_HIZ BIT(6) +#define DAR_CLK_OUT_CTRL_SR BIT(5) +#define DAR_CLK_OUT_CTRL_DS BIT(4) +#define DAR_CLK_OUT_CTRL_EN BIT(3) +#define DAR_CLK_OUT_CTRL_DIV (7) + +/* DAR_PWR_MODES bits */ +#define DAR_PWR_MODES_XTAL_READY BIT(5) +#define DAR_PWR_MODES_XTALEN BIT(4) +#define DAR_PWR_MODES_ASM_CLK_EN BIT(3) +#define DAR_PWR_MODES_AUTODOZE BIT(1) +#define DAR_PWR_MODES_PMC_MODE BIT(0) + +/* RX_FRAME_FILTER bits */ +#define IAR_RX_FRAME_FLT_FRM_VER (0xC0) +#define IAR_RX_FRAME_FLT_FRM_VER_SHIFT (6) +#define IAR_RX_FRAME_FLT_ACTIVE_PROMISCUOUS BIT(5) +#define IAR_RX_FRAME_FLT_NS_FT BIT(4) +#define IAR_RX_FRAME_FLT_CMD_FT BIT(3) +#define IAR_RX_FRAME_FLT_ACK_FT BIT(2) +#define IAR_RX_FRAME_FLT_DATA_FT BIT(1) +#define IAR_RX_FRAME_FLT_BEACON_FT BIT(0) + +/* DUAL_PAN_CTRL bits */ +#define IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_MSK (0xF0) +#define IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_SHIFT (4) +#define IAR_DUAL_PAN_CTRL_CURRENT_NETWORK BIT(3) +#define IAR_DUAL_PAN_CTRL_PANCORDNTR1 BIT(2) +#define IAR_DUAL_PAN_CTRL_DUAL_PAN_AUTO BIT(1) +#define IAR_DUAL_PAN_CTRL_ACTIVE_NETWORK BIT(0) + +/* DUAL_PAN_STS bits */ +#define IAR_DUAL_PAN_STS_RECD_ON_PAN1 BIT(7) +#define IAR_DUAL_PAN_STS_RECD_ON_PAN0 BIT(6) +#define IAR_DUAL_PAN_STS_DUAL_PAN_REMAIN (0x3F) + +/* CCA_CTRL bits */ +#define IAR_CCA_CTRL_AGC_FRZ_EN BIT(6) +#define IAR_CCA_CTRL_CONT_RSSI_EN BIT(5) +#define IAR_CCA_CTRL_LQI_RSSI_NOT_CORR BIT(4) +#define IAR_CCA_CTRL_CCA3_AND_NOT_OR BIT(3) +#define IAR_CCA_CTRL_POWER_COMP_EN_LQI BIT(2) +#define IAR_CCA_CTRL_POWER_COMP_EN_ED BIT(1) +#define IAR_CCA_CTRL_POWER_COMP_EN_CCA1 BIT(0) + +/* ANT_PAD_CTRL bits */ +#define IAR_ANT_PAD_CTRL_ANTX_POL (0x0F) +#define IAR_ANT_PAD_CTRL_ANTX_POL_SHIFT (4) +#define IAR_ANT_PAD_CTRL_ANTX_CTRLMODE BIT(3) +#define IAR_ANT_PAD_CTRL_ANTX_HZ BIT(2) +#define IAR_ANT_PAD_CTRL_ANTX_EN (3) + +/* MISC_PAD_CTRL bits */ +#define IAR_MISC_PAD_CTRL_MISO_HIZ_EN BIT(3) +#define IAR_MISC_PAD_CTRL_IRQ_B_OD BIT(2) +#define IAR_MISC_PAD_CTRL_NON_GPIO_DS BIT(1) +#define IAR_MISC_PAD_CTRL_ANTX_CURR (1) + +/* ANT_AGC_CTRL bits */ +#define IAR_ANT_AGC_CTRL_FAD_EN_SHIFT (0) +#define IAR_ANT_AGC_CTRL_FAD_EN_MASK (1) +#define IAR_ANT_AGC_CTRL_ANTX_SHIFT (1) +#define IAR_ANT_AGC_CTRL_ANTX_MASK 
BIT(IAR_ANT_AGC_CTRL_ANTX_SHIFT) + +/* BSM_CTRL bits */ +#define BSM_CTRL_BSM_EN (1) + +/* SOFT_RESET bits */ +#define IAR_SOFT_RESET_SOG_RST BIT(7) +#define IAR_SOFT_RESET_REGS_RST BIT(4) +#define IAR_SOFT_RESET_PLL_RST BIT(3) +#define IAR_SOFT_RESET_TX_RST BIT(2) +#define IAR_SOFT_RESET_RX_RST BIT(1) +#define IAR_SOFT_RESET_SEQ_MGR_RST BIT(0) + +/* SEQ_MGR_CTRL bits */ +#define IAR_SEQ_MGR_CTRL_SEQ_STATE_CTRL (3) +#define IAR_SEQ_MGR_CTRL_SEQ_STATE_CTRL_SHIFT (6) +#define IAR_SEQ_MGR_CTRL_NO_RX_RECYCLE BIT(5) +#define IAR_SEQ_MGR_CTRL_LATCH_PREAMBLE BIT(4) +#define IAR_SEQ_MGR_CTRL_EVENT_TMR_DO_NOT_LATCH BIT(3) +#define IAR_SEQ_MGR_CTRL_CLR_NEW_SEQ_INHIBIT BIT(2) +#define IAR_SEQ_MGR_CTRL_PSM_LOCK_DIS BIT(1) +#define IAR_SEQ_MGR_CTRL_PLL_ABORT_OVRD BIT(0) + +/* SEQ_MGR_STS bits */ +#define IAR_SEQ_MGR_STS_TMR2_SEQ_TRIG_ARMED BIT(7) +#define IAR_SEQ_MGR_STS_RX_MODE BIT(6) +#define IAR_SEQ_MGR_STS_RX_TIMEOUT_PENDING BIT(5) +#define IAR_SEQ_MGR_STS_NEW_SEQ_INHIBIT BIT(4) +#define IAR_SEQ_MGR_STS_SEQ_IDLE BIT(3) +#define IAR_SEQ_MGR_STS_XCVSEQ_ACTUAL (7) + +/* ABORT_STS bits */ +#define IAR_ABORT_STS_PLL_ABORTED BIT(2) +#define IAR_ABORT_STS_TC3_ABORTED BIT(1) +#define IAR_ABORT_STS_SW_ABORTED BIT(0) + +/* IAR_FILTERFAIL_CODE2 bits */ +#define IAR_FILTERFAIL_CODE2_PAN_SEL BIT(7) +#define IAR_FILTERFAIL_CODE2_9_8 (3) + +/* PHY_STS bits */ +#define IAR_PHY_STS_PLL_UNLOCK BIT(7) +#define IAR_PHY_STS_PLL_LOCK_ERR BIT(6) +#define IAR_PHY_STS_PLL_LOCK BIT(5) +#define IAR_PHY_STS_CRCVALID BIT(3) +#define IAR_PHY_STS_FILTERFAIL_FLAG_SEL BIT(2) +#define IAR_PHY_STS_SFD_DET BIT(1) +#define IAR_PHY_STS_PREAMBLE_DET BIT(0) + +/* TESTMODE_CTRL bits */ +#define IAR_TEST_MODE_CTRL_HOT_ANT BIT(4) +#define IAR_TEST_MODE_CTRL_IDEAL_RSSI_EN BIT(3) +#define IAR_TEST_MODE_CTRL_IDEAL_PFC_EN BIT(2) +#define IAR_TEST_MODE_CTRL_CONTINUOUS_EN BIT(1) +#define IAR_TEST_MODE_CTRL_FPGA_EN BIT(0) + +/* DTM_CTRL1 bits */ +#define IAR_DTM_CTRL1_ATM_LOCKED BIT(7) +#define IAR_DTM_CTRL1_DTM_EN BIT(6) +#define IAR_DTM_CTRL1_PAGE5 BIT(5) +#define IAR_DTM_CTRL1_PAGE4 BIT(4) +#define IAR_DTM_CTRL1_PAGE3 BIT(3) +#define IAR_DTM_CTRL1_PAGE2 BIT(2) +#define IAR_DTM_CTRL1_PAGE1 BIT(1) +#define IAR_DTM_CTRL1_PAGE0 BIT(0) + +/* TX_MODE_CTRL */ +#define IAR_TX_MODE_CTRL_TX_INV BIT(4) +#define IAR_TX_MODE_CTRL_BT_EN BIT(3) +#define IAR_TX_MODE_CTRL_DTS2 BIT(2) +#define IAR_TX_MODE_CTRL_DTS1 BIT(1) +#define IAR_TX_MODE_CTRL_DTS0 BIT(0) + +#define TX_MODE_CTRL_DTS_MASK (7) + +#endif /* _MCR20A_H */ diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 0008da7e9d4c..5f2897ec0edc 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -330,6 +330,7 @@ static int __init ifb_init_module(void) { int i, err; + down_write(&pernet_ops_rwsem); rtnl_lock(); err = __rtnl_link_register(&ifb_link_ops); if (err < 0) @@ -344,6 +345,7 @@ static int __init ifb_init_module(void) out: rtnl_unlock(); + up_write(&pernet_ops_rwsem); return err; } diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 5166575a164d..adb826f55e60 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -74,6 +74,7 @@ struct ipvl_dev { DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); netdev_features_t sfeatures; u32 msg_enable; + spinlock_t addrs_lock; }; struct ipvl_addr { @@ -176,4 +177,10 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, void ipvlan_link_delete(struct net_device *dev, struct list_head *head); void ipvlan_link_setup(struct net_device *dev); int ipvlan_link_register(struct rtnl_link_ops *ops); +
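+/* A device is an ipvlan port exactly when ipvlan's rx_handler is attached + * to it, so the test below is a pointer comparison rather than a device + * flag. A minimal usage sketch, assuming the caller holds rcu_read_lock() + * and that the port was registered as the device's rx_handler_data (as is + * done when the handler is attached): + * + * if (netif_is_ipvlan_port(dev)) + * port = rcu_dereference(dev->rx_handler_data); + */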
+static inline bool netif_is_ipvlan_port(const struct net_device *dev) +{ + return rcu_access_pointer(dev->rx_handler) == ipvlan_handle_frame; +} + #endif /* __IPVLAN_H */ diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index c1f008fe4e1d..1a8132eb2a3e 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -35,6 +35,7 @@ void ipvlan_count_rx(const struct ipvl_dev *ipvlan, } EXPORT_SYMBOL_GPL(ipvlan_count_rx); +#if IS_ENABLED(CONFIG_IPV6) static u8 ipvlan_get_v6_hash(const void *iaddr) { const struct in6_addr *ip6_addr = iaddr; @@ -42,6 +43,12 @@ static u8 ipvlan_get_v6_hash(const void *iaddr) return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) & IPVLAN_HASH_MASK; } +#else +static u8 ipvlan_get_v6_hash(const void *iaddr) +{ + return 0; +} +#endif static u8 ipvlan_get_v4_hash(const void *iaddr) { @@ -51,6 +58,23 @@ static u8 ipvlan_get_v4_hash(const void *iaddr) IPVLAN_HASH_MASK; } +static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr) +{ + if (!is_v6 && addr->atype == IPVL_IPV4) { + struct in_addr *i4addr = (struct in_addr *)iaddr; + + return addr->ip4addr.s_addr == i4addr->s_addr; +#if IS_ENABLED(CONFIG_IPV6) + } else if (is_v6 && addr->atype == IPVL_IPV6) { + struct in6_addr *i6addr = (struct in6_addr *)iaddr; + + return ipv6_addr_equal(&addr->ip6addr, i6addr); +#endif + } + + return false; +} + static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, const void *iaddr, bool is_v6) { @@ -59,15 +83,9 @@ static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, hash = is_v6 ? ipvlan_get_v6_hash(iaddr) : ipvlan_get_v4_hash(iaddr); - hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) { - if (is_v6 && addr->atype == IPVL_IPV6 && - ipv6_addr_equal(&addr->ip6addr, iaddr)) - return addr; - else if (!is_v6 && addr->atype == IPVL_IPV4 && - addr->ip4addr.s_addr == - ((struct in_addr *)iaddr)->s_addr) + hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) + if (addr_equal(is_v6, addr, iaddr)) return addr; - } return NULL; } @@ -91,29 +109,33 @@ void ipvlan_ht_addr_del(struct ipvl_addr *addr) struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, const void *iaddr, bool is_v6) { - struct ipvl_addr *addr; + struct ipvl_addr *addr, *ret = NULL; - list_for_each_entry(addr, &ipvlan->addrs, anode) { - if ((is_v6 && addr->atype == IPVL_IPV6 && - ipv6_addr_equal(&addr->ip6addr, iaddr)) || - (!is_v6 && addr->atype == IPVL_IPV4 && - addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr)) - return addr; + rcu_read_lock(); + list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) { + if (addr_equal(is_v6, addr, iaddr)) { + ret = addr; + break; + } } - return NULL; + rcu_read_unlock(); + return ret; } bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6) { struct ipvl_dev *ipvlan; + bool ret = false; - ASSERT_RTNL(); - - list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) - return true; + rcu_read_lock(); + list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) { + if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) { + ret = true; + break; + } } - return false; + rcu_read_unlock(); + return ret; } static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type) @@ -150,6 +172,7 @@ static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int lyr3h = ip4h; break; } +#if IS_ENABLED(CONFIG_IPV6) case htons(ETH_P_IPV6): { struct ipv6hdr *ip6h; @@ -188,6 +211,7 @@ 
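ipvlan_ht_addr_lookup() above now walks the per-port hash bucket with a single addr_equal() test covering both address families. A stripped-down sketch of the same RCU hlist lookup, using hypothetical demo_* names:

#include <linux/rculist.h>
#include <linux/types.h>

struct demo_addr {
	struct hlist_node node;
	__be32 ip;
};

/* Caller must be inside rcu_read_lock(); writers update the bucket with
 * hlist_add_head_rcu()/hlist_del_init_rcu() under their own lock.
 */
static struct demo_addr *demo_lookup(struct hlist_head *bucket, __be32 ip)
{
	struct demo_addr *a;

	hlist_for_each_entry_rcu(a, bucket, node)
		if (a->ip == ip)
			return a;
	return NULL;
}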
static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int } break; } +#endif default: return NULL; } @@ -337,14 +361,18 @@ static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, { struct ipvl_addr *addr = NULL; - if (addr_type == IPVL_IPV6) { + switch (addr_type) { +#if IS_ENABLED(CONFIG_IPV6) + case IPVL_IPV6: { struct ipv6hdr *ip6h; struct in6_addr *i6addr; ip6h = (struct ipv6hdr *)lyr3h; i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr; addr = ipvlan_ht_addr_lookup(port, i6addr, true); - } else if (addr_type == IPVL_ICMPV6) { + break; + } + case IPVL_ICMPV6: { struct nd_msg *ndmh; struct in6_addr *i6addr; @@ -356,14 +384,19 @@ static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, i6addr = &ndmh->target; addr = ipvlan_ht_addr_lookup(port, i6addr, true); } - } else if (addr_type == IPVL_IPV4) { + break; + } +#endif + case IPVL_IPV4: { struct iphdr *ip4h; __be32 *i4addr; ip4h = (struct iphdr *)lyr3h; i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr; addr = ipvlan_ht_addr_lookup(port, i4addr, false); - } else if (addr_type == IPVL_ARP) { + break; + } + case IPVL_ARP: { struct arphdr *arph; unsigned char *arp_ptr; __be32 dip; @@ -377,6 +410,8 @@ static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, memcpy(&dip, arp_ptr, 4); addr = ipvlan_ht_addr_lookup(port, &dip, false); + break; + } } return addr; @@ -420,6 +455,7 @@ out: return ret; } +#if IS_ENABLED(CONFIG_IPV6) static int ipvlan_process_v6_outbound(struct sk_buff *skb) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); @@ -456,6 +492,12 @@ err: out: return ret; } +#else +static int ipvlan_process_v6_outbound(struct sk_buff *skb) +{ + return NET_XMIT_DROP; +} +#endif static int ipvlan_process_outbound(struct sk_buff *skb) { @@ -464,8 +506,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb) /* In this mode we dont care about multicast and broadcast traffic */ if (is_multicast_ether_addr(ethh->h_dest)) { - pr_warn_ratelimited("Dropped {multi|broad}cast of type= [%x]\n", - ntohs(skb->protocol)); + pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n", + ntohs(skb->protocol)); kfree_skb(skb); goto out; } @@ -759,6 +801,7 @@ struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb, goto out; break; } +#if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { struct dst_entry *dst; @@ -774,10 +817,12 @@ struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb, }; skb_dst_drop(skb); - dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, flags); + dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, + skb, flags); skb_dst_set(skb, dst); break; } +#endif default: break; } diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 2469df118fbf..450eec264a5e 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -22,12 +22,14 @@ static const struct nf_hook_ops ipvl_nfops[] = { .hooknum = NF_INET_LOCAL_IN, .priority = INT_MAX, }, +#if IS_ENABLED(CONFIG_IPV6) { .hook = ipvlan_nf_input, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_IN, .priority = INT_MAX, }, +#endif }; static const struct l3mdev_ops ipvl_l3mdev_ops = { @@ -127,7 +129,6 @@ static int ipvlan_port_create(struct net_device *dev) if (err) goto err; - dev->priv_flags |= IFF_IPVLAN_MASTER; return 0; err: @@ -140,7 +141,6 @@ static void ipvlan_port_destroy(struct net_device *dev) struct ipvl_port *port = ipvlan_port_get_rtnl(dev); struct sk_buff *skb; - dev->priv_flags &= ~IFF_IPVLAN_MASTER; if (port->mode == IPVLAN_MODE_L3S) { 
dev->priv_flags &= ~IFF_L3MDEV_MASTER; ipvlan_unregister_nf_hook(dev_net(dev)); @@ -176,7 +176,7 @@ static int ipvlan_init(struct net_device *dev) dev->state = (dev->state & ~IPVLAN_STATE_MASK) | (phy_dev->state & IPVLAN_STATE_MASK); dev->features = phy_dev->features & IPVLAN_FEATURES; - dev->features |= NETIF_F_LLTX; + dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED; dev->gso_max_size = phy_dev->gso_max_size; dev->gso_max_segs = phy_dev->gso_max_segs; dev->hard_header_len = phy_dev->hard_header_len; @@ -225,8 +225,10 @@ static int ipvlan_open(struct net_device *dev) else dev->flags &= ~IFF_NOARP; - list_for_each_entry(addr, &ipvlan->addrs, anode) + rcu_read_lock(); + list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) ipvlan_ht_addr_add(ipvlan, addr); + rcu_read_unlock(); return dev_uc_add(phy_dev, phy_dev->dev_addr); } @@ -242,8 +244,10 @@ static int ipvlan_stop(struct net_device *dev) dev_uc_del(phy_dev, phy_dev->dev_addr); - list_for_each_entry(addr, &ipvlan->addrs, anode) + rcu_read_lock(); + list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) ipvlan_ht_addr_del(addr); + rcu_read_unlock(); return 0; } @@ -417,6 +421,12 @@ static const struct header_ops ipvlan_header_ops = { .cache_update = eth_header_cache_update, }; +static bool netif_is_ipvlan(const struct net_device *dev) +{ + /* both ipvlan and ipvtap devices use the same netdev_ops */ + return dev->netdev_ops == &ipvlan_netdev_ops; +} + static int ipvlan_ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { @@ -586,6 +596,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, ipvlan->sfeatures = IPVLAN_FEATURES; ipvlan_adjust_mtu(ipvlan, phy_dev); INIT_LIST_HEAD(&ipvlan->addrs); + spin_lock_init(&ipvlan->addrs_lock); /* TODO Probably put random address here to be presented to the * world but keep using the physical-dev address for the outgoing @@ -593,7 +604,7 @@ */ memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN); - dev->priv_flags |= IFF_IPVLAN_SLAVE; + dev->priv_flags |= IFF_NO_RX_HANDLER; err = register_netdevice(dev); if (err < 0) @@ -663,11 +674,13 @@ void ipvlan_link_delete(struct net_device *dev, struct list_head *head) struct ipvl_dev *ipvlan = netdev_priv(dev); struct ipvl_addr *addr, *next; + spin_lock_bh(&ipvlan->addrs_lock); list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { ipvlan_ht_addr_del(addr); - list_del(&addr->anode); + list_del_rcu(&addr->anode); kfree_rcu(addr, rcu); } + spin_unlock_bh(&ipvlan->addrs_lock); ida_simple_remove(&ipvlan->port->ida, dev->dev_id); list_del_rcu(&ipvlan->pnode); @@ -758,8 +771,7 @@ static int ipvlan_device_event(struct notifier_block *unused, if (dev->reg_state != NETREG_UNREGISTERING) break; - list_for_each_entry_safe(ipvlan, next, &port->ipvlans, - pnode) + list_for_each_entry_safe(ipvlan, next, &port->ipvlans, pnode) ipvlan->dev->rtnl_link_ops->dellink(ipvlan->dev, &lst_kill); unregister_netdevice_many(&lst_kill); @@ -791,6 +803,7 @@ static int ipvlan_device_event(struct notifier_block *unused, return NOTIFY_DONE; } +/* the caller must hold the addrs lock */ static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) { struct ipvl_addr *addr; @@ -800,14 +813,17 @@ static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) return -ENOMEM; addr->master = ipvlan; - if (is_v6) { - memcpy(&addr->ip6addr, iaddr, sizeof(struct in6_addr)); - addr->atype = IPVL_IPV6; - } else { + if (!is_v6) { + memcpy(&addr->ip4addr,
iaddr, sizeof(struct in_addr)); addr->atype = IPVL_IPV4; +#if IS_ENABLED(CONFIG_IPV6) + } else { + memcpy(&addr->ip6addr, iaddr, sizeof(struct in6_addr)); + addr->atype = IPVL_IPV6; +#endif } - list_add_tail(&addr->anode, &ipvlan->addrs); + + list_add_tail_rcu(&addr->anode, &ipvlan->addrs); /* If the interface is not up, the address will be added to the hash * list by ipvlan_open. @@ -822,32 +838,17 @@ static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) { struct ipvl_addr *addr; + spin_lock_bh(&ipvlan->addrs_lock); addr = ipvlan_find_addr(ipvlan, iaddr, is_v6); - if (!addr) + if (!addr) { + spin_unlock_bh(&ipvlan->addrs_lock); return; + } ipvlan_ht_addr_del(addr); - list_del(&addr->anode); + list_del_rcu(&addr->anode); + spin_unlock_bh(&ipvlan->addrs_lock); kfree_rcu(addr, rcu); - - return; -} - -static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) -{ - if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) { - netif_err(ipvlan, ifup, ipvlan->dev, - "Failed to add IPv6=%pI6c addr for %s intf\n", - ip6_addr, ipvlan->dev->name); - return -EINVAL; - } - - return ipvlan_add_addr(ipvlan, ip6_addr, true); -} - -static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) -{ - return ipvlan_del_addr(ipvlan, ip6_addr, true); } static bool ipvlan_is_valid_dev(const struct net_device *dev) @@ -863,6 +864,27 @@ static bool ipvlan_is_valid_dev(const struct net_device *dev) return true; } +#if IS_ENABLED(CONFIG_IPV6) +static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +{ + int ret = -EINVAL; + + spin_lock_bh(&ipvlan->addrs_lock); + if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) + netif_err(ipvlan, ifup, ipvlan->dev, + "Failed to add IPv6=%pI6c addr for %s intf\n", + ip6_addr, ipvlan->dev->name); + else + ret = ipvlan_add_addr(ipvlan, ip6_addr, true); + spin_unlock_bh(&ipvlan->addrs_lock); + return ret; +} + +static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +{ + return ipvlan_del_addr(ipvlan, ip6_addr, true); +} + static int ipvlan_addr6_event(struct notifier_block *unused, unsigned long event, void *ptr) { @@ -894,10 +916,6 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused, struct net_device *dev = (struct net_device *)i6vi->i6vi_dev->dev; struct ipvl_dev *ipvlan = netdev_priv(dev); - /* FIXME IPv6 autoconf calls us from bh without RTNL */ - if (in_softirq()) - return NOTIFY_DONE; - if (!ipvlan_is_valid_dev(dev)) return NOTIFY_DONE; @@ -913,17 +931,21 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused, return NOTIFY_OK; } +#endif static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) { - if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) { + int ret = -EINVAL; + + spin_lock_bh(&ipvlan->addrs_lock); + if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) netif_err(ipvlan, ifup, ipvlan->dev, "Failed to add IPv4=%pI4 on %s intf.\n", ip4_addr, ipvlan->dev->name); - return -EINVAL; - } - - return ipvlan_add_addr(ipvlan, ip4_addr, false); + else + ret = ipvlan_add_addr(ipvlan, ip4_addr, false); + spin_unlock_bh(&ipvlan->addrs_lock); + return ret; } static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) @@ -993,6 +1015,7 @@ static struct notifier_block ipvlan_notifier_block __read_mostly = { .notifier_call = ipvlan_device_event, }; +#if IS_ENABLED(CONFIG_IPV6) static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = { .notifier_call = ipvlan_addr6_event, }; @@ 
-1000,6 +1023,7 @@ static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = { static struct notifier_block ipvlan_addr6_vtor_notifier_block __read_mostly = { .notifier_call = ipvlan_addr6_validator_event, }; +#endif static void ipvlan_ns_exit(struct net *net) { @@ -1024,9 +1048,11 @@ static int __init ipvlan_init_module(void) ipvlan_init_secret(); register_netdevice_notifier(&ipvlan_notifier_block); +#if IS_ENABLED(CONFIG_IPV6) register_inet6addr_notifier(&ipvlan_addr6_notifier_block); register_inet6addr_validator_notifier( &ipvlan_addr6_vtor_notifier_block); +#endif register_inetaddr_notifier(&ipvlan_addr4_notifier_block); register_inetaddr_validator_notifier(&ipvlan_addr4_vtor_notifier_block); @@ -1045,9 +1071,11 @@ error: unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block); unregister_inetaddr_validator_notifier( &ipvlan_addr4_vtor_notifier_block); +#if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block); unregister_inet6addr_validator_notifier( &ipvlan_addr6_vtor_notifier_block); +#endif unregister_netdevice_notifier(&ipvlan_notifier_block); return err; } @@ -1060,9 +1088,11 @@ static void __exit ipvlan_cleanup_module(void) unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block); unregister_inetaddr_validator_notifier( &ipvlan_addr4_vtor_notifier_block); +#if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block); unregister_inet6addr_validator_notifier( &ipvlan_addr6_vtor_notifier_block); +#endif } module_init(ipvlan_init_module); diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 7de88b33d5b9..9cbb0c8a896a 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev, err = netdev_upper_dev_link(real_dev, dev, extack); if (err < 0) - goto unregister; + goto put_dev; /* need to be already registered so that ->init has run and * the MAC addr is set @@ -3316,7 +3316,8 @@ del_dev: macsec_del_dev(macsec); unlink: netdev_upper_dev_unlink(real_dev, dev); -unregister: +put_dev: + dev_put(real_dev); unregister_netdevice(dev); return err; } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a0f2be81d52e..725f4b4afc6d 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1036,7 +1036,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, lowerdev_features &= (features | ~NETIF_F_LRO); features = netdev_increment_features(lowerdev_features, features, mask); features |= ALWAYS_ON_FEATURES; - features &= ~NETIF_F_NETNS_LOCAL; + features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES); return features; } @@ -1451,7 +1451,7 @@ destroy_macvlan_port: /* the macvlan port may be freed by macvlan_uninit when fail to register. * so we destroy the macvlan port only when it's valid. 
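The macsec_newlink() fix above adds a put_dev unwind label so the reference held on real_dev is dropped on every failure path; kernel code conventionally unwinds in reverse acquisition order through stacked labels. A generic sketch of the idiom, all names hypothetical:

/* assumed helpers; each release_*() undoes the matching acquire_*() */
int acquire_a(void);
int acquire_b(void);
int acquire_c(void);
void release_a(void);
void release_b(void);

static int demo_setup(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto err_release_a;

	err = acquire_c();
	if (err)
		goto err_release_b;

	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();
	return err;
}

Each new acquisition adds one label, so a late failure (like the upper-dev link in macsec_newlink()) unwinds everything acquired before it.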
*/ - if (create && macvlan_port_get_rtnl(dev)) + if (create && macvlan_port_get_rtnl(lowerdev)) macvlan_port_destroy(port->dev); return err; } diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile index 09388c06171d..449b2a1a1800 100644 --- a/drivers/net/netdevsim/Makefile +++ b/drivers/net/netdevsim/Makefile @@ -9,3 +9,7 @@ ifeq ($(CONFIG_BPF_SYSCALL),y) netdevsim-objs += \ bpf.o endif + +ifneq ($(CONFIG_NET_DEVLINK),) +netdevsim-objs += devlink.o fib.o +endif diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c new file mode 100644 index 000000000000..1dba47936456 --- /dev/null +++ b/drivers/net/netdevsim/devlink.c @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2018 Cumulus Networks. All rights reserved. + * Copyright (c) 2018 David Ahern <dsa@cumulusnetworks.com> + * + * This software is licensed under the GNU General Public License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree. + * + * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" + * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE + * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME + * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + */ + +#include <linux/device.h> +#include <net/devlink.h> +#include <net/netns/generic.h> + +#include "netdevsim.h" + +static unsigned int nsim_devlink_id; + +/* placeholder until devlink and namespaces are sorted out */ +static struct net *nsim_devlink_net(struct devlink *devlink) +{ + return &init_net; +} + +/* IPv4 + */ +static u64 nsim_ipv4_fib_resource_occ_get(struct devlink *devlink) +{ + struct net *net = nsim_devlink_net(devlink); + + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false); +} + +static struct devlink_resource_ops nsim_ipv4_fib_res_ops = { + .occ_get = nsim_ipv4_fib_resource_occ_get, +}; + +static u64 nsim_ipv4_fib_rules_res_occ_get(struct devlink *devlink) +{ + struct net *net = nsim_devlink_net(devlink); + + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false); +} + +static struct devlink_resource_ops nsim_ipv4_fib_rules_res_ops = { + .occ_get = nsim_ipv4_fib_rules_res_occ_get, +}; + +/* IPv6 + */ +static u64 nsim_ipv6_fib_resource_occ_get(struct devlink *devlink) +{ + struct net *net = nsim_devlink_net(devlink); + + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false); +} + +static struct devlink_resource_ops nsim_ipv6_fib_res_ops = { + .occ_get = nsim_ipv6_fib_resource_occ_get, +}; + +static u64 nsim_ipv6_fib_rules_res_occ_get(struct devlink *devlink) +{ + struct net *net = nsim_devlink_net(devlink); + + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false); +} + +static struct devlink_resource_ops nsim_ipv6_fib_rules_res_ops = { + .occ_get = nsim_ipv6_fib_rules_res_occ_get, +}; + +static int devlink_resources_register(struct devlink *devlink) +{ + struct devlink_resource_size_params params = { + .size_max = (u64)-1, + .size_granularity = 1, + .unit = DEVLINK_RESOURCE_UNIT_ENTRY + }; + struct net *net = nsim_devlink_net(devlink); + int err; + u64 n; + + /* Resources for IPv4 */ + err = devlink_resource_register(devlink, "IPv4", (u64)-1, + NSIM_RESOURCE_IPV4, + DEVLINK_RESOURCE_ID_PARENT_TOP, + &params, NULL); + if (err) { + pr_err("Failed to register IPv4 top resource\n"); + goto out; + } + + n =
nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true); + err = devlink_resource_register(devlink, "fib", n, + NSIM_RESOURCE_IPV4_FIB, + NSIM_RESOURCE_IPV4, + &params, &nsim_ipv4_fib_res_ops); + if (err) { + pr_err("Failed to register IPv4 FIB resource\n"); + return err; + } + + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true); + err = devlink_resource_register(devlink, "fib-rules", n, + NSIM_RESOURCE_IPV4_FIB_RULES, + NSIM_RESOURCE_IPV4, + &params, &nsim_ipv4_fib_rules_res_ops); + if (err) { + pr_err("Failed to register IPv4 FIB rules resource\n"); + return err; + } + + /* Resources for IPv6 */ + err = devlink_resource_register(devlink, "IPv6", (u64)-1, + NSIM_RESOURCE_IPV6, + DEVLINK_RESOURCE_ID_PARENT_TOP, + &params, NULL); + if (err) { + pr_err("Failed to register IPv6 top resource\n"); + goto out; + } + + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true); + err = devlink_resource_register(devlink, "fib", n, + NSIM_RESOURCE_IPV6_FIB, + NSIM_RESOURCE_IPV6, + &params, &nsim_ipv6_fib_res_ops); + if (err) { + pr_err("Failed to register IPv6 FIB resource\n"); + return err; + } + + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true); + err = devlink_resource_register(devlink, "fib-rules", n, + NSIM_RESOURCE_IPV6_FIB_RULES, + NSIM_RESOURCE_IPV6, + &params, &nsim_ipv6_fib_rules_res_ops); + if (err) { + pr_err("Failed to register IPv6 FIB rules resource\n"); + return err; + } +out: + return err; +} + +static int nsim_devlink_reload(struct devlink *devlink) +{ + enum nsim_resource_id res_ids[] = { + NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, + NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES + }; + struct net *net = nsim_devlink_net(devlink); + int i; + + for (i = 0; i < ARRAY_SIZE(res_ids); ++i) { + int err; + u64 val; + + err = devlink_resource_size_get(devlink, res_ids[i], &val); + if (!err) { + err = nsim_fib_set_max(net, res_ids[i], val); + if (err) + return err; + } + } + + return 0; +} + +static void nsim_devlink_net_reset(struct net *net) +{ + enum nsim_resource_id res_ids[] = { + NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, + NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES + }; + int i; + + for (i = 0; i < ARRAY_SIZE(res_ids); ++i) { + if (nsim_fib_set_max(net, res_ids[i], (u64)-1)) { + pr_err("Failed to reset limit for resource %u\n", + res_ids[i]); + } + } +} + +static const struct devlink_ops nsim_devlink_ops = { + .reload = nsim_devlink_reload, +}; + +/* once devlink / namespace issues are sorted out + * this needs to be the net in which a devlink instance + * is to be created.
e.g., dev_net(ns->netdev) + */ +static struct net *nsim_to_net(struct netdevsim *ns) +{ + return &init_net; +} + +void nsim_devlink_teardown(struct netdevsim *ns) +{ + if (ns->devlink) { + struct net *net = nsim_to_net(ns); + bool *reg_devlink = net_generic(net, nsim_devlink_id); + + devlink_unregister(ns->devlink); + devlink_free(ns->devlink); + ns->devlink = NULL; + + nsim_devlink_net_reset(net); + *reg_devlink = true; + } +} + +int nsim_devlink_setup(struct netdevsim *ns) +{ + struct net *net = nsim_to_net(ns); + bool *reg_devlink = net_generic(net, nsim_devlink_id); + struct devlink *devlink; + int err; + + /* only one device per namespace controls devlink */ + if (!*reg_devlink) { + ns->devlink = NULL; + return 0; + } + + devlink = devlink_alloc(&nsim_devlink_ops, 0); + if (!devlink) + return -ENOMEM; + + err = devlink_register(devlink, &ns->dev); + if (err) + goto err_devlink_free; + + err = devlink_resources_register(devlink); + if (err) + goto err_dl_unregister; + + ns->devlink = devlink; + + *reg_devlink = false; + + return 0; + +err_dl_unregister: + devlink_unregister(devlink); +err_devlink_free: + devlink_free(devlink); + + return err; +} + +/* Initialize per network namespace state */ +static int __net_init nsim_devlink_netns_init(struct net *net) +{ + bool *reg_devlink = net_generic(net, nsim_devlink_id); + + *reg_devlink = true; + + return 0; +} + +static struct pernet_operations nsim_devlink_net_ops = { + .init = nsim_devlink_netns_init, + .id = &nsim_devlink_id, + .size = sizeof(bool), +}; + +void nsim_devlink_exit(void) +{ + unregister_pernet_subsys(&nsim_devlink_net_ops); + nsim_fib_exit(); +} + +int nsim_devlink_init(void) +{ + int err; + + err = nsim_fib_init(); + if (err) + goto err_out; + + err = register_pernet_subsys(&nsim_devlink_net_ops); + if (err) + nsim_fib_exit(); + +err_out: + return err; +} diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c new file mode 100644 index 000000000000..9bfe9e151e13 --- /dev/null +++ b/drivers/net/netdevsim/fib.c @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2018 Cumulus Networks. All rights reserved. + * Copyright (c) 2018 David Ahern <dsa@cumulusnetworks.com> + * + * This software is licensed under the GNU General Public License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree. + * + * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" + * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE + * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME + * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
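Both new netdevsim files keep their per-namespace state in the net_generic() area: a pernet_operations with non-NULL .id and .size makes the core allocate that many bytes in every struct net and fill in the id used for lookups. A condensed sketch of the pattern, with hypothetical demo_* names:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

static unsigned int demo_net_id;

struct demo_net {
	u64 counter;
};

static int __net_init demo_net_init(struct net *net)
{
	/* storage was preallocated to .size bytes for this netns */
	struct demo_net *dn = net_generic(net, demo_net_id);

	dn->counter = 0;
	return 0;
}

static struct pernet_operations demo_net_ops = {
	.init = demo_net_init,
	.id   = &demo_net_id,
	.size = sizeof(struct demo_net),
};

/* register once, e.g. from module init:
 *	err = register_pernet_subsys(&demo_net_ops);
 * then any code holding a valid net can do:
 *	struct demo_net *dn = net_generic(net, demo_net_id);
 */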
+ */ + +#include <net/fib_notifier.h> +#include <net/ip_fib.h> +#include <net/ip6_fib.h> +#include <net/fib_rules.h> +#include <net/netns/generic.h> + +#include "netdevsim.h" + +struct nsim_fib_entry { + u64 max; + u64 num; +}; + +struct nsim_per_fib_data { + struct nsim_fib_entry fib; + struct nsim_fib_entry rules; +}; + +struct nsim_fib_data { + struct nsim_per_fib_data ipv4; + struct nsim_per_fib_data ipv6; +}; + +static unsigned int nsim_fib_net_id; + +u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max) +{ + struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id); + struct nsim_fib_entry *entry; + + switch (res_id) { + case NSIM_RESOURCE_IPV4_FIB: + entry = &fib_data->ipv4.fib; + break; + case NSIM_RESOURCE_IPV4_FIB_RULES: + entry = &fib_data->ipv4.rules; + break; + case NSIM_RESOURCE_IPV6_FIB: + entry = &fib_data->ipv6.fib; + break; + case NSIM_RESOURCE_IPV6_FIB_RULES: + entry = &fib_data->ipv6.rules; + break; + default: + return 0; + } + + return max ? entry->max : entry->num; +} + +int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val) +{ + struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id); + struct nsim_fib_entry *entry; + int err = 0; + + switch (res_id) { + case NSIM_RESOURCE_IPV4_FIB: + entry = &fib_data->ipv4.fib; + break; + case NSIM_RESOURCE_IPV4_FIB_RULES: + entry = &fib_data->ipv4.rules; + break; + case NSIM_RESOURCE_IPV6_FIB: + entry = &fib_data->ipv6.fib; + break; + case NSIM_RESOURCE_IPV6_FIB_RULES: + entry = &fib_data->ipv6.rules; + break; + default: + return 0; + } + + /* not allowing a new max to be less than current occupancy + * --> no means of evicting entries + */ + if (val < entry->num) + err = -EINVAL; + else + entry->max = val; + + return err; +} + +static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add, + struct netlink_ext_ack *extack) +{ + int err = 0; + + if (add) { + if (entry->num < entry->max) { + entry->num++; + } else { + err = -ENOSPC; + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib rule entries"); + } + } else { + entry->num--; + } + + return err; +} + +static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add) +{ + struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id); + struct netlink_ext_ack *extack = info->extack; + int err = 0; + + switch (info->family) { + case AF_INET: + err = nsim_fib_rule_account(&data->ipv4.rules, add, extack); + break; + case AF_INET6: + err = nsim_fib_rule_account(&data->ipv6.rules, add, extack); + break; + } + + return err; +} + +static int nsim_fib_account(struct nsim_fib_entry *entry, bool add, + struct netlink_ext_ack *extack) +{ + int err = 0; + + if (add) { + if (entry->num < entry->max) { + entry->num++; + } else { + err = -ENOSPC; + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib entries"); + } + } else { + entry->num--; + } + + return err; +} + +static int nsim_fib_event(struct fib_notifier_info *info, bool add) +{ + struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id); + struct netlink_ext_ack *extack = info->extack; + int err = 0; + + switch (info->family) { + case AF_INET: + err = nsim_fib_account(&data->ipv4.fib, add, extack); + break; + case AF_INET6: + err = nsim_fib_account(&data->ipv6.fib, add, extack); + break; + } + + return err; +} + +static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct fib_notifier_info *info = ptr; + int err = 0; + + switch (event) { + case FIB_EVENT_RULE_ADD: /* fall
through */ + case FIB_EVENT_RULE_DEL: + err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD); + break; + + case FIB_EVENT_ENTRY_ADD: /* fall through */ + case FIB_EVENT_ENTRY_DEL: + err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD); + break; + } + + return notifier_from_errno(err); +} + +/* inconsistent dump, trying again */ +static void nsim_fib_dump_inconsistent(struct notifier_block *nb) +{ + struct nsim_fib_data *data; + struct net *net; + + rcu_read_lock(); + for_each_net_rcu(net) { + data = net_generic(net, nsim_fib_net_id); + + data->ipv4.fib.num = 0ULL; + data->ipv4.rules.num = 0ULL; + + data->ipv6.fib.num = 0ULL; + data->ipv6.rules.num = 0ULL; + } + rcu_read_unlock(); +} + +static struct notifier_block nsim_fib_nb = { + .notifier_call = nsim_fib_event_nb, +}; + +/* Initialize per network namespace state */ +static int __net_init nsim_fib_netns_init(struct net *net) +{ + struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id); + + data->ipv4.fib.max = (u64)-1; + data->ipv4.rules.max = (u64)-1; + + data->ipv6.fib.max = (u64)-1; + data->ipv6.rules.max = (u64)-1; + + return 0; +} + +static struct pernet_operations nsim_fib_net_ops = { + .init = nsim_fib_netns_init, + .id = &nsim_fib_net_id, + .size = sizeof(struct nsim_fib_data), +}; + +void nsim_fib_exit(void) +{ + unregister_pernet_subsys(&nsim_fib_net_ops); + unregister_fib_notifier(&nsim_fib_nb); +} + +int nsim_fib_init(void) +{ + int err; + + err = register_pernet_subsys(&nsim_fib_net_ops); + if (err < 0) { + pr_err("Failed to register pernet subsystem\n"); + goto err_out; + } + + err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent); + if (err < 0) { + pr_err("Failed to register fib notifier\n"); + goto err_out; + } + +err_out: + return err; +} diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 3fd567928f3d..ec68f38213d9 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -167,8 +167,14 @@ static int nsim_init(struct net_device *dev) SET_NETDEV_DEV(dev, &ns->dev); + err = nsim_devlink_setup(ns); + if (err) + goto err_unreg_dev; + return 0; +err_unreg_dev: + device_unregister(&ns->dev); err_bpf_uninit: nsim_bpf_uninit(ns); err_debugfs_destroy: @@ -180,6 +186,7 @@ static void nsim_uninit(struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); + nsim_devlink_teardown(ns); debugfs_remove_recursive(ns->ddir); nsim_bpf_uninit(ns); } @@ -478,12 +485,18 @@ static int __init nsim_module_init(void) if (err) goto err_debugfs_destroy; - err = rtnl_link_register(&nsim_link_ops); + err = nsim_devlink_init(); if (err) goto err_unreg_bus; + err = rtnl_link_register(&nsim_link_ops); + if (err) + goto err_dl_fini; + return 0; +err_dl_fini: + nsim_devlink_exit(); err_unreg_bus: bus_unregister(&nsim_bus); err_debugfs_destroy: @@ -494,6 +507,7 @@ err_debugfs_destroy: static void __exit nsim_module_exit(void) { rtnl_link_unregister(&nsim_link_ops); + nsim_devlink_exit(); bus_unregister(&nsim_bus); debugfs_remove_recursive(nsim_ddir); } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index ea081c10efb8..3a8581af3b85 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -64,6 +64,9 @@ struct netdevsim { bool bpf_map_accept; struct list_head bpf_bound_maps; +#if IS_ENABLED(CONFIG_NET_DEVLINK) + struct devlink *devlink; +#endif }; extern struct dentry *nsim_ddir; @@ -103,6 +106,47 @@ nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } #endif +#if 
IS_ENABLED(CONFIG_NET_DEVLINK) +enum nsim_resource_id { + NSIM_RESOURCE_NONE, /* DEVLINK_RESOURCE_ID_PARENT_TOP */ + NSIM_RESOURCE_IPV4, + NSIM_RESOURCE_IPV4_FIB, + NSIM_RESOURCE_IPV4_FIB_RULES, + NSIM_RESOURCE_IPV6, + NSIM_RESOURCE_IPV6_FIB, + NSIM_RESOURCE_IPV6_FIB_RULES, +}; + +int nsim_devlink_setup(struct netdevsim *ns); +void nsim_devlink_teardown(struct netdevsim *ns); + +int nsim_devlink_init(void); +void nsim_devlink_exit(void); + +int nsim_fib_init(void); +void nsim_fib_exit(void); +u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max); +int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val); +#else +static inline int nsim_devlink_setup(struct netdevsim *ns) +{ + return 0; +} + +static inline void nsim_devlink_teardown(struct netdevsim *ns) +{ +} + +static inline int nsim_devlink_init(void) +{ + return 0; +} + +static inline void nsim_devlink_exit(void) +{ +} +#endif + static inline struct netdevsim *to_nsim(struct device *ptr) { return container_of(ptr, struct netdevsim, dev); diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c index e8ae50e1255e..319edc9c8ec7 100644 --- a/drivers/net/phy/aquantia.c +++ b/drivers/net/phy/aquantia.c @@ -38,14 +38,6 @@ static int aquantia_config_aneg(struct phy_device *phydev) return 0; } -static int aquantia_aneg_done(struct phy_device *phydev) -{ - int reg; - - reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); - return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE); -} - static int aquantia_config_intr(struct phy_device *phydev) { int err; @@ -125,7 +117,7 @@ static struct phy_driver aquantia_driver[] = { .name = "Aquantia AQ1202", .features = PHY_AQUANTIA_FEATURES, .flags = PHY_HAS_INTERRUPT, - .aneg_done = aquantia_aneg_done, + .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, @@ -137,7 +129,7 @@ static struct phy_driver aquantia_driver[] = { .name = "Aquantia AQ2104", .features = PHY_AQUANTIA_FEATURES, .flags = PHY_HAS_INTERRUPT, - .aneg_done = aquantia_aneg_done, + .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, @@ -149,7 +141,7 @@ static struct phy_driver aquantia_driver[] = { .name = "Aquantia AQR105", .features = PHY_AQUANTIA_FEATURES, .flags = PHY_HAS_INTERRUPT, - .aneg_done = aquantia_aneg_done, + .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, @@ -161,7 +153,7 @@ static struct phy_driver aquantia_driver[] = { .name = "Aquantia AQR106", .features = PHY_AQUANTIA_FEATURES, .flags = PHY_HAS_INTERRUPT, - .aneg_done = aquantia_aneg_done, + .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, @@ -173,7 +165,7 @@ static struct phy_driver aquantia_driver[] = { .name = "Aquantia AQR107", .features = PHY_AQUANTIA_FEATURES, .flags = PHY_HAS_INTERRUPT, - .aneg_done = aquantia_aneg_done, + .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, @@ -185,7 +177,7 @@ static struct phy_driver aquantia_driver[] = { .name = "Aquantia AQR405", .features = PHY_AQUANTIA_FEATURES, .flags = PHY_HAS_INTERRUPT, - .aneg_done = aquantia_aneg_done, + .aneg_done = genphy_c45_aneg_done, .config_aneg = 
aquantia_config_aneg, .config_intr = aquantia_config_intr, .ack_interrupt = aquantia_ack_interrupt, diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index 171010eb4d9c..5ad130c3da43 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -341,8 +341,8 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data) unsigned int i; for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++) - memcpy(data + i * ETH_GSTRING_LEN, - bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN); + strlcpy(data + i * ETH_GSTRING_LEN, + bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN); } EXPORT_SYMBOL_GPL(bcm_phy_get_strings); diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 421feb8f92fe..29b1c88b55cc 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -565,7 +565,7 @@ static int bcm7xxx_28nm_set_tunable(struct phy_device *phydev, if (ret) return ret; - /* Disable EEE advertisment since this prevents the PHY + /* Disable EEE advertisement since this prevents the PHY * from successfully linking up, trigger auto-negotiation restart * to let the MAC decide what to do. */ diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c index 9442db221834..8022cd317f62 100644 --- a/drivers/net/phy/cortina.c +++ b/drivers/net/phy/cortina.c @@ -30,14 +30,6 @@ static int cortina_read_reg(struct phy_device *phydev, u16 regnum) MII_ADDR_C45 | regnum); } -static int cortina_config_aneg(struct phy_device *phydev) -{ - phydev->supported = SUPPORTED_10000baseT_Full; - phydev->advertising = SUPPORTED_10000baseT_Full; - - return 0; -} - static int cortina_read_status(struct phy_device *phydev) { int gpio_int_status, ret = 0; @@ -61,11 +53,6 @@ err: return ret; } -static int cortina_soft_reset(struct phy_device *phydev) -{ - return 0; -} - static int cortina_probe(struct phy_device *phydev) { u32 phy_id = 0; @@ -101,9 +88,10 @@ static struct phy_driver cortina_driver[] = { .phy_id = PHY_ID_CS4340, .phy_id_mask = 0xffffffff, .name = "Cortina CS4340", - .config_aneg = cortina_config_aneg, + .config_init = gen10g_config_init, + .config_aneg = gen10g_config_aneg, .read_status = cortina_read_status, - .soft_reset = cortina_soft_reset, + .soft_reset = gen10g_no_soft_reset, .probe = cortina_probe, }, }; diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index ab58224f897f..b3935778b19f 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -75,6 +75,8 @@ #define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0 #define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN 0x1f +#define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8) +#define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8 /* CFG4 bits */ #define DP83867_CFG4_PORT_MIRROR_EN BIT(0) @@ -92,6 +94,7 @@ struct dp83867_private { int io_impedance; int port_mirroring; bool rxctrl_strap_quirk; + int clk_output_sel; }; static int dp83867_ack_interrupt(struct phy_device *phydev) @@ -160,6 +163,14 @@ static int dp83867_of_init(struct phy_device *phydev) dp83867->io_impedance = -EINVAL; /* Optional configuration */ + ret = of_property_read_u32(of_node, "ti,clk-output-sel", + &dp83867->clk_output_sel); + if (ret || dp83867->clk_output_sel > DP83867_CLK_O_SEL_REF_CLK) + /* Keep the default value if ti,clk-output-sel is not set + * or too high + */ + dp83867->clk_output_sel = DP83867_CLK_O_SEL_REF_CLK; + if (of_property_read_bool(of_node, "ti,max-output-impedance")) dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX; else if (of_property_read_bool(of_node, "ti,min-output-impedance")) @@ -295,6 
+306,14 @@ static int dp83867_config_init(struct phy_device *phydev) if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP) dp83867_config_port_mirroring(phydev); + /* Clock output selection if muxing property is set */ + if (dp83867->clk_output_sel != DP83867_CLK_O_SEL_REF_CLK) { + val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG); + val &= ~DP83867_IO_MUX_CFG_CLK_O_SEL_MASK; + val |= (dp83867->clk_output_sel << DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT); + phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG, val); + } + return 0; } diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c index a11f80cb5388..7d936fb61c22 100644 --- a/drivers/net/phy/intel-xway.c +++ b/drivers/net/phy/intel-xway.c @@ -149,8 +149,10 @@ #define PHY_ID_PHY22F_1_4 0xD565A410 #define PHY_ID_PHY11G_1_5 0xD565A401 #define PHY_ID_PHY22F_1_5 0xD565A411 -#define PHY_ID_PHY11G_VR9 0xD565A409 -#define PHY_ID_PHY22F_VR9 0xD565A419 +#define PHY_ID_PHY11G_VR9_1_1 0xD565A408 +#define PHY_ID_PHY22F_VR9_1_1 0xD565A418 +#define PHY_ID_PHY11G_VR9_1_2 0xD565A409 +#define PHY_ID_PHY22F_VR9_1_2 0xD565A419 static int xway_gphy_config_init(struct phy_device *phydev) { @@ -312,9 +314,9 @@ static struct phy_driver xway_gphy[] = { .suspend = genphy_suspend, .resume = genphy_resume, }, { - .phy_id = PHY_ID_PHY11G_VR9, + .phy_id = PHY_ID_PHY11G_VR9_1_1, .phy_id_mask = 0xffffffff, - .name = "Intel XWAY PHY11G (xRX integrated)", + .name = "Intel XWAY PHY11G (xRX v1.1 integrated)", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, @@ -324,9 +326,33 @@ static struct phy_driver xway_gphy[] = { .suspend = genphy_suspend, .resume = genphy_resume, }, { - .phy_id = PHY_ID_PHY22F_VR9, + .phy_id = PHY_ID_PHY22F_VR9_1_1, .phy_id_mask = 0xffffffff, - .name = "Intel XWAY PHY22F (xRX integrated)", + .name = "Intel XWAY PHY22F (xRX v1.1 integrated)", + .features = PHY_BASIC_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = xway_gphy_config_init, + .ack_interrupt = xway_gphy_ack_interrupt, + .did_interrupt = xway_gphy_did_interrupt, + .config_intr = xway_gphy_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_PHY11G_VR9_1_2, + .phy_id_mask = 0xffffffff, + .name = "Intel XWAY PHY11G (xRX v1.2 integrated)", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = xway_gphy_config_init, + .ack_interrupt = xway_gphy_ack_interrupt, + .did_interrupt = xway_gphy_did_interrupt, + .config_intr = xway_gphy_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_PHY22F_VR9_1_2, + .phy_id_mask = 0xffffffff, + .name = "Intel XWAY PHY22F (xRX v1.2 integrated)", .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, @@ -346,8 +372,10 @@ static struct mdio_device_id __maybe_unused xway_gphy_tbl[] = { { PHY_ID_PHY22F_1_4, 0xffffffff }, { PHY_ID_PHY11G_1_5, 0xffffffff }, { PHY_ID_PHY22F_1_5, 0xffffffff }, - { PHY_ID_PHY11G_VR9, 0xffffffff }, - { PHY_ID_PHY22F_VR9, 0xffffffff }, + { PHY_ID_PHY11G_VR9_1_1, 0xffffffff }, + { PHY_ID_PHY22F_VR9_1_1, 0xffffffff }, + { PHY_ID_PHY11G_VR9_1_2, 0xffffffff }, + { PHY_ID_PHY22F_VR9_1_2, 0xffffffff }, { } }; MODULE_DEVICE_TABLE(mdio, xway_gphy_tbl); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 22d9bc9c33a4..a75c511950c3 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -860,7 +860,7 @@ static int m88e1510_config_init(struct phy_device *phydev) return 
err; /* There appears to be a bug in the 88e1512 when used in - * SGMII to copper mode, where the AN advertisment register + * SGMII to copper mode, where the AN advertisement register * clears the pause bits each time a negotiation occurs. * This means we can never be truely sure what was advertised, * so disable Pause support. @@ -1452,8 +1452,8 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data) int i; for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) { - memcpy(data + i * ETH_GSTRING_LEN, - marvell_hw_stats[i].string, ETH_GSTRING_LEN); + strlcpy(data + i * ETH_GSTRING_LEN, + marvell_hw_stats[i].string, ETH_GSTRING_LEN); } } diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 8a0bd98fdec7..f77a2d9e7f9d 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -21,8 +21,10 @@ * If both the fiber and copper ports are connected, the first to gain * link takes priority and the other port is completely locked out. */ -#include <linux/phy.h> +#include <linux/ctype.h> +#include <linux/hwmon.h> #include <linux/marvell_phy.h> +#include <linux/phy.h> enum { MV_PCS_BASE_T = 0x0000, @@ -40,6 +42,19 @@ enum { */ MV_AN_CTRL1000 = 0x8000, /* 1000base-T control register */ MV_AN_STAT1000 = 0x8001, /* 1000base-T status register */ + + /* Vendor2 MMD registers */ + MV_V2_TEMP_CTRL = 0xf08a, + MV_V2_TEMP_CTRL_MASK = 0xc000, + MV_V2_TEMP_CTRL_SAMPLE = 0x0000, + MV_V2_TEMP_CTRL_DISABLE = 0xc000, + MV_V2_TEMP = 0xf08c, + MV_V2_TEMP_UNKNOWN = 0x9600, /* unknown function */ +}; + +struct mv3310_priv { + struct device *hwmon_dev; + char *hwmon_name; }; static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg, @@ -60,26 +75,180 @@ static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg, return ret < 0 ? 
ret : 1; } +#ifdef CONFIG_HWMON +static umode_t mv3310_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type == hwmon_chip && attr == hwmon_chip_update_interval) + return 0444; + if (type == hwmon_temp && attr == hwmon_temp_input) + return 0444; + return 0; +} + +static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *value) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int temp; + + if (type == hwmon_chip && attr == hwmon_chip_update_interval) { + *value = MSEC_PER_SEC; + return 0; + } + + if (type == hwmon_temp && attr == hwmon_temp_input) { + temp = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP); + if (temp < 0) + return temp; + + *value = ((temp & 0xff) - 75) * 1000; + + return 0; + } + + return -EOPNOTSUPP; +} + +static const struct hwmon_ops mv3310_hwmon_ops = { + .is_visible = mv3310_hwmon_is_visible, + .read = mv3310_hwmon_read, +}; + +static u32 mv3310_hwmon_chip_config[] = { + HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL, + 0, +}; + +static const struct hwmon_channel_info mv3310_hwmon_chip = { + .type = hwmon_chip, + .config = mv3310_hwmon_chip_config, +}; + +static u32 mv3310_hwmon_temp_config[] = { + HWMON_T_INPUT, + 0, +}; + +static const struct hwmon_channel_info mv3310_hwmon_temp = { + .type = hwmon_temp, + .config = mv3310_hwmon_temp_config, +}; + +static const struct hwmon_channel_info *mv3310_hwmon_info[] = { + &mv3310_hwmon_chip, + &mv3310_hwmon_temp, + NULL, +}; + +static const struct hwmon_chip_info mv3310_hwmon_chip_info = { + .ops = &mv3310_hwmon_ops, + .info = mv3310_hwmon_info, +}; + +static int mv3310_hwmon_config(struct phy_device *phydev, bool enable) +{ + u16 val; + int ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP, + MV_V2_TEMP_UNKNOWN); + if (ret < 0) + return ret; + + val = enable ? MV_V2_TEMP_CTRL_SAMPLE : MV_V2_TEMP_CTRL_DISABLE; + ret = mv3310_modify(phydev, MDIO_MMD_VEND2, MV_V2_TEMP_CTRL, + MV_V2_TEMP_CTRL_MASK, val); + + return ret < 0 ? 
ret : 0; +} + +static void mv3310_hwmon_disable(void *data) +{ + struct phy_device *phydev = data; + + mv3310_hwmon_config(phydev, false); +} + +static int mv3310_hwmon_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); + int i, j, ret; + + priv->hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); + if (!priv->hwmon_name) + return -ENODEV; + + for (i = j = 0; priv->hwmon_name[i]; i++) { + if (isalnum(priv->hwmon_name[i])) { + if (i != j) + priv->hwmon_name[j] = priv->hwmon_name[i]; + j++; + } + } + priv->hwmon_name[j] = '\0'; + + ret = mv3310_hwmon_config(phydev, true); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, mv3310_hwmon_disable, phydev); + if (ret) + return ret; + + priv->hwmon_dev = devm_hwmon_device_register_with_info(dev, + priv->hwmon_name, phydev, + &mv3310_hwmon_chip_info, NULL); + + return PTR_ERR_OR_ZERO(priv->hwmon_dev); +} +#else +static inline int mv3310_hwmon_config(struct phy_device *phydev, bool enable) +{ + return 0; +} + +static int mv3310_hwmon_probe(struct phy_device *phydev) +{ + return 0; +} +#endif + static int mv3310_probe(struct phy_device *phydev) { + struct mv3310_priv *priv; u32 mmd_mask = MDIO_DEVS_PMAPMD | MDIO_DEVS_AN; + int ret; if (!phydev->is_c45 || (phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask) return -ENODEV; + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev_set_drvdata(&phydev->mdio.dev, priv); + + ret = mv3310_hwmon_probe(phydev); + if (ret) + return ret; + return 0; } -/* - * Resetting the MV88X3310 causes it to become non-responsive. Avoid - * setting the reset bit(s). - */ -static int mv3310_soft_reset(struct phy_device *phydev) +static int mv3310_suspend(struct phy_device *phydev) { return 0; } +static int mv3310_resume(struct phy_device *phydev) +{ + return mv3310_hwmon_config(phydev, true); +} + static int mv3310_config_init(struct phy_device *phydev) { __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; @@ -317,7 +486,7 @@ static int mv3310_read_status(struct phy_device *phydev) if (val < 0) return val; - /* Read the link partner's 1G advertisment */ + /* Read the link partner's 1G advertisement */ val = phy_read_mmd(phydev, MDIO_MMD_AN, MV_AN_STAT1000); if (val < 0) return val; @@ -376,9 +545,11 @@ static struct phy_driver mv3310_drivers[] = { SUPPORTED_FIBRE | SUPPORTED_10000baseT_Full | SUPPORTED_Backplane, - .probe = mv3310_probe, - .soft_reset = mv3310_soft_reset, + .soft_reset = gen10g_no_soft_reset, .config_init = mv3310_config_init, + .probe = mv3310_probe, + .suspend = mv3310_suspend, + .resume = mv3310_resume, .config_aneg = mv3310_config_aneg, .aneg_done = mv3310_aneg_done, .read_status = mv3310_read_status, diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c index 61a543c788cc..403b085f0a89 100644 --- a/drivers/net/phy/mdio-bitbang.c +++ b/drivers/net/phy/mdio-bitbang.c @@ -113,7 +113,7 @@ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg) for (i = 0; i < 32; i++) mdiobb_send_bit(ctrl, 1); - /* send the start bit (01) and the read opcode (10) or write (10). + /* send the start bit (01) and the read opcode (10) or write (01). 
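The corrected comment above matches the IEEE 802.3 Clause 22 frame layout: start bits 01, opcode 10 for read or 01 for write, then five PHY address bits and five register address bits, MSB first. A small sketch that packs those fourteen bits the way a bit-banging loop would shift them out (c22_cmd is a hypothetical helper, not a kernel API):

#include <linux/types.h>

/* ST(2) | OP(2) | PHYAD(5) | REGAD(5), most significant bit sent first */
static u16 c22_cmd(bool read, u8 phy, u8 reg)
{
	u16 op = read ? 0x2 : 0x1;	/* 10 = read, 01 = write */

	return (0x1 << 12) |		/* start bits 01 */
	       (op << 10) |
	       ((phy & 0x1f) << 5) |
	       (reg & 0x1f);
}

A turnaround field and sixteen data bits follow these command bits on the wire.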
Clause 45 operation uses 00 for the start and 11, 10 for read/write */ mdiobb_send_bit(ctrl, 0); diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c index 2573ab012f16..70f6115530af 100644 --- a/drivers/net/phy/mdio-mux-mmioreg.c +++ b/drivers/net/phy/mdio-mux-mmioreg.c @@ -163,8 +163,9 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) mdio_mux_mmioreg_switch_fn, &s->mux_handle, s, NULL); if (ret) { - dev_err(&pdev->dev, "failed to register mdio-mux bus %pOF\n", - np); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to register mdio-mux bus %pOF\n", np); return ret; } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 0f45310300f6..f41b224a9cdb 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -635,25 +635,6 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev) return 0; } -/* This routine returns -1 as an indication to the caller that the - * Micrel ksz9021 10/100/1000 PHY does not support standard IEEE - * MMD extended PHY registers. - */ -static int -ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum) -{ - return -1; -} - -/* This routine does nothing since the Micrel ksz9021 does not support - * standard IEEE MMD extended PHY registers. - */ -static int -ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum, u16 val) -{ - return -1; -} - static int kszphy_get_sset_count(struct phy_device *phydev) { return ARRAY_SIZE(kszphy_hw_stats); @@ -664,8 +645,8 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data) int i; for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) { - memcpy(data + i * ETH_GSTRING_LEN, - kszphy_hw_stats[i].string, ETH_GSTRING_LEN); + strlcpy(data + i * ETH_GSTRING_LEN, + kszphy_hw_stats[i].string, ETH_GSTRING_LEN); } } @@ -946,8 +927,8 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .read_mmd = ksz9021_rd_mmd_phyreg, - .write_mmd = ksz9021_wr_mmd_phyreg, + .read_mmd = genphy_read_mmd_unsupported, + .write_mmd = genphy_write_mmd_unsupported, }, { .phy_id = PHY_ID_KSZ9031, .phy_id_mask = MICREL_PHY_ID_MASK, diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index a4576859afae..e1225545362d 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -163,11 +163,11 @@ int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask) EXPORT_SYMBOL_GPL(genphy_c45_read_link); /** - * genphy_c45_read_lpa - read the link partner advertisment and pause + * genphy_c45_read_lpa - read the link partner advertisement and pause * @phydev: target phy_device struct * * Read the Clause 45 defined base (7.19) and 10G (7.33) status registers, - * filling in the link partner advertisment, pause and asym_pause members + * filling in the link partner advertisement, pause and asym_pause members * in @phydev. This assumes that the auto-negotiation MMD is present, and * the backplane bit (7.48.0) is clear. Clause 45 PHY drivers are expected * to fill in the remainder of the link partner advert from vendor registers. @@ -176,7 +176,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev) { int val; - /* Read the link partner's base page advertisment */ + /* Read the link partner's base page advertisement */ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA); if (val < 0) return val; @@ -185,7 +185,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev) phydev->pause = val & LPA_PAUSE_CAP ? 
1 : 0; phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0; - /* Read the link partner's 10G advertisment */ + /* Read the link partner's 10G advertisement */ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT); if (val < 0) return val; @@ -268,12 +268,13 @@ EXPORT_SYMBOL_GPL(genphy_c45_read_mdix); /* The gen10g_* functions are the old Clause 45 stub */ -static int gen10g_config_aneg(struct phy_device *phydev) +int gen10g_config_aneg(struct phy_device *phydev) { return 0; } +EXPORT_SYMBOL_GPL(gen10g_config_aneg); -static int gen10g_read_status(struct phy_device *phydev) +int gen10g_read_status(struct phy_device *phydev) { u32 mmd_mask = phydev->c45_ids.devices_in_package; int ret; @@ -291,14 +292,16 @@ static int gen10g_read_status(struct phy_device *phydev) return 0; } +EXPORT_SYMBOL_GPL(gen10g_read_status); -static int gen10g_soft_reset(struct phy_device *phydev) +int gen10g_no_soft_reset(struct phy_device *phydev) { /* Do nothing for now */ return 0; } +EXPORT_SYMBOL_GPL(gen10g_no_soft_reset); -static int gen10g_config_init(struct phy_device *phydev) +int gen10g_config_init(struct phy_device *phydev) { /* Temporarily just say we support everything */ phydev->supported = SUPPORTED_10000baseT_Full; @@ -306,22 +309,25 @@ static int gen10g_config_init(struct phy_device *phydev) return 0; } +EXPORT_SYMBOL_GPL(gen10g_config_init); -static int gen10g_suspend(struct phy_device *phydev) +int gen10g_suspend(struct phy_device *phydev) { return 0; } +EXPORT_SYMBOL_GPL(gen10g_suspend); -static int gen10g_resume(struct phy_device *phydev) +int gen10g_resume(struct phy_device *phydev) { return 0; } +EXPORT_SYMBOL_GPL(gen10g_resume); struct phy_driver genphy_10g_driver = { .phy_id = 0xffffffff, .phy_id_mask = 0xffffffff, .name = "Generic 10G PHY", - .soft_reset = gen10g_soft_reset, + .soft_reset = gen10g_no_soft_reset, .config_init = gen10g_config_init, .features = 0, .config_aneg = gen10g_config_aneg, diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 4083f00c97a5..c7da4cbb1103 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -190,10 +190,10 @@ size_t phy_speeds(unsigned int *speeds, size_t size, } /** - * phy_resolve_aneg_linkmode - resolve the advertisments into phy settings + * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings * @phydev: The phy_device struct * - * Resolve our and the link partner advertisments into their corresponding + * Resolve our and the link partner advertisements into their corresponding * speed and duplex. If full duplex was negotiated, extract the pause mode * from the link partner mask. 
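phy_resolve_aneg_linkmode(), whose kerneldoc is corrected above, reduces to intersecting the local and link partner advertisements and picking the best surviving mode. A toy resolver over the legacy u32 ADVERTISED_* bits (the real helper walks an ordered table of link-mode settings):

#include <linux/ethtool.h>

/* illustrative only: highest common mode wins */
static void demo_resolve(u32 local_adv, u32 partner_adv,
			 int *speed, int *full_duplex)
{
	u32 common = local_adv & partner_adv;

	if (common & ADVERTISED_1000baseT_Full) {
		*speed = 1000;
		*full_duplex = 1;
	} else if (common & ADVERTISED_100baseT_Full) {
		*speed = 100;
		*full_duplex = 1;
	} else if (common & ADVERTISED_100baseT_Half) {
		*speed = 100;
		*full_duplex = 0;
	} else if (common & ADVERTISED_10baseT_Full) {
		*speed = 10;
		*full_duplex = 1;
	} else {
		*speed = 10;
		*full_duplex = 0;
	}
}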
*/ diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index e3e29c2b028b..05c1e8ef15e6 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -618,6 +618,68 @@ static void phy_error(struct phy_device *phydev) } /** + * phy_disable_interrupts - Disable the PHY interrupts from the PHY side + * @phydev: target phy_device struct + */ +static int phy_disable_interrupts(struct phy_device *phydev) +{ + int err; + + /* Disable PHY interrupts */ + err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); + if (err) + return err; + + /* Clear the interrupt */ + return phy_clear_interrupt(phydev); +} + +/** + * phy_change - Called by the phy_interrupt to handle PHY changes + * @phydev: phy_device struct that interrupted + */ +static irqreturn_t phy_change(struct phy_device *phydev) +{ + if (phy_interrupt_is_valid(phydev)) { + if (phydev->drv->did_interrupt && + !phydev->drv->did_interrupt(phydev)) + return IRQ_NONE; + + if (phydev->state == PHY_HALTED) + if (phy_disable_interrupts(phydev)) + goto phy_err; + } + + mutex_lock(&phydev->lock); + if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) + phydev->state = PHY_CHANGELINK; + mutex_unlock(&phydev->lock); + + /* reschedule state queue work to run as soon as possible */ + phy_trigger_machine(phydev, true); + + if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev)) + goto phy_err; + return IRQ_HANDLED; + +phy_err: + phy_error(phydev); + return IRQ_NONE; +} + +/** + * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes + * @work: work_struct that describes the work to be done + */ +void phy_change_work(struct work_struct *work) +{ + struct phy_device *phydev = + container_of(work, struct phy_device, phy_queue); + + phy_change(phydev); +} + +/** * phy_interrupt - PHY interrupt handler * @irq: interrupt line * @phy_dat: phy_device pointer @@ -632,9 +694,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) if (PHY_HALTED == phydev->state) return IRQ_NONE; /* It can't be ours. 
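Easy to miss in this phy.c rework: phy_change() now returns an irqreturn_t, so the interrupt handler can report IRQ_NONE when a shared line fires for some other device. A condensed sketch of the resulting flow (both functions are static to phy.c, so this only illustrates the shape):

#include <linux/interrupt.h>
#include <linux/phy.h>

static irqreturn_t phy_change(struct phy_device *phydev);	/* as in phy.c */

static irqreturn_t example_phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (phydev->state == PHY_HALTED)
		return IRQ_NONE;	/* it can't be ours */

	/* IRQ_HANDLED, or IRQ_NONE if did_interrupt() disclaims it */
	return phy_change(phydev);
}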
*/ - phy_change(phydev); - - return IRQ_HANDLED; + return phy_change(phydev); } /** @@ -652,32 +712,6 @@ static int phy_enable_interrupts(struct phy_device *phydev) } /** - * phy_disable_interrupts - Disable the PHY interrupts from the PHY side - * @phydev: target phy_device struct - */ -static int phy_disable_interrupts(struct phy_device *phydev) -{ - int err; - - /* Disable PHY interrupts */ - err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); - if (err) - goto phy_err; - - /* Clear the interrupt */ - err = phy_clear_interrupt(phydev); - if (err) - goto phy_err; - - return 0; - -phy_err: - phy_error(phydev); - - return err; -} - -/** * phy_start_interrupts - request and enable interrupts for a PHY device * @phydev: target phy_device struct * @@ -720,50 +754,6 @@ int phy_stop_interrupts(struct phy_device *phydev) EXPORT_SYMBOL(phy_stop_interrupts); /** - * phy_change - Called by the phy_interrupt to handle PHY changes - * @phydev: phy_device struct that interrupted - */ -void phy_change(struct phy_device *phydev) -{ - if (phy_interrupt_is_valid(phydev)) { - if (phydev->drv->did_interrupt && - !phydev->drv->did_interrupt(phydev)) - return; - - if (phydev->state == PHY_HALTED) - if (phy_disable_interrupts(phydev)) - goto phy_err; - } - - mutex_lock(&phydev->lock); - if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) - phydev->state = PHY_CHANGELINK; - mutex_unlock(&phydev->lock); - - /* reschedule state queue work to run as soon as possible */ - phy_trigger_machine(phydev, true); - - if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev)) - goto phy_err; - return; - -phy_err: - phy_error(phydev); -} - -/** - * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes - * @work: work_struct that describes the work to be done - */ -void phy_change_work(struct work_struct *work) -{ - struct phy_device *phydev = - container_of(work, struct phy_device, phy_queue); - - phy_change(phydev); -} - -/** * phy_stop - Bring down the PHY link, and stop checking the status * @phydev: target phy_device struct */ @@ -774,13 +764,8 @@ void phy_stop(struct phy_device *phydev) if (PHY_HALTED == phydev->state) goto out_unlock; - if (phy_interrupt_is_valid(phydev)) { - /* Disable PHY Interrupts */ - phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); - - /* Clear any pending interrupts */ - phy_clear_interrupt(phydev); - } + if (phy_interrupt_is_valid(phydev)) + phy_disable_interrupts(phydev); phydev->state = PHY_HALTED; @@ -819,7 +804,7 @@ void phy_start(struct phy_device *phydev) break; case PHY_HALTED: /* if phy was suspended, bring the physical link up again */ - phy_resume(phydev); + __phy_resume(phydev); /* make sure interrupts are re-enabled for the PHY */ if (phy_interrupt_is_valid(phydev)) { diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index b13eed21c87d..ac23322a32e1 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -135,9 +135,7 @@ static int mdio_bus_phy_resume(struct device *dev) if (!mdio_bus_phy_may_suspend(phydev)) goto no_resume; - mutex_lock(&phydev->lock); ret = phy_resume(phydev); - mutex_unlock(&phydev->lock); if (ret < 0) return ret; @@ -376,7 +374,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, dev->duplex = -1; dev->pause = 0; dev->asym_pause = 0; - dev->link = 1; + dev->link = 0; dev->interface = PHY_INTERFACE_MODE_GMII; dev->autoneg = AUTONEG_ENABLE; @@ -1014,10 +1012,17 @@ int phy_attach_direct(struct net_device *dev, struct phy_device 
*phydev, err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj, "attached_dev"); if (!err) { - err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj, - "phydev"); - if (err) - goto error; + err = sysfs_create_link_nowarn(&dev->dev.kobj, + &phydev->mdio.dev.kobj, + "phydev"); + if (err) { + dev_err(&dev->dev, "could not add device link to %s err %d\n", + kobject_name(&phydev->mdio.dev.kobj), + err); + /* non-fatal - some net drivers can use one netdevice + * with more then one phy + */ + } phydev->sysfs_links = true; } @@ -1041,9 +1046,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, if (err) goto error; - mutex_lock(&phydev->lock); phy_resume(phydev); - mutex_unlock(&phydev->lock); phy_led_triggers_register(phydev); return err; @@ -1172,7 +1175,7 @@ int phy_suspend(struct phy_device *phydev) } EXPORT_SYMBOL(phy_suspend); -int phy_resume(struct phy_device *phydev) +int __phy_resume(struct phy_device *phydev) { struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); int ret = 0; @@ -1189,6 +1192,18 @@ int phy_resume(struct phy_device *phydev) return ret; } +EXPORT_SYMBOL(__phy_resume); + +int phy_resume(struct phy_device *phydev) +{ + int ret; + + mutex_lock(&phydev->lock); + ret = __phy_resume(phydev); + mutex_unlock(&phydev->lock); + + return ret; +} EXPORT_SYMBOL(phy_resume); int phy_loopback(struct phy_device *phydev, bool enable) @@ -1382,7 +1397,7 @@ int genphy_setup_forced(struct phy_device *phydev) ctl |= BMCR_FULLDPLX; return phy_modify(phydev, MII_BMCR, - BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl); + ~(BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN), ctl); } EXPORT_SYMBOL(genphy_setup_forced); @@ -1658,6 +1673,23 @@ int genphy_config_init(struct phy_device *phydev) } EXPORT_SYMBOL(genphy_config_init); +/* This is used for the phy device which doesn't support the MMD extended + * register access, but it does have side effect when we are trying to access + * the MMD register via indirect method. 
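The two stubs being added here give drivers a common way to refuse Clause 22 indirect MMD access. A hypothetical driver entry (the ID is a placeholder) wires them in exactly as the micrel and realtek hunks in this diff do:

#include <linux/phy.h>

static struct phy_driver example_driver = {
	.phy_id		= 0x00000001,	/* placeholder */
	.phy_id_mask	= 0xffffffff,
	.name		= "Example PHY without MMD registers",
	.features	= PHY_BASIC_FEATURES,
	.read_mmd	= genphy_read_mmd_unsupported,
	.write_mmd	= genphy_write_mmd_unsupported,
};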
+ */ +int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, u16 regnum) +{ + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(genphy_read_mmd_unsupported); + +int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, + u16 regnum, u16 val) +{ + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(genphy_write_mmd_unsupported); + int genphy_suspend(struct phy_device *phydev) { return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 6ac8b29b2dc3..c582b2d7546c 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -364,7 +364,7 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat } /* Flow control is resolved according to our and the link partners - * advertisments using the following drawn from the 802.3 specs: + * advertisements using the following drawn from the 802.3 specs: * Local device Link partner * Pause AsymDir Pause AsymDir Result * 1 X 1 X TX+RX @@ -470,10 +470,12 @@ static void phylink_resolve(struct work_struct *w) if (link_state.link != netif_carrier_ok(ndev)) { if (!link_state.link) { netif_carrier_off(ndev); - pl->ops->mac_link_down(ndev, pl->link_an_mode); + pl->ops->mac_link_down(ndev, pl->link_an_mode, + pl->phy_state.interface); netdev_info(ndev, "Link is Down\n"); } else { pl->ops->mac_link_up(ndev, pl->link_an_mode, + pl->phy_state.interface, pl->phydev); netif_carrier_on(ndev); @@ -679,12 +681,11 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy) mutex_lock(&phy->lock); mutex_lock(&pl->state_mutex); - pl->netdev->phydev = phy; pl->phydev = phy; linkmode_copy(pl->supported, supported); linkmode_copy(pl->link_config.advertising, config.advertising); - /* Restrict the phy advertisment according to the MAC support. */ + /* Restrict the phy advertisement according to the MAC support. */ ethtool_convert_link_mode_to_legacy_u32(&advertising, config.advertising); phy->advertising = advertising; mutex_unlock(&pl->state_mutex); @@ -817,7 +818,6 @@ void phylink_disconnect_phy(struct phylink *pl) if (phy) { mutex_lock(&phy->lock); mutex_lock(&pl->state_mutex); - pl->netdev->phydev = NULL; pl->phydev = NULL; mutex_unlock(&pl->state_mutex); mutex_unlock(&phy->lock); @@ -889,7 +889,7 @@ void phylink_start(struct phylink *pl) /* Apply the link configuration to the MAC when starting. This allows * a fixed-link to start with the correct parameters, and also - * ensures that we set the appropriate advertisment for Serdes links. + * ensures that we set the appropriate advertisement for Serdes links. 
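The phylink hunks above also change the MAC-side contract: mac_link_down() and mac_link_up() now receive the phy_interface_t, so a MAC driver can pick the right port or serdes state per mode. A sketch of the updated callbacks, with all names illustrative:

#include <linux/netdevice.h>
#include <linux/phylink.h>

static void example_mac_link_down(struct net_device *ndev, unsigned int mode,
				  phy_interface_t interface)
{
	/* quiesce the MAC; @interface says which port/serdes to stop */
}

static void example_mac_link_up(struct net_device *ndev, unsigned int mode,
				phy_interface_t interface,
				struct phy_device *phy)
{
	/* program speed/duplex/pause for @interface, then start the MAC */
}

static const struct phylink_mac_ops example_mac_ops = {
	/* .validate, .mac_config and friends elided */
	.mac_link_down	= example_mac_link_down,
	.mac_link_up	= example_mac_link_up,
};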
*/ phylink_resolve_flow(pl, &pl->link_config); phylink_mac_config(pl, &pl->link_config); @@ -1076,7 +1076,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, config = pl->link_config; - /* Mask out unsupported advertisments */ + /* Mask out unsupported advertisements */ linkmode_and(config.advertising, kset->link_modes.advertising, pl->supported); @@ -1121,7 +1121,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, if (phylink_validate(pl, pl->supported, &config)) return -EINVAL; - /* If autonegotiation is enabled, we must have an advertisment */ + /* If autonegotiation is enabled, we must have an advertisement */ if (config.an_enabled && phylink_is_empty_linkmode(config.advertising)) return -EINVAL; @@ -1250,34 +1250,6 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, } EXPORT_SYMBOL_GPL(phylink_ethtool_set_pauseparam); -int phylink_ethtool_get_module_info(struct phylink *pl, - struct ethtool_modinfo *modinfo) -{ - int ret = -EOPNOTSUPP; - - WARN_ON(!lockdep_rtnl_is_held()); - - if (pl->sfp_bus) - ret = sfp_get_module_info(pl->sfp_bus, modinfo); - - return ret; -} -EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_info); - -int phylink_ethtool_get_module_eeprom(struct phylink *pl, - struct ethtool_eeprom *ee, u8 *buf) -{ - int ret = -EOPNOTSUPP; - - WARN_ON(!lockdep_rtnl_is_held()); - - if (pl->sfp_bus) - ret = sfp_get_module_eeprom(pl->sfp_bus, ee, buf); - - return ret; -} -EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_eeprom); - /** * phylink_ethtool_get_eee_err() - read the energy efficient ethernet error * counter @@ -1584,25 +1556,14 @@ static int phylink_sfp_module_insert(void *upstream, bool changed; u8 port; - sfp_parse_support(pl->sfp_bus, id, support); - port = sfp_parse_port(pl->sfp_bus, id, support); - iface = sfp_parse_interface(pl->sfp_bus, id); - ASSERT_RTNL(); - switch (iface) { - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_2500BASEX: - case PHY_INTERFACE_MODE_10GKR: - break; - default: - return -EINVAL; - } + sfp_parse_support(pl->sfp_bus, id, support); + port = sfp_parse_port(pl->sfp_bus, id, support); memset(&config, 0, sizeof(config)); linkmode_copy(config.advertising, support); - config.interface = iface; + config.interface = PHY_INTERFACE_MODE_NA; config.speed = SPEED_UNKNOWN; config.duplex = DUPLEX_UNKNOWN; config.pause = MLO_PAUSE_AN; @@ -1611,6 +1572,22 @@ static int phylink_sfp_module_insert(void *upstream, /* Ignore errors if we're expecting a PHY to attach later */ ret = phylink_validate(pl, support, &config); if (ret) { + netdev_err(pl->netdev, "validation with support %*pb failed: %d\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret); + return ret; + } + + iface = sfp_select_interface(pl->sfp_bus, id, config.advertising); + if (iface == PHY_INTERFACE_MODE_NA) { + netdev_err(pl->netdev, + "selection of interface failed, advertisement %*pb\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising); + return -EINVAL; + } + + config.interface = iface; + ret = phylink_validate(pl, support, &config); + if (ret) { netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n", phylink_an_mode_str(MLO_AN_INBAND), phy_modes(config.interface), diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index ee3ca4a2f12b..9f48ecf9c627 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -172,6 +172,8 @@ static struct phy_driver realtek_drvs[] = { .flags = PHY_HAS_INTERRUPT, .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211b_config_intr, + .read_mmd 
= &genphy_read_mmd_unsupported, + .write_mmd = &genphy_write_mmd_unsupported, }, { .phy_id = 0x001cc914, .name = "RTL8211DN Gigabit Ethernet", diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 8961209ee949..0381da78d228 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -106,68 +106,6 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, EXPORT_SYMBOL_GPL(sfp_parse_port); /** - * sfp_parse_interface() - Parse the phy_interface_t - * @bus: a pointer to the &struct sfp_bus structure for the sfp module - * @id: a pointer to the module's &struct sfp_eeprom_id - * - * Derive the phy_interface_t mode for the information found in the - * module's identifying EEPROM. There is no standard or defined way - * to derive this information, so we use some heuristics. - * - * If the encoding is 64b66b, then the module must be >= 10G, so - * return %PHY_INTERFACE_MODE_10GKR. - * - * If it's 8b10b, then it's 1G or slower. If it's definitely a fibre - * module, return %PHY_INTERFACE_MODE_1000BASEX mode, otherwise return - * %PHY_INTERFACE_MODE_SGMII mode. - * - * If the encoding is not known, return %PHY_INTERFACE_MODE_NA. - */ -phy_interface_t sfp_parse_interface(struct sfp_bus *bus, - const struct sfp_eeprom_id *id) -{ - phy_interface_t iface; - - /* Setting the serdes link mode is guesswork: there's no field in - * the EEPROM which indicates what mode should be used. - * - * If the module wants 64b66b, then it must be >= 10G. - * - * If it's a gigabit-only fiber module, it probably does not have - * a PHY, so switch to 802.3z negotiation mode. Otherwise, switch - * to SGMII mode (which is required to support non-gigabit speeds). - */ - switch (id->base.encoding) { - case SFP_ENCODING_8472_64B66B: - iface = PHY_INTERFACE_MODE_10GKR; - break; - - case SFP_ENCODING_8B10B: - if (!id->base.e1000_base_t && - !id->base.e100_base_lx && - !id->base.e100_base_fx) - iface = PHY_INTERFACE_MODE_1000BASEX; - else - iface = PHY_INTERFACE_MODE_SGMII; - break; - - default: - if (id->base.e1000_base_cx) { - iface = PHY_INTERFACE_MODE_1000BASEX; - break; - } - - iface = PHY_INTERFACE_MODE_NA; - dev_err(bus->sfp_dev, - "SFP module encoding does not support 8b10b nor 64b66b\n"); - break; - } - - return iface; -} -EXPORT_SYMBOL_GPL(sfp_parse_interface); - -/** * sfp_parse_support() - Parse the eeprom id for supported link modes * @bus: a pointer to the &struct sfp_bus structure for the sfp module * @id: a pointer to the module's &struct sfp_eeprom_id @@ -180,10 +118,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *support) { unsigned int br_min, br_nom, br_max; - - phylink_set(support, Autoneg); - phylink_set(support, Pause); - phylink_set(support, Asym_Pause); + __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, }; /* Decode the bitrate information to MBd */ br_min = br_nom = br_max = 0; @@ -201,20 +136,20 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, /* Set ethtool support from the compliance fields. 
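The sfp-bus.c rework that follows is structural: link modes are decoded into a scratch bitmap first, so an empty result can trigger a bitrate fallback before anything is merged into the caller's mask. A condensed sketch, assuming the bitrates were already decoded to MBd:

#include <linux/bitmap.h>
#include <linux/ethtool.h>
#include <linux/phylink.h>
#include <linux/sfp.h>

static void example_parse_support(const struct sfp_eeprom_id *id,
				  unsigned long *support,
				  unsigned int br_min, unsigned int br_max)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };

	/* ...one phylink_set(modes, ...) per compliance bit, as below... */

	/* Fallback: 8b10b at roughly 1.25GBd looks like 1000BASE-X even
	 * when no compliance bit says so (e.g. some BiDi modules).
	 */
	if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS) &&
	    id->base.encoding == SFP_ENCODING_8B10B &&
	    br_min <= 1300 && br_max >= 1200)
		phylink_set(modes, 1000baseX_Full);

	bitmap_or(support, support, modes, __ETHTOOL_LINK_MODE_MASK_NBITS);

	phylink_set(support, Autoneg);
	phylink_set(support, Pause);
	phylink_set(support, Asym_Pause);
}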
*/ if (id->base.e10g_base_sr) - phylink_set(support, 10000baseSR_Full); + phylink_set(modes, 10000baseSR_Full); if (id->base.e10g_base_lr) - phylink_set(support, 10000baseLR_Full); + phylink_set(modes, 10000baseLR_Full); if (id->base.e10g_base_lrm) - phylink_set(support, 10000baseLRM_Full); + phylink_set(modes, 10000baseLRM_Full); if (id->base.e10g_base_er) - phylink_set(support, 10000baseER_Full); + phylink_set(modes, 10000baseER_Full); if (id->base.e1000_base_sx || id->base.e1000_base_lx || id->base.e1000_base_cx) - phylink_set(support, 1000baseX_Full); + phylink_set(modes, 1000baseX_Full); if (id->base.e1000_base_t) { - phylink_set(support, 1000baseT_Half); - phylink_set(support, 1000baseT_Full); + phylink_set(modes, 1000baseT_Half); + phylink_set(modes, 1000baseT_Full); } /* 1000Base-PX or 1000Base-BX10 */ @@ -228,20 +163,20 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) { /* This may look odd, but some manufacturers use 12000MBd */ if (br_min <= 12000 && br_max >= 10300) - phylink_set(support, 10000baseCR_Full); + phylink_set(modes, 10000baseCR_Full); if (br_min <= 3200 && br_max >= 3100) - phylink_set(support, 2500baseX_Full); + phylink_set(modes, 2500baseX_Full); if (br_min <= 1300 && br_max >= 1200) - phylink_set(support, 1000baseX_Full); + phylink_set(modes, 1000baseX_Full); } if (id->base.sfp_ct_passive) { if (id->base.passive.sff8431_app_e) - phylink_set(support, 10000baseCR_Full); + phylink_set(modes, 10000baseCR_Full); } if (id->base.sfp_ct_active) { if (id->base.active.sff8431_app_e || id->base.active.sff8431_lim) { - phylink_set(support, 10000baseCR_Full); + phylink_set(modes, 10000baseCR_Full); } } @@ -249,18 +184,18 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, case 0x00: /* Unspecified */ break; case 0x02: /* 100Gbase-SR4 or 25Gbase-SR */ - phylink_set(support, 100000baseSR4_Full); - phylink_set(support, 25000baseSR_Full); + phylink_set(modes, 100000baseSR4_Full); + phylink_set(modes, 25000baseSR_Full); break; case 0x03: /* 100Gbase-LR4 or 25Gbase-LR */ case 0x04: /* 100Gbase-ER4 or 25Gbase-ER */ - phylink_set(support, 100000baseLR4_ER4_Full); + phylink_set(modes, 100000baseLR4_ER4_Full); break; case 0x0b: /* 100Gbase-CR4 or 25Gbase-CR CA-L */ case 0x0c: /* 25Gbase-CR CA-S */ case 0x0d: /* 25Gbase-CR CA-N */ - phylink_set(support, 100000baseCR4_Full); - phylink_set(support, 25000baseCR_Full); + phylink_set(modes, 100000baseCR4_Full); + phylink_set(modes, 25000baseCR_Full); break; default: dev_warn(bus->sfp_dev, @@ -274,13 +209,70 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, id->base.fc_speed_200 || id->base.fc_speed_400) { if (id->base.br_nominal >= 31) - phylink_set(support, 2500baseX_Full); + phylink_set(modes, 2500baseX_Full); if (id->base.br_nominal >= 12) - phylink_set(support, 1000baseX_Full); + phylink_set(modes, 1000baseX_Full); } + + /* If we haven't discovered any modes that this module supports, try + * the encoding and bitrate to determine supported modes. Some BiDi + * modules (eg, 1310nm/1550nm) are not 1000BASE-BX compliant due to + * the differing wavelengths, so do not set any transceiver bits. 
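sfp_select_interface(), added a little further down, is what lets phylink defer the interface decision until after validation: it walks the surviving link modes from fastest to slowest. Roughly (condensed; the real ladder also checks the 10G LR/LRM/ER and 100BASE bits):

#include <linux/phy.h>
#include <linux/phylink.h>

static phy_interface_t example_select(unsigned long *link_modes, bool base_t)
{
	if (phylink_test(link_modes, 10000baseCR_Full) ||
	    phylink_test(link_modes, 10000baseSR_Full))
		return PHY_INTERFACE_MODE_10GKR;

	if (phylink_test(link_modes, 2500baseX_Full))
		return PHY_INTERFACE_MODE_2500BASEX;

	if (base_t)		/* copper PHY on the module: use SGMII */
		return PHY_INTERFACE_MODE_SGMII;

	if (phylink_test(link_modes, 1000baseX_Full))
		return PHY_INTERFACE_MODE_1000BASEX;

	return PHY_INTERFACE_MODE_NA;
}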
+ */ + if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) { + /* If the encoding and bit rate allows 1000baseX */ + if (id->base.encoding == SFP_ENCODING_8B10B && br_nom && + br_min <= 1300 && br_max >= 1200) + phylink_set(modes, 1000baseX_Full); + } + + bitmap_or(support, support, modes, __ETHTOOL_LINK_MODE_MASK_NBITS); + + phylink_set(support, Autoneg); + phylink_set(support, Pause); + phylink_set(support, Asym_Pause); } EXPORT_SYMBOL_GPL(sfp_parse_support); +/** + * sfp_select_interface() - Select appropriate phy_interface_t mode + * @bus: a pointer to the &struct sfp_bus structure for the sfp module + * @id: a pointer to the module's &struct sfp_eeprom_id + * @link_modes: ethtool link modes mask + * + * Derive the phy_interface_t mode for the information found in the + * module's identifying EEPROM and the link modes mask. There is no + * standard or defined way to derive this information, so we decide + * based upon the link mode mask. + */ +phy_interface_t sfp_select_interface(struct sfp_bus *bus, + const struct sfp_eeprom_id *id, + unsigned long *link_modes) +{ + if (phylink_test(link_modes, 10000baseCR_Full) || + phylink_test(link_modes, 10000baseSR_Full) || + phylink_test(link_modes, 10000baseLR_Full) || + phylink_test(link_modes, 10000baseLRM_Full) || + phylink_test(link_modes, 10000baseER_Full)) + return PHY_INTERFACE_MODE_10GKR; + + if (phylink_test(link_modes, 2500baseX_Full)) + return PHY_INTERFACE_MODE_2500BASEX; + + if (id->base.e1000_base_t || + id->base.e100_base_lx || + id->base.e100_base_fx) + return PHY_INTERFACE_MODE_SGMII; + + if (phylink_test(link_modes, 1000baseX_Full)) + return PHY_INTERFACE_MODE_1000BASEX; + + dev_warn(bus->sfp_dev, "Unable to ascertain link mode\n"); + + return PHY_INTERFACE_MODE_NA; +} +EXPORT_SYMBOL_GPL(sfp_select_interface); + static LIST_HEAD(sfp_buses); static DEFINE_MUTEX(sfp_mutex); @@ -350,6 +342,7 @@ static int sfp_register_bus(struct sfp_bus *bus) } if (bus->started) bus->socket_ops->start(bus->sfp); + bus->netdev->sfp_bus = bus; bus->registered = true; return 0; } @@ -364,6 +357,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) if (bus->phydev && ops && ops->disconnect_phy) ops->disconnect_phy(bus->upstream); } + bus->netdev->sfp_bus = NULL; bus->registered = false; } @@ -379,8 +373,6 @@ static void sfp_unregister_bus(struct sfp_bus *bus) */ int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo) { - if (!bus->registered) - return -ENOIOCTLCMD; return bus->socket_ops->module_info(bus->sfp, modinfo); } EXPORT_SYMBOL_GPL(sfp_get_module_info); @@ -399,8 +391,6 @@ EXPORT_SYMBOL_GPL(sfp_get_module_info); int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, u8 *data) { - if (!bus->registered) - return -ENOIOCTLCMD; return bus->socket_ops->module_eeprom(bus->sfp, ee, data); } EXPORT_SYMBOL_GPL(sfp_get_module_eeprom); diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 6c7d9289078d..4ab6e9a50bbe 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -42,6 +42,7 @@ enum { SFP_MOD_EMPTY = 0, SFP_MOD_PROBE, + SFP_MOD_HPOWER, SFP_MOD_PRESENT, SFP_MOD_ERROR, @@ -86,6 +87,7 @@ static const enum gpiod_flags gpio_flags[] = { * access the I2C EEPROM. However, Avago modules require 300ms. 
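The new sfp_i2c_write() below has one subtlety worth calling out: the EEPROM offset travels in the same I2C message as the payload, as the first buffer byte. A self-contained sketch of that construction (helper name ours; len must fit the u16 message length):

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_eeprom_write(struct i2c_adapter *i2c, u8 bus_addr,
				u8 dev_addr, const void *buf, size_t len)
{
	struct i2c_msg msg = { .addr = bus_addr, .flags = 0, .len = 1 + len };
	int ret;

	msg.buf = kmalloc(1 + len, GFP_KERNEL);
	if (!msg.buf)
		return -ENOMEM;

	msg.buf[0] = dev_addr;		/* register offset goes first */
	memcpy(&msg.buf[1], buf, len);

	ret = i2c_transfer(i2c, &msg, 1);
	kfree(msg.buf);

	if (ret < 0)
		return ret;
	return ret == 1 ? (int)len : 0;
}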
*/ #define T_PROBE_INIT msecs_to_jiffies(300) +#define T_HPOWER_LEVEL msecs_to_jiffies(300) #define T_PROBE_RETRY msecs_to_jiffies(100) /* SFP modules appear to always have their PHY configured for bus address @@ -110,10 +112,12 @@ struct sfp { struct sfp_bus *sfp_bus; struct phy_device *mod_phy; const struct sff_data *type; + u32 max_power_mW; unsigned int (*get_state)(struct sfp *); void (*set_state)(struct sfp *, unsigned int); int (*read)(struct sfp *, bool, u8, void *, size_t); + int (*write)(struct sfp *, bool, u8, void *, size_t); struct gpio_desc *gpio[GPIO_MAX]; @@ -201,10 +205,11 @@ static void sfp_gpio_set_state(struct sfp *sfp, unsigned int state) } } -static int sfp__i2c_read(struct i2c_adapter *i2c, u8 bus_addr, u8 dev_addr, - void *buf, size_t len) +static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, + size_t len) { struct i2c_msg msgs[2]; + u8 bus_addr = a2 ? 0x51 : 0x50; int ret; msgs[0].addr = bus_addr; @@ -216,17 +221,38 @@ static int sfp__i2c_read(struct i2c_adapter *i2c, u8 bus_addr, u8 dev_addr, msgs[1].len = len; msgs[1].buf = buf; - ret = i2c_transfer(i2c, msgs, ARRAY_SIZE(msgs)); + ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs)); if (ret < 0) return ret; return ret == ARRAY_SIZE(msgs) ? len : 0; } -static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 addr, void *buf, - size_t len) +static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, + size_t len) { - return sfp__i2c_read(sfp->i2c, a2 ? 0x51 : 0x50, addr, buf, len); + struct i2c_msg msgs[1]; + u8 bus_addr = a2 ? 0x51 : 0x50; + int ret; + + msgs[0].addr = bus_addr; + msgs[0].flags = 0; + msgs[0].len = 1 + len; + msgs[0].buf = kmalloc(1 + len, GFP_KERNEL); + if (!msgs[0].buf) + return -ENOMEM; + + msgs[0].buf[0] = dev_addr; + memcpy(&msgs[0].buf[1], buf, len); + + ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs)); + + kfree(msgs[0].buf); + + if (ret < 0) + return ret; + + return ret == ARRAY_SIZE(msgs) ? len : 0; } static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c) @@ -239,6 +265,7 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c) sfp->i2c = i2c; sfp->read = sfp_i2c_read; + sfp->write = sfp_i2c_write; i2c_mii = mdio_i2c_alloc(sfp->dev, i2c); if (IS_ERR(i2c_mii)) @@ -274,6 +301,11 @@ static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len) return sfp->read(sfp, a2, addr, buf, len); } +static int sfp_write(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len) +{ + return sfp->write(sfp, a2, addr, buf, len); +} + static unsigned int sfp_check(void *buf, size_t len) { u8 *p, check; @@ -462,41 +494,124 @@ static void sfp_sm_mod_init(struct sfp *sfp) sfp_sm_probe_phy(sfp); } +static int sfp_sm_mod_hpower(struct sfp *sfp) +{ + u32 power; + u8 val; + int err; + + power = 1000; + if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL)) + power = 1500; + if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL)) + power = 2000; + + if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE && + (sfp->id.ext.diagmon & (SFP_DIAGMON_DDM | SFP_DIAGMON_ADDRMODE)) != + SFP_DIAGMON_DDM) { + /* The module appears not to implement bus address 0xa2, + * or requires an address change sequence, so assume that + * the module powers up in the indicated power mode. 
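Decoding the declared power budget in sfp_sm_mod_hpower() is just two option bits layered over a 1.0W default; the helper name below is ours, the constants are the ones the hunk uses from linux/sfp.h:

#include <asm/byteorder.h>
#include <linux/sfp.h>

static u32 example_module_power_mW(const struct sfp_eeprom_id *id)
{
	u32 power = 1000;	/* power level 1 is the baseline */

	if (id->ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL))
		power = 1500;
	if (id->ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL))
		power = 2000;

	return power;	/* caller compares this against max_power_mW */
}

Raising the level then amounts to setting bit 0 of SFP_EXT_STATUS in the A2h space and waiting out the T_HPOWER_LEVEL settle time, which is what the new SFP_MOD_HPOWER state exists for.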
+ */ + if (power > sfp->max_power_mW) { + dev_err(sfp->dev, + "Host does not support %u.%uW modules\n", + power / 1000, (power / 100) % 10); + return -EINVAL; + } + return 0; + } + + if (power > sfp->max_power_mW) { + dev_warn(sfp->dev, + "Host does not support %u.%uW modules, module left in power mode 1\n", + power / 1000, (power / 100) % 10); + return 0; + } + + if (power <= 1000) + return 0; + + err = sfp_read(sfp, true, SFP_EXT_STATUS, &val, sizeof(val)); + if (err != sizeof(val)) { + dev_err(sfp->dev, "Failed to read EEPROM: %d\n", err); + err = -EAGAIN; + goto err; + } + + val |= BIT(0); + + err = sfp_write(sfp, true, SFP_EXT_STATUS, &val, sizeof(val)); + if (err != sizeof(val)) { + dev_err(sfp->dev, "Failed to write EEPROM: %d\n", err); + err = -EAGAIN; + goto err; + } + + dev_info(sfp->dev, "Module switched to %u.%uW power level\n", + power / 1000, (power / 100) % 10); + return T_HPOWER_LEVEL; + +err: + return err; +} + static int sfp_sm_mod_probe(struct sfp *sfp) { /* SFP module inserted - read I2C data */ struct sfp_eeprom_id id; + bool cotsworks; u8 check; - int err; + int ret; - err = sfp_read(sfp, false, 0, &id, sizeof(id)); - if (err < 0) { - dev_err(sfp->dev, "failed to read EEPROM: %d\n", err); + ret = sfp_read(sfp, false, 0, &id, sizeof(id)); + if (ret < 0) { + dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret); return -EAGAIN; } - if (err != sizeof(id)) { - dev_err(sfp->dev, "EEPROM short read: %d\n", err); + if (ret != sizeof(id)) { + dev_err(sfp->dev, "EEPROM short read: %d\n", ret); return -EAGAIN; } + /* Cotsworks do not seem to update the checksums when they + * do the final programming with the final module part number, + * serial number and date code. + */ + cotsworks = !memcmp(id.base.vendor_name, "COTSWORKS ", 16); + /* Validate the checksum over the base structure */ check = sfp_check(&id.base, sizeof(id.base) - 1); if (check != id.base.cc_base) { - dev_err(sfp->dev, - "EEPROM base structure checksum failure: 0x%02x\n", - check); - print_hex_dump(KERN_ERR, "sfp EE: ", DUMP_PREFIX_OFFSET, - 16, 1, &id, sizeof(id.base) - 1, true); - return -EINVAL; + if (cotsworks) { + dev_warn(sfp->dev, + "EEPROM base structure checksum failure (0x%02x != 0x%02x)\n", + check, id.base.cc_base); + } else { + dev_err(sfp->dev, + "EEPROM base structure checksum failure: 0x%02x != 0x%02x\n", + check, id.base.cc_base); + print_hex_dump(KERN_ERR, "sfp EE: ", DUMP_PREFIX_OFFSET, + 16, 1, &id, sizeof(id), true); + return -EINVAL; + } } check = sfp_check(&id.ext, sizeof(id.ext) - 1); if (check != id.ext.cc_ext) { - dev_err(sfp->dev, - "EEPROM extended structure checksum failure: 0x%02x\n", - check); - memset(&id.ext, 0, sizeof(id.ext)); + if (cotsworks) { + dev_warn(sfp->dev, + "EEPROM extended structure checksum failure (0x%02x != 0x%02x)\n", + check, id.ext.cc_ext); + } else { + dev_err(sfp->dev, + "EEPROM extended structure checksum failure: 0x%02x != 0x%02x\n", + check, id.ext.cc_ext); + print_hex_dump(KERN_ERR, "sfp EE: ", DUMP_PREFIX_OFFSET, + 16, 1, &id, sizeof(id), true); + memset(&id.ext, 0, sizeof(id.ext)); + } } sfp->id = id; @@ -521,7 +636,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp) dev_warn(sfp->dev, "module address swap to access page 0xA2 is not supported.\n"); - return sfp_module_insert(sfp->sfp_bus, &sfp->id); + ret = sfp_module_insert(sfp->sfp_bus, &sfp->id); + if (ret < 0) + return ret; + + return sfp_sm_mod_hpower(sfp); } static void sfp_sm_mod_remove(struct sfp *sfp) @@ -560,17 +679,25 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) if (event 
== SFP_E_REMOVE) { sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0); } else if (event == SFP_E_TIMEOUT) { - int err = sfp_sm_mod_probe(sfp); + int val = sfp_sm_mod_probe(sfp); - if (err == 0) + if (val == 0) sfp_sm_ins_next(sfp, SFP_MOD_PRESENT, 0); - else if (err == -EAGAIN) - sfp_sm_set_timer(sfp, T_PROBE_RETRY); - else + else if (val > 0) + sfp_sm_ins_next(sfp, SFP_MOD_HPOWER, val); + else if (val != -EAGAIN) sfp_sm_ins_next(sfp, SFP_MOD_ERROR, 0); + else + sfp_sm_set_timer(sfp, T_PROBE_RETRY); } break; + case SFP_MOD_HPOWER: + if (event == SFP_E_TIMEOUT) { + sfp_sm_ins_next(sfp, SFP_MOD_PRESENT, 0); + break; + } + /* fallthrough */ case SFP_MOD_PRESENT: case SFP_MOD_ERROR: if (event == SFP_E_REMOVE) { @@ -889,6 +1016,14 @@ static int sfp_probe(struct platform_device *pdev) if (!(sfp->gpio[GPIO_MODDEF0])) sfp->get_state = sff_gpio_get_state; + device_property_read_u32(&pdev->dev, "maximum-power-milliwatt", + &sfp->max_power_mW); + if (!sfp->max_power_mW) + sfp->max_power_mW = 1000; + + dev_info(sfp->dev, "Host maximum power %u.%uW\n", + sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); + sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); if (!sfp->sfp_bus) return -ENOMEM; diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 1e2d4f1179da..f17b3441779b 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -417,7 +417,7 @@ static void ks8995_parse_dt(struct ks8995_switch *ks) static const struct bin_attribute ks8995_registers_attr = { .attr = { .name = "registers", - .mode = S_IRUSR | S_IWUSR, + .mode = 0600, }, .size = KS8995_REGS_SIZE, .read = ks8995_registers_read, diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c index fb2cef764e9a..22f3bdd8206c 100644 --- a/drivers/net/phy/teranetics.c +++ b/drivers/net/phy/teranetics.c @@ -34,39 +34,17 @@ MODULE_LICENSE("GPL v2"); MDIO_PHYXS_LNSTAT_SYNC3 | \ MDIO_PHYXS_LNSTAT_ALIGN) -static int teranetics_config_init(struct phy_device *phydev) -{ - phydev->supported = SUPPORTED_10000baseT_Full; - phydev->advertising = SUPPORTED_10000baseT_Full; - - return 0; -} - -static int teranetics_soft_reset(struct phy_device *phydev) -{ - return 0; -} - static int teranetics_aneg_done(struct phy_device *phydev) { - int reg; - /* auto negotiation state can only be checked when using copper * port, if using fiber port, just lie it's done. */ - if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { - reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); - return (reg < 0) ? 
reg : (reg & BMSR_ANEGCOMPLETE); - } + if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) + return genphy_c45_aneg_done(phydev); return 1; } -static int teranetics_config_aneg(struct phy_device *phydev) -{ - return 0; -} - static int teranetics_read_status(struct phy_device *phydev) { int reg; @@ -102,10 +80,10 @@ static struct phy_driver teranetics_driver[] = { .phy_id = PHY_ID_TN2020, .phy_id_mask = 0xffffffff, .name = "Teranetics TN2020", - .soft_reset = teranetics_soft_reset, + .soft_reset = gen10g_no_soft_reset, .aneg_done = teranetics_aneg_done, - .config_init = teranetics_config_init, - .config_aneg = teranetics_config_aneg, + .config_init = gen10g_config_init, + .config_aneg = gen10g_config_aneg, .read_status = teranetics_read_status, .match_phy_device = teranetics_match_phy_device, }, diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 255a5def56e9..dc7c7ec43202 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -257,7 +257,7 @@ struct ppp_net { /* Prototypes. */ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, struct file *file, unsigned int cmd, unsigned long arg); -static void ppp_xmit_process(struct ppp *ppp); +static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb); static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); static void ppp_push(struct ppp *ppp); static void ppp_channel_push(struct channel *pch); @@ -513,13 +513,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf, goto out; } - skb_queue_tail(&pf->xq, skb); - switch (pf->kind) { case INTERFACE: - ppp_xmit_process(PF_TO_PPP(pf)); + ppp_xmit_process(PF_TO_PPP(pf), skb); break; case CHANNEL: + skb_queue_tail(&pf->xq, skb); ppp_channel_push(PF_TO_CHANNEL(pf)); break; } @@ -1267,8 +1266,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) put_unaligned_be16(proto, pp); skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev))); - skb_queue_tail(&ppp->file.xq, skb); - ppp_xmit_process(ppp); + ppp_xmit_process(ppp, skb); + return NETDEV_TX_OK; outf: @@ -1420,13 +1419,14 @@ static void ppp_setup(struct net_device *dev) */ /* Called to do any work queued up on the transmit side that can now be done */ -static void __ppp_xmit_process(struct ppp *ppp) +static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) { - struct sk_buff *skb; - ppp_xmit_lock(ppp); if (!ppp->closing) { ppp_push(ppp); + + if (skb) + skb_queue_tail(&ppp->file.xq, skb); while (!ppp->xmit_pending && (skb = skb_dequeue(&ppp->file.xq))) ppp_send_frame(ppp, skb); @@ -1440,7 +1440,7 @@ static void __ppp_xmit_process(struct ppp *ppp) ppp_xmit_unlock(ppp); } -static void ppp_xmit_process(struct ppp *ppp) +static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) { local_bh_disable(); @@ -1448,7 +1448,7 @@ static void ppp_xmit_process(struct ppp *ppp) goto err; (*this_cpu_ptr(ppp->xmit_recursion))++; - __ppp_xmit_process(ppp); + __ppp_xmit_process(ppp, skb); (*this_cpu_ptr(ppp->xmit_recursion))--; local_bh_enable(); @@ -1458,6 +1458,8 @@ static void ppp_xmit_process(struct ppp *ppp) err: local_bh_enable(); + kfree_skb(skb); + if (net_ratelimit()) netdev_err(ppp->dev, "recursion detected\n"); } @@ -1684,7 +1686,7 @@ ppp_push(struct ppp *ppp) #ifdef CONFIG_PPP_MULTILINK static bool mp_protocol_compress __read_mostly = true; -module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR); +module_param(mp_protocol_compress, bool, 0644); MODULE_PARM_DESC(mp_protocol_compress, "compress protocol id in multilink 
fragments"); @@ -1942,7 +1944,7 @@ static void __ppp_channel_push(struct channel *pch) if (skb_queue_empty(&pch->file.xq)) { ppp = pch->ppp; if (ppp) - __ppp_xmit_process(ppp); + __ppp_xmit_process(ppp, NULL); } } @@ -3161,6 +3163,15 @@ ppp_connect_channel(struct channel *pch, int unit) goto outl; ppp_lock(ppp); + spin_lock_bh(&pch->downl); + if (!pch->chan) { + /* Don't connect unregistered channels */ + spin_unlock_bh(&pch->downl); + ppp_unlock(ppp); + ret = -ENOTCONN; + goto outl; + } + spin_unlock_bh(&pch->downl); if (pch->file.hdrlen > ppp->file.hdrlen) ppp->file.hdrlen = pch->file.hdrlen; hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 5aa59f41bf8c..1483bc7b01e1 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -714,7 +714,7 @@ err_put: } static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr, - int *usockaddr_len, int peer) + int peer) { int len = sizeof(struct sockaddr_pppox); struct sockaddr_pppox sp; @@ -726,9 +726,7 @@ static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr, memcpy(uaddr, &sp, len); - *usockaddr_len = len; - - return 0; + return len; } static int pppoe_ioctl(struct socket *sock, unsigned int cmd, @@ -1144,7 +1142,7 @@ static __net_init int pppoe_init_net(struct net *net) rwlock_init(&pn->hash_lock); - pde = proc_create("pppoe", S_IRUGO, net->proc_net, &pppoe_seq_fops); + pde = proc_create("pppoe", 0444, net->proc_net, &pppoe_seq_fops); #ifdef CONFIG_PROC_FS if (!pde) return -ENOMEM; diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 6dde9a0cfe76..c4267ecefd85 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -464,7 +464,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.mtu = dst_mtu(&rt->dst); if (!po->chan.mtu) po->chan.mtu = PPP_MRU; - ip_rt_put(rt); po->chan.mtu -= PPTP_HEADER_OVERHEAD; po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header); @@ -483,7 +482,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, } static int pptp_getname(struct socket *sock, struct sockaddr *uaddr, - int *usockaddr_len, int peer) + int peer) { int len = sizeof(struct sockaddr_pppox); struct sockaddr_pppox sp; @@ -496,9 +495,7 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr, memcpy(uaddr, &sp, len); - *usockaddr_len = len; - - return 0; + return len; } static int pptp_release(struct socket *sock) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a468439969df..a6c6ce19eeee 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1105,14 +1105,15 @@ static void team_port_disable_netpoll(struct team_port *port) } #endif -static int team_upper_dev_link(struct team *team, struct team_port *port) +static int team_upper_dev_link(struct team *team, struct team_port *port, + struct netlink_ext_ack *extack) { struct netdev_lag_upper_info lag_upper_info; int err; lag_upper_info.tx_type = team->mode->lag_tx_type; err = netdev_master_upper_dev_link(port->dev, team->dev, NULL, - &lag_upper_info, NULL); + &lag_upper_info, extack); if (err) return err; port->dev->priv_flags |= IFF_TEAM_PORT; @@ -1129,7 +1130,8 @@ static void __team_port_change_port_added(struct team_port *port, bool linkup); static int team_dev_type_check_change(struct net_device *dev, struct net_device *port_dev); -static int team_port_add(struct team *team, struct net_device *port_dev) +static int team_port_add(struct team *team, struct net_device *port_dev, 
+ struct netlink_ext_ack *extack) { struct net_device *dev = team->dev; struct team_port *port; @@ -1137,12 +1139,14 @@ static int team_port_add(struct team *team, struct net_device *port_dev) int err; if (port_dev->flags & IFF_LOOPBACK) { + NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port"); netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n", portname); return -EINVAL; } if (team_port_exists(port_dev)) { + NL_SET_ERR_MSG(extack, "Device is already a port of a team device"); netdev_err(dev, "Device %s is already a port " "of a team device\n", portname); return -EBUSY; @@ -1150,6 +1154,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) if (port_dev->features & NETIF_F_VLAN_CHALLENGED && vlan_uses_dev(dev)) { + NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up"); netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n", portname); return -EPERM; @@ -1160,6 +1165,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) return err; if (port_dev->flags & IFF_UP) { + NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port"); netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n", portname); return -EBUSY; @@ -1197,11 +1203,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_dev_open; } - netif_addr_lock_bh(dev); - dev_uc_sync_multiple(port_dev, dev); - dev_mc_sync_multiple(port_dev, dev); - netif_addr_unlock_bh(dev); - err = vlan_vids_add_by_dev(port_dev, dev); if (err) { netdev_err(dev, "Failed to add vlan ids to device %s\n", @@ -1227,7 +1228,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_handler_register; } - err = team_upper_dev_link(team, port); + err = team_upper_dev_link(team, port, extack); if (err) { netdev_err(dev, "Device %s failed to set upper link\n", portname); @@ -1241,6 +1242,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_option_port_add; } + netif_addr_lock_bh(dev); + dev_uc_sync_multiple(port_dev, dev); + dev_mc_sync_multiple(port_dev, dev); + netif_addr_unlock_bh(dev); + port->index = -1; list_add_tail_rcu(&port->list, &team->port_list); team_port_enable(team, port); @@ -1265,8 +1271,6 @@ err_enable_netpoll: vlan_vids_del_by_dev(port_dev, dev); err_vids_add: - dev_uc_unsync(port_dev, dev); - dev_mc_unsync(port_dev, dev); dev_close(port_dev); err_dev_open: @@ -1921,7 +1925,7 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev, int err; mutex_lock(&team->lock); - err = team_port_add(team, port_dev); + err = team_port_add(team, port_dev, extack); mutex_unlock(&team->lock); if (!err) @@ -2395,7 +2399,7 @@ send_done: if (!nlh) { err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) - goto errout; + return err; goto send_done; } @@ -2681,7 +2685,7 @@ send_done: if (!nlh) { err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) - goto errout; + return err; goto send_done; } diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index ca5e375de27c..e0d6760f3219 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -166,6 +166,8 @@ struct tbnet_ring { * @connected_work: Worker that finalizes the ThunderboltIP connection * setup and enables DMA paths for high speed data * transfers + * @disconnect_work: Worker that handles tearing down the ThunderboltIP + * connection 
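The thunderbolt change is a textbook deferral: a LOGOUT arriving in the frame callback must not tear the DMA rings down in-place, so teardown moves to a worker that is queued on receipt and flushed in tbnet_stop(). The pattern, with illustrative names:

#include <linux/workqueue.h>

struct example_net {
	struct work_struct disconnect_work;
	/* ... */
};

static void example_disconnect_work(struct work_struct *work)
{
	struct example_net *net =
		container_of(work, struct example_net, disconnect_work);

	/* tear the connection down in process context */
}

/* probe:   INIT_WORK(&net->disconnect_work, example_disconnect_work);
 * event:   queue_work(system_long_wq, &net->disconnect_work);
 * stop:    cancel_work_sync(&net->disconnect_work); then final teardown
 */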
* @rx_hdr: Copy of the currently processed Rx frame. Used when a * network packet consists of multiple Thunderbolt frames. * In host byte order. @@ -190,6 +192,7 @@ struct tbnet { int login_retries; struct delayed_work login_work; struct work_struct connected_work; + struct work_struct disconnect_work; struct thunderbolt_ip_frame_header rx_hdr; struct tbnet_ring rx_ring; atomic_t frame_id; @@ -445,7 +448,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data) case TBIP_LOGOUT: ret = tbnet_logout_response(net, route, sequence, command_id); if (!ret) - tbnet_tear_down(net, false); + queue_work(system_long_wq, &net->disconnect_work); break; default: @@ -659,6 +662,13 @@ static void tbnet_login_work(struct work_struct *work) } } +static void tbnet_disconnect_work(struct work_struct *work) +{ + struct tbnet *net = container_of(work, typeof(*net), disconnect_work); + + tbnet_tear_down(net, false); +} + static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, const struct thunderbolt_ip_frame_header *hdr) { @@ -881,6 +891,7 @@ static int tbnet_stop(struct net_device *dev) napi_disable(&net->napi); + cancel_work_sync(&net->disconnect_work); tbnet_tear_down(net, true); tb_ring_free(net->rx_ring.ring); @@ -1195,6 +1206,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id) net = netdev_priv(dev); INIT_DELAYED_WORK(&net->login_work, tbnet_login_work); INIT_WORK(&net->connected_work, tbnet_connected_work); + INIT_WORK(&net->disconnect_work, tbnet_disconnect_work); mutex_init(&net->connection_lock); atomic_set(&net->command_id, 0); atomic_set(&net->frame_id, 0); @@ -1270,10 +1282,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev) stop_login(net); if (netif_running(net->dev)) { netif_device_detach(net->dev); - tb_ring_stop(net->rx_ring.ring); - tb_ring_stop(net->tx_ring.ring); - tbnet_free_buffers(&net->rx_ring); - tbnet_free_buffers(&net->tx_ring); + tbnet_tear_down(net, true); } return 0; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 81e6cc951e7f..a1ba262f40ad 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -78,6 +78,7 @@ #include <linux/mutex.h> #include <linux/uaccess.h> +#include <linux/proc_fs.h> /* Uncomment to enable debugging */ /* #define TUN_DEBUG 1 */ @@ -181,7 +182,6 @@ struct tun_file { struct tun_struct *detached; struct ptr_ring tx_ring; struct xdp_rxq_info xdp_rxq; - int xdp_pending_pkts; }; struct tun_flow_entry { @@ -656,7 +656,7 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile) return tun; } -static void tun_ptr_free(void *ptr) +void tun_ptr_free(void *ptr) { if (!ptr) return; @@ -668,6 +668,7 @@ static void tun_ptr_free(void *ptr) __skb_array_destroy_skb(ptr); } } +EXPORT_SYMBOL_GPL(tun_ptr_free); static void tun_queue_purge(struct tun_file *tfile) { @@ -1489,27 +1490,23 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, skb->truesize += skb->data_len; for (i = 1; i < it->nr_segs; i++) { + struct page_frag *pfrag = ¤t->task_frag; size_t fragsz = it->iov[i].iov_len; - unsigned long offset; - struct page *page; - void *data; if (fragsz == 0 || fragsz > PAGE_SIZE) { err = -EINVAL; goto free; } - local_bh_disable(); - data = napi_alloc_frag(fragsz); - local_bh_enable(); - if (!data) { + if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) { err = -ENOMEM; goto free; } - page = virt_to_head_page(data); - offset = data - page_address(page); - skb_fill_page_desc(skb, i - 1, page, offset, fragsz); + skb_fill_page_desc(skb, i - 1, pfrag->page, + 
pfrag->offset, fragsz); + page_ref_inc(pfrag->page); + pfrag->offset += fragsz; } return skb; @@ -1617,7 +1614,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, unsigned int delta = 0; char *buf; size_t copied; - bool xdp_xmit = false; int err, pad = TUN_RX_PAD; rcu_read_lock(); @@ -1647,6 +1643,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, else *skb_xdp = 0; + preempt_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog && !*skb_xdp) { @@ -1666,15 +1663,22 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, case XDP_REDIRECT: get_page(alloc_frag->page); alloc_frag->offset += buflen; - ++tfile->xdp_pending_pkts; err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); + xdp_do_flush_map(); if (err) goto err_redirect; rcu_read_unlock(); + preempt_enable(); return NULL; case XDP_TX: - xdp_xmit = true; - /* fall through */ + get_page(alloc_frag->page); + alloc_frag->offset += buflen; + if (tun_xdp_xmit(tun->dev, &xdp)) + goto err_redirect; + tun_xdp_flush(tun->dev); + rcu_read_unlock(); + preempt_enable(); + return NULL; case XDP_PASS: delta = orig_data - xdp.data; break; @@ -1692,6 +1696,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, skb = build_skb(buf, buflen); if (!skb) { rcu_read_unlock(); + preempt_enable(); return ERR_PTR(-ENOMEM); } @@ -1700,14 +1705,8 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, get_page(alloc_frag->page); alloc_frag->offset += buflen; - if (xdp_xmit) { - skb->dev = tun->dev; - generic_xdp_tx(skb, xdp_prog); - rcu_read_unlock(); - return NULL; - } - rcu_read_unlock(); + preempt_enable(); return skb; @@ -1715,6 +1714,7 @@ err_redirect: put_page(alloc_frag->page); err_xdp: rcu_read_unlock(); + preempt_enable(); this_cpu_inc(tun->pcpu_stats->rx_dropped); return NULL; } @@ -1988,11 +1988,6 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK, false); - if (tfile->xdp_pending_pkts) { - tfile->xdp_pending_pkts = 0; - xdp_do_flush_map(); - } - tun_put(tun); return result; } @@ -2290,11 +2285,67 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[], return -EINVAL; } +static size_t tun_get_size(const struct net_device *dev) +{ + BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); + BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); + + return nla_total_size(sizeof(uid_t)) + /* OWNER */ + nla_total_size(sizeof(gid_t)) + /* GROUP */ + nla_total_size(sizeof(u8)) + /* TYPE */ + nla_total_size(sizeof(u8)) + /* PI */ + nla_total_size(sizeof(u8)) + /* VNET_HDR */ + nla_total_size(sizeof(u8)) + /* PERSIST */ + nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ + nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ + nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ + 0; +} + +static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct tun_struct *tun = netdev_priv(dev); + + if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) + goto nla_put_failure; + if (uid_valid(tun->owner) && + nla_put_u32(skb, IFLA_TUN_OWNER, + from_kuid_munged(current_user_ns(), tun->owner))) + goto nla_put_failure; + if (gid_valid(tun->group) && + nla_put_u32(skb, IFLA_TUN_GROUP, + from_kgid_munged(current_user_ns(), tun->group))) + goto nla_put_failure; + if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) + goto nla_put_failure; + if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) + goto nla_put_failure; + if (nla_put_u8(skb, 
IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) + goto nla_put_failure; + if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, + !!(tun->flags & IFF_MULTI_QUEUE))) + goto nla_put_failure; + if (tun->flags & IFF_MULTI_QUEUE) { + if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) + goto nla_put_failure; + if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, + tun->numdisabled)) + goto nla_put_failure; + } + + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + static struct rtnl_link_ops tun_link_ops __read_mostly = { .kind = DRV_NAME, .priv_size = sizeof(struct tun_struct), .setup = tun_setup, .validate = tun_validate, + .get_size = tun_get_size, + .fill_info = tun_fill_info, }; static void tun_sock_write_space(struct sock *sk) @@ -2329,13 +2380,6 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, m->msg_flags & MSG_DONTWAIT, m->msg_flags & MSG_MORE); - - if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT || - !(m->msg_flags & MSG_MORE)) { - tfile->xdp_pending_pkts = 0; - xdp_do_flush_map(); - } - tun_put(tun); return ret; } @@ -2793,6 +2837,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, struct tun_struct *tun; void __user* argp = (void __user*)arg; struct ifreq ifr; + struct net *net; kuid_t owner; kgid_t group; int sndbuf; @@ -2801,7 +2846,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, int le; int ret; - if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) { + if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || + (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { if (copy_from_user(&ifr, argp, ifreq_len)) return -EFAULT; } else { @@ -2821,6 +2867,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, rtnl_lock(); tun = tun_get(tfile); + net = sock_net(&tfile->sk); if (cmd == TUNSETIFF) { ret = -EEXIST; if (tun) @@ -2828,7 +2875,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, ifr.ifr_name[IFNAMSIZ-1] = '\0'; - ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr); + ret = tun_set_iff(net, file, &ifr); if (ret) goto unlock; @@ -2850,6 +2897,14 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, tfile->ifindex = ifindex; goto unlock; } + if (cmd == SIOCGSKNS) { + ret = -EPERM; + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + goto unlock; + + ret = open_related_ns(&net->ns, get_net_ns); + goto unlock; + } ret = -EBADFD; if (!tun) @@ -3167,7 +3222,6 @@ static int tun_chr_open(struct inode *inode, struct file * file) sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); - tfile->xdp_pending_pkts = 0; return 0; } diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index f32261ecd215..a6ef75907ae9 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1556,7 +1556,6 @@ static int ax88179_reset(struct usbnet *dev) ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN, dev->net->dev_addr); - memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN); /* RX bulk configuration */ memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5); diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c index f7180f8db39e..61ea4eaace5d 100644 --- a/drivers/net/usb/cdc_eem.c +++ b/drivers/net/usb/cdc_eem.c @@ -83,11 +83,8 @@ static int eem_bind(struct usbnet *dev, struct usb_interface *intf) int status = 0; status = usbnet_get_endpoints(dev, intf); - if (status < 0) { - usb_set_intfdata(intf, NULL); - 
usb_driver_release_interface(driver_of(intf), intf); + if (status < 0) return status; - } /* no jumbogram (16K) support for now */ diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 05dca3e5c93d..fff4b13eece2 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -896,6 +896,12 @@ static const struct usb_device_id products[] = { USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { + /* Cinterion PLS8 modem by GEMALTO */ + USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0061, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&wwan_info, +}, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &cdc_info, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 9e1b74590682..90d07ed224d5 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -58,7 +58,7 @@ static bool prefer_mbim = true; #else static bool prefer_mbim; #endif -module_param(prefer_mbim, bool, S_IRUGO | S_IWUSR); +module_param(prefer_mbim, bool, 0644); MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions"); static void cdc_ncm_txpath_bh(unsigned long param); @@ -281,10 +281,10 @@ static ssize_t cdc_ncm_store_tx_timer_usecs(struct device *d, struct device_att return len; } -static DEVICE_ATTR(min_tx_pkt, S_IRUGO | S_IWUSR, cdc_ncm_show_min_tx_pkt, cdc_ncm_store_min_tx_pkt); -static DEVICE_ATTR(rx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_rx_max, cdc_ncm_store_rx_max); -static DEVICE_ATTR(tx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_max, cdc_ncm_store_tx_max); -static DEVICE_ATTR(tx_timer_usecs, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_timer_usecs, cdc_ncm_store_tx_timer_usecs); +static DEVICE_ATTR(min_tx_pkt, 0644, cdc_ncm_show_min_tx_pkt, cdc_ncm_store_min_tx_pkt); +static DEVICE_ATTR(rx_max, 0644, cdc_ncm_show_rx_max, cdc_ncm_store_rx_max); +static DEVICE_ATTR(tx_max, 0644, cdc_ncm_show_tx_max, cdc_ncm_store_tx_max); +static DEVICE_ATTR(tx_timer_usecs, 0644, cdc_ncm_show_tx_timer_usecs, cdc_ncm_store_tx_timer_usecs); static ssize_t ndp_to_end_show(struct device *d, struct device_attribute *attr, char *buf) { @@ -335,7 +335,7 @@ static ssize_t cdc_ncm_show_##name(struct device *d, struct device_attribute *at struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; \ return sprintf(buf, format "\n", tocpu(ctx->ncm_parm.name)); \ } \ -static DEVICE_ATTR(name, S_IRUGO, cdc_ncm_show_##name, NULL) +static DEVICE_ATTR(name, 0444, cdc_ncm_show_##name, NULL) NCM_PARM_ATTR(bmNtbFormatsSupported, "0x%04x", le16_to_cpu); NCM_PARM_ATTR(dwNtbInMaxSize, "%u", le32_to_cpu); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 981c931a7a1f..e53883ad6107 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -519,7 +519,7 @@ static ssize_t hso_sysfs_show_porttype(struct device *dev, return sprintf(buf, "%s\n", port_name); } -static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL); +static DEVICE_ATTR(hsotype, 0444, hso_sysfs_show_porttype, NULL); static struct attribute *hso_serial_dev_attrs[] = { &dev_attr_hsotype.attr, @@ -3289,12 +3289,12 @@ MODULE_LICENSE("GPL"); /* change the debug level (eg: insmod hso.ko debug=0x04) */ MODULE_PARM_DESC(debug, "debug level mask [0x01 | 0x02 | 0x04 | 0x08 | 0x10]"); -module_param(debug, int, S_IRUGO | S_IWUSR); +module_param(debug, int, 0644); /* set the major tty number (eg: insmod hso.ko tty_major=245) */ MODULE_PARM_DESC(tty_major, "Set the major tty 
number"); -module_param(tty_major, int, S_IRUGO | S_IWUSR); +module_param(tty_major, int, 0644); /* disable network interface (eg: insmod hso.ko disable_net=1) */ MODULE_PARM_DESC(disable_net, "Disable the network interface"); -module_param(disable_net, int, S_IRUGO | S_IWUSR); +module_param(disable_net, int, 0644); diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c index ce0b0b4e3a57..bd2ba3659028 100644 --- a/drivers/net/usb/kalmia.c +++ b/drivers/net/usb/kalmia.c @@ -114,14 +114,14 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr) return -ENOMEM; memcpy(usb_buf, init_msg_1, 12); - status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_1) - / sizeof(init_msg_1[0]), usb_buf, 24); + status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1), + usb_buf, 24); if (status != 0) return status; memcpy(usb_buf, init_msg_2, 12); - status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_2) - / sizeof(init_msg_2[0]), usb_buf, 28); + status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2), + usb_buf, 28); if (status != 0) return status; @@ -150,12 +150,8 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf) dev->rx_urb_size = dev->hard_mtu * 10; // Found as optimal after testing status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr); - - if (status) { - usb_set_intfdata(intf, NULL); - usb_driver_release_interface(driver_of(intf), intf); + if (status) return status; - } memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 60a604cc7647..aff105f5f58c 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2082,10 +2082,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) dev->fc_autoneg = phydev->autoneg; - phy_start(phydev); - - netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); - return 0; error: @@ -2351,6 +2347,7 @@ static int lan78xx_reset(struct lan78xx_net *dev) u32 buf; int ret = 0; unsigned long timeout; + u8 sig; ret = lan78xx_read_reg(dev, HW_CFG, &buf); buf |= HW_CFG_LRST_; @@ -2450,6 +2447,15 @@ static int lan78xx_reset(struct lan78xx_net *dev) /* LAN7801 only has RGMII mode */ if (dev->chipid == ID_REV_CHIP_ID_7801_) buf &= ~MAC_CR_GMII_EN_; + + if (dev->chipid == ID_REV_CHIP_ID_7800_) { + ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); + if (!ret && sig != EEPROM_INDICATOR) { + /* Implies there is no external eeprom. Set mac speed */ + netdev_info(dev->net, "No External EEPROM. 
Setting MAC Speed\n"); + buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; + } + } ret = lan78xx_write_reg(dev, MAC_CR, buf); ret = lan78xx_read_reg(dev, MAC_TX, &buf); @@ -2512,9 +2518,9 @@ static int lan78xx_open(struct net_device *net) if (ret < 0) goto done; - ret = lan78xx_phy_init(dev); - if (ret < 0) - goto done; + phy_start(net->phydev); + + netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); /* for Link Check */ if (dev->urb_intr) { @@ -2575,13 +2581,8 @@ static int lan78xx_stop(struct net_device *net) if (timer_pending(&dev->stat_monitor)) del_timer_sync(&dev->stat_monitor); - phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); - phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); - - phy_stop(net->phydev); - phy_disconnect(net->phydev); - - net->phydev = NULL; + if (net->phydev) + phy_stop(net->phydev); clear_bit(EVENT_DEV_OPEN, &dev->flags); netif_stop_queue(net); @@ -2863,8 +2864,7 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) if (ret < 0) { netdev_warn(dev->net, "lan78xx_setup_irq_domain() failed : %d", ret); - kfree(pdata); - return ret; + goto out1; } dev->net->hard_header_len += TX_OVERHEAD; @@ -2872,14 +2872,32 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) /* Init all registers */ ret = lan78xx_reset(dev); + if (ret) { + netdev_warn(dev->net, "Registers INIT FAILED...."); + goto out2; + } ret = lan78xx_mdio_init(dev); + if (ret) { + netdev_warn(dev->net, "MDIO INIT FAILED....."); + goto out2; + } dev->net->flags |= IFF_MULTICAST; pdata->wol = WAKE_MAGIC; return ret; + +out2: + lan78xx_remove_irq_domain(dev); + +out1: + netdev_warn(dev->net, "Bind routine FAILED"); + cancel_work_sync(&pdata->set_multicast); + cancel_work_sync(&pdata->set_vlan); + kfree(pdata); + return ret; } static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) @@ -2891,6 +2909,8 @@ static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) lan78xx_remove_mdio(dev); if (pdata) { + cancel_work_sync(&pdata->set_multicast); + cancel_work_sync(&pdata->set_vlan); netif_dbg(dev, ifdown, dev->net, "free pdata"); kfree(pdata); pdata = NULL; @@ -3477,8 +3497,13 @@ static void lan78xx_disconnect(struct usb_interface *intf) return; udev = interface_to_usbdev(intf); - net = dev->net; + + phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); + phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); + + phy_disconnect(net->phydev); + unregister_netdev(net); cancel_delayed_work_sync(&dev->wq); @@ -3634,8 +3659,14 @@ static int lan78xx_probe(struct usb_interface *intf, pm_runtime_set_autosuspend_delay(&udev->dev, DEFAULT_AUTOSUSPEND_DELAY); + ret = lan78xx_phy_init(dev); + if (ret < 0) + goto out4; + return 0; +out4: + unregister_netdev(netdev); out3: lan78xx_unbind(dev, intf); out2: @@ -3983,7 +4014,7 @@ static int lan78xx_reset_resume(struct usb_interface *intf) lan78xx_reset(dev); - lan78xx_phy_init(dev); + phy_start(dev->net->phydev); return lan78xx_resume(intf); } diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c index dbabd7ca5268..257916f172cd 100644 --- a/drivers/net/usb/lg-vl600.c +++ b/drivers/net/usb/lg-vl600.c @@ -157,12 +157,8 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb) s->current_rx_buf = skb_copy_expand(skb, 0, le32_to_cpup(&frame->len), GFP_ATOMIC); - if (!s->current_rx_buf) { - netif_err(dev, ifup, dev->net, "Reserving %i bytes " - "for packet assembly failed.\n", - le32_to_cpup(&frame->len)); + if (!s->current_rx_buf) 
dev->net->stats.rx_errors++; - } return 0; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 76ac48095c29..ca066b785e9f 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1104,6 +1104,9 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ + {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ + {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ + {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ @@ -1180,6 +1183,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ + {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ @@ -1240,6 +1244,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ + {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 958b2e8b90f6..86f7196f9d91 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1794,7 +1794,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) tx_data += len; agg->skb_len += len; - agg->skb_num++; + agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1; dev_kfree_skb_any(skb); diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index d0a113743195..7a6a1fe79309 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -954,10 +954,11 @@ static int smsc75xx_set_features(struct net_device *netdev, /* it's racing here! 
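(presumably because pdata->rfe_ctl is read, modified and written back with no lock held, so concurrent feature changes can interleave)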
*/ ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); - if (ret < 0) + if (ret < 0) { netdev_warn(dev->net, "Error writing RFE_CTL\n"); - - return ret; + return ret; + } + return 0; } static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 8a22ff67b026..d9eea8cfe6cb 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -315,6 +315,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev) void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) { struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); + unsigned long flags; int status; if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { @@ -326,10 +327,10 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) if (skb->protocol == 0) skb->protocol = eth_type_trans (skb, dev->net); - u64_stats_update_begin(&stats64->syncp); + flags = u64_stats_update_begin_irqsave(&stats64->syncp); stats64->rx_packets++; stats64->rx_bytes += skb->len; - u64_stats_update_end(&stats64->syncp); + u64_stats_update_end_irqrestore(&stats64->syncp, flags); netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", skb->len + sizeof (struct ethhdr), skb->protocol); @@ -1248,11 +1249,12 @@ static void tx_complete (struct urb *urb) if (urb->status == 0) { struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); + unsigned long flags; - u64_stats_update_begin(&stats64->syncp); + flags = u64_stats_update_begin_irqsave(&stats64->syncp); stats64->tx_packets += entry->packets; stats64->tx_bytes += entry->length; - u64_stats_update_end(&stats64->syncp); + u64_stats_update_end_irqrestore(&stats64->syncp, flags); } else { dev->net->stats.tx_errors++; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 626c27352ae2..7b187ec7411e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -443,12 +443,8 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi, sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC); - if (unlikely(err)) { - struct page *page = virt_to_head_page(xdp->data); - - put_page(page); - return false; - } + if (unlikely(err)) + return false; /* Caller handles free/refcnt */ return true; } @@ -456,8 +452,18 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi, static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) { struct virtnet_info *vi = netdev_priv(dev); - bool sent = __virtnet_xdp_xmit(vi, xdp); + struct receive_queue *rq = vi->rq; + struct bpf_prog *xdp_prog; + bool sent; + + /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this + * indicates XDP resources have been successfully allocated. + */ + xdp_prog = rcu_dereference(rq->xdp_prog); + if (!xdp_prog) + return -ENXIO; + sent = __virtnet_xdp_xmit(vi, xdp); + if (!sent) return -ENOSPC; return 0; @@ -498,6 +504,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, page_off += *len; while (--*num_buf) { + int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); unsigned int buflen; void *buf; int off; @@ -512,7 +519,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, /* guard against a misconfigured or uncooperative backend that * is sending packets larger than the MTU.
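* With the tailroom term added to the check below, the copied data plus the skb_shared_info tailroom must still fit within the page.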
*/ - if ((page_off + buflen) > PAGE_SIZE) { + if ((page_off + buflen + tailroom) > PAGE_SIZE) { put_page(p); goto err_buf; } @@ -546,8 +553,11 @@ static struct sk_buff *receive_small(struct net_device *dev, unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); struct page *page = virt_to_head_page(buf); - unsigned int delta = 0, err; + unsigned int delta = 0; struct page *xdp_page; + bool sent; + int err; + len -= vi->hdr_len; rcu_read_lock(); @@ -558,7 +568,7 @@ static struct sk_buff *receive_small(struct net_device *dev, void *orig_data; u32 act; - if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) + if (unlikely(hdr->hdr.gso_type)) goto err_xdp; if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { @@ -596,16 +606,19 @@ static struct sk_buff *receive_small(struct net_device *dev, delta = orig_data - xdp.data; break; case XDP_TX: - if (unlikely(!__virtnet_xdp_xmit(vi, &xdp))) + sent = __virtnet_xdp_xmit(vi, &xdp); + if (unlikely(!sent)) { trace_xdp_exception(vi->dev, xdp_prog, act); - else - *xdp_xmit = true; + goto err_xdp; + } + *xdp_xmit = true; rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: err = xdp_do_redirect(dev, &xdp, xdp_prog); - if (!err) - *xdp_xmit = true; + if (err) + goto err_xdp; + *xdp_xmit = true; rcu_read_unlock(); goto xdp_xmit; default: @@ -677,6 +690,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, struct bpf_prog *xdp_prog; unsigned int truesize; unsigned int headroom = mergeable_ctx_to_headroom(ctx); + bool sent; int err; head_skb = NULL; @@ -689,7 +703,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *data; u32 act; - /* This happens when rx buffer size is underestimated */ + /* This happens when rx buffer size is underestimated + * or headroom is not enough because the buffer + * was refilled before XDP is set. This should only + * happen for the first several packets, so we don't + * care much about its performance.
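+ * (xdp_linearize_page() copies all fragments into a single page, which is costly, but this path is rare.)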
+ */ if (unlikely(num_buf > 1 || headroom < virtnet_get_headroom(vi))) { /* linearize data for XDP */ @@ -724,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, act = bpf_prog_run_xdp(xdp_prog, &xdp); - if (act != XDP_PASS) - ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); - switch (act) { case XDP_PASS: /* recalculate offset to account for any header @@ -746,18 +762,28 @@ } break; case XDP_TX: - if (unlikely(!__virtnet_xdp_xmit(vi, &xdp))) + sent = __virtnet_xdp_xmit(vi, &xdp); + if (unlikely(!sent)) { trace_xdp_exception(vi->dev, xdp_prog, act); - else - *xdp_xmit = true; + if (unlikely(xdp_page != page)) + put_page(xdp_page); + goto err_xdp; + } + *xdp_xmit = true; if (unlikely(xdp_page != page)) goto err_xdp; rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: err = xdp_do_redirect(dev, &xdp, xdp_prog); - if (!err) - *xdp_xmit = true; + if (err) { + if (unlikely(xdp_page != page)) + put_page(xdp_page); + goto err_xdp; + } + *xdp_xmit = true; + if (unlikely(xdp_page != page)) + goto err_xdp; rcu_read_unlock(); goto xdp_xmit; default: @@ -1003,13 +1029,18 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, } static unsigned int get_mergeable_buf_len(struct receive_queue *rq, - struct ewma_pkt_len *avg_pkt_len) + struct ewma_pkt_len *avg_pkt_len, + unsigned int room) { const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); unsigned int len; - len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), + if (room) + return PAGE_SIZE - room; + + len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), rq->min_buf_len, PAGE_SIZE - hdr_len); + return ALIGN(len, L1_CACHE_BYTES); } @@ -1018,21 +1049,27 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, { struct page_frag *alloc_frag = &rq->alloc_frag; unsigned int headroom = virtnet_get_headroom(vi); + unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; + unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); char *buf; void *ctx; int err; unsigned int len, hole; - len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len); - if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp))) + /* Extra tailroom is needed to satisfy XDP's assumption. This + * means rx frag coalescing won't work, but since we've + * disabled GSO for XDP, it shouldn't be a big issue. + */ + len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); + if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp))) return -ENOMEM; buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; buf += headroom; /* advance address leaving hole at front of pkt */ get_page(alloc_frag->page); - alloc_frag->offset += len + headroom; + alloc_frag->offset += len + room; hole = alloc_frag->size - alloc_frag->offset; - if (hole < len + headroom) { + if (hole < len + room) { /* To avoid internal fragmentation, if there is very likely not * enough space for another buffer, add the remaining space to * the current buffer. @@ -2175,8 +2212,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, } /* Make sure NAPI is not using any XDP TX queues for RX.
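* The netif_running() checks added below skip this step when the device is down, since NAPI is not active then.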
*/ - for (i = 0; i < vi->max_queue_pairs; i++) - napi_disable(&vi->rq[i].napi); + if (netif_running(dev)) + for (i = 0; i < vi->max_queue_pairs; i++) + napi_disable(&vi->rq[i].napi); netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); err = _virtnet_set_queues(vi, curr_qp + xdp_qp); @@ -2195,7 +2233,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, } if (old_prog) bpf_prog_put(old_prog); - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + if (netif_running(dev)) + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); } return 0; @@ -2566,12 +2605,15 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, { struct virtnet_info *vi = netdev_priv(queue->dev); unsigned int queue_index = get_netdev_rx_queue_index(queue); + unsigned int headroom = virtnet_get_headroom(vi); + unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; struct ewma_pkt_len *avg; BUG_ON(queue_index >= vi->max_queue_pairs); avg = &vi->rq[queue_index].mrg_avg_pkt_len; return sprintf(buf, "%u\n", - get_mergeable_buf_len(&vi->rq[queue_index], avg)); + get_mergeable_buf_len(&vi->rq[queue_index], avg, + SKB_DATA_ALIGN(headroom + tailroom))); } static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = @@ -2815,8 +2857,8 @@ static int virtnet_probe(struct virtio_device *vdev) /* Assume link up if device can't report link status, otherwise get link status from config. */ + netif_carrier_off(dev); if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { - netif_carrier_off(dev); schedule_work(&vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 8b39c160743d..e04937f44f33 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -977,6 +977,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, { int ret; u32 count; + int num_pkts; + int tx_num_deferred; unsigned long flags; struct vmxnet3_tx_ctx ctx; union Vmxnet3_GenericDesc *gdesc; @@ -1075,12 +1077,12 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, #else gdesc = ctx.sop_txd; #endif + tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred); if (ctx.mss) { gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; gdesc->txd.om = VMXNET3_OM_TSO; gdesc->txd.msscof = ctx.mss; - le32_add_cpu(&tq->shared->txNumDeferred, (skb->len - - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss); + num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss; } else { if (skb->ip_summed == CHECKSUM_PARTIAL) { gdesc->txd.hlen = ctx.eth_ip_hdr_size; @@ -1091,8 +1093,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, gdesc->txd.om = 0; gdesc->txd.msscof = 0; } - le32_add_cpu(&tq->shared->txNumDeferred, 1); + num_pkts = 1; } + le32_add_cpu(&tq->shared->txNumDeferred, num_pkts); + tx_num_deferred += num_pkts; if (skb_vlan_tag_present(skb)) { gdesc->txd.ti = 1; @@ -1118,8 +1122,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, spin_unlock_irqrestore(&tq->tx_lock, flags); - if (le32_to_cpu(tq->shared->txNumDeferred) >= - le32_to_cpu(tq->shared->txThreshold)) { + if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) { tq->shared->txNumDeferred = 0; VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD + tq->qid * 8, @@ -1470,7 +1473,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, vmxnet3_rx_csum(adapter, skb, (union Vmxnet3_GenericDesc *)rcd); skb->protocol = eth_type_trans(skb, adapter->netdev); - if 
(!rcd->tcp || !adapter->lro) + if (!rcd->tcp || + !(adapter->netdev->features & NETIF_F_LRO)) goto not_lro; if (segCnt != 0 && mss != 0) { diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 5ba222920e80..59ec34052a65 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.4.11.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k" /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01040b00 +#define VMXNET3_DRIVER_VERSION_NUM 0x01040d00 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ @@ -342,9 +342,6 @@ struct vmxnet3_adapter { u8 __iomem *hw_addr1; /* for BAR 1 */ u8 version; - bool rxcsum; - bool lro; - #ifdef VMXNET3_RSS struct UPT1_RSSConf *rss_conf; bool rss; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 139c61c8244a..0a2b180d138a 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -578,12 +578,13 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s if (!IS_ERR(neigh)) { sock_confirm_neigh(skb, neigh); ret = neigh_output(neigh, skb); + rcu_read_unlock_bh(); + return ret; } rcu_read_unlock_bh(); err: - if (unlikely(ret < 0)) - vrf_tx_error(skb->dev, skb); + vrf_tx_error(skb->dev, skb); return ret; } @@ -736,7 +737,6 @@ static int vrf_rtable_create(struct net_device *dev) return -ENOMEM; rth->dst.output = vrf_output; - rth->rt_table_id = vrf->tb_id; rcu_assign_pointer(vrf->rth, rth); @@ -942,6 +942,7 @@ static struct rt6_info *vrf_ip6_route_lookup(struct net *net, const struct net_device *dev, struct flowi6 *fl6, int ifindex, + const struct sk_buff *skb, int flags) { struct net_vrf *vrf = netdev_priv(dev); @@ -960,7 +961,7 @@ static struct rt6_info *vrf_ip6_route_lookup(struct net *net, if (!table) return NULL; - return ip6_pol_route(net, table, ifindex, fl6, flags); + return ip6_pol_route(net, table, ifindex, fl6, skb, flags); } static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, @@ -978,7 +979,7 @@ static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, struct net *net = dev_net(vrf_dev); struct rt6_info *rt6; - rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, + rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb, RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE); if (unlikely(!rt6)) return; @@ -1111,7 +1112,7 @@ static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev, if (!ipv6_addr_any(&fl6->saddr)) flags |= RT6_LOOKUP_F_HAS_SADDR; - rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags); + rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags); if (rt) dst = &rt->dst; @@ -1146,6 +1147,7 @@ static inline size_t vrf_fib_rule_nl_size(void) sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr)); sz += nla_total_size(sizeof(u8)); /* FRA_L3MDEV */ sz += nla_total_size(sizeof(u32)); /* FRA_PRIORITY */ + sz += nla_total_size(sizeof(u8)); /* FRA_PROTOCOL */ return sz; } @@ -1176,6 +1178,9 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it) frh->family = family; frh->action = FR_ACT_TO_TBL; + if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL)) + goto nla_put_failure; + if (nla_put_u8(skb, FRA_L3MDEV, 1)) goto nla_put_failure; diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index afeca6bcdade..ab8b3cbbb205 100644 --- a/drivers/net/wan/hdlc_ppp.c +++
b/drivers/net/wan/hdlc_ppp.c @@ -574,7 +574,10 @@ static void ppp_timer(struct timer_list *t) ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, 0, NULL); proto->restart_counter--; - } else + } else if (netif_carrier_ok(proto->dev)) + ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, + 0, NULL); + else ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, 0, NULL); break; diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c index b78ee676e102..5b64bda7d9e7 100644 --- a/drivers/net/wimax/i2400m/usb-rx.c +++ b/drivers/net/wimax/i2400m/usb-rx.c @@ -263,9 +263,6 @@ retry: new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len, GFP_KERNEL); if (new_skb == NULL) { - if (printk_ratelimit()) - dev_err(dev, "RX: Can't reallocate skb to %d; " - "RX dropped\n", rx_size); kfree_skb(rx_skb); rx_skb = NULL; goto out; /* drop it...*/ diff --git a/drivers/net/wireless/admtek/Kconfig b/drivers/net/wireless/admtek/Kconfig index d5a2dc728078..9317367e37f0 100644 --- a/drivers/net/wireless/admtek/Kconfig +++ b/drivers/net/wireless/admtek/Kconfig @@ -5,8 +5,8 @@ config WLAN_VENDOR_ADMTEK If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. if WLAN_VENDOR_ADMTEK diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig index 44b2470af81d..82ab7c33cf97 100644 --- a/drivers/net/wireless/ath/Kconfig +++ b/drivers/net/wireless/ath/Kconfig @@ -8,8 +8,8 @@ config WLAN_VENDOR_ATH If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. For more information and documentation on this module you can visit: diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h index f3f2784f6ebd..7a364eca46d6 100644 --- a/drivers/net/wireless/ath/ath.h +++ b/drivers/net/wireless/ath/ath.h @@ -33,8 +33,6 @@ */ #define ATH_KEYMAX 128 /* max key cache size we handle */ -static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - struct ath_ani { bool caldone; unsigned int longcal_timer; diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index f3ec13b80b20..8a3020dbd4cf 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -2040,7 +2041,8 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS; ar->num_tids = TARGET_10_4_TGT_NUM_TIDS; ar->fw_stats_req_mask = WMI_10_4_STAT_PEER | - WMI_10_4_STAT_PEER_EXTD; + WMI_10_4_STAT_PEER_EXTD | + WMI_10_4_STAT_VDEV_EXTD; ar->max_spatial_stream = ar->hw_params.max_spatial_stream; ar->max_num_tdls_vdevs = TARGET_10_4_NUM_TDLS_VDEVS; @@ -2281,6 +2283,9 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, if (ath10k_peer_stats_enabled(ar)) val = WMI_10_4_PEER_STATS; + /* Enable vdev stats by default */ + val |= WMI_10_4_VDEV_STATS; + if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map)) val |= WMI_10_4_BSS_CHANNEL_INFO_64; @@ -2439,7 +2444,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar) ret = ath10k_hif_power_up(ar); if (ret) { - ath10k_err(ar, "could not start pci hif (%d)\n", ret); + ath10k_err(ar, "could not power on hif bus (%d)\n", ret); return ret; } diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index fe6b30356d3b..c17d805d68cc 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -221,6 +222,27 @@ struct ath10k_fw_stats_vdev { u32 beacon_rssi_history[10]; }; +struct ath10k_fw_stats_vdev_extd { + struct list_head list; + + u32 vdev_id; + u32 ppdu_aggr_cnt; + u32 ppdu_noack; + u32 mpdu_queued; + u32 ppdu_nonaggr_cnt; + u32 mpdu_sw_requeued; + u32 mpdu_suc_retry; + u32 mpdu_suc_multitry; + u32 mpdu_fail_retry; + u32 tx_ftm_suc; + u32 tx_ftm_suc_retry; + u32 tx_ftm_fail; + u32 rx_ftmr_cnt; + u32 rx_ftmr_dup_cnt; + u32 rx_iftmr_cnt; + u32 rx_iftmr_dup_cnt; +}; + struct ath10k_fw_stats_pdev { struct list_head list; @@ -324,6 +346,27 @@ struct ath10k_tpc_stats { struct ath10k_tpc_table tpc_table[WMI_TPC_FLAG]; }; +struct ath10k_tpc_table_final { + u32 pream_idx[WMI_TPC_FINAL_RATE_MAX]; + u8 rate_code[WMI_TPC_FINAL_RATE_MAX]; + char tpc_value[WMI_TPC_FINAL_RATE_MAX][WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE]; +}; + +struct ath10k_tpc_stats_final { + u32 reg_domain; + u32 chan_freq; + u32 phy_mode; + u32 twice_antenna_reduction; + u32 twice_max_rd_power; + s32 twice_antenna_gain; + u32 power_limit; + u32 num_tx_chain; + u32 ctl; + u32 rate_max; + u8 flag[WMI_TPC_FLAG]; + struct ath10k_tpc_table_final tpc_table_final[WMI_TPC_FLAG]; +}; + struct ath10k_dfs_stats { u32 phy_errors; u32 pulses_total; @@ -354,6 +397,45 @@ struct ath10k_txq { unsigned long num_push_allowed; }; +enum ath10k_pkt_rx_err { + ATH10K_PKT_RX_ERR_FCS, + ATH10K_PKT_RX_ERR_TKIP, + ATH10K_PKT_RX_ERR_CRYPT, + ATH10K_PKT_RX_ERR_PEER_IDX_INVAL, + ATH10K_PKT_RX_ERR_MAX, +}; + +enum ath10k_ampdu_subfrm_num { + ATH10K_AMPDU_SUBFRM_NUM_10, + ATH10K_AMPDU_SUBFRM_NUM_20, + ATH10K_AMPDU_SUBFRM_NUM_30, + ATH10K_AMPDU_SUBFRM_NUM_40, + ATH10K_AMPDU_SUBFRM_NUM_50, + ATH10K_AMPDU_SUBFRM_NUM_60, + ATH10K_AMPDU_SUBFRM_NUM_MORE, + ATH10K_AMPDU_SUBFRM_NUM_MAX, +}; + +enum ath10k_amsdu_subfrm_num { + ATH10K_AMSDU_SUBFRM_NUM_1, + ATH10K_AMSDU_SUBFRM_NUM_2, + 
ATH10K_AMSDU_SUBFRM_NUM_3, + ATH10K_AMSDU_SUBFRM_NUM_4, + ATH10K_AMSDU_SUBFRM_NUM_MORE, + ATH10K_AMSDU_SUBFRM_NUM_MAX, +}; + +struct ath10k_sta_tid_stats { + unsigned long int rx_pkt_from_fw; + unsigned long int rx_pkt_unchained; + unsigned long int rx_pkt_drop_chained; + unsigned long int rx_pkt_drop_filter; + unsigned long int rx_pkt_err[ATH10K_PKT_RX_ERR_MAX]; + unsigned long int rx_pkt_queued_for_mac; + unsigned long int rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_MAX]; + unsigned long int rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MAX]; +}; + struct ath10k_sta { struct ath10k_vif *arvif; @@ -371,6 +453,9 @@ struct ath10k_sta { #ifdef CONFIG_MAC80211_DEBUGFS /* protected by conf_mutex */ bool aggr_mode; + + /* Protected with ar->data_lock */ + struct ath10k_sta_tid_stats tid_stats[IEEE80211_NUM_TIDS + 1]; #endif }; @@ -487,6 +572,7 @@ struct ath10k_debug { /* used for tpc-dump storage, protected by data-lock */ struct ath10k_tpc_stats *tpc_stats; + struct ath10k_tpc_stats_final *tpc_stats_final; struct completion tpc_complete; @@ -1019,6 +1105,8 @@ struct ath10k { void *ce_priv; + u32 sta_tid_stats_mask; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c index 7173b3743b43..f90cec0ebb1c 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.c +++ b/drivers/net/wireless/ath/ath10k/coredump.c @@ -701,6 +701,89 @@ static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = { }, }; +static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x80000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x98000, + .len = 0x50000, + .name = "IRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOSRAM, + .start = 0xC0000, + .len = 0x40000, + .name = "SRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x30000, + .len = 0x7000, + .name = "APB REG 1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x3f000, + .len = 0x3000, + .name = "APB REG 2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x43000, + .len = 0x3000, + .name = "WIFI REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x4A000, + .len = 0x5000, + .name = "CE REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x80000, + .len = 0x6000, + .name = "SOC REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, +}; + static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { { .hw_id = QCA6174_HW_1_0_VERSION, @@ -758,6 +841,13 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { .size = ARRAY_SIZE(qca988x_hw20_mem_regions), }, }, + { + .hw_id = QCA9984_HW_1_0_DEV_VERSION, + .region_table = { + .regions = qca9984_hw10_mem_regions, + .size = ARRAY_SIZE(qca9984_hw10_mem_regions), + }, + }, }; static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h index bfee13038e59..3baaf9d2cbcd 100644 --- 
a/drivers/net/wireless/ath/ath10k/coredump.h +++ b/drivers/net/wireless/ath/ath10k/coredump.h @@ -124,6 +124,8 @@ enum ath10k_mem_region_type { ATH10K_MEM_REGION_TYPE_AXI = 3, ATH10K_MEM_REGION_TYPE_IRAM1 = 4, ATH10K_MEM_REGION_TYPE_IRAM2 = 5, + ATH10K_MEM_REGION_TYPE_IOSRAM = 6, + ATH10K_MEM_REGION_TYPE_IOREG = 7, }; /* Define a section of the region which should be copied. As not all parts diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 554cd7856cb6..bac832ce1873 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1480,6 +1480,19 @@ void ath10k_debug_tpc_stats_process(struct ath10k *ar, spin_unlock_bh(&ar->data_lock); } +void +ath10k_debug_tpc_stats_final_process(struct ath10k *ar, + struct ath10k_tpc_stats_final *tpc_stats) +{ + spin_lock_bh(&ar->data_lock); + + kfree(ar->debug.tpc_stats_final); + ar->debug.tpc_stats_final = tpc_stats; + complete(&ar->debug.tpc_complete); + + spin_unlock_bh(&ar->data_lock); +} + static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats, unsigned int j, char *buf, size_t *len) { @@ -2143,6 +2156,137 @@ static const struct file_operations fops_fw_checksums = { .llseek = default_llseek, }; +static ssize_t ath10k_sta_tid_stats_mask_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + size_t len; + + len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->sta_tid_stats_mask); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath10k_sta_tid_stats_mask_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + ssize_t len; + u32 mask; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtoint(buf, 0, &mask)) + return -EINVAL; + + ar->sta_tid_stats_mask = mask; + + return len; +} + +static const struct file_operations fops_sta_tid_stats_mask = { + .read = ath10k_sta_tid_stats_mask_read, + .write = ath10k_sta_tid_stats_mask_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static int ath10k_debug_tpc_stats_final_request(struct ath10k *ar) +{ + int ret; + unsigned long time_left; + + lockdep_assert_held(&ar->conf_mutex); + + reinit_completion(&ar->debug.tpc_complete); + + ret = ath10k_wmi_pdev_get_tpc_table_cmdid(ar, WMI_TPC_CONFIG_PARAM); + if (ret) { + ath10k_warn(ar, "failed to request tpc table cmdid: %d\n", ret); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->debug.tpc_complete, + 1 * HZ); + if (time_left == 0) + return -ETIMEDOUT; + + return 0; +} + +static int ath10k_tpc_stats_final_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; + void *buf; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ret = -ENETDOWN; + goto err_unlock; + } + + buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE); + if (!buf) { + ret = -ENOMEM; + goto err_unlock; + } + + ret = ath10k_debug_tpc_stats_final_request(ar); + if (ret) { + ath10k_warn(ar, "failed to request tpc stats final: %d\n", + ret); + goto err_free; + } + + ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf); + file->private_data = buf; + + mutex_unlock(&ar->conf_mutex); + return 0; + +err_free: + vfree(buf); + +err_unlock: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int 
ath10k_tpc_stats_final_release(struct inode *inode, + struct file *file) +{ + vfree(file->private_data); + + return 0; +} + +static ssize_t ath10k_tpc_stats_final_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char *buf = file->private_data; + unsigned int len = strlen(buf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_tpc_stats_final = { + .open = ath10k_tpc_stats_final_open, + .release = ath10k_tpc_stats_final_release, + .read = ath10k_tpc_stats_final_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + int ath10k_debug_create(struct ath10k *ar) { ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN); @@ -2258,6 +2402,16 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar, &fops_fw_checksums); + if (IS_ENABLED(CONFIG_MAC80211_DEBUGFS)) + debugfs_create_file("sta_tid_stats_mask", 0600, + ar->debug.debugfs_phy, + ar, &fops_sta_tid_stats_mask); + + if (test_bit(WMI_SERVICE_TPC_STATS_FINAL, ar->wmi.svc_map)) + debugfs_create_file("tpc_stats_final", 0400, + ar->debug.debugfs_phy, ar, + &fops_tpc_stats_final); + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index e54308889e59..0afca5c106b6 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -101,6 +102,9 @@ void ath10k_debug_unregister(struct ath10k *ar); void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb); void ath10k_debug_tpc_stats_process(struct ath10k *ar, struct ath10k_tpc_stats *tpc_stats); +void +ath10k_debug_tpc_stats_final_process(struct ath10k *ar, + struct ath10k_tpc_stats_final *tpc_stats); void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len); #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++) @@ -164,6 +168,13 @@ static inline void ath10k_debug_tpc_stats_process(struct ath10k *ar, kfree(tpc_stats); } +static inline void +ath10k_debug_tpc_stats_final_process(struct ath10k *ar, + struct ath10k_tpc_stats_final *tpc_stats) +{ + kfree(tpc_stats); +} + static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len) { @@ -191,12 +202,42 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); void ath10k_sta_update_rx_duration(struct ath10k *ar, struct ath10k_fw_stats *stats); +void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr, + unsigned long int num_msdus, + enum ath10k_pkt_rx_err err, + unsigned long int unchain_cnt, + unsigned long int drop_cnt, + unsigned long int drop_cnt_filter, + unsigned long int queued_msdus); +void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, + u16 peer_id, u8 tid, + struct htt_rx_indication_mpdu_range *ranges, + int num_ranges); #else static inline void ath10k_sta_update_rx_duration(struct ath10k *ar, struct ath10k_fw_stats *stats) { } + +static inline +void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr, + unsigned long int num_msdus, + enum ath10k_pkt_rx_err err, + unsigned long int 
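/* MSDUs coalesced while unchaining */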
unchain_cnt, + unsigned long int drop_cnt, + unsigned long int drop_cnt_filter, + unsigned long int queued_msdus) +{ +} + +static inline +void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, + u16 peer_id, u8 tid, + struct htt_rx_indication_mpdu_range *ranges, + int num_ranges) +{ +} #endif /* CONFIG_MAC80211_DEBUGFS */ #ifdef CONFIG_ATH10K_DEBUG diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index b260b09dd4d3..8f688f136c22 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -16,8 +17,125 @@ #include "core.h" #include "wmi-ops.h" +#include "txrx.h" #include "debug.h" +static void ath10k_rx_stats_update_amsdu_subfrm(struct ath10k *ar, + struct ath10k_sta_tid_stats *stats, + u32 msdu_count) +{ + if (msdu_count == 1) + stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_1]++; + else if (msdu_count == 2) + stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_2]++; + else if (msdu_count == 3) + stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_3]++; + else if (msdu_count == 4) + stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_4]++; + else if (msdu_count > 4) + stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MORE]++; +} + +static void ath10k_rx_stats_update_ampdu_subfrm(struct ath10k *ar, + struct ath10k_sta_tid_stats *stats, + u32 mpdu_count) +{ + if (mpdu_count <= 10) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_10]++; + else if (mpdu_count <= 20) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_20]++; + else if (mpdu_count <= 30) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_30]++; + else if (mpdu_count <= 40) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_40]++; + else if (mpdu_count <= 50) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_50]++; + else if (mpdu_count <= 60) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_60]++; + else if (mpdu_count > 60) + stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_MORE]++; +} + +void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid, + struct htt_rx_indication_mpdu_range *ranges, + int num_ranges) +{ + struct ath10k_sta *arsta; + struct ath10k_peer *peer; + int i; + + if (tid > IEEE80211_NUM_TIDS || !(ar->sta_tid_stats_mask & BIT(tid))) + return; + + rcu_read_lock(); + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find_by_id(ar, peer_id); + if (!peer) + goto out; + + arsta = (struct ath10k_sta *)peer->sta->drv_priv; + + for (i = 0; i < num_ranges; i++) + ath10k_rx_stats_update_ampdu_subfrm(ar, + &arsta->tid_stats[tid], + ranges[i].mpdu_count); + +out: + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); +} + +void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr, + unsigned long int num_msdus, + enum ath10k_pkt_rx_err err, + unsigned long int unchain_cnt, + unsigned long int drop_cnt, + unsigned long int drop_cnt_filter, + unsigned long int queued_msdus) +{ + struct ieee80211_sta *sta; + struct ath10k_sta *arsta; + struct ieee80211_hdr *hdr; + struct ath10k_sta_tid_stats *stats; + u8 tid = IEEE80211_NUM_TIDS; + bool non_data_frm = false; + + hdr = (struct ieee80211_hdr *)first_hdr; + if (!ieee80211_is_data(hdr->frame_control)) + non_data_frm = true; + + if (ieee80211_is_data_qos(hdr->frame_control)) + tid = *ieee80211_get_qos_ctl(hdr) & 
IEEE80211_QOS_CTL_TID_MASK; + + if (!(ar->sta_tid_stats_mask & BIT(tid)) || non_data_frm) + return; + + rcu_read_lock(); + + sta = ieee80211_find_sta_by_ifaddr(ar->hw, hdr->addr2, NULL); + if (!sta) + goto exit; + + arsta = (struct ath10k_sta *)sta->drv_priv; + + spin_lock_bh(&ar->data_lock); + stats = &arsta->tid_stats[tid]; + stats->rx_pkt_from_fw += num_msdus; + stats->rx_pkt_unchained += unchain_cnt; + stats->rx_pkt_drop_chained += drop_cnt; + stats->rx_pkt_drop_filter += drop_cnt_filter; + if (err != ATH10K_PKT_RX_ERR_MAX) + stats->rx_pkt_err[err] += queued_msdus; + stats->rx_pkt_queued_for_mac += queued_msdus; + ath10k_rx_stats_update_amsdu_subfrm(ar, &arsta->tid_stats[tid], + num_msdus); + spin_unlock_bh(&ar->data_lock); + +exit: + rcu_read_unlock(); +} + static void ath10k_sta_update_extd_stats_rx_duration(struct ath10k *ar, struct ath10k_fw_stats *stats) { @@ -342,6 +460,172 @@ static const struct file_operations fops_peer_debug_trigger = { .llseek = default_llseek, }; +static char *get_err_str(enum ath10k_pkt_rx_err i) +{ + switch (i) { + case ATH10K_PKT_RX_ERR_FCS: + return "fcs_err"; + case ATH10K_PKT_RX_ERR_TKIP: + return "tkip_err"; + case ATH10K_PKT_RX_ERR_CRYPT: + return "crypt_err"; + case ATH10K_PKT_RX_ERR_PEER_IDX_INVAL: + return "peer_idx_inval"; + case ATH10K_PKT_RX_ERR_MAX: + return "unknown"; + } + + return "unknown"; +} + +static char *get_num_ampdu_subfrm_str(enum ath10k_ampdu_subfrm_num i) +{ + switch (i) { + case ATH10K_AMPDU_SUBFRM_NUM_10: + return "upto 10"; + case ATH10K_AMPDU_SUBFRM_NUM_20: + return "11-20"; + case ATH10K_AMPDU_SUBFRM_NUM_30: + return "21-30"; + case ATH10K_AMPDU_SUBFRM_NUM_40: + return "31-40"; + case ATH10K_AMPDU_SUBFRM_NUM_50: + return "41-50"; + case ATH10K_AMPDU_SUBFRM_NUM_60: + return "51-60"; + case ATH10K_AMPDU_SUBFRM_NUM_MORE: + return ">60"; + case ATH10K_AMPDU_SUBFRM_NUM_MAX: + return "0"; + } + + return "0"; +} + +static char *get_num_amsdu_subfrm_str(enum ath10k_amsdu_subfrm_num i) +{ + switch (i) { + case ATH10K_AMSDU_SUBFRM_NUM_1: + return "1"; + case ATH10K_AMSDU_SUBFRM_NUM_2: + return "2"; + case ATH10K_AMSDU_SUBFRM_NUM_3: + return "3"; + case ATH10K_AMSDU_SUBFRM_NUM_4: + return "4"; + case ATH10K_AMSDU_SUBFRM_NUM_MORE: + return ">4"; + case ATH10K_AMSDU_SUBFRM_NUM_MAX: + return "0"; + } + + return "0"; +} + +#define PRINT_TID_STATS(_field, _tabs) \ + do { \ + int k = 0; \ + for (j = 0; j <= IEEE80211_NUM_TIDS; j++) { \ + if (ar->sta_tid_stats_mask & BIT(j)) { \ + len += scnprintf(buf + len, buf_len - len, \ + "[%02d] %-10lu ", \ + j, stats[j]._field); \ + k++; \ + if (k % 8 == 0) { \ + len += scnprintf(buf + len, \ + buf_len - len, "\n"); \ + len += scnprintf(buf + len, \ + buf_len - len, \ + _tabs); \ + } \ + } \ + } \ + len += scnprintf(buf + len, buf_len - len, "\n"); \ + } while (0) + +static ssize_t ath10k_dbg_sta_read_tid_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + struct ath10k_sta_tid_stats *stats = arsta->tid_stats; + size_t len = 0, buf_len = 1048 * IEEE80211_NUM_TIDS; + char *buf; + int i, j; + ssize_t ret; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + + len += scnprintf(buf + len, buf_len - len, + "\n\t\tDriver Rx pkt stats per tid, ([tid] count)\n"); + len += scnprintf(buf + len, buf_len - len, + 
"\t\t------------------------------------------\n"); + len += scnprintf(buf + len, buf_len - len, "MSDUs from FW\t\t\t"); + PRINT_TID_STATS(rx_pkt_from_fw, "\t\t\t\t"); + + len += scnprintf(buf + len, buf_len - len, "MSDUs unchained\t\t\t"); + PRINT_TID_STATS(rx_pkt_unchained, "\t\t\t\t"); + + len += scnprintf(buf + len, buf_len - len, + "MSDUs locally dropped:chained\t"); + PRINT_TID_STATS(rx_pkt_drop_chained, "\t\t\t\t"); + + len += scnprintf(buf + len, buf_len - len, + "MSDUs locally dropped:filtered\t"); + PRINT_TID_STATS(rx_pkt_drop_filter, "\t\t\t\t"); + + len += scnprintf(buf + len, buf_len - len, + "MSDUs queued for mac80211\t"); + PRINT_TID_STATS(rx_pkt_queued_for_mac, "\t\t\t\t"); + + for (i = 0; i < ATH10K_PKT_RX_ERR_MAX; i++) { + len += scnprintf(buf + len, buf_len - len, + "MSDUs with error:%s\t", get_err_str(i)); + PRINT_TID_STATS(rx_pkt_err[i], "\t\t\t\t"); + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + for (i = 0; i < ATH10K_AMPDU_SUBFRM_NUM_MAX; i++) { + len += scnprintf(buf + len, buf_len - len, + "A-MPDU num subframes %s\t", + get_num_ampdu_subfrm_str(i)); + PRINT_TID_STATS(rx_pkt_ampdu[i], "\t\t\t\t"); + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + for (i = 0; i < ATH10K_AMSDU_SUBFRM_NUM_MAX; i++) { + len += scnprintf(buf + len, buf_len - len, + "A-MSDU num subframes %s\t\t", + get_num_amsdu_subfrm_str(i)); + PRINT_TID_STATS(rx_pkt_amsdu[i], "\t\t\t\t"); + } + + spin_unlock_bh(&ar->data_lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static const struct file_operations fops_tid_stats_dump = { + .open = simple_open, + .read = ath10k_dbg_sta_read_tid_stats, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir) { @@ -351,4 +635,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, debugfs_create_file("delba", 0200, dir, sta, &fops_delba); debugfs_create_file("peer_debug_trigger", 0600, dir, sta, &fops_peer_debug_trigger); + debugfs_create_file("dump_tid_stats", 0400, dir, sta, + &fops_tid_stats_dump); } diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 6d96f9560950..5e02e26158f6 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -723,6 +724,28 @@ struct amsdu_subframe_hdr { #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63) +static inline u8 ath10k_bw_to_mac80211_bw(u8 bw) +{ + u8 ret = 0; + + switch (bw) { + case 0: + ret = RATE_INFO_BW_20; + break; + case 1: + ret = RATE_INFO_BW_40; + break; + case 2: + ret = RATE_INFO_BW_80; + break; + case 3: + ret = RATE_INFO_BW_160; + break; + } + + return ret; +} + static void ath10k_htt_rx_h_rates(struct ath10k *ar, struct ieee80211_rx_status *status, struct htt_rx_desc *rxd) @@ -825,23 +848,7 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, if (sgi) status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - switch (bw) { - /* 20MHZ */ - case 0: - break; - /* 40MHZ */ - case 1: - status->bw = RATE_INFO_BW_40; - break; - /* 80MHZ */ - case 2: - status->bw = RATE_INFO_BW_80; - break; - case 3: - status->bw = RATE_INFO_BW_160; - break; - } - + status->bw = ath10k_bw_to_mac80211_bw(bw); status->encoding = RX_ENC_VHT; break; default: @@ -1502,7 +1509,9 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu) static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, struct sk_buff_head *amsdu, struct ieee80211_rx_status *status, - bool fill_crypt_header) + bool fill_crypt_header, + u8 *rx_hdr, + enum ath10k_pkt_rx_err *err) { struct sk_buff *first; struct sk_buff *last; @@ -1538,6 +1547,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, hdr = (void *)rxd->rx_hdr_status; memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN); + if (rx_hdr) + memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN); + /* Each A-MSDU subframe will use the original header as the base and be * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. */ @@ -1581,6 +1593,17 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, if (has_tkip_err) status->flag |= RX_FLAG_MMIC_ERROR; + if (err) { + if (has_fcs_err) + *err = ATH10K_PKT_RX_ERR_FCS; + else if (has_tkip_err) + *err = ATH10K_PKT_RX_ERR_TKIP; + else if (has_crypto_err) + *err = ATH10K_PKT_RX_ERR_CRYPT; + else if (has_peer_idx_invalid) + *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL; + } + /* Firmware reports all necessary management frames via WMI already. * They are not reported to monitor interfaces at all so pass the ones * coming via HTT to monitor interfaces instead. 
This simplifies @@ -1651,11 +1674,13 @@ static void ath10k_htt_rx_h_enqueue(struct ath10k *ar, } } -static int ath10k_unchain_msdu(struct sk_buff_head *amsdu) +static int ath10k_unchain_msdu(struct sk_buff_head *amsdu, + unsigned long int *unchain_cnt) { struct sk_buff *skb, *first; int space; int total_len = 0; + int amsdu_len = skb_queue_len(amsdu); /* TODO: We might be able to optimize this by using * skb_try_coalesce or a similar method to @@ -1691,11 +1716,16 @@ static int ath10k_unchain_msdu(struct sk_buff_head *amsdu) } __skb_queue_head(amsdu, first); + + *unchain_cnt += amsdu_len - 1; + return 0; } static void ath10k_htt_rx_h_unchain(struct ath10k *ar, - struct sk_buff_head *amsdu) + struct sk_buff_head *amsdu, + unsigned long int *drop_cnt, + unsigned long int *unchain_cnt) { struct sk_buff *first; struct htt_rx_desc *rxd; @@ -1713,11 +1743,12 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar, */ if (decap != RX_MSDU_DECAP_RAW || skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) { + *drop_cnt += skb_queue_len(amsdu); __skb_queue_purge(amsdu); return; } - ath10k_unchain_msdu(amsdu); + ath10k_unchain_msdu(amsdu, unchain_cnt); } static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, @@ -1743,7 +1774,8 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar, struct sk_buff_head *amsdu, - struct ieee80211_rx_status *rx_status) + struct ieee80211_rx_status *rx_status, + unsigned long int *drop_cnt) { if (skb_queue_empty(amsdu)) return; @@ -1751,6 +1783,9 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar, if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) return; + if (drop_cnt) + *drop_cnt += skb_queue_len(amsdu); + __skb_queue_purge(amsdu); } @@ -1760,6 +1795,12 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) struct ieee80211_rx_status *rx_status = &htt->rx_status; struct sk_buff_head amsdu; int ret; + unsigned long int drop_cnt = 0; + unsigned long int unchain_cnt = 0; + unsigned long int drop_cnt_filter = 0; + unsigned long int msdus_to_queue, num_msdus; + enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX; + u8 first_hdr[RX_HTT_HDR_STATUS_LEN]; __skb_queue_head_init(&amsdu); @@ -1781,16 +1822,23 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) return ret; } + num_msdus = skb_queue_len(&amsdu); + ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); /* ret > 0 indicates chained msdus */ if (ret > 0) - ath10k_htt_rx_h_unchain(ar, &amsdu); + ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt); - ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); - ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true); + ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter); + ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err); + msdus_to_queue = skb_queue_len(&amsdu); ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status); + ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err, + unchain_cnt, drop_cnt, drop_cnt_filter, + msdus_to_queue); + return 0; } @@ -1801,9 +1849,14 @@ static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt, struct htt_rx_indication_mpdu_range *mpdu_ranges; int num_mpdu_ranges; int i, mpdu_count = 0; + u16 peer_id; + u8 tid; num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); + peer_id = __le16_to_cpu(rx->hdr.peer_id); + tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); + mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", @@
-1815,6 +1868,9 @@ static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt, mpdu_count += mpdu_ranges[i].mpdu_count; atomic_add(mpdu_count, &htt->num_mpdus_ready); + + ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges, + num_mpdu_ranges); } static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, @@ -2124,8 +2180,9 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) * should still give an idea about rx rate to the user. */ ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); - ath10k_htt_rx_h_filter(ar, &amsdu, status); - ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false); + ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL); + ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL, + NULL); ath10k_htt_rx_h_enqueue(ar, &amsdu, status); break; case -EAGAIN: @@ -2499,7 +2556,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar, arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; arsta->txrate.nss = txrate.nss; - arsta->txrate.bw = txrate.bw + RATE_INFO_BW_20; + arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw); } static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index ebb3f1b046f3..bf05a3689558 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -2976,7 +2977,7 @@ static int ath10k_station_assoc(struct ath10k *ar, } /* Plumb cached keys only for static WEP */ - if (arvif->def_wep_key_idx != -1) { + if ((arvif->def_wep_key_idx != -1) && (!sta->tdls)) { ret = ath10k_install_peer_wep_keys(arvif, sta->addr); if (ret) { ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", @@ -3808,6 +3809,7 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); struct sk_buff *skb; + dma_addr_t paddr; int ret; for (;;) { @@ -3815,11 +3817,27 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) if (!skb) break; - ret = ath10k_wmi_mgmt_tx(ar, skb); - if (ret) { - ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", - ret); - ieee80211_free_txskb(ar->hw, skb); + if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, + ar->running_fw->fw_file.fw_features)) { + paddr = dma_map_single(ar->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (!paddr) + continue; + ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr); + if (ret) { + ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", + ret); + dma_unmap_single(ar->dev, paddr, skb->len, + DMA_TO_DEVICE); + ieee80211_free_txskb(ar->hw, skb); + } + } else { + ret = ath10k_wmi_mgmt_tx(ar, skb); + if (ret) { + ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", + ret); + ieee80211_free_txskb(ar->hw, skb); + } } } } @@ -5914,6 +5932,10 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); spin_unlock_bh(&ar->data_lock); + if (sta && sta->tdls) + ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, + WMI_PEER_AUTHORIZE, 1); + exit: mutex_unlock(&ar->conf_mutex); return ret; @@ -6028,9 +6050,8 @@ static void
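
The by-reference path above maps the frame for DMA, hands the bus address to firmware over WMI, and must undo the mapping itself on failure; the direction passed to the unmap has to match the one used for the map. A toy model of that invariant follows (the names are hypothetical stand-ins for the kernel DMA API; in real kernel code dma_mapping_error() is the proper failure check for dma_map_single()).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the map/send/unmap pairing in the hunk above. */
enum dma_dir { TO_DEVICE, FROM_DEVICE };

static enum dma_dir mapped_dir;

static uintptr_t map_single(void *buf, enum dma_dir dir)
{
	mapped_dir = dir;
	return (uintptr_t)buf;		/* pretend bus address */
}

static void unmap_single(uintptr_t paddr, enum dma_dir dir)
{
	(void)paddr;
	assert(dir == mapped_dir);	/* mismatched direction is a bug */
}

static int send_by_ref(uintptr_t paddr)
{
	(void)paddr;
	return -1;			/* force the error path */
}

int main(void)
{
	char frame[64];
	uintptr_t paddr = map_single(frame, TO_DEVICE);

	if (send_by_ref(paddr) != 0)
		unmap_single(paddr, TO_DEVICE);	/* same direction as map */
	puts("unmap direction matched map direction");
	return 0;
}
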
ath10k_sta_rc_update_wk(struct work_struct *wk) sta->addr, smps, err); } - if (changed & IEEE80211_RC_SUPP_RATES_CHANGED || - changed & IEEE80211_RC_NSS_CHANGED) { - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n", + if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", sta->addr); err = ath10k_station_assoc(ar, arvif->vif, sta, true); @@ -7085,10 +7106,20 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, { struct ath10k *ar = hw->priv; struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_peer *peer; u32 bw, smps; spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); + if (!peer) { + spin_unlock_bh(&ar->data_lock); + ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n", + sta->addr, arvif->vdev_id); + return; + } + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", sta->addr, changed, sta->bandwidth, sta->rx_nss, @@ -7874,6 +7905,7 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = { .max_interfaces = 8, .num_different_channels = 1, .beacon_int_infra_match = true, + .beacon_int_min_gcd = 1, #ifdef CONFIG_ATH10K_DFS_CERTIFIED .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | @@ -7997,6 +8029,7 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { .max_interfaces = 16, .num_different_channels = 1, .beacon_int_infra_match = true, + .beacon_int_min_gcd = 1, #ifdef CONFIG_ATH10K_DFS_CERTIFIED .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | @@ -8298,6 +8331,9 @@ int ath10k_mac_register(struct ath10k *ar) ieee80211_hw_set(ar->hw, TDLS_WIDER_BW); } + if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map)) + ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA); + ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; ar->hw->wiphy->max_remain_on_channel_duration = 5000; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 1b266cd0c2ec..fd1566cd7d2b 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -57,6 +57,10 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); */ #define ATH10K_DIAG_TRANSFER_LIMIT 0x5000 +#define QCA99X0_PCIE_BAR0_START_REG 0x81030 +#define QCA99X0_CPU_MEM_ADDR_REG 0x4d00c +#define QCA99X0_CPU_MEM_DATA_REG 0x4d010 + static const struct pci_device_id ath10k_pci_id_table[] = { /* PCI-E QCA988X V2 (Ubiquiti branded) */ { PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) }, @@ -1584,6 +1588,69 @@ static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config) return 0; } +/* if an error happened returns < 0, otherwise the length */ +static int ath10k_pci_dump_memory_sram(struct ath10k *ar, + const struct ath10k_mem_region *region, + u8 *buf) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + u32 base_addr, i; + + base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG); + base_addr += region->start; + + for (i = 0; i < region->len; i += 4) { + iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG); + *(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG); + } + + return region->len; +} + +/* if an error happened returns < 0, otherwise the length */ +static int 
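
ath10k_pci_dump_memory_sram() above uses the classic indirect-access idiom: write the target address into a window register, then read the selected word back through a data register. A runnable toy model of that ADDR/DATA register pair, with all names illustrative rather than the hardware's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t sram[16];		/* pretend on-chip SRAM */
static uint32_t addr_reg;		/* models QCA99X0_CPU_MEM_ADDR_REG */

static void write_addr_reg(uint32_t val) { addr_reg = val; }
static uint32_t read_data_reg(void)      { return sram[addr_reg / 4]; }

static void dump_sram(uint32_t start, uint32_t len, uint8_t *buf)
{
	uint32_t i, word;

	for (i = 0; i < len; i += 4) {
		write_addr_reg(start + i);	/* select the word */
		word = read_data_reg();		/* fetch it */
		memcpy(buf + i, &word, sizeof(word));
	}
}

int main(void)
{
	uint8_t buf[16];
	uint32_t i, word;

	for (i = 0; i < 16; i++)
		sram[i] = i;
	dump_sram(0, sizeof(buf), buf);
	for (i = 0; i < sizeof(buf); i += 4) {
		memcpy(&word, buf + i, sizeof(word));
		printf("%u ", word);
	}
	printf("\n");
	return 0;
}
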
ath10k_pci_dump_memory_reg(struct ath10k *ar, + const struct ath10k_mem_region *region, + u8 *buf) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + u32 i; + + for (i = 0; i < region->len; i += 4) + *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i); + + return region->len; +} + +/* if an error happened returns < 0, otherwise the length */ +static int ath10k_pci_dump_memory_generic(struct ath10k *ar, + const struct ath10k_mem_region *current_region, + u8 *buf) +{ + int ret; + + if (current_region->section_table.size > 0) + /* Copy each section individually. */ + return ath10k_pci_dump_memory_section(ar, + current_region, + buf, + current_region->len); + + /* No individual memory sections defined so we can + * copy the entire memory region. + */ + ret = ath10k_pci_diag_read_mem(ar, + current_region->start, + buf, + current_region->len); + if (ret) { + ath10k_warn(ar, "failed to copy ramdump region %s: %d\n", + current_region->name, ret); + return ret; + } + + return current_region->len; +} + static void ath10k_pci_dump_memory(struct ath10k *ar, struct ath10k_fw_crash_data *crash_data) { @@ -1642,27 +1709,20 @@ static void ath10k_pci_dump_memory(struct ath10k *ar, buf += sizeof(*hdr); buf_len -= sizeof(*hdr); - if (current_region->section_table.size > 0) { - /* Copy each section individually. */ - count = ath10k_pci_dump_memory_section(ar, - current_region, - buf, - current_region->len); - } else { - /* No individiual memory sections defined so we can - * copy the entire memory region. - */ - ret = ath10k_pci_diag_read_mem(ar, - current_region->start, - buf, - current_region->len); - if (ret) { - ath10k_warn(ar, "failed to copy ramdump region %s: %d\n", - current_region->name, ret); + switch (current_region->type) { + case ATH10K_MEM_REGION_TYPE_IOSRAM: + count = ath10k_pci_dump_memory_sram(ar, current_region, buf); + break; + case ATH10K_MEM_REGION_TYPE_IOREG: + count = ath10k_pci_dump_memory_reg(ar, current_region, buf); + break; + default: + ret = ath10k_pci_dump_memory_generic(ar, current_region, buf); + if (ret < 0) break; - } - count = current_region->len; + count = ret; + break; } hdr->region_type = cpu_to_le32(current_region->type); @@ -2221,7 +2281,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) } break; case QCA9377_1_0_DEVICE_ID: - return 4; + return 9; } ath10k_warn(ar, "unknown number of banks, assuming 1\n"); @@ -3718,5 +3778,6 @@ MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE); MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); /* QCA9377 1.0 firmware files */ +MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE); MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE); diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index e40edced1d82..7d2fac342150 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -152,10 +152,9 @@ TRACE_EVENT(ath10k_log_dbg_dump, ); TRACE_EVENT(ath10k_wmi_cmd, - TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len, - int ret), + TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len), - TP_ARGS(ar, id, buf, buf_len, ret), + TP_ARGS(ar, id, buf, buf_len), TP_STRUCT__entry( __string(device, dev_name(ar->dev)) @@ -163,7 +162,6 @@ TRACE_EVENT(ath10k_wmi_cmd, __field(unsigned int, id) __field(size_t, buf_len) __dynamic_array(u8, buf, buf_len) - __field(int, ret) ),
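
The dump-memory rework above settles on one convention: every region helper returns the number of bytes written or a negative errno, and the caller only dispatches on region type. A small runnable sketch of that dispatch shape, with illustrative enum values and helpers rather than the driver's:

#include <errno.h>
#include <stdio.h>

enum region_type { REGION_DRAM, REGION_IOSRAM, REGION_IOREG };

static int dump_sram(char *buf, int len)    { (void)buf; return len; }
static int dump_reg(char *buf, int len)     { (void)buf; return len; }
static int dump_generic(char *buf, int len) { (void)buf; return -EIO; }

/* Returns bytes written, or a negative errno from the helper. */
static int dump_region(enum region_type type, char *buf, int len)
{
	switch (type) {
	case REGION_IOSRAM:
		return dump_sram(buf, len);
	case REGION_IOREG:
		return dump_reg(buf, len);
	default:
		return dump_generic(buf, len);
	}
}

int main(void)
{
	char buf[32];
	int count = dump_region(REGION_IOSRAM, buf, sizeof(buf));

	printf("dumped %d bytes\n", count);	/* "dumped 32 bytes" */
	return 0;
}
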
TP_fast_assign( @@ -171,17 +169,15 @@ TRACE_EVENT(ath10k_wmi_cmd, __assign_str(driver, dev_driver_string(ar->dev)); __entry->id = id; __entry->buf_len = buf_len; - __entry->ret = ret; memcpy(__get_dynamic_array(buf), buf, buf_len); ), TP_printk( - "%s %s id %d len %zu ret %d", + "%s %s id %d len %zu", __get_str(driver), __get_str(device), __entry->id, - __entry->buf_len, - __entry->ret + __entry->buf_len ) ); diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 5b3b021526ab..70e23bbf7171 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -102,11 +102,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, memset(&info->status, 0, sizeof(info->status)); trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id); - if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) { - ieee80211_free_txskb(htt->ar->hw, msdu); - return 0; - } - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_ACK; @@ -117,6 +112,13 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, (info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) { + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED; + else + info->flags &= ~IEEE80211_TX_STAT_ACK; + } + ieee80211_tx_status(htt->ar->hw, msdu); /* we do not own the msdu anymore */ diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 14093cfdc505..c35e45340b4f 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
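
The txrx.c hunk above changes discard handling: instead of freeing the skb before any status is reported, the completion now keeps the report but strips the "delivered" bits first. A toy model of that flag logic, with the defines standing in for mac80211's IEEE80211_TX_* flags:

#include <stdio.h>

#define TX_CTL_NO_ACK             (1u << 0)
#define TX_STAT_ACK               (1u << 1)
#define TX_STAT_NOACK_TRANSMITTED (1u << 2)

static unsigned int complete(unsigned int flags, int discarded)
{
	if (!(flags & TX_CTL_NO_ACK))
		flags |= TX_STAT_ACK;
	else
		flags |= TX_STAT_NOACK_TRANSMITTED;

	if (discarded) {
		/* report the frame, but not as delivered */
		if (flags & TX_CTL_NO_ACK)
			flags &= ~TX_STAT_NOACK_TRANSMITTED;
		else
			flags &= ~TX_STAT_ACK;
	}
	return flags;
}

int main(void)
{
	printf("acked:     %#x\n", complete(0, 0));	/* 0x2 */
	printf("discarded: %#x\n", complete(0, 1));	/* 0x0 */
	return 0;
}

This is a simplified sketch; the real path also conditions the no-ack case on a valid ack signal before setting the flag.
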
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -125,6 +126,9 @@ struct wmi_ops { enum wmi_force_fw_hang_type type, u32 delay_ms); struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb); + struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar, + struct sk_buff *skb, + dma_addr_t paddr); struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable, u32 log_level); struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter); @@ -197,6 +201,9 @@ struct wmi_ops { (struct ath10k *ar, enum wmi_bss_survey_req_type type); struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value); + struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar, + u32 param); + }; int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); @@ -372,12 +379,33 @@ ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) } static inline int +ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, + dma_addr_t paddr) +{ + struct sk_buff *skb; + int ret; + + if (!ar->wmi.ops->gen_mgmt_tx_send) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + ret = ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->mgmt_tx_send_cmdid); + if (ret) + return ret; + + return 0; +} + +static inline int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); struct sk_buff *skb; int ret; - u32 mgmt_tx_cmdid; if (!ar->wmi.ops->gen_mgmt_tx) return -EOPNOTSUPP; @@ -386,13 +414,8 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) if (IS_ERR(skb)) return PTR_ERR(skb); - if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, - ar->running_fw->fw_file.fw_features)) - mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_send_cmdid; - else - mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_cmdid; - - ret = ath10k_wmi_cmd_send(ar, skb, mgmt_tx_cmdid); + ret = ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->mgmt_tx_cmdid); if (ret) return ret; @@ -1425,4 +1448,21 @@ ath10k_wmi_echo(struct ath10k *ar, u32 value) return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid); } +static inline int +ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_get_tpc_table_cmdid); +} + #endif diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index ae77a007ae07..9d1b0a459069 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
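
The wmi-ops additions above follow the file's standing pattern: each firmware family fills a table of function pointers, and an inline wrapper bails out with -EOPNOTSUPP when a generation hook is absent. A minimal runnable model of that shape (the struct and callbacks here are hypothetical):

#include <errno.h>
#include <stdio.h>

struct wmi_ops_model {
	int (*gen_cmd)(unsigned int param);
};

static int send_cmd(const struct wmi_ops_model *ops, unsigned int param)
{
	if (!ops->gen_cmd)
		return -EOPNOTSUPP;	/* firmware family lacks the op */
	return ops->gen_cmd(param);
}

static int gen_cmd_10_4(unsigned int param)
{
	printf("building command, param %u\n", param);
	return 0;
}

int main(void)
{
	struct wmi_ops_model tlv = { .gen_cmd = NULL };
	struct wmi_ops_model v10_4 = { .gen_cmd = gen_cmd_10_4 };

	printf("tlv: %d\n", send_cmd(&tlv, 1));		/* -EOPNOTSUPP */
	printf("10.4: %d\n", send_cmd(&v10_4, 1));	/* 0 */
	return 0;
}

Keeping the capability check in the wrapper is what lets callers such as the mgmt-tx worker probe for by-reference support without knowing which firmware family is running.
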
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -412,6 +413,62 @@ static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar, return 0; } +static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar, + struct sk_buff *skb) +{ + const struct wmi_tlv_pdev_temperature_event *ev; + + ev = (struct wmi_tlv_pdev_temperature_event *)skb->data; + if (WARN_ON(skb->len < sizeof(*ev))) + return -EPROTO; + + ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature)); + return 0; +} + +static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb) +{ + struct ieee80211_sta *station; + const struct wmi_tlv_tdls_peer_event *ev; + const void **tb; + struct ath10k_vif *arvif; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ath10k_warn(ar, "tdls peer failed to parse tlv"); + return; + } + ev = tb[WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT]; + if (!ev) { + kfree(tb); + ath10k_warn(ar, "tdls peer NULL event"); + return; + } + + switch (__le32_to_cpu(ev->peer_reason)) { + case WMI_TDLS_TEARDOWN_REASON_TX: + case WMI_TDLS_TEARDOWN_REASON_RSSI: + case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT: + station = ieee80211_find_sta_by_ifaddr(ar->hw, + ev->peer_macaddr.addr, + NULL); + if (!station) { + ath10k_warn(ar, "did not find station from tdls peer event"); + kfree(tb); + return; + } + arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id)); + ieee80211_tdls_oper_request( + arvif->vif, station->addr, + NL80211_TDLS_TEARDOWN, + WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE, + GFP_ATOMIC + ); + break; + } + kfree(tb); +} + /***********/ /* TLV ops */ /***********/ @@ -552,6 +609,12 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_TLV_TX_PAUSE_EVENTID: ath10k_wmi_tlv_event_tx_pause(ar, skb); break; + case WMI_TLV_PDEV_TEMPERATURE_EVENTID: + ath10k_wmi_tlv_event_temperature(ar, skb); + break; + case WMI_TLV_TDLS_PEER_EVENTID: + ath10k_wmi_event_tdls_peer(ar, skb); + break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); break; @@ -2484,19 +2547,19 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) } static struct sk_buff * -ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) +ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, + dma_addr_t paddr) { struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); struct wmi_tlv_mgmt_tx_cmd *cmd; - struct wmi_tlv *tlv; struct ieee80211_hdr *hdr; + struct ath10k_vif *arvif; + u32 buf_len = msdu->len; + struct wmi_tlv *tlv; struct sk_buff *skb; + u32 vdev_id; void *ptr; int len; - u32 buf_len = msdu->len; - struct ath10k_vif *arvif; - dma_addr_t mgmt_frame_dma; - u32 vdev_id; if (!cb->vif) return ERR_PTR(-EINVAL); @@ -2537,12 +2600,7 @@ ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) cmd->chanfreq = 0; cmd->buf_len = __cpu_to_le32(buf_len); cmd->frame_len = __cpu_to_le32(msdu->len); - mgmt_frame_dma = dma_map_single(arvif->ar->dev, msdu->data, - msdu->len, DMA_TO_DEVICE); - if (!mgmt_frame_dma) - return ERR_PTR(-ENOMEM); - - cmd->paddr = __cpu_to_le64(mgmt_frame_dma); + cmd->paddr = __cpu_to_le64(paddr); ptr += sizeof(*tlv); ptr += sizeof(*cmd); @@ -2662,6 +2720,25 @@ ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter) } static struct sk_buff * +ath10k_wmi_tlv_op_gen_pdev_get_temperature(struct ath10k *ar) +{ + struct wmi_tlv_pdev_get_temp_cmd *cmd; + struct wmi_tlv *tlv; + struct 
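
The TDLS peer event handler above relies on the TLV parser to index every {tag, len, value} record by tag before any field is touched. A toy version of that parse-then-lookup flow, with illustrative tags and a fixed-size table in place of the driver's allocation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TAG_MAX 8

struct tlv_hdr {
	uint16_t tag;
	uint16_t len;
};

static int tlv_parse(const uint8_t *buf, size_t buflen,
		     const void *tb[TAG_MAX])
{
	size_t off = 0;

	memset(tb, 0, sizeof(const void *) * TAG_MAX);
	while (off + sizeof(struct tlv_hdr) <= buflen) {
		struct tlv_hdr hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		off += sizeof(hdr);
		if (hdr.tag >= TAG_MAX || off + hdr.len > buflen)
			return -1;	/* malformed event */
		tb[hdr.tag] = buf + off;
		off += hdr.len;
	}
	return 0;
}

int main(void)
{
	uint8_t ev[sizeof(struct tlv_hdr) + 4];
	struct tlv_hdr hdr = { .tag = 3, .len = 4 };
	const void *tb[TAG_MAX];

	memcpy(ev, &hdr, sizeof(hdr));
	memcpy(ev + sizeof(hdr), "peer", 4);
	if (tlv_parse(ev, sizeof(ev), tb) == 0 && tb[3])
		printf("tag 3 -> %.4s\n", (const char *)tb[3]);
	return 0;
}

A NULL table entry after a successful parse means the tag simply was not present, which is exactly the check the handler makes before dereferencing the event.
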
sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature tlv\n"); + return skb; +} + +static struct sk_buff * ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar) { struct wmi_tlv_pktlog_disable *cmd; @@ -2855,6 +2932,15 @@ ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id, */ u32 options = 0; + if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map)) + options |= WMI_TLV_TDLS_BUFFER_STA_EN; + + /* WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL means firmware will handle TDLS + * link inactivity detection logic. + */ + if (state == WMI_TDLS_ENABLE_ACTIVE) + state = WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL; + len = sizeof(*tlv) + sizeof(*cmd); skb = ath10k_wmi_alloc_skb(ar, len); if (!skb) @@ -3443,7 +3529,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = { .force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID, .gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID, .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID, - .pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED, + .pdev_get_temperature_cmdid = WMI_TLV_PDEV_GET_TEMPERATURE_CMDID, .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID, .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID, .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID, @@ -3701,12 +3787,12 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats, .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang, /* .gen_mgmt_tx = not implemented; HTT is used */ - .gen_mgmt_tx = ath10k_wmi_tlv_op_gen_mgmt_tx, + .gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send, .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable, .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, /* .gen_pdev_set_quiet_mode not implemented */ - /* .gen_pdev_get_temperature not implemented */ + .gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature, /* .gen_addba_clear_resp not implemented */ /* .gen_addba_send not implemented */ /* .gen_addba_set_resp not implemented */ diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index da89128e8dd6..fa3773ec7c68 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1340,6 +1340,17 @@ struct wmi_tlv_init_cmd { __le32 num_host_mem_chunks; } __packed; +struct wmi_tlv_pdev_get_temp_cmd { + __le32 pdev_id; /* not used */ +} __packed; + +struct wmi_tlv_pdev_temperature_event { + __le32 tlv_hdr; + /* temperature value in degrees Celsius */ + __le32 temperature; + __le32 pdev_id; +} __packed; + struct wmi_tlv_pdev_set_param_cmd { __le32 pdev_id; /* not used yet */ __le32 param_id; @@ -1746,6 +1757,13 @@ struct wmi_tlv_tx_pause_ev { __le32 tid_map; } __packed; +struct wmi_tlv_tdls_peer_event { + struct wmi_mac_addr peer_macaddr; + __le32 peer_status; + __le32 peer_reason; + __le32 vdev_id; +} __packed; + void ath10k_wmi_tlv_attach(struct ath10k *ar); struct wmi_tlv_mgmt_tx_cmd { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 58dc2189ba49..c5e1ca5945db 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1,6 +1,7 @@ /* * Copyright (c)
2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -196,6 +197,7 @@ static struct wmi_cmd_map wmi_cmd_map = { .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED, .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, }; /* 10.X WMI cmd track */ @@ -362,6 +364,7 @@ static struct wmi_cmd_map wmi_10x_cmd_map = { .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED, .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, }; /* 10.2.4 WMI cmd track */ @@ -528,6 +531,7 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = { .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, .pdev_bss_chan_info_request_cmdid = WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, + .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, }; /* 10.4 WMI cmd track */ @@ -1480,6 +1484,7 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = { .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = { @@ -1742,8 +1747,8 @@ int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb, cmd_hdr->cmd_id = __cpu_to_le32(cmd); memset(skb_cb, 0, sizeof(*skb_cb)); + trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len); ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb); - trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret); if (ret) goto err_pull; @@ -2703,6 +2708,28 @@ ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src, dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); } +static void +ath10k_wmi_10_4_pull_vdev_stats(const struct wmi_vdev_stats_extd *src, + struct ath10k_fw_stats_vdev_extd *dst) +{ + dst->vdev_id = __le32_to_cpu(src->vdev_id); + dst->ppdu_aggr_cnt = __le32_to_cpu(src->ppdu_aggr_cnt); + dst->ppdu_noack = __le32_to_cpu(src->ppdu_noack); + dst->mpdu_queued = __le32_to_cpu(src->mpdu_queued); + dst->ppdu_nonaggr_cnt = __le32_to_cpu(src->ppdu_nonaggr_cnt); + dst->mpdu_sw_requeued = __le32_to_cpu(src->mpdu_sw_requeued); + dst->mpdu_suc_retry = __le32_to_cpu(src->mpdu_suc_retry); + dst->mpdu_suc_multitry = __le32_to_cpu(src->mpdu_suc_multitry); + dst->mpdu_fail_retry = __le32_to_cpu(src->mpdu_fail_retry); + dst->tx_ftm_suc = __le32_to_cpu(src->tx_ftm_suc); + dst->tx_ftm_suc_retry = __le32_to_cpu(src->tx_ftm_suc_retry); + dst->tx_ftm_fail = __le32_to_cpu(src->tx_ftm_fail); + dst->rx_ftmr_cnt = __le32_to_cpu(src->rx_ftmr_cnt); + dst->rx_ftmr_dup_cnt = __le32_to_cpu(src->rx_ftmr_dup_cnt); + dst->rx_iftmr_cnt = __le32_to_cpu(src->rx_iftmr_cnt); + dst->rx_iftmr_dup_cnt = __le32_to_cpu(src->rx_iftmr_dup_cnt); +} + static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, struct ath10k_fw_stats *stats) @@ -3042,7 +3069,16 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar, */ } - /* fw doesn't implement vdev stats */ + for (i = 0; i < num_vdev_stats; i++) { + const struct wmi_vdev_stats *src; + + /* Ignore vdev stats here as it has only vdev id. Actual vdev + * stats will be retrieved from vdev extended stats. 
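
ath10k_wmi_10_4_pull_vdev_stats() above shows the standard pull pattern: the firmware event is a packed little-endian struct, and every field is byte-swapped into a host-order copy before anything else touches it. A two-field user-space sketch, using glibc's le32toh()/htole32() from <endian.h> in place of the kernel's __le32_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct vdev_stats_wire {		/* as sent by firmware (LE) */
	uint32_t vdev_id;
	uint32_t ppdu_aggr_cnt;
} __attribute__((packed));

struct vdev_stats_host {		/* as used afterwards */
	uint32_t vdev_id;
	uint32_t ppdu_aggr_cnt;
};

static void pull_vdev_stats(const struct vdev_stats_wire *src,
			    struct vdev_stats_host *dst)
{
	dst->vdev_id = le32toh(src->vdev_id);
	dst->ppdu_aggr_cnt = le32toh(src->ppdu_aggr_cnt);
}

int main(void)
{
	struct vdev_stats_wire src = { htole32(1), htole32(42) };
	struct vdev_stats_host dst;

	pull_vdev_stats(&src, &dst);
	printf("vdev %u aggr ppdus %u\n", dst.vdev_id, dst.ppdu_aggr_cnt);
	return 0;
}
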
+ */ + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + } for (i = 0; i < num_peer_stats; i++) { const struct wmi_10_4_peer_stats *src; @@ -3074,26 +3110,43 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar, */ } - if ((stats_id & WMI_10_4_STAT_PEER_EXTD) == 0) - return 0; + if (stats_id & WMI_10_4_STAT_PEER_EXTD) { + stats->extended = true; - stats->extended = true; + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_10_4_peer_extd_stats *src; + struct ath10k_fw_extd_stats_peer *dst; - for (i = 0; i < num_peer_stats; i++) { - const struct wmi_10_4_peer_extd_stats *src; - struct ath10k_fw_extd_stats_peer *dst; + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; - src = (void *)skb->data; - if (!skb_pull(skb, sizeof(*src))) - return -EPROTO; + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; - dst = kzalloc(sizeof(*dst), GFP_ATOMIC); - if (!dst) - continue; + ether_addr_copy(dst->peer_macaddr, + src->peer_macaddr.addr); + dst->rx_duration = __le32_to_cpu(src->rx_duration); + list_add_tail(&dst->list, &stats->peers_extd); + } + } + + if (stats_id & WMI_10_4_STAT_VDEV_EXTD) { + for (i = 0; i < num_vdev_stats; i++) { + const struct wmi_vdev_stats_extd *src; + struct ath10k_fw_stats_vdev_extd *dst; - ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); - dst->rx_duration = __le32_to_cpu(src->rx_duration); - list_add_tail(&dst->list, &stats->peers_extd); + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + ath10k_wmi_10_4_pull_vdev_stats(src, dst); + list_add_tail(&dst->list, &stats->vdevs); + } } return 0; @@ -4313,19 +4366,11 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar, } } -void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) +void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table, + u32 num_tx_chain) { - u32 i, j, pream_idx, num_tx_chain; - u8 rate_code[WMI_TPC_RATE_MAX], rate_idx; - u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; - struct wmi_pdev_tpc_config_event *ev; - struct ath10k_tpc_stats *tpc_stats; - - ev = (struct wmi_pdev_tpc_config_event *)skb->data; - - tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); - if (!tpc_stats) - return; + u32 i, j, pream_idx; + u8 rate_idx; /* Create the rate code table based on the chains supported */ rate_idx = 0; @@ -4349,8 +4394,6 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) pream_table[pream_idx] = rate_idx; pream_idx++; - num_tx_chain = __le32_to_cpu(ev->num_tx_chain); - /* Fill HT20 rate code */ for (i = 0; i < num_tx_chain; i++) { for (j = 0; j < 8; j++) { @@ -4374,7 +4417,7 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) pream_idx++; /* Fill VHT20 rate code */ - for (i = 0; i < __le32_to_cpu(ev->num_tx_chain); i++) { + for (i = 0; i < num_tx_chain; i++) { for (j = 0; j < 10; j++) { rate_code[rate_idx] = ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT); @@ -4418,6 +4461,26 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM); pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END; +} + +void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) +{ + u32 num_tx_chain; + u8 rate_code[WMI_TPC_RATE_MAX]; + u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; + struct wmi_pdev_tpc_config_event *ev; + struct ath10k_tpc_stats *tpc_stats; + + ev 
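
The refactor above pulls the rate-code table construction out into ath10k_wmi_tpc_config_get_rate_code() so the plain and "final" TPC events can share it. The core idea: rate codes are appended preamble block by preamble block, while pream_table[] records the flat index at which each block ends, so a later loop can advance its preamble counter whenever the running rate index hits the next boundary. A condensed, runnable model (sizes trimmed; encode() stands in for ATH10K_HW_RATECODE()):

#include <stdio.h>

#define N_CCK  4
#define N_OFDM 8

static unsigned char encode(int rate, int pream)
{
	return (unsigned char)(pream << 4 | rate);
}

int main(void)
{
	unsigned char rate_code[N_CCK + N_OFDM];
	unsigned short pream_table[2];
	int i, rate_idx = 0, pream_idx = 0;

	for (i = 0; i < N_CCK; i++)		/* CCK block */
		rate_code[rate_idx++] = encode(i, 0);
	pream_table[pream_idx++] = rate_idx;	/* CCK boundary */

	for (i = 0; i < N_OFDM; i++)		/* OFDM block */
		rate_code[rate_idx++] = encode(i, 1);
	pream_table[pream_idx++] = rate_idx;	/* OFDM boundary */

	printf("cck ends at %u, ofdm ends at %u, last code 0x%02x\n",
	       pream_table[0], pream_table[1], rate_code[rate_idx - 1]);
	return 0;
}

The real helper additionally terminates the table with ATH10K_TPC_PREAM_TABLE_END and repeats the blocks per supported chain.
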
= (struct wmi_pdev_tpc_config_event *)skb->data; + + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); + if (!tpc_stats) + return; + + num_tx_chain = __le32_to_cpu(ev->num_tx_chain); + + ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, + num_tx_chain); tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq); tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode); @@ -4457,6 +4520,246 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(ev->rate_max)); } +static u8 +ath10k_wmi_tpc_final_get_rate(struct ath10k *ar, + struct wmi_pdev_tpc_final_table_event *ev, + u32 rate_idx, u32 num_chains, + u32 rate_code, u8 type, u32 pream_idx) +{ + u8 tpc, num_streams, preamble, ch, stm_idx; + s8 pow_agcdd, pow_agstbc, pow_agtxbf; + int pream; + + num_streams = ATH10K_HW_NSS(rate_code); + preamble = ATH10K_HW_PREAMBLE(rate_code); + ch = num_chains - 1; + stm_idx = num_streams - 1; + pream = -1; + + if (__le32_to_cpu(ev->chan_freq) <= 2483) { + switch (pream_idx) { + case WMI_TPC_PREAM_2GHZ_CCK: + pream = 0; + break; + case WMI_TPC_PREAM_2GHZ_OFDM: + pream = 1; + break; + case WMI_TPC_PREAM_2GHZ_HT20: + case WMI_TPC_PREAM_2GHZ_VHT20: + pream = 2; + break; + case WMI_TPC_PREAM_2GHZ_HT40: + case WMI_TPC_PREAM_2GHZ_VHT40: + pream = 3; + break; + case WMI_TPC_PREAM_2GHZ_VHT80: + pream = 4; + break; + default: + pream = -1; + break; + } + } + + if (__le32_to_cpu(ev->chan_freq) >= 5180) { + switch (pream_idx) { + case WMI_TPC_PREAM_5GHZ_OFDM: + pream = 0; + break; + case WMI_TPC_PREAM_5GHZ_HT20: + case WMI_TPC_PREAM_5GHZ_VHT20: + pream = 1; + break; + case WMI_TPC_PREAM_5GHZ_HT40: + case WMI_TPC_PREAM_5GHZ_VHT40: + pream = 2; + break; + case WMI_TPC_PREAM_5GHZ_VHT80: + pream = 3; + break; + case WMI_TPC_PREAM_5GHZ_HTCUP: + pream = 4; + break; + default: + pream = -1; + break; + } + } + + if (pream == 4) + tpc = min_t(u8, ev->rates_array[rate_idx], + ev->max_reg_allow_pow[ch]); + else + tpc = min_t(u8, min_t(u8, ev->rates_array[rate_idx], + ev->max_reg_allow_pow[ch]), + ev->ctl_power_table[0][pream][stm_idx]); + + if (__le32_to_cpu(ev->num_tx_chain) <= 1) + goto out; + + if (preamble == WMI_RATE_PREAMBLE_CCK) + goto out; + + if (num_chains <= num_streams) + goto out; + + switch (type) { + case WMI_TPC_TABLE_TYPE_STBC: + pow_agstbc = ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]; + if (pream == 4) + tpc = min_t(u8, tpc, pow_agstbc); + else + tpc = min_t(u8, min_t(u8, tpc, pow_agstbc), + ev->ctl_power_table[0][pream][stm_idx]); + break; + case WMI_TPC_TABLE_TYPE_TXBF: + pow_agtxbf = ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]; + if (pream == 4) + tpc = min_t(u8, tpc, pow_agtxbf); + else + tpc = min_t(u8, min_t(u8, tpc, pow_agtxbf), + ev->ctl_power_table[1][pream][stm_idx]); + break; + case WMI_TPC_TABLE_TYPE_CDD: + pow_agcdd = ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]; + if (pream == 4) + tpc = min_t(u8, tpc, pow_agcdd); + else + tpc = min_t(u8, min_t(u8, tpc, pow_agcdd), + ev->ctl_power_table[0][pream][stm_idx]); + break; + default: + ath10k_warn(ar, "unknown wmi tpc final table type: %d\n", type); + tpc = 0; + break; + } + +out: + return tpc; +} + +static void +ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar, + struct wmi_pdev_tpc_final_table_event *ev, + struct ath10k_tpc_stats_final *tpc_stats, + u8 *rate_code, u16 *pream_table, u8 type) +{ + u32 i, j, pream_idx, flags; + u8 tpc[WMI_TPC_TX_N_CHAIN]; + char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE]; + char buff[WMI_TPC_BUF_SIZE]; + + flags = __le32_to_cpu(ev->flags); + + switch (type) { + case 
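
At the top of ath10k_wmi_tpc_final_get_rate() above, the channel frequency selects the band and the preamble index is collapsed into a small CTL bucket, with HT and VHT of equal width sharing a bucket. A reduced, runnable model of the 2.4 GHz half of that mapping (enum names are illustrative):

#include <stdio.h>

enum { PREAM_CCK, PREAM_OFDM, PREAM_HT20, PREAM_VHT20 };

static int ctl_bucket(unsigned int freq, int pream_idx)
{
	if (freq > 2483)
		return -1;		/* 5 GHz uses its own switch */

	switch (pream_idx) {
	case PREAM_CCK:
		return 0;
	case PREAM_OFDM:
		return 1;
	case PREAM_HT20:
	case PREAM_VHT20:
		return 2;		/* same width, same CTL bucket */
	default:
		return -1;
	}
}

int main(void)
{
	printf("ht20 -> %d, vht20 -> %d\n",
	       ctl_bucket(2412, PREAM_HT20), ctl_bucket(2412, PREAM_VHT20));
	return 0;
}
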
WMI_TPC_TABLE_TYPE_CDD: + if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n"); + tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; + return; + } + break; + case WMI_TPC_TABLE_TYPE_STBC: + if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n"); + tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; + return; + } + break; + case WMI_TPC_TABLE_TYPE_TXBF: + if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n"); + tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; + return; + } + break; + default: + ath10k_dbg(ar, ATH10K_DBG_WMI, + "invalid table type in wmi tpc event: %d\n", type); + return; + } + + pream_idx = 0; + for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) { + memset(tpc_value, 0, sizeof(tpc_value)); + memset(buff, 0, sizeof(buff)); + if (i == pream_table[pream_idx]) + pream_idx++; + + for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) { + if (j >= __le32_to_cpu(ev->num_tx_chain)) + break; + + tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1, + rate_code[i], + type, pream_idx); + snprintf(buff, sizeof(buff), "%8d ", tpc[j]); + strncat(tpc_value, buff, strlen(buff)); + } + tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx; + tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i]; + memcpy(tpc_stats->tpc_table_final[type].tpc_value[i], + tpc_value, sizeof(tpc_value)); + } +} + +void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) +{ + u32 num_tx_chain; + u8 rate_code[WMI_TPC_FINAL_RATE_MAX]; + u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; + struct wmi_pdev_tpc_final_table_event *ev; + struct ath10k_tpc_stats_final *tpc_stats; + + ev = (struct wmi_pdev_tpc_final_table_event *)skb->data; + + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); + if (!tpc_stats) + return; + + num_tx_chain = __le32_to_cpu(ev->num_tx_chain); + + ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, + num_tx_chain); + + tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq); + tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode); + tpc_stats->ctl = __le32_to_cpu(ev->ctl); + tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain); + tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain); + tpc_stats->twice_antenna_reduction = + __le32_to_cpu(ev->twice_antenna_reduction); + tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); + tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); + tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain); + tpc_stats->rate_max = __le32_to_cpu(ev->rate_max); + + ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats, + rate_code, pream_table, + WMI_TPC_TABLE_TYPE_CDD); + ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats, + rate_code, pream_table, + WMI_TPC_TABLE_TYPE_STBC); + ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats, + rate_code, pream_table, + WMI_TPC_TABLE_TYPE_TXBF); + + ath10k_debug_tpc_stats_final_process(ar, tpc_stats); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi event tpc final table channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chains %d rates %d\n", + __le32_to_cpu(ev->chan_freq), + __le32_to_cpu(ev->phy_mode), + __le32_to_cpu(ev->ctl), + __le32_to_cpu(ev->reg_domain), + a_sle32_to_cpu(ev->twice_antenna_gain), + __le32_to_cpu(ev->twice_antenna_reduction), + __le32_to_cpu(ev->power_limit), + __le32_to_cpu(ev->twice_max_rd_power) / 2, + __le32_to_cpu(ev->num_tx_chain), +
__le32_to_cpu(ev->rate_max)); +} + static void ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb) { @@ -5531,6 +5834,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_10_4_WOW_WAKEUP_HOST_EVENTID: case WMI_10_4_PEER_RATECODE_LIST_EVENTID: case WMI_10_4_WDS_PEER_EVENTID: + case WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID: ath10k_dbg(ar, ATH10K_DBG_WMI, "received event id %d not implemented\n", id); break; @@ -5549,6 +5853,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_10_4_TDLS_PEER_EVENTID: ath10k_wmi_handle_tdls_peer_event(ar, skb); break; + case WMI_10_4_PDEV_TPC_TABLE_EVENTID: + ath10k_wmi_event_tpc_final_table(ar, skb); + break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); break; @@ -7745,6 +8052,72 @@ ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable, return skb; } +static void +ath10k_wmi_fw_vdev_stats_extd_fill(const struct ath10k_fw_stats_vdev_extd *vdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + u32 val; + + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "vdev id", vdev->vdev_id); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "ppdu aggr count", vdev->ppdu_aggr_cnt); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "ppdu noack", vdev->ppdu_noack); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "mpdu queued", vdev->mpdu_queued); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "ppdu nonaggr count", vdev->ppdu_nonaggr_cnt); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "mpdu sw requeued", vdev->mpdu_sw_requeued); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "mpdu success retry", vdev->mpdu_suc_retry); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "mpdu success multitry", vdev->mpdu_suc_multitry); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "mpdu fail retry", vdev->mpdu_fail_retry); + val = vdev->tx_ftm_suc; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "tx ftm success", + MS(val, WMI_VDEV_STATS_FTM_COUNT)); + val = vdev->tx_ftm_suc_retry; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "tx ftm success retry", + MS(val, WMI_VDEV_STATS_FTM_COUNT)); + val = vdev->tx_ftm_fail; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "tx ftm fail", + MS(val, WMI_VDEV_STATS_FTM_COUNT)); + val = vdev->rx_ftmr_cnt; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "rx ftm request count", + MS(val, WMI_VDEV_STATS_FTM_COUNT)); + val = vdev->rx_ftmr_dup_cnt; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "rx ftm request dup count", + MS(val, WMI_VDEV_STATS_FTM_COUNT)); + val = vdev->rx_iftmr_cnt; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "rx initial ftm req count", + MS(val, WMI_VDEV_STATS_FTM_COUNT)); + val = vdev->rx_iftmr_dup_cnt; + if (val & WMI_VDEV_STATS_FTM_COUNT_VALID) + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "rx initial ftm req dup cnt", + MS(val, WMI_VDEV_STATS_FTM_COUNT)); + len += scnprintf(buf + len, buf_len - len, "\n"); + + *length = len; +} + void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats, char 
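
The FTM counters printed by the fill routine above carry an in-band validity flag: bit 31 of each word says whether firmware reported the value at all, and the low 31 bits carry the count, matching the WMI_VDEV_STATS_FTM_COUNT_VALID / _MASK definitions added in wmi.h. A runnable model of the decode:

#include <stdint.h>
#include <stdio.h>

#define FTM_COUNT_VALID (1u << 31)
#define FTM_COUNT_MASK  0x7fffffffu

static void print_ftm(const char *name, uint32_t val)
{
	if (val & FTM_COUNT_VALID)
		printf("%s: %u\n", name, val & FTM_COUNT_MASK);
	else
		printf("%s: not reported\n", name);
}

int main(void)
{
	print_ftm("tx ftm success", FTM_COUNT_VALID | 7);
	print_ftm("tx ftm fail", 7);	/* valid bit clear: skipped */
	return 0;
}
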
*buf) @@ -7752,7 +8125,7 @@ void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar, u32 len = 0; u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; const struct ath10k_fw_stats_pdev *pdev; - const struct ath10k_fw_stats_vdev *vdev; + const struct ath10k_fw_stats_vdev_extd *vdev; const struct ath10k_fw_stats_peer *peer; size_t num_peers; size_t num_vdevs; @@ -7805,9 +8178,8 @@ void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar, "ath10k VDEV stats", num_vdevs); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "================="); - list_for_each_entry(vdev, &fw_stats->vdevs, list) { - ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len); + ath10k_wmi_fw_vdev_stats_extd_fill(vdev, buf, &len); } len += scnprintf(buf + len, buf_len - len, "\n"); @@ -7990,6 +8362,24 @@ static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp) } static struct sk_buff * +ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param) +{ + struct wmi_pdev_get_tpc_table_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_pdev_get_tpc_table_cmd *)skb->data; + cmd->param = __cpu_to_le32(param); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi pdev get tpc table param:%d\n", param); + return skb; +} + +static struct sk_buff * ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar, const struct wmi_tdls_peer_update_cmd_arg *arg, const struct wmi_tdls_peer_capab_arg *cap, @@ -8430,6 +8820,8 @@ static const struct wmi_ops wmi_10_4_ops = { .ext_resource_config = ath10k_wmi_10_4_ext_resource_config, .gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state, .gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update, + .gen_pdev_get_tpc_table_cmdid = + ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid, /* shared with 10.2 */ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index c7b30ed9015d..6fbc84c29521 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -197,6 +198,9 @@ enum wmi_service { WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, WMI_SERVICE_MGMT_TX_WMI, WMI_SERVICE_TDLS_WIDER_BANDWIDTH, + WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, + WMI_SERVICE_TPC_STATS_FINAL, /* keep last */ WMI_SERVICE_MAX, @@ -339,6 +343,9 @@ enum wmi_10_4_service { WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY, WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH, + WMI_10_4_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + WMI_10_4_SERVICE_HOST_DFS_CHECK_SUPPORT, + WMI_10_4_SERVICE_TPC_STATS_FINAL, }; static inline char *wmi_service_name(int service_id) @@ -448,6 +455,9 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE); SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY); SVCSTR(WMI_SERVICE_TDLS_WIDER_BANDWIDTH); + SVCSTR(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS); + SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT); + SVCSTR(WMI_SERVICE_TPC_STATS_FINAL); default: return NULL; } @@ -746,6 +756,12 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, len); SVCMAP(WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH, WMI_SERVICE_TDLS_WIDER_BANDWIDTH, len); + SVCMAP(WMI_10_4_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len); + SVCMAP(WMI_10_4_SERVICE_HOST_DFS_CHECK_SUPPORT, + WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_TPC_STATS_FINAL, + WMI_SERVICE_TPC_STATS_FINAL, len); } #undef SVCMAP @@ -3993,10 +4009,12 @@ struct wmi_pdev_get_tpc_config_cmd { #define WMI_TPC_CONFIG_PARAM 1 #define WMI_TPC_RATE_MAX 160 +#define WMI_TPC_FINAL_RATE_MAX 240 #define WMI_TPC_TX_N_CHAIN 4 #define WMI_TPC_PREAM_TABLE_MAX 10 #define WMI_TPC_FLAG 3 #define WMI_TPC_BUF_SIZE 10 +#define WMI_TPC_BEAMFORMING 2 enum wmi_tpc_table_type { WMI_TPC_TABLE_TYPE_CDD = 0, @@ -4039,6 +4057,51 @@ enum wmi_tp_scale { WMI_TP_SCALE_SIZE = 5, /* max num of enum */ }; +struct wmi_pdev_tpc_final_table_event { + __le32 reg_domain; + __le32 chan_freq; + __le32 phy_mode; + __le32 twice_antenna_reduction; + __le32 twice_max_rd_power; + a_sle32 twice_antenna_gain; + __le32 power_limit; + __le32 rate_max; + __le32 num_tx_chain; + __le32 ctl; + __le32 flags; + s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN]; + s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN]; + s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN]; + s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN]; + u8 rates_array[WMI_TPC_FINAL_RATE_MAX]; + u8 ctl_power_table[WMI_TPC_BEAMFORMING][WMI_TPC_TX_N_CHAIN] + [WMI_TPC_TX_N_CHAIN]; +} __packed; + +struct wmi_pdev_get_tpc_table_cmd { + __le32 param; +} __packed; + +enum wmi_tpc_pream_2ghz { + WMI_TPC_PREAM_2GHZ_CCK = 0, + WMI_TPC_PREAM_2GHZ_OFDM, + WMI_TPC_PREAM_2GHZ_HT20, + WMI_TPC_PREAM_2GHZ_HT40, + WMI_TPC_PREAM_2GHZ_VHT20, + WMI_TPC_PREAM_2GHZ_VHT40, + WMI_TPC_PREAM_2GHZ_VHT80, +}; + +enum wmi_tpc_pream_5ghz { + WMI_TPC_PREAM_5GHZ_OFDM = 1, + WMI_TPC_PREAM_5GHZ_HT20, + WMI_TPC_PREAM_5GHZ_HT40, + WMI_TPC_PREAM_5GHZ_VHT20, + WMI_TPC_PREAM_5GHZ_VHT40, + WMI_TPC_PREAM_5GHZ_VHT80, + WMI_TPC_PREAM_5GHZ_HTCUP, +}; + struct wmi_pdev_chanlist_update_event { /* number of channels */ __le32 num_chan; @@ -4350,6 +4413,7 @@ enum wmi_10_4_stats_id { WMI_10_4_STAT_AP = BIT(1), WMI_10_4_STAT_INST = BIT(2), WMI_10_4_STAT_PEER_EXTD = 
BIT(3), + WMI_10_4_STAT_VDEV_EXTD = BIT(4), }; struct wlan_inst_rssi_args { @@ -4489,12 +4553,36 @@ struct wmi_10_4_pdev_stats { /* * VDEV statistics - * TODO: add all VDEV stats here */ + +#define WMI_VDEV_STATS_FTM_COUNT_VALID BIT(31) +#define WMI_VDEV_STATS_FTM_COUNT_LSB 0 +#define WMI_VDEV_STATS_FTM_COUNT_MASK 0x7fffffff + struct wmi_vdev_stats { __le32 vdev_id; } __packed; +struct wmi_vdev_stats_extd { + __le32 vdev_id; + __le32 ppdu_aggr_cnt; + __le32 ppdu_noack; + __le32 mpdu_queued; + __le32 ppdu_nonaggr_cnt; + __le32 mpdu_sw_requeued; + __le32 mpdu_suc_retry; + __le32 mpdu_suc_multitry; + __le32 mpdu_fail_retry; + __le32 tx_ftm_suc; + __le32 tx_ftm_suc_retry; + __le32 tx_ftm_fail; + __le32 rx_ftmr_cnt; + __le32 rx_ftmr_dup_cnt; + __le32 rx_iftmr_cnt; + __le32 rx_iftmr_dup_cnt; + __le32 reserved[6]; +} __packed; + /* * peer statistics. * TODO: add more stats @@ -6729,6 +6817,7 @@ enum wmi_tdls_state { WMI_TDLS_DISABLE, WMI_TDLS_ENABLE_PASSIVE, WMI_TDLS_ENABLE_ACTIVE, + WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL, }; enum wmi_tdls_peer_state { @@ -6979,5 +7068,8 @@ void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar, int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype); int ath10k_wmi_barrier(struct ath10k *ar); +void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table, + u32 num_tx_chain); +void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb); #endif /* _WMI_H_ */ diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c index 233054bd6b52..12d3a6c92ba4 100644 --- a/drivers/net/wireless/ath/ath5k/attach.c +++ b/drivers/net/wireless/ath/ath5k/attach.c @@ -327,7 +327,7 @@ int ath5k_hw_init(struct ath5k_hw *ah) ath5k_hw_set_lladdr(ah, zero_mac); /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */ - memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN); + eth_broadcast_addr(common->curbssid); ath5k_hw_set_bssid(ah); ath5k_hw_set_opmode(ah, ah->opmode); diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 527afcf39246..a2351ef45ae0 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -73,16 +73,16 @@ #include "trace.h" bool ath5k_modparam_nohwcrypt; -module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO); +module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); static bool modparam_fastchanswitch; -module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO); +module_param_named(fastchanswitch, modparam_fastchanswitch, bool, 0444); MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios."); static bool ath5k_modparam_no_hw_rfkill_switch; module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch, - bool, S_IRUGO); + bool, 0444); MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state"); diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index bd7f6d7b199e..3513bbec4639 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -1004,32 +1004,17 @@ ath5k_debug_init_device(struct ath5k_hw *ah) if (!phydir) return; - debugfs_create_file("debug", S_IWUSR | S_IRUSR, phydir, ah, - &fops_debug); - - debugfs_create_file("registers", S_IRUSR, phydir, ah, &fops_registers); - - debugfs_create_file("beacon", S_IWUSR | S_IRUSR, phydir, ah, - 
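
From here on, most hunks mechanically replace symbolic permission macros with plain octal, the form checkpatch's SYMBOLIC_PERMS warning prefers. The short program below prints the correspondences these conversions rely on, so the substitutions can be checked at a glance; S_IRUGO is a kernel-only macro, so it is spelled out from the POSIX bits here.

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	printf("S_IRUSR | S_IWUSR = %04o\n", S_IRUSR | S_IWUSR); /* 0600 */
	printf("S_IRUSR           = %04o\n", S_IRUSR);           /* 0400 */
	printf("S_IWUSR           = %04o\n", S_IWUSR);           /* 0200 */
	printf("S_IRUGO           = %04o\n",
	       S_IRUSR | S_IRGRP | S_IROTH);                     /* 0444 */
	printf("S_IRUGO | S_IWUSR = %04o\n",
	       S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR);           /* 0644 */
	return 0;
}
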
&fops_beacon); - - debugfs_create_file("reset", S_IWUSR, phydir, ah, &fops_reset); - - debugfs_create_file("antenna", S_IWUSR | S_IRUSR, phydir, ah, - &fops_antenna); - - debugfs_create_file("misc", S_IRUSR, phydir, ah, &fops_misc); - - debugfs_create_file("eeprom", S_IRUSR, phydir, ah, &fops_eeprom); - - debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, ah, - &fops_frameerrors); - - debugfs_create_file("ani", S_IWUSR | S_IRUSR, phydir, ah, &fops_ani); - - debugfs_create_file("queue", S_IWUSR | S_IRUSR, phydir, ah, - &fops_queue); - - debugfs_create_bool("32khz_clock", S_IWUSR | S_IRUSR, phydir, + debugfs_create_file("debug", 0600, phydir, ah, &fops_debug); + debugfs_create_file("registers", 0400, phydir, ah, &fops_registers); + debugfs_create_file("beacon", 0600, phydir, ah, &fops_beacon); + debugfs_create_file("reset", 0200, phydir, ah, &fops_reset); + debugfs_create_file("antenna", 0600, phydir, ah, &fops_antenna); + debugfs_create_file("misc", 0400, phydir, ah, &fops_misc); + debugfs_create_file("eeprom", 0400, phydir, ah, &fops_eeprom); + debugfs_create_file("frameerrors", 0600, phydir, ah, &fops_frameerrors); + debugfs_create_file("ani", 0600, phydir, ah, &fops_ani); + debugfs_create_file("queue", 0600, phydir, ah, &fops_queue); + debugfs_create_bool("32khz_clock", 0600, phydir, &ah->ah_use_32khz_clock); } diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c index beda11ce34a7..147947f632f7 100644 --- a/drivers/net/wireless/ath/ath5k/qcu.c +++ b/drivers/net/wireless/ath/ath5k/qcu.c @@ -327,8 +327,6 @@ ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); - tq = &ah->ah_txq[queue]; - /* Skip if queue inactive or if we are on AR5210 * that doesn't have QCU/DCU */ if ((ah->ah_version == AR5K_AR5210) || diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c index 25978c732fe1..8113baddd8fc 100644 --- a/drivers/net/wireless/ath/ath5k/sysfs.c +++ b/drivers/net/wireless/ath/ath5k/sysfs.c @@ -31,7 +31,7 @@ static ssize_t ath5k_attr_store_##name(struct device *dev, \ set(ah, val); \ return count; \ } \ -static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, \ +static DEVICE_ATTR(name, 0644, \ ath5k_attr_show_##name, ath5k_attr_store_##name) #define SIMPLE_SHOW(name, get) \ @@ -43,7 +43,7 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \ struct ath5k_hw *ah = hw->priv; \ return snprintf(buf, PAGE_SIZE, "%d\n", get); \ } \ -static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL) +static DEVICE_ATTR(name, 0444, ath5k_attr_show_##name, NULL) /*** ANI ***/ @@ -66,7 +66,7 @@ static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev, { return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_NOISE_IMM_LVL); } -static DEVICE_ATTR(noise_immunity_level_max, S_IRUGO, +static DEVICE_ATTR(noise_immunity_level_max, 0444, ath5k_attr_show_noise_immunity_level_max, NULL); static ssize_t ath5k_attr_show_firstep_level_max(struct device *dev, @@ -75,7 +75,7 @@ static ssize_t ath5k_attr_show_firstep_level_max(struct device *dev, { return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_FIRSTEP_LVL); } -static DEVICE_ATTR(firstep_level_max, S_IRUGO, +static DEVICE_ATTR(firstep_level_max, 0444, ath5k_attr_show_firstep_level_max, NULL); static struct attribute *ath5k_sysfs_entries_ani[] = { diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c index 1eea6c23976f..0f965e9f38a4 100644 --- 
a/drivers/net/wireless/ath/ath6kl/debug.c +++ b/drivers/net/wireless/ath/ath6kl/debug.c @@ -1794,69 +1794,68 @@ int ath6kl_debug_init_fs(struct ath6kl *ar) if (!ar->debugfs_phy) return -ENOMEM; - debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar, + debugfs_create_file("tgt_stats", 0400, ar->debugfs_phy, ar, &fops_tgt_stats); if (ar->hif_type == ATH6KL_HIF_TYPE_SDIO) - debugfs_create_file("credit_dist_stats", S_IRUSR, + debugfs_create_file("credit_dist_stats", 0400, ar->debugfs_phy, ar, &fops_credit_dist_stats); - debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR, + debugfs_create_file("endpoint_stats", 0600, ar->debugfs_phy, ar, &fops_endpoint_stats); - debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar, - &fops_fwlog); + debugfs_create_file("fwlog", 0400, ar->debugfs_phy, ar, &fops_fwlog); - debugfs_create_file("fwlog_block", S_IRUSR, ar->debugfs_phy, ar, + debugfs_create_file("fwlog_block", 0400, ar->debugfs_phy, ar, &fops_fwlog_block); - debugfs_create_file("fwlog_mask", S_IRUSR | S_IWUSR, ar->debugfs_phy, + debugfs_create_file("fwlog_mask", 0600, ar->debugfs_phy, ar, &fops_fwlog_mask); - debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar, + debugfs_create_file("reg_addr", 0600, ar->debugfs_phy, ar, &fops_diag_reg_read); - debugfs_create_file("reg_dump", S_IRUSR, ar->debugfs_phy, ar, + debugfs_create_file("reg_dump", 0400, ar->debugfs_phy, ar, &fops_reg_dump); - debugfs_create_file("lrssi_roam_threshold", S_IRUSR | S_IWUSR, + debugfs_create_file("lrssi_roam_threshold", 0600, ar->debugfs_phy, ar, &fops_lrssi_roam_threshold); - debugfs_create_file("reg_write", S_IRUSR | S_IWUSR, + debugfs_create_file("reg_write", 0600, ar->debugfs_phy, ar, &fops_diag_reg_write); - debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar, + debugfs_create_file("war_stats", 0400, ar->debugfs_phy, ar, &fops_war_stats); - debugfs_create_file("roam_table", S_IRUSR, ar->debugfs_phy, ar, + debugfs_create_file("roam_table", 0400, ar->debugfs_phy, ar, &fops_roam_table); - debugfs_create_file("force_roam", S_IWUSR, ar->debugfs_phy, ar, + debugfs_create_file("force_roam", 0200, ar->debugfs_phy, ar, &fops_force_roam); - debugfs_create_file("roam_mode", S_IWUSR, ar->debugfs_phy, ar, + debugfs_create_file("roam_mode", 0200, ar->debugfs_phy, ar, &fops_roam_mode); - debugfs_create_file("keepalive", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar, + debugfs_create_file("keepalive", 0600, ar->debugfs_phy, ar, &fops_keepalive); - debugfs_create_file("disconnect_timeout", S_IRUSR | S_IWUSR, + debugfs_create_file("disconnect_timeout", 0600, ar->debugfs_phy, ar, &fops_disconnect_timeout); - debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar, + debugfs_create_file("create_qos", 0200, ar->debugfs_phy, ar, &fops_create_qos); - debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar, + debugfs_create_file("delete_qos", 0200, ar->debugfs_phy, ar, &fops_delete_qos); - debugfs_create_file("bgscan_interval", S_IWUSR, + debugfs_create_file("bgscan_interval", 0200, ar->debugfs_phy, ar, &fops_bgscan_int); - debugfs_create_file("listen_interval", S_IRUSR | S_IWUSR, + debugfs_create_file("listen_interval", 0600, ar->debugfs_phy, ar, &fops_listen_int); - debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar, + debugfs_create_file("power_params", 0200, ar->debugfs_phy, ar, &fops_power_params); return 0; diff --git a/drivers/net/wireless/ath/ath9k/common-debug.c b/drivers/net/wireless/ath/ath9k/common-debug.c index 84afcf78151f..239429f10378 100644 --- 
a/drivers/net/wireless/ath/ath9k/common-debug.c +++ b/drivers/net/wireless/ath/ath9k/common-debug.c @@ -47,7 +47,7 @@ static const struct file_operations fops_modal_eeprom = { void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy, struct ath_hw *ah) { - debugfs_create_file("modal_eeprom", S_IRUSR, debugfs_phy, ah, + debugfs_create_file("modal_eeprom", 0400, debugfs_phy, ah, &fops_modal_eeprom); } EXPORT_SYMBOL(ath9k_cmn_debug_modal_eeprom); @@ -82,7 +82,7 @@ static const struct file_operations fops_base_eeprom = { void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy, struct ath_hw *ah) { - debugfs_create_file("base_eeprom", S_IRUSR, debugfs_phy, ah, + debugfs_create_file("base_eeprom", 0400, debugfs_phy, ah, &fops_base_eeprom); } EXPORT_SYMBOL(ath9k_cmn_debug_base_eeprom); @@ -178,8 +178,7 @@ static const struct file_operations fops_recv = { void ath9k_cmn_debug_recv(struct dentry *debugfs_phy, struct ath_rx_stats *rxstats) { - debugfs_create_file("recv", S_IRUSR, debugfs_phy, rxstats, - &fops_recv); + debugfs_create_file("recv", 0400, debugfs_phy, rxstats, &fops_recv); } EXPORT_SYMBOL(ath9k_cmn_debug_recv); @@ -255,7 +254,7 @@ static const struct file_operations fops_phy_err = { void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy, struct ath_rx_stats *rxstats) { - debugfs_create_file("phy_err", S_IRUSR, debugfs_phy, rxstats, + debugfs_create_file("phy_err", 0400, debugfs_phy, rxstats, &fops_phy_err); } EXPORT_SYMBOL(ath9k_cmn_debug_phy_err); diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c index 8b4f7fdabf58..82de0fadbc95 100644 --- a/drivers/net/wireless/ath/ath9k/common-init.c +++ b/drivers/net/wireless/ath/ath9k/common-init.c @@ -88,7 +88,7 @@ static const struct ieee80211_channel ath9k_5ghz_chantable[] = { CHAN5G(5825, 37), /* Channel 165 */ }; -/* Atheros hardware rate code addition for short premble */ +/* Atheros hardware rate code addition for short preamble */ #define SHPCHECK(__hw_rate, __flags) \ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? 
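
The common-spectral.c change above alters the semantics of the buffer-full check: the old code iterated online CPUs and reported "full" only when every per-CPU relay buffer was full, while the new code walks all possible CPUs, skips those that never got a buffer, and reports "full" as soon as any buffer is. A toy model of the new predicate, with NULL entries standing in for never-allocated per-CPU buffers:

#include <stdio.h>

#define NR_CPUS 4

struct rbuf { int full; };

static int any_buf_full(struct rbuf *bufs[NR_CPUS])
{
	int cpu, ret = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (bufs[cpu])		/* possible but unused CPU? skip */
			ret += bufs[cpu]->full;
	}
	return ret != 0;
}

int main(void)
{
	struct rbuf ok = { 0 }, full = { 1 };
	struct rbuf *bufs[NR_CPUS] = { &ok, &full, NULL, NULL };

	printf("full: %d\n", any_buf_full(bufs));	/* 1 */
	return 0;
}

Treating one full buffer as "full" is the conservative choice for a producer deciding whether to drop samples, and the NULL check keeps hotplugged-out CPUs from being dereferenced.
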
(__hw_rate | 0x04 ) : 0) diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index 5e77fe1f5b0d..440e16e641e4 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -479,14 +479,16 @@ ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv) { int i = 0; int ret = 0; + struct rchan_buf *buf; struct rchan *rc = spec_priv->rfs_chan_spec_scan; - for_each_online_cpu(i) - ret += relay_buf_full(*per_cpu_ptr(rc->buf, i)); - - i = num_online_cpus(); + for_each_possible_cpu(i) { + if ((buf = *per_cpu_ptr(rc->buf, i))) { + ret += relay_buf_full(buf); + } + } - if (ret == i) + if (ret) return 1; else return 0; @@ -1096,23 +1098,23 @@ void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, return; debugfs_create_file("spectral_scan_ctl", - S_IRUSR | S_IWUSR, + 0600, debugfs_phy, spec_priv, &fops_spec_scan_ctl); debugfs_create_file("spectral_short_repeat", - S_IRUSR | S_IWUSR, + 0600, debugfs_phy, spec_priv, &fops_spectral_short_repeat); debugfs_create_file("spectral_count", - S_IRUSR | S_IWUSR, + 0600, debugfs_phy, spec_priv, &fops_spectral_count); debugfs_create_file("spectral_period", - S_IRUSR | S_IWUSR, + 0600, debugfs_phy, spec_priv, &fops_spectral_period); debugfs_create_file("spectral_fft_period", - S_IRUSR | S_IWUSR, + 0600, debugfs_phy, spec_priv, &fops_spectral_fft_period); } diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 9e8aed5c478c..f685843a2ff3 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1385,7 +1385,7 @@ int ath9k_init_debug(struct ath_hw *ah) return -ENOMEM; #ifdef CONFIG_ATH_DEBUG - debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, + debugfs_create_file("debug", 0600, sc->debug.debugfs_phy, sc, &fops_debug); #endif @@ -1409,22 +1409,22 @@ int ath9k_init_debug(struct ath_hw *ah) ath9k_cmn_debug_recv(sc->debug.debugfs_phy, &sc->debug.stats.rxstats); ath9k_cmn_debug_phy_err(sc->debug.debugfs_phy, &sc->debug.stats.rxstats); - debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy, + debugfs_create_u8("rx_chainmask", 0400, sc->debug.debugfs_phy, &ah->rxchainmask); - debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy, + debugfs_create_u8("tx_chainmask", 0400, sc->debug.debugfs_phy, &ah->txchainmask); - debugfs_create_file("ani", S_IRUSR | S_IWUSR, + debugfs_create_file("ani", 0600, sc->debug.debugfs_phy, sc, &fops_ani); - debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, + debugfs_create_bool("paprd", 0600, sc->debug.debugfs_phy, &sc->sc_ah->config.enable_paprd); - debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, + debugfs_create_file("regidx", 0600, sc->debug.debugfs_phy, sc, &fops_regidx); - debugfs_create_file("regval", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, + debugfs_create_file("regval", 0600, sc->debug.debugfs_phy, sc, &fops_regval); - debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR, + debugfs_create_bool("ignore_extcca", 0600, sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca); - debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy, sc, + debugfs_create_file("regdump", 0400, sc->debug.debugfs_phy, sc, &fops_regdump); debugfs_create_devm_seqfile(sc->dev, "dump_nfcal", sc->debug.debugfs_phy, @@ -1433,35 +1433,33 @@ int ath9k_init_debug(struct ath_hw *ah) ath9k_cmn_debug_base_eeprom(sc->debug.debugfs_phy, sc->sc_ah); 
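The ath6kl and ath9k hunks above are largely mechanical conversions of symbolic permission macros to plain octal (S_IRUSR to 0400, S_IWUSR to 0200, S_IRUSR | S_IWUSR to 0600, S_IRUGO to 0444). If the equivalences need checking, a small userspace C sketch can assert them at compile time; S_IRUGO is a kernel-only macro, so it is expanded by hand here. This is illustrative only, not part of the series:

/* Self-check for the symbolic-to-octal permission conversions;
 * compiles only if every mapping is exact.
 */
#include <assert.h>
#include <sys/stat.h>

static_assert(S_IRUSR == 0400, "owner read");
static_assert(S_IWUSR == 0200, "owner write");
static_assert((S_IRUSR | S_IWUSR) == 0600, "owner read/write");
static_assert((S_IRUSR | S_IRGRP | S_IROTH) == 0444, "S_IRUGO");
static_assert((S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR) == 0644,
              "S_IRUGO | S_IWUSR");

int main(void) { return 0; }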
ath9k_cmn_debug_modal_eeprom(sc->debug.debugfs_phy, sc->sc_ah); - debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR, + debugfs_create_u32("gpio_mask", 0600, sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask); - debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR, + debugfs_create_u32("gpio_val", 0600, sc->debug.debugfs_phy, &sc->sc_ah->gpio_val); - debugfs_create_file("antenna_diversity", S_IRUSR, + debugfs_create_file("antenna_diversity", 0400, sc->debug.debugfs_phy, sc, &fops_antenna_diversity); #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT - debugfs_create_file("bt_ant_diversity", S_IRUSR | S_IWUSR, + debugfs_create_file("bt_ant_diversity", 0600, sc->debug.debugfs_phy, sc, &fops_bt_ant_diversity); - debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc, + debugfs_create_file("btcoex", 0400, sc->debug.debugfs_phy, sc, &fops_btcoex); #endif #ifdef CONFIG_ATH9K_WOW - debugfs_create_file("wow", S_IRUSR | S_IWUSR, - sc->debug.debugfs_phy, sc, &fops_wow); + debugfs_create_file("wow", 0600, sc->debug.debugfs_phy, sc, &fops_wow); #endif #ifdef CONFIG_ATH9K_DYNACK - debugfs_create_file("ack_to", S_IRUSR, sc->debug.debugfs_phy, + debugfs_create_file("ack_to", 0400, sc->debug.debugfs_phy, sc, &fops_ackto); #endif - debugfs_create_file("tpc", S_IRUSR | S_IWUSR, - sc->debug.debugfs_phy, sc, &fops_tpc); + debugfs_create_file("tpc", 0600, sc->debug.debugfs_phy, sc, &fops_tpc); - debugfs_create_u16("airtime_flags", S_IRUSR | S_IWUSR, + debugfs_create_u16("airtime_flags", 0600, sc->debug.debugfs_phy, &sc->airtime_flags); - debugfs_create_file("nf_override", S_IRUSR | S_IWUSR, + debugfs_create_file("nf_override", 0600, sc->debug.debugfs_phy, sc, &fops_nf_override); return 0; diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c index efc692ee67d4..a6f45f1bb5bb 100644 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c @@ -302,7 +302,7 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw, { struct ath_node *an = (struct ath_node *)sta->drv_priv; - debugfs_create_file("node_aggr", S_IRUGO, dir, an, &fops_node_aggr); - debugfs_create_file("node_recv", S_IRUGO, dir, an, &fops_node_recv); - debugfs_create_file("airtime", S_IRUGO, dir, an, &fops_airtime); + debugfs_create_file("node_aggr", 0444, dir, an, &fops_node_aggr); + debugfs_create_file("node_recv", 0444, dir, an, &fops_node_recv); + debugfs_create_file("airtime", 0444, dir, an, &fops_airtime); } diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c index 8824610c21fb..3251c9abe270 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_debug.c +++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c @@ -144,8 +144,8 @@ static const struct file_operations fops_dfs_stats = { void ath9k_dfs_init_debug(struct ath_softc *sc) { - debugfs_create_file("dfs_stats", S_IRUSR, + debugfs_create_file("dfs_stats", 0400, sc->debug.debugfs_phy, sc, &fops_dfs_stats); - debugfs_create_file("dfs_simulate_radar", S_IWUSR, + debugfs_create_file("dfs_simulate_radar", 0200, sc->debug.debugfs_phy, sc, &fops_simulate_radar); } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c index dc79afd7e151..b3ed65e5c4da 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c @@ -496,25 +496,25 @@ int ath9k_htc_init_debug(struct ath_hw *ah) ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy); - debugfs_create_file("tgt_int_stats", S_IRUSR, 
priv->debug.debugfs_phy, + debugfs_create_file("tgt_int_stats", 0400, priv->debug.debugfs_phy, priv, &fops_tgt_int_stats); - debugfs_create_file("tgt_tx_stats", S_IRUSR, priv->debug.debugfs_phy, + debugfs_create_file("tgt_tx_stats", 0400, priv->debug.debugfs_phy, priv, &fops_tgt_tx_stats); - debugfs_create_file("tgt_rx_stats", S_IRUSR, priv->debug.debugfs_phy, + debugfs_create_file("tgt_rx_stats", 0400, priv->debug.debugfs_phy, priv, &fops_tgt_rx_stats); - debugfs_create_file("xmit", S_IRUSR, priv->debug.debugfs_phy, + debugfs_create_file("xmit", 0400, priv->debug.debugfs_phy, priv, &fops_xmit); - debugfs_create_file("skb_rx", S_IRUSR, priv->debug.debugfs_phy, + debugfs_create_file("skb_rx", 0400, priv->debug.debugfs_phy, priv, &fops_skb_rx); ath9k_cmn_debug_recv(priv->debug.debugfs_phy, &priv->debug.rx_stats); ath9k_cmn_debug_phy_err(priv->debug.debugfs_phy, &priv->debug.rx_stats); - debugfs_create_file("slot", S_IRUSR, priv->debug.debugfs_phy, + debugfs_create_file("slot", 0400, priv->debug.debugfs_phy, priv, &fops_slot); - debugfs_create_file("queue", S_IRUSR, priv->debug.debugfs_phy, + debugfs_create_file("queue", 0400, priv->debug.debugfs_phy, priv, &fops_queue); - debugfs_create_file("debug", S_IRUSR | S_IWUSR, priv->debug.debugfs_phy, + debugfs_create_file("debug", 0600, priv->debug.debugfs_phy, priv, &fops_debug); ath9k_cmn_debug_base_eeprom(priv->debug.debugfs_phy, priv->ah); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index e89e5ef2c2a4..214c68269a69 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -591,7 +591,7 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv) { struct ath_common *common = ath9k_hw_common(priv->ah); - memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN); + eth_broadcast_addr(common->bssidmask); common->last_rssi = ATH_RSSI_DUMMY_MARKER; priv->ah->opmode = NL80211_IFTYPE_STATION; @@ -729,6 +729,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, ieee80211_hw_set(hw, SPECTRUM_MGMT); ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, DOESNT_SUPPORT_QOS_NDP); if (ath9k_ps_enable) ieee80211_hw_set(hw, SUPPORTS_PS); diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index cd0f023ccf77..6b37036b2d36 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -184,7 +184,8 @@ u16 ath9k_hw_computetxtime(struct ath_hw *ah, break; case WLAN_RC_PHY_OFDM: if (ah->curchan && IS_CHAN_QUARTER_RATE(ah->curchan)) { - bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000; + bitsPerSymbol = + ((kbps >> 2) * OFDM_SYMBOL_TIME_QUARTER) / 1000; numBits = OFDM_PLCP_BITS + (frameLen << 3); numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); txTime = OFDM_SIFS_TIME_QUARTER @@ -192,7 +193,8 @@ u16 ath9k_hw_computetxtime(struct ath_hw *ah, + (numSymbols * OFDM_SYMBOL_TIME_QUARTER); } else if (ah->curchan && IS_CHAN_HALF_RATE(ah->curchan)) { - bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_HALF) / 1000; + bitsPerSymbol = + ((kbps >> 1) * OFDM_SYMBOL_TIME_HALF) / 1000; numBits = OFDM_PLCP_BITS + (frameLen << 3); numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); txTime = OFDM_SIFS_TIME_HALF + @@ -1036,7 +1038,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) int acktimeout, ctstimeout, ack_offset = 0; int slottime; int sifstime; - int rx_lat = 0, tx_lat = 0, eifs = 0; + int rx_lat = 0, tx_lat = 0, eifs = 0, ack_shift = 0; u32 
reg; ath_dbg(ath9k_hw_common(ah), RESET, "ah->misc_mode 0x%x\n", @@ -1068,6 +1070,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) sifstime = 32; ack_offset = 16; + ack_shift = 3; slottime = 13; } else if (IS_CHAN_QUARTER_RATE(chan)) { eifs = 340; @@ -1078,6 +1081,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) sifstime = 64; ack_offset = 32; + ack_shift = 1; slottime = 21; } else { if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) { @@ -1134,6 +1138,10 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) SM(tx_lat, AR_USEC_TX_LAT), AR_USEC_TX_LAT | AR_USEC_RX_LAT | AR_USEC_USEC); + if (IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan)) + REG_RMW(ah, AR_TXSIFS, + sifstime | SM(ack_shift, AR_TXSIFS_ACK_SHIFT), + (AR_TXSIFS_TIME | AR_TXSIFS_ACK_SHIFT)); } EXPORT_SYMBOL(ath9k_hw_init_global_settings); diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index e479fae5aab9..c070a9e51ebf 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -257,6 +257,11 @@ static void ath9k_reg_notifier(struct wiphy *wiphy, ath_reg_notifier_apply(wiphy, request, reg); + /* synchronize DFS detector if regulatory domain changed */ + if (sc->dfs_detector != NULL) + sc->dfs_detector->set_dfs_domain(sc->dfs_detector, + request->dfs_region); + /* Set tx power */ if (!ah->curchan) return; @@ -267,10 +272,6 @@ static void ath9k_reg_notifier(struct wiphy *wiphy, ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower, sc->cur_chan->txpower, &sc->cur_chan->cur_txpower); - /* synchronize DFS detector if regulatory domain changed */ - if (sc->dfs_detector != NULL) - sc->dfs_detector->set_dfs_domain(sc->dfs_detector, - request->dfs_region); ath9k_ps_restore(sc); } @@ -427,7 +428,7 @@ static void ath9k_init_misc(struct ath_softc *sc) timer_setup(&common->ani.timer, ath_ani_calibrate, 0); common->last_rssi = ATH_RSSI_DUMMY_MARKER; - memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN); + eth_broadcast_addr(common->bssidmask); sc->beacon.slottime = 9; for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index fe3a8263b224..ce50d8f5835e 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ b/drivers/net/wireless/ath/ath9k/tx99.c @@ -278,10 +278,10 @@ void ath9k_tx99_init_debug(struct ath_softc *sc) if (!AR_SREV_9280_20_OR_LATER(sc->sc_ah)) return; - debugfs_create_file("tx99", S_IRUSR | S_IWUSR, + debugfs_create_file("tx99", 0600, sc->debug.debugfs_phy, sc, &fops_tx99); - debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR, + debugfs_create_file("tx99_power", 0600, sc->debug.debugfs_phy, sc, &fops_tx99_power); } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 396bf05c6bf6..7fdb152be0bb 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -252,7 +252,7 @@ ath_tid_pull(struct ath_atx_tid *tid) } return skb; - } +} static bool ath_tid_has_buffered(struct ath_atx_tid *tid) @@ -2892,6 +2892,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) struct ath_txq *txq; int tidno; + rcu_read_lock(); + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) { tid = ath_node_to_tid(an, tidno); txq = tid->txq; @@ -2909,6 +2911,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) if (!an->sta) break; /* just one multicast ath_atx_tid */ } + + rcu_read_unlock(); } #ifdef CONFIG_ATH9K_TX99 diff --git 
a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c index ec3a64e5d2bb..a9b6dc17e408 100644 --- a/drivers/net/wireless/ath/carl9170/debug.c +++ b/drivers/net/wireless/ath/carl9170/debug.c @@ -187,21 +187,21 @@ static const struct carl9170_debugfs_fops carl_debugfs_##name ##_ops = {\ #define DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize) \ DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \ - NULL, _read_bufsize, S_IRUSR) + NULL, _read_bufsize, 0400) #define DEBUGFS_DECLARE_WO_FILE(name) \ DEBUGFS_DECLARE_FILE(name, NULL, carl9170_debugfs_##name ##_write,\ - 0, S_IWUSR) + 0, 0200) #define DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize) \ DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \ carl9170_debugfs_##name ##_write, \ - _read_bufsize, S_IRUSR | S_IWUSR) + _read_bufsize, 0600) #define __DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize, _dstate) \ __DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \ carl9170_debugfs_##name ##_write, \ - _read_bufsize, S_IRUSR | S_IWUSR, _dstate) + _read_bufsize, 0600, _dstate) #define DEBUGFS_READONLY_FILE(name, _read_bufsize, fmt, value...) \ static char *carl9170_debugfs_ ##name ## _read(struct ar9170 *ar, \ diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 988c8857d78c..29e93c953d93 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -48,11 +48,11 @@ #include "cmd.h" static bool modparam_nohwcrypt; -module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload."); int modparam_noht; -module_param_named(noht, modparam_noht, int, S_IRUGO); +module_param_named(noht, modparam_noht, int, 0444); MODULE_PARM_DESC(noht, "Disable MPDU aggregation."); #define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \ diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index 4100ffd42a43..448b83eea810 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c @@ -115,7 +115,7 @@ static const struct radar_detector_specs jp_radar_ref_types[] = { JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false), JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false), JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false), - JP_PATTERN(7, 50, 100, 1000, 2000, 1, 3, 50, false), + JP_PATTERN(7, 50, 100, 1000, 2000, 1, 3, 50, true), JP_PATTERN(5, 0, 1, 333, 333, 1, 9, 50, false), }; diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c index 2a6bb62e785c..389b5e7129a6 100644 --- a/drivers/net/wireless/ath/wcn36xx/debug.c +++ b/drivers/net/wireless/ath/wcn36xx/debug.c @@ -161,9 +161,8 @@ void wcn36xx_debugfs_init(struct wcn36xx *wcn) dfs->rootdir = NULL; } - ADD_FILE(bmps_switcher, S_IRUSR | S_IWUSR, - &fops_wcn36xx_bmps, wcn); - ADD_FILE(dump, S_IWUSR, &fops_wcn36xx_dump, wcn); + ADD_FILE(bmps_switcher, 0600, &fops_wcn36xx_bmps, wcn); + ADD_FILE(dump, 0200, &fops_wcn36xx_dump, wcn); } void wcn36xx_debugfs_exit(struct wcn36xx *wcn) diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index a3f1f7d042a4..2c3b899a88fa 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -27,15 +27,6 @@ #include "wcn36xx.h" #include "txrx.h" -void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool 
is_low) -{ - struct wcn36xx_dxe_ch *ch = is_low ? - &wcn->dxe_tx_l_ch : - &wcn->dxe_tx_h_ch; - - return ch->head_blk_ctl->bd_cpu_addr; -} - static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data) { wcn36xx_dbg(WCN36XX_DBG_DXE, @@ -376,7 +367,7 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) spin_lock_irqsave(&ch->lock, flags); ctl = ch->tail_blk_ctl; do { - if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK) + if (ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD) break; if (ctl->skb) { dma_unmap_single(wcn->dev, ctl->desc->src_addr_l, @@ -397,7 +388,7 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) } ctl = ctl->next; } while (ctl != ch->head_blk_ctl && - !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)); + !(ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD)); ch->tail_blk_ctl = ctl; spin_unlock_irqrestore(&ch->lock, flags); @@ -415,14 +406,31 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H, &int_reason); - /* TODO: Check int_reason */ - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, WCN36XX_INT_MASK_CHAN_TX_H); - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR, - WCN36XX_INT_MASK_CHAN_TX_H); + if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK ) { + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_0_INT_ERR_CLR, + WCN36XX_INT_MASK_CHAN_TX_H); + + wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n", + int_src); + } + + if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) { + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_0_INT_DONE_CLR, + WCN36XX_INT_MASK_CHAN_TX_H); + } + + if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) { + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_0_INT_ED_CLR, + WCN36XX_INT_MASK_CHAN_TX_H); + } + wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n"); reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch); } @@ -431,14 +439,33 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L, &int_reason); - /* TODO: Check int_reason */ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, WCN36XX_INT_MASK_CHAN_TX_L); - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR, - WCN36XX_INT_MASK_CHAN_TX_L); + + if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK ) { + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_0_INT_ERR_CLR, + WCN36XX_INT_MASK_CHAN_TX_L); + + wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n", + int_src); + } + + if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) { + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_0_INT_DONE_CLR, + WCN36XX_INT_MASK_CHAN_TX_L); + } + + if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) { + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_0_INT_ED_CLR, + WCN36XX_INT_MASK_CHAN_TX_L); + } + wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n"); reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch); } @@ -503,7 +530,7 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, int_mask = WCN36XX_DXE_INT_CH3_MASK; } - while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) { + while (!(dxe->ctrl & WCN36xx_DXE_CTRL_VLD)) { skb = ctl->skb; dma_addr = dxe->dst_addr_l; ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl); @@ -612,6 +639,7 @@ void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn) int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, struct wcn36xx_vif *vif_priv, + struct wcn36xx_tx_bd *bd, struct sk_buff *skb, bool is_low) { @@ -645,6 +673,9 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, ctl->skb = NULL; desc = ctl->desc; + /* write buffer descriptor */ + memcpy(ctl->bd_cpu_addr, bd, 
sizeof(*bd)); + /* Set source address of the BD we send */ desc->src_addr_l = ctl->bd_phy_addr; diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h index c012e807753b..ce580960d109 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.h +++ b/drivers/net/wireless/ath/wcn36xx/dxe.h @@ -33,15 +33,106 @@ H2H_TEST_RX_TX = DMA2 #define WCN36XX_CCU_DXE_INT_SELECT_RIVA 0x310 #define WCN36XX_CCU_DXE_INT_SELECT_PRONTO 0x10dc -/* TODO This must calculated properly but not hardcoded */ -#define WCN36XX_DXE_CTRL_TX_L 0x328a44 -#define WCN36XX_DXE_CTRL_TX_H 0x32ce44 -#define WCN36XX_DXE_CTRL_RX_L 0x12ad2f -#define WCN36XX_DXE_CTRL_RX_H 0x12d12f -#define WCN36XX_DXE_CTRL_TX_H_BD 0x30ce45 -#define WCN36XX_DXE_CTRL_TX_H_SKB 0x32ce4d -#define WCN36XX_DXE_CTRL_TX_L_BD 0x308a45 -#define WCN36XX_DXE_CTRL_TX_L_SKB 0x328a4d +/* Descriptor valid */ +#define WCN36xx_DXE_CTRL_VLD BIT(0) +/* End of packet */ +#define WCN36xx_DXE_CTRL_EOP BIT(3) +/* BD handling bit */ +#define WCN36xx_DXE_CTRL_BDH BIT(4) +/* Source is a queue */ +#define WCN36xx_DXE_CTRL_SIQ BIT(5) +/* Destination is a queue */ +#define WCN36xx_DXE_CTRL_DIQ BIT(6) +/* Pointer address is a queue */ +#define WCN36xx_DXE_CTRL_PIQ BIT(7) +/* Release PDU when done */ +#define WCN36xx_DXE_CTRL_PDU_REL BIT(8) +/* STOP channel processing */ +#define WCN36xx_DXE_CTRL_STOP BIT(16) +/* INT on descriptor done */ +#define WCN36xx_DXE_CTRL_INT BIT(17) +/* Endian byte swap enable */ +#define WCN36xx_DXE_CTRL_SWAP BIT(20) +/* Master endianness */ +#define WCN36xx_DXE_CTRL_ENDIANNESS BIT(21) + +/* Transfer type */ +#define WCN36xx_DXE_CTRL_XTYPE_SHIFT 1 +#define WCN36xx_DXE_CTRL_XTYPE_MASK GENMASK(2, WCN36xx_DXE_CTRL_XTYPE_SHIFT) +#define WCN36xx_DXE_CTRL_XTYPE_SET(x) ((x) << WCN36xx_DXE_CTRL_XTYPE_SHIFT) + +/* BMU Threshold select */ +#define WCN36xx_DXE_CTRL_BTHLD_SEL_SHIFT 9 +#define WCN36xx_DXE_CTRL_BTHLD_SEL_MASK GENMASK(12, WCN36xx_DXE_CTRL_BTHLD_SEL_SHIFT) +#define WCN36xx_DXE_CTRL_BTHLD_SEL_SET(x) ((x) << WCN36xx_DXE_CTRL_BTHLD_SEL_SHIFT) + +/* Priority */ +#define WCN36xx_DXE_CTRL_PRIO_SHIFT 13 +#define WCN36xx_DXE_CTRL_PRIO_MASK GENMASK(15, WCN36xx_DXE_CTRL_PRIO_SHIFT) +#define WCN36xx_DXE_CTRL_PRIO_SET(x) ((x) << WCN36xx_DXE_CTRL_PRIO_SHIFT) + +/* BD Template index */ +#define WCN36xx_DXE_CTRL_BDT_IDX_SHIFT 18 +#define WCN36xx_DXE_CTRL_BDT_IDX_MASK GENMASK(19, WCN36xx_DXE_CTRL_BDT_IDX_SHIFT) +#define WCN36xx_DXE_CTRL_BDT_IDX_SET(x) ((x) << WCN36xx_DXE_CTRL_BDT_IDX_SHIFT) + +/* Transfer types: */ +/* Host to host */ +#define WCN36xx_DXE_XTYPE_H2H (0) +/* Host to BMU */ +#define WCN36xx_DXE_XTYPE_H2B (2) +/* BMU to host */ +#define WCN36xx_DXE_XTYPE_B2H (3) + +#define WCN36XX_DXE_CTRL_TX_L (WCN36xx_DXE_CTRL_XTYPE_SET(WCN36xx_DXE_XTYPE_H2B) | \ + WCN36xx_DXE_CTRL_DIQ | WCN36xx_DXE_CTRL_BTHLD_SEL_SET(5) | \ + WCN36xx_DXE_CTRL_PRIO_SET(4) | WCN36xx_DXE_CTRL_INT | \ + WCN36xx_DXE_CTRL_SWAP | WCN36xx_DXE_CTRL_ENDIANNESS) + +#define WCN36XX_DXE_CTRL_TX_H (WCN36xx_DXE_CTRL_XTYPE_SET(WCN36xx_DXE_XTYPE_H2B) | \ + WCN36xx_DXE_CTRL_DIQ | WCN36xx_DXE_CTRL_BTHLD_SEL_SET(7) | \ + WCN36xx_DXE_CTRL_PRIO_SET(6) | WCN36xx_DXE_CTRL_INT | \ + WCN36xx_DXE_CTRL_SWAP | WCN36xx_DXE_CTRL_ENDIANNESS) + +#define WCN36XX_DXE_CTRL_RX_L (WCN36xx_DXE_CTRL_VLD | \ + WCN36xx_DXE_CTRL_XTYPE_SET(WCN36xx_DXE_XTYPE_B2H) | \ + WCN36xx_DXE_CTRL_EOP | WCN36xx_DXE_CTRL_SIQ | \ + WCN36xx_DXE_CTRL_PDU_REL | WCN36xx_DXE_CTRL_BTHLD_SEL_SET(6) | \ + WCN36xx_DXE_CTRL_PRIO_SET(5) | WCN36xx_DXE_CTRL_INT | \ + WCN36xx_DXE_CTRL_SWAP) + +#define WCN36XX_DXE_CTRL_RX_H 
(WCN36xx_DXE_CTRL_VLD | \ + WCN36xx_DXE_CTRL_XTYPE_SET(WCN36xx_DXE_XTYPE_B2H) | \ + WCN36xx_DXE_CTRL_EOP | WCN36xx_DXE_CTRL_SIQ | \ + WCN36xx_DXE_CTRL_PDU_REL | WCN36xx_DXE_CTRL_BTHLD_SEL_SET(8) | \ + WCN36xx_DXE_CTRL_PRIO_SET(6) | WCN36xx_DXE_CTRL_INT | \ + WCN36xx_DXE_CTRL_SWAP) + +#define WCN36XX_DXE_CTRL_TX_H_BD (WCN36xx_DXE_CTRL_VLD | \ + WCN36xx_DXE_CTRL_XTYPE_SET(WCN36xx_DXE_XTYPE_H2B) | \ + WCN36xx_DXE_CTRL_DIQ | WCN36xx_DXE_CTRL_BTHLD_SEL_SET(7) | \ + WCN36xx_DXE_CTRL_PRIO_SET(6) | WCN36xx_DXE_CTRL_SWAP | \ + WCN36xx_DXE_CTRL_ENDIANNESS) + +#define WCN36XX_DXE_CTRL_TX_H_SKB (WCN36xx_DXE_CTRL_VLD | \ + WCN36xx_DXE_CTRL_XTYPE_SET(WCN36xx_DXE_XTYPE_H2B) | \ + WCN36xx_DXE_CTRL_EOP | WCN36xx_DXE_CTRL_DIQ | \ + WCN36xx_DXE_CTRL_BTHLD_SEL_SET(7) | WCN36xx_DXE_CTRL_PRIO_SET(6) | \ + WCN36xx_DXE_CTRL_INT | WCN36xx_DXE_CTRL_SWAP | \ + WCN36xx_DXE_CTRL_ENDIANNESS) + +#define WCN36XX_DXE_CTRL_TX_L_BD (WCN36xx_DXE_CTRL_VLD | \ + WCN36xx_DXE_CTRL_XTYPE_SET(WCN36xx_DXE_XTYPE_H2B) | \ + WCN36xx_DXE_CTRL_DIQ | WCN36xx_DXE_CTRL_BTHLD_SEL_SET(5) | \ + WCN36xx_DXE_CTRL_PRIO_SET(4) | WCN36xx_DXE_CTRL_SWAP | \ + WCN36xx_DXE_CTRL_ENDIANNESS) + +#define WCN36XX_DXE_CTRL_TX_L_SKB (WCN36xx_DXE_CTRL_VLD | \ + WCN36xx_DXE_CTRL_XTYPE_SET(WCN36xx_DXE_XTYPE_H2B) | \ + WCN36xx_DXE_CTRL_EOP | WCN36xx_DXE_CTRL_DIQ | \ + WCN36xx_DXE_CTRL_BTHLD_SEL_SET(5) | WCN36xx_DXE_CTRL_PRIO_SET(4) | \ + WCN36xx_DXE_CTRL_INT | WCN36xx_DXE_CTRL_SWAP | \ + WCN36xx_DXE_CTRL_ENDIANNESS) /* TODO This must calculated properly but not hardcoded */ #define WCN36XX_DXE_WQ_TX_L 0x17 @@ -49,15 +140,106 @@ H2H_TEST_RX_TX = DMA2 #define WCN36XX_DXE_WQ_RX_L 0xB #define WCN36XX_DXE_WQ_RX_H 0x4 -/* DXE descriptor control filed */ -#define WCN36XX_DXE_CTRL_VALID_MASK (0x00000001) +/* Channel enable or restart */ +#define WCN36xx_DXE_CH_CTRL_EN BIT(0) +/* End of packet bit */ +#define WCN36xx_DXE_CH_CTRL_EOP BIT(3) +/* BD Handling bit */ +#define WCN36xx_DXE_CH_CTRL_BDH BIT(4) +/* Source is queue */ +#define WCN36xx_DXE_CH_CTRL_SIQ BIT(5) +/* Destination is queue */ +#define WCN36xx_DXE_CH_CTRL_DIQ BIT(6) +/* Pointer descriptor is queue */ +#define WCN36xx_DXE_CH_CTRL_PIQ BIT(7) +/* Relase PDU when done */ +#define WCN36xx_DXE_CH_CTRL_PDU_REL BIT(8) +/* Stop channel processing */ +#define WCN36xx_DXE_CH_CTRL_STOP BIT(16) +/* Enable external descriptor interrupt */ +#define WCN36xx_DXE_CH_CTRL_INE_ED BIT(17) +/* Enable channel interrupt on errors */ +#define WCN36xx_DXE_CH_CTRL_INE_ERR BIT(18) +/* Enable Channel interrupt when done */ +#define WCN36xx_DXE_CH_CTRL_INE_DONE BIT(19) +/* External descriptor enable */ +#define WCN36xx_DXE_CH_CTRL_EDEN BIT(20) +/* Wait for valid bit */ +#define WCN36xx_DXE_CH_CTRL_EDVEN BIT(21) +/* Endianness is little endian*/ +#define WCN36xx_DXE_CH_CTRL_ENDIANNESS BIT(26) +/* Abort transfer */ +#define WCN36xx_DXE_CH_CTRL_ABORT BIT(27) +/* Long descriptor format */ +#define WCN36xx_DXE_CH_CTRL_DFMT BIT(28) +/* Endian byte swap enable */ +#define WCN36xx_DXE_CH_CTRL_SWAP BIT(31) + +/* Transfer type */ +#define WCN36xx_DXE_CH_CTRL_XTYPE_SHIFT 1 +#define WCN36xx_DXE_CH_CTRL_XTYPE_MASK GENMASK(2, WCN36xx_DXE_CH_CTRL_XTYPE_SHIFT) +#define WCN36xx_DXE_CH_CTRL_XTYPE_SET(x) ((x) << WCN36xx_DXE_CH_CTRL_XTYPE_SHIFT) + +/* Channel BMU Threshold select */ +#define WCN36xx_DXE_CH_CTRL_BTHLD_SEL_SHIFT 9 +#define WCN36xx_DXE_CH_CTRL_BTHLD_SEL_MASK GENMASK(12, WCN36xx_DXE_CH_CTRL_BTHLD_SEL_SHIFT) +#define WCN36xx_DXE_CH_CTRL_BTHLD_SEL_SET(x) ((x) << WCN36xx_DXE_CH_CTRL_BTHLD_SEL_SHIFT) + +/* Channel Priority */ +#define 
WCN36xx_DXE_CH_CTRL_PRIO_SHIFT 13 +#define WCN36xx_DXE_CH_CTRL_PRIO_MASK GENMASK(15, WCN36xx_DXE_CH_CTRL_PRIO_SHIFT) +#define WCN36xx_DXE_CH_CTRL_PRIO_SET(x) ((x) << WCN36xx_DXE_CH_CTRL_PRIO_SHIFT) + +/* Counter select */ +#define WCN36xx_DXE_CH_CTRL_SEL_SHIFT 22 +#define WCN36xx_DXE_CH_CTRL_SEL_MASK GENMASK(25, WCN36xx_DXE_CH_CTRL_SEL_SHIFT) +#define WCN36xx_DXE_CH_CTRL_SEL_SET(x) ((x) << WCN36xx_DXE_CH_CTRL_SEL_SHIFT) + +/* Channel BD template index */ +#define WCN36xx_DXE_CH_CTRL_BDT_IDX_SHIFT 29 +#define WCN36xx_DXE_CH_CTRL_BDT_IDX_MASK GENMASK(30, WCN36xx_DXE_CH_CTRL_BDT_IDX_SHIFT) +#define WCN36xx_DXE_CH_CTRL_BDT_IDX_SET(x) ((x) << WCN36xx_DXE_CH_CTRL_BDT_IDX_SHIFT) -/* TODO This must calculated properly but not hardcoded */ /* DXE default control register values */ -#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_L 0x847EAD2F -#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_H 0x84FED12F -#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_H 0x853ECF4D -#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_L 0x843e8b4d +#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_L (WCN36xx_DXE_CH_CTRL_EN | \ + WCN36xx_DXE_CH_CTRL_XTYPE_SET(WCN36xx_DXE_XTYPE_B2H) | \ + WCN36xx_DXE_CH_CTRL_EOP | WCN36xx_DXE_CH_CTRL_SIQ | \ + WCN36xx_DXE_CH_CTRL_PDU_REL | WCN36xx_DXE_CH_CTRL_BTHLD_SEL_SET(6) | \ + WCN36xx_DXE_CH_CTRL_PRIO_SET(5) | WCN36xx_DXE_CH_CTRL_INE_ED | \ + WCN36xx_DXE_CH_CTRL_INE_ERR | WCN36xx_DXE_CH_CTRL_INE_DONE | \ + WCN36xx_DXE_CH_CTRL_EDEN | WCN36xx_DXE_CH_CTRL_EDVEN | \ + WCN36xx_DXE_CH_CTRL_SEL_SET(1) | WCN36xx_DXE_CH_CTRL_ENDIANNESS | \ + WCN36xx_DXE_CH_CTRL_SWAP) + +#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_H (WCN36xx_DXE_CH_CTRL_EN | \ + WCN36xx_DXE_CH_CTRL_XTYPE_SET(WCN36xx_DXE_XTYPE_B2H) | \ + WCN36xx_DXE_CH_CTRL_EOP | WCN36xx_DXE_CH_CTRL_SIQ | \ + WCN36xx_DXE_CH_CTRL_PDU_REL | WCN36xx_DXE_CH_CTRL_BTHLD_SEL_SET(8) | \ + WCN36xx_DXE_CH_CTRL_PRIO_SET(6) | WCN36xx_DXE_CH_CTRL_INE_ED | \ + WCN36xx_DXE_CH_CTRL_INE_ERR | WCN36xx_DXE_CH_CTRL_INE_DONE | \ + WCN36xx_DXE_CH_CTRL_EDEN | WCN36xx_DXE_CH_CTRL_EDVEN | \ + WCN36xx_DXE_CH_CTRL_SEL_SET(3) | WCN36xx_DXE_CH_CTRL_ENDIANNESS | \ + WCN36xx_DXE_CH_CTRL_SWAP) + +#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_H (WCN36xx_DXE_CH_CTRL_EN | \ + WCN36xx_DXE_CH_CTRL_XTYPE_SET(WCN36xx_DXE_XTYPE_H2B) | \ + WCN36xx_DXE_CH_CTRL_EOP | WCN36xx_DXE_CH_CTRL_DIQ | \ + WCN36xx_DXE_CH_CTRL_PDU_REL | WCN36xx_DXE_CH_CTRL_BTHLD_SEL_SET(7) | \ + WCN36xx_DXE_CH_CTRL_PRIO_SET(6) | WCN36xx_DXE_CH_CTRL_INE_ED | \ + WCN36xx_DXE_CH_CTRL_INE_ERR | WCN36xx_DXE_CH_CTRL_INE_DONE | \ + WCN36xx_DXE_CH_CTRL_EDEN | WCN36xx_DXE_CH_CTRL_EDVEN | \ + WCN36xx_DXE_CH_CTRL_SEL_SET(4) | WCN36xx_DXE_CH_CTRL_ENDIANNESS | \ + WCN36xx_DXE_CH_CTRL_SWAP) + +#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_L (WCN36xx_DXE_CH_CTRL_EN | \ + WCN36xx_DXE_CH_CTRL_XTYPE_SET(WCN36xx_DXE_XTYPE_H2B) | \ + WCN36xx_DXE_CH_CTRL_EOP | WCN36xx_DXE_CH_CTRL_DIQ | \ + WCN36xx_DXE_CH_CTRL_PDU_REL | WCN36xx_DXE_CH_CTRL_BTHLD_SEL_SET(5) | \ + WCN36xx_DXE_CH_CTRL_PRIO_SET(4) | WCN36xx_DXE_CH_CTRL_INE_ED | \ + WCN36xx_DXE_CH_CTRL_INE_ERR | WCN36xx_DXE_CH_CTRL_INE_DONE | \ + WCN36xx_DXE_CH_CTRL_EDEN | WCN36xx_DXE_CH_CTRL_EDVEN | \ + WCN36xx_DXE_CH_CTRL_SEL_SET(0) | WCN36xx_DXE_CH_CTRL_ENDIANNESS | \ + WCN36xx_DXE_CH_CTRL_SWAP) /* Common DXE registers */ #define WCN36XX_DXE_MEM_CSR (WCN36XX_DXE_MEM_REG + 0x00) @@ -80,6 +262,10 @@ H2H_TEST_RX_TX = DMA2 #define WCN36XX_DXE_0_INT_DONE_CLR (WCN36XX_DXE_MEM_REG + 0x38) #define WCN36XX_DXE_0_INT_ERR_CLR (WCN36XX_DXE_MEM_REG + 0x3C) +#define WCN36XX_CH_STAT_INT_DONE_MASK 0x00008000 +#define WCN36XX_CH_STAT_INT_ERR_MASK 0x00004000 +#define 
WCN36XX_CH_STAT_INT_ED_MASK 0x00002000 + #define WCN36XX_DXE_0_CH0_STATUS (WCN36XX_DXE_MEM_REG + 0x404) #define WCN36XX_DXE_0_CH1_STATUS (WCN36XX_DXE_MEM_REG + 0x444) #define WCN36XX_DXE_0_CH2_STATUS (WCN36XX_DXE_MEM_REG + 0x484) @@ -266,6 +452,7 @@ struct wcn36xx_dxe_mem_pool { dma_addr_t phy_addr; }; +struct wcn36xx_tx_bd; struct wcn36xx_vif; int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn); void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn); @@ -277,8 +464,8 @@ void wcn36xx_dxe_deinit(struct wcn36xx *wcn); int wcn36xx_dxe_init_channels(struct wcn36xx *wcn); int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, struct wcn36xx_vif *vif_priv, + struct wcn36xx_tx_bd *bd, struct sk_buff *skb, bool is_low); void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status); -void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low); #endif /* _DXE_H_ */ diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index ab5be6d2c691..69d6be59d97f 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -261,7 +261,7 @@ static void wcn36xx_feat_caps_info(struct wcn36xx *wcn) for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) { if (get_feat_caps(wcn->fw_feat_caps, i)) - wcn36xx_info("FW Cap %s\n", wcn36xx_get_cap_name(i)); + wcn36xx_dbg(WCN36XX_DBG_MAC, "FW Cap %s\n", wcn36xx_get_cap_name(i)); } } @@ -666,16 +666,13 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw, { struct wcn36xx *wcn = hw->priv; - if (!wcn36xx_smd_stop_hw_scan(wcn)) { - struct cfg80211_scan_info scan_info = { .aborted = true }; - - ieee80211_scan_completed(wcn->hw, &scan_info); - } - mutex_lock(&wcn->scan_lock); wcn->scan_aborted = true; mutex_unlock(&wcn->scan_lock); + /* ieee80211_scan_completed will be called on FW scan indication */ + wcn36xx_smd_stop_hw_scan(wcn); + cancel_work_sync(&wcn->scan_work); } @@ -1155,8 +1152,6 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) wcn->hw->wiphy->cipher_suites = cipher_suites; wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); - wcn->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; - #ifdef CONFIG_PM wcn->hw->wiphy->wowlan = &wowlan_support; #endif @@ -1283,6 +1278,7 @@ static int wcn36xx_probe(struct platform_device *pdev) wcn = hw->priv; wcn->hw = hw; wcn->dev = &pdev->dev; + wcn->first_boot = true; mutex_init(&wcn->conf_mutex); mutex_init(&wcn->hal_mutex); mutex_init(&wcn->scan_lock); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 2a4871ca9c72..8932af5e4d8d 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -409,15 +409,17 @@ static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len) wcn->fw_minor = rsp->start_rsp_params.version.minor; wcn->fw_major = rsp->start_rsp_params.version.major; - wcn36xx_info("firmware WLAN version '%s' and CRM version '%s'\n", - wcn->wlan_version, wcn->crm_version); - - wcn36xx_info("firmware API %u.%u.%u.%u, %u stations, %u bssids\n", - wcn->fw_major, wcn->fw_minor, - wcn->fw_version, wcn->fw_revision, - rsp->start_rsp_params.stations, - rsp->start_rsp_params.bssids); + if (wcn->first_boot) { + wcn->first_boot = false; + wcn36xx_info("firmware WLAN version '%s' and CRM version '%s'\n", + wcn->wlan_version, wcn->crm_version); + wcn36xx_info("firmware API %u.%u.%u.%u, %u stations, %u bssids\n", + wcn->fw_major, wcn->fw_minor, + wcn->fw_version, wcn->fw_revision, + rsp->start_rsp_params.stations, + rsp->start_rsp_params.bssids); 
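The dxe.h rework above replaces hardcoded DXE control words with named bit definitions. A quick compile-time check (names abbreviated here; BIT() assumed to be 1u << n as in the kernel) confirms that the new composition reproduces the magic constants it replaces, e.g. 0x328a44 and 0x32ce44 for the old TX_L/TX_H control words. Illustrative sketch, not part of the patch:

/* Check that the named DXE control bits compose back into the
 * old magic numbers.
 */
#include <assert.h>

#define BIT(n)       (1u << (n))
#define XTYPE_SET(x) ((x) << 1)  /* transfer type, H2B = 2 */
#define DIQ          BIT(6)      /* destination is a queue */
#define BTHLD(x)     ((x) << 9)  /* BMU threshold select */
#define PRIO(x)      ((x) << 13) /* priority */
#define INT_ED       BIT(17)     /* interrupt on descriptor done */
#define SWAP         BIT(20)     /* endian byte swap enable */
#define ENDIAN       BIT(21)     /* master endianness */

static_assert((XTYPE_SET(2) | DIQ | BTHLD(5) | PRIO(4) | INT_ED |
               SWAP | ENDIAN) == 0x328a44, "old WCN36XX_DXE_CTRL_TX_L");
static_assert((XTYPE_SET(2) | DIQ | BTHLD(7) | PRIO(6) | INT_ED |
               SWAP | ENDIAN) == 0x32ce44, "old WCN36XX_DXE_CTRL_TX_H");

int main(void) { return 0; }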
+ } return 0; } @@ -2138,6 +2140,8 @@ static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len) case WCN36XX_HAL_SCAN_IND_COMPLETED: mutex_lock(&wcn->scan_lock); wcn->scan_req = NULL; + if (wcn->scan_aborted) + scan_info.aborted = true; mutex_unlock(&wcn->scan_lock); ieee80211_scan_completed(wcn->hw, &scan_info); break; @@ -2407,54 +2411,63 @@ static void wcn36xx_ind_smd_work(struct work_struct *work) { struct wcn36xx *wcn = container_of(work, struct wcn36xx, hal_ind_work); - struct wcn36xx_hal_msg_header *msg_header; - struct wcn36xx_hal_ind_msg *hal_ind_msg; - unsigned long flags; - spin_lock_irqsave(&wcn->hal_ind_lock, flags); + for (;;) { + struct wcn36xx_hal_msg_header *msg_header; + struct wcn36xx_hal_ind_msg *hal_ind_msg; + unsigned long flags; - hal_ind_msg = list_first_entry(&wcn->hal_ind_queue, - struct wcn36xx_hal_ind_msg, - list); - list_del(wcn->hal_ind_queue.next); - spin_unlock_irqrestore(&wcn->hal_ind_lock, flags); + spin_lock_irqsave(&wcn->hal_ind_lock, flags); - msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg; + if (list_empty(&wcn->hal_ind_queue)) { + spin_unlock_irqrestore(&wcn->hal_ind_lock, flags); + return; + } - switch (msg_header->msg_type) { - case WCN36XX_HAL_COEX_IND: - case WCN36XX_HAL_DEL_BA_IND: - case WCN36XX_HAL_AVOID_FREQ_RANGE_IND: - break; - case WCN36XX_HAL_OTA_TX_COMPL_IND: - wcn36xx_smd_tx_compl_ind(wcn, - hal_ind_msg->msg, - hal_ind_msg->msg_len); - break; - case WCN36XX_HAL_MISSED_BEACON_IND: - wcn36xx_smd_missed_beacon_ind(wcn, - hal_ind_msg->msg, - hal_ind_msg->msg_len); - break; - case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: - wcn36xx_smd_delete_sta_context_ind(wcn, - hal_ind_msg->msg, - hal_ind_msg->msg_len); - break; - case WCN36XX_HAL_PRINT_REG_INFO_IND: - wcn36xx_smd_print_reg_info_ind(wcn, - hal_ind_msg->msg, - hal_ind_msg->msg_len); - break; - case WCN36XX_HAL_SCAN_OFFLOAD_IND: - wcn36xx_smd_hw_scan_ind(wcn, hal_ind_msg->msg, - hal_ind_msg->msg_len); - break; - default: - wcn36xx_err("SMD_EVENT (%d) not supported\n", - msg_header->msg_type); + hal_ind_msg = list_first_entry(&wcn->hal_ind_queue, + struct wcn36xx_hal_ind_msg, + list); + list_del(&hal_ind_msg->list); + spin_unlock_irqrestore(&wcn->hal_ind_lock, flags); + + msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg; + + switch (msg_header->msg_type) { + case WCN36XX_HAL_COEX_IND: + case WCN36XX_HAL_DEL_BA_IND: + case WCN36XX_HAL_AVOID_FREQ_RANGE_IND: + break; + case WCN36XX_HAL_OTA_TX_COMPL_IND: + wcn36xx_smd_tx_compl_ind(wcn, + hal_ind_msg->msg, + hal_ind_msg->msg_len); + break; + case WCN36XX_HAL_MISSED_BEACON_IND: + wcn36xx_smd_missed_beacon_ind(wcn, + hal_ind_msg->msg, + hal_ind_msg->msg_len); + break; + case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: + wcn36xx_smd_delete_sta_context_ind(wcn, + hal_ind_msg->msg, + hal_ind_msg->msg_len); + break; + case WCN36XX_HAL_PRINT_REG_INFO_IND: + wcn36xx_smd_print_reg_info_ind(wcn, + hal_ind_msg->msg, + hal_ind_msg->msg_len); + break; + case WCN36XX_HAL_SCAN_OFFLOAD_IND: + wcn36xx_smd_hw_scan_ind(wcn, hal_ind_msg->msg, + hal_ind_msg->msg_len); + break; + default: + wcn36xx_err("SMD_EVENT (%d) not supported\n", + msg_header->msg_type); + } + + kfree(hal_ind_msg); } - kfree(hal_ind_msg); } int wcn36xx_smd_open(struct wcn36xx *wcn) { diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 22304edc5948..b1768ed6b0be 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -272,21 +272,9 @@ int wcn36xx_start_tx(struct 
wcn36xx *wcn, bool is_low = ieee80211_is_data(hdr->frame_control); bool bcast = is_broadcast_ether_addr(hdr->addr1) || is_multicast_ether_addr(hdr->addr1); - struct wcn36xx_tx_bd *bd = wcn36xx_dxe_get_next_bd(wcn, is_low); - - if (!bd) { - /* - * TX DXE are used in pairs. One for the BD and one for the - * actual frame. The BD DXE's has a preallocated buffer while - * the skb ones does not. If this isn't true something is really - * wierd. TODO: Recover from this situation - */ - - wcn36xx_err("bd address may not be NULL for BD DXE\n"); - return -EINVAL; - } + struct wcn36xx_tx_bd bd; - memset(bd, 0, sizeof(*bd)); + memset(&bd, 0, sizeof(bd)); wcn36xx_dbg(WCN36XX_DBG_TX, "tx skb %p len %d fc %04x sn %d %s %s\n", @@ -296,10 +284,10 @@ int wcn36xx_start_tx(struct wcn36xx *wcn, wcn36xx_dbg_dump(WCN36XX_DBG_TX_DUMP, "", skb->data, skb->len); - bd->dpu_rf = WCN36XX_BMU_WQ_TX; + bd.dpu_rf = WCN36XX_BMU_WQ_TX; - bd->tx_comp = !!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS); - if (bd->tx_comp) { + bd.tx_comp = !!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS); + if (bd.tx_comp) { wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n"); spin_lock_irqsave(&wcn->dxe_lock, flags); if (wcn->tx_ack_skb) { @@ -321,13 +309,13 @@ int wcn36xx_start_tx(struct wcn36xx *wcn, /* Data frames served first*/ if (is_low) - wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, skb, bcast); + wcn36xx_set_tx_data(&bd, wcn, &vif_priv, sta_priv, skb, bcast); else /* MGMT and CTRL frames are handeld here*/ - wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, skb, bcast); + wcn36xx_set_tx_mgmt(&bd, wcn, &vif_priv, skb, bcast); - buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32)); - bd->tx_bd_sign = 0xbdbdbdbd; + buff_to_be((u32 *)&bd, sizeof(bd)/sizeof(u32)); + bd.tx_bd_sign = 0xbdbdbdbd; - return wcn36xx_dxe_tx_frame(wcn, vif_priv, skb, is_low); + return wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low); } diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 81017e6703b4..5854adf43f3a 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -192,6 +192,8 @@ struct wcn36xx { u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1]; u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH + 1]; + bool first_boot; + /* IRQs */ int tx_irq; int rx_irq; diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 768f63f38341..cdbb393863f3 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -17,8 +18,10 @@ #include <linux/etherdevice.h> #include <linux/moduleparam.h> #include <net/netlink.h> +#include <net/cfg80211.h> #include "wil6210.h" #include "wmi.h" +#include "fw.h" #define WIL_MAX_ROC_DURATION_MS 5000 @@ -258,9 +261,10 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type) return -EOPNOTSUPP; } -int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, +int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, struct station_info *sinfo) { + struct wil6210_priv *wil = vif_to_wil(vif); struct wmi_notify_req_cmd cmd = { .cid = cid, .interval_usec = 0, @@ -272,17 +276,17 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, struct wil_net_stats *stats = &wil->sta[cid].stats; int rc; - rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20); if (rc) return rc; - wil_dbg_wmi(wil, "Link status for CID %d: {\n" + wil_dbg_wmi(wil, "Link status for CID %d MID %d: {\n" " MCS %d TSF 0x%016llx\n" " BF status 0x%08x RSSI %d SQI %d%%\n" " Tx Tpt %d goodput %d Rx goodput %d\n" " Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n", - cid, le16_to_cpu(reply.evt.bf_mcs), + cid, vif->mid, le16_to_cpu(reply.evt.bf_mcs), le64_to_cpu(reply.evt.tsf), reply.evt.status, reply.evt.rssi, reply.evt.sqi, @@ -315,7 +319,7 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, sinfo->tx_packets = stats->tx_packets; sinfo->tx_failed = stats->tx_errors; - if (test_bit(wil_status_fwconnected, wil->status)) { + if (test_bit(wil_vif_fwconnected, vif->status)) { sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) @@ -331,30 +335,34 @@ static int wil_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, const u8 *mac, struct station_info *sinfo) { + struct wil6210_vif *vif = ndev_to_vif(ndev); struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; - int cid = wil_find_cid(wil, mac); + int cid = wil_find_cid(wil, vif->mid, mac); - wil_dbg_misc(wil, "get_station: %pM CID %d\n", mac, cid); + wil_dbg_misc(wil, "get_station: %pM CID %d MID %d\n", mac, cid, + vif->mid); if (cid < 0) return cid; - rc = wil_cid_fill_sinfo(wil, cid, sinfo); + rc = wil_cid_fill_sinfo(vif, cid, sinfo); return rc; } /* - * Find @idx-th active STA for station dump. + * Find @idx-th active STA for specific MID for station dump. 
*/ -static int wil_find_cid_by_idx(struct wil6210_priv *wil, int idx) +static int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx) { int i; for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { if (wil->sta[i].status == wil_sta_unused) continue; + if (wil->sta[i].mid != mid) + continue; if (idx == 0) return i; idx--; } @@ -367,17 +375,19 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { + struct wil6210_vif *vif = ndev_to_vif(dev); struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; - int cid = wil_find_cid_by_idx(wil, idx); + int cid = wil_find_cid_by_idx(wil, vif->mid, idx); if (cid < 0) return -ENOENT; ether_addr_copy(mac, wil->sta[cid].addr); - wil_dbg_misc(wil, "dump_station: %pM CID %d\n", mac, cid); + wil_dbg_misc(wil, "dump_station: %pM CID %d MID %d\n", mac, cid, + vif->mid); - rc = wil_cid_fill_sinfo(wil, cid, sinfo); + rc = wil_cid_fill_sinfo(vif, cid, sinfo); return rc; } @@ -388,7 +398,7 @@ static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy, struct wil6210_priv *wil = wiphy_to_wil(wiphy); wil_dbg_misc(wil, "start_p2p_device: entered\n"); - wil->p2p.p2p_dev_started = 1; + wil->p2p_dev_started = 1; return 0; } @@ -396,20 +406,66 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct wil_p2p_info *p2p = &wil->p2p; - if (!p2p->p2p_dev_started) + if (!wil->p2p_dev_started) return; wil_dbg_misc(wil, "stop_p2p_device: entered\n"); mutex_lock(&wil->mutex); - mutex_lock(&wil->p2p_wdev_mutex); + mutex_lock(&wil->vif_mutex); wil_p2p_stop_radio_operations(wil); - p2p->p2p_dev_started = 0; - mutex_unlock(&wil->p2p_wdev_mutex); + wil->p2p_dev_started = 0; + mutex_unlock(&wil->vif_mutex); mutex_unlock(&wil->mutex); } +static int wil_cfg80211_validate_add_iface(struct wil6210_priv *wil, + enum nl80211_iftype new_type) +{ + int i; + struct wireless_dev *wdev; + struct iface_combination_params params = { + .num_different_channels = 1, + }; + + for (i = 0; i < wil->max_vifs; i++) { + if (wil->vifs[i]) { + wdev = vif_to_wdev(wil->vifs[i]); + params.iftype_num[wdev->iftype]++; + } + } + params.iftype_num[new_type]++; + return cfg80211_check_combinations(wil->wiphy, &params); +} + +static int wil_cfg80211_validate_change_iface(struct wil6210_priv *wil, + struct wil6210_vif *vif, + enum nl80211_iftype new_type) +{ + int i, ret = 0; + struct wireless_dev *wdev; + struct iface_combination_params params = { + .num_different_channels = 1, + }; + bool check_combos = false; + + for (i = 0; i < wil->max_vifs; i++) { + struct wil6210_vif *vif_pos = wil->vifs[i]; + + if (vif_pos && vif != vif_pos) { + wdev = vif_to_wdev(vif_pos); + params.iftype_num[wdev->iftype]++; + check_combos = true; + } + } + + if (check_combos) { + params.iftype_num[new_type]++; + ret = cfg80211_check_combinations(wil->wiphy, &params); + } + return ret; +} + static struct wireless_dev * wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, @@ -417,51 +473,137 @@ wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name, struct vif_params *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct net_device *ndev = wil_to_ndev(wil); - struct wireless_dev *p2p_wdev; + struct net_device *ndev_main = wil->main_ndev, *ndev; + struct wil6210_vif *vif; + struct wireless_dev *p2p_wdev, *wdev; + int rc; - wil_dbg_misc(wil, "add_iface\n"); + wil_dbg_misc(wil, "add_iface, type %d\n", type); - if (type != 
NL80211_IFTYPE_P2P_DEVICE) { - wil_err(wil, "unsupported iftype %d\n", type); - return ERR_PTR(-EINVAL); + /* P2P device is not a real virtual interface, it is a management-only + * interface that shares the main interface. + * Skip concurrency checks here. + */ + if (type == NL80211_IFTYPE_P2P_DEVICE) { + if (wil->p2p_wdev) { + wil_err(wil, "P2P_DEVICE interface already created\n"); + return ERR_PTR(-EINVAL); + } + + p2p_wdev = kzalloc(sizeof(*p2p_wdev), GFP_KERNEL); + if (!p2p_wdev) + return ERR_PTR(-ENOMEM); + + p2p_wdev->iftype = type; + p2p_wdev->wiphy = wiphy; + /* use our primary ethernet address */ + ether_addr_copy(p2p_wdev->address, ndev_main->perm_addr); + + wil->p2p_wdev = p2p_wdev; + + return p2p_wdev; } - if (wil->p2p_wdev) { - wil_err(wil, "P2P_DEVICE interface already created\n"); + if (!wil->wiphy->n_iface_combinations) { + wil_err(wil, "virtual interfaces not supported\n"); return ERR_PTR(-EINVAL); } - p2p_wdev = kzalloc(sizeof(*p2p_wdev), GFP_KERNEL); - if (!p2p_wdev) - return ERR_PTR(-ENOMEM); + rc = wil_cfg80211_validate_add_iface(wil, type); + if (rc) { + wil_err(wil, "iface validation failed, err=%d\n", rc); + return ERR_PTR(rc); + } - p2p_wdev->iftype = type; - p2p_wdev->wiphy = wiphy; - /* use our primary ethernet address */ - ether_addr_copy(p2p_wdev->address, ndev->perm_addr); + vif = wil_vif_alloc(wil, name, name_assign_type, type); + if (IS_ERR(vif)) + return ERR_CAST(vif); + + ndev = vif_to_ndev(vif); + ether_addr_copy(ndev->perm_addr, ndev_main->perm_addr); + if (is_valid_ether_addr(params->macaddr)) { + ether_addr_copy(ndev->dev_addr, params->macaddr); + } else { + ether_addr_copy(ndev->dev_addr, ndev_main->perm_addr); + ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << vif->mid)) | + 0x2; /* locally administered */ + } + wdev = vif_to_wdev(vif); + ether_addr_copy(wdev->address, ndev->dev_addr); - wil->p2p_wdev = p2p_wdev; + rc = wil_vif_add(wil, vif); + if (rc) + goto out; - return p2p_wdev; + wil_info(wil, "added VIF, mid %d iftype %d MAC %pM\n", + vif->mid, type, wdev->address); + return wdev; +out: + wil_vif_free(vif); + return ERR_PTR(rc); +} + +int wil_vif_prepare_stop(struct wil6210_vif *vif) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + struct wireless_dev *wdev = vif_to_wdev(vif); + struct net_device *ndev; + int rc; + + if (wdev->iftype != NL80211_IFTYPE_AP) + return 0; + + ndev = vif_to_ndev(vif); + if (netif_carrier_ok(ndev)) { + rc = wmi_pcp_stop(vif); + if (rc) { + wil_info(wil, "failed to stop AP, status %d\n", + rc); + /* continue */ + } + wil_bcast_fini(vif); + netif_carrier_off(ndev); + } + + return 0; } static int wil_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = wdev_to_vif(wil, wdev); + int rc; wil_dbg_misc(wil, "del_iface\n"); - if (wdev != wil->p2p_wdev) { - wil_err(wil, "delete of incorrect interface 0x%p\n", wdev); + if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) { + if (wdev != wil->p2p_wdev) { + wil_err(wil, "delete of incorrect interface 0x%p\n", + wdev); + return -EINVAL; + } + + wil_cfg80211_stop_p2p_device(wiphy, wdev); + wil_p2p_wdev_free(wil); + return 0; + } + + if (vif->mid == 0) { + wil_err(wil, "cannot remove the main interface\n"); return -EINVAL; } - wil_cfg80211_stop_p2p_device(wiphy, wdev); - wil_p2p_wdev_free(wil); + rc = wil_vif_prepare_stop(vif); + if (rc) + goto out; + + wil_info(wil, "deleted VIF, mid %d iftype %d MAC %pM\n", + vif->mid, wdev->iftype, wdev->address); - return 0; + wil_vif_remove(wil, vif->mid); 
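When wil_cfg80211_add_iface() above creates a secondary VIF without a user-supplied MAC, it derives one from the primary address by flipping bit mid of the first octet and forcing the locally-administered bit (| 0x2). A standalone sketch of that derivation follows; the base octet is a made-up value, not taken from the patch:

/* Illustration of the per-MID locally administered MAC derivation. */
#include <stdio.h>

static unsigned char derive_first_octet(unsigned char base, int mid)
{
	/* flip bit `mid`, then mark the address locally administered */
	return (unsigned char)((base ^ (1u << mid)) | 0x2);
}

int main(void)
{
	unsigned char base = 0x04; /* hypothetical first octet of perm_addr */
	int mid;

	for (mid = 1; mid <= 3; mid++)
		printf("mid %d -> first octet %02x\n", mid,
		       derive_first_octet(base, mid));
	return 0;
}

For this base each MID lands on a distinct first octet; the remaining five octets stay equal to the primary address.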
+out: + return rc; } static int wil_cfg80211_change_iface(struct wiphy *wiphy, @@ -470,12 +612,26 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy, struct vif_params *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct wireless_dev *wdev = wil_to_wdev(wil); + struct wil6210_vif *vif = ndev_to_vif(ndev); + struct wireless_dev *wdev = vif_to_wdev(vif); int rc; + bool fw_reset = false; wil_dbg_misc(wil, "change_iface: type=%d\n", type); - if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) { + if (wiphy->n_iface_combinations) { + rc = wil_cfg80211_validate_change_iface(wil, vif, type); + if (rc) { + wil_err(wil, "iface validation failed, err=%d\n", rc); + return rc; + } + } + + /* do not reset FW when there are active VIFs, + * because it can cause significant disruption + */ + if (!wil_has_other_active_ifaces(wil, ndev, true, false) && + netif_running(ndev) && !wil_is_recovery_blocked(wil)) { wil_dbg_misc(wil, "interface is up. resetting...\n"); mutex_lock(&wil->mutex); __wil_down(wil); @@ -484,6 +640,7 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy, if (rc) return rc; + fw_reset = true; } switch (type) { @@ -500,8 +657,18 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy, return -EOPNOTSUPP; } - wdev->iftype = type; + if (vif->mid != 0 && wil_has_active_ifaces(wil, true, false)) { + if (!fw_reset) + wil_vif_prepare_stop(vif); + rc = wmi_port_delete(wil, vif->mid); + if (rc) + return rc; + rc = wmi_port_allocate(wil, vif->mid, ndev->dev_addr, type); + if (rc) + return rc; + } + wdev->iftype = type; return 0; } @@ -510,6 +677,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wireless_dev *wdev = request->wdev; + struct wil6210_vif *vif = wdev_to_vif(wil, wdev); struct { struct wmi_start_scan_cmd cmd; u16 chnl[4]; @@ -537,35 +705,38 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, mutex_lock(&wil->mutex); - mutex_lock(&wil->p2p_wdev_mutex); - if (wil->scan_request || wil->p2p.discovery_started) { + mutex_lock(&wil->vif_mutex); + if (vif->scan_request || vif->p2p.discovery_started) { wil_err(wil, "Already scanning\n"); - mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->vif_mutex); rc = -EAGAIN; goto out; } - mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->vif_mutex); if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) { - if (!wil->p2p.p2p_dev_started) { + if (!wil->p2p_dev_started) { wil_err(wil, "P2P search requested on stopped P2P device\n"); rc = -EIO; goto out; } /* social scan on P2P_DEVICE is handled as p2p search */ if (wil_p2p_is_social_scan(request)) { - wil->scan_request = request; - wil->radio_wdev = wdev; - rc = wil_p2p_search(wil, request); + vif->scan_request = request; + if (vif->mid == 0) + wil->radio_wdev = wdev; + rc = wil_p2p_search(vif, request); if (rc) { - wil->radio_wdev = wil_to_wdev(wil); - wil->scan_request = NULL; + if (vif->mid == 0) + wil->radio_wdev = + wil->main_ndev->ieee80211_ptr; + vif->scan_request = NULL; } goto out; } } - (void)wil_p2p_stop_discovery(wil); + (void)wil_p2p_stop_discovery(vif); wil_dbg_misc(wil, "Start scan_request 0x%p\n", request); wil_dbg_misc(wil, "SSID count: %d", request->n_ssids); @@ -578,18 +749,18 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, } if (request->n_ssids) - rc = wmi_set_ssid(wil, request->ssids[0].ssid_len, + rc = wmi_set_ssid(vif, request->ssids[0].ssid_len, request->ssids[0].ssid); else - rc = wmi_set_ssid(wil, 0, NULL); + rc = wmi_set_ssid(vif, 0, NULL); if (rc) { 
wil_err(wil, "set SSID for scan request failed: %d\n", rc); goto out; } - wil->scan_request = request; - mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO); + vif->scan_request = request; + mod_timer(&vif->scan_timer, jiffies + WIL6210_SCAN_TO); memset(&cmd, 0, sizeof(cmd)); cmd.cmd.scan_type = WMI_ACTIVE_SCAN; @@ -616,7 +787,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, else wil_dbg_misc(wil, "Scan has no IE's\n"); - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie); + rc = wmi_set_ie(vif, WMI_FRAME_PROBE_REQ, + request->ie_len, request->ie); if (rc) goto out_restore; @@ -625,15 +797,18 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, wil_dbg_misc(wil, "active scan with discovery_mode=1\n"); } - wil->radio_wdev = wdev; - rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) + - cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); + if (vif->mid == 0) + wil->radio_wdev = wdev; + rc = wmi_send(wil, WMI_START_SCAN_CMDID, vif->mid, + &cmd, sizeof(cmd.cmd) + + cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); out_restore: if (rc) { - del_timer_sync(&wil->scan_timer); - wil->radio_wdev = wil_to_wdev(wil); - wil->scan_request = NULL; + del_timer_sync(&vif->scan_timer); + if (vif->mid == 0) + wil->radio_wdev = wil->main_ndev->ieee80211_ptr; + vif->scan_request = NULL; } out: mutex_unlock(&wil->mutex); @@ -644,27 +819,28 @@ static void wil_cfg80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = wdev_to_vif(wil, wdev); wil_dbg_misc(wil, "wdev=0x%p iftype=%d\n", wdev, wdev->iftype); mutex_lock(&wil->mutex); - mutex_lock(&wil->p2p_wdev_mutex); + mutex_lock(&wil->vif_mutex); - if (!wil->scan_request) + if (!vif->scan_request) goto out; - if (wdev != wil->scan_request->wdev) { + if (wdev != vif->scan_request->wdev) { wil_dbg_misc(wil, "abort scan was called on the wrong iface\n"); goto out; } - if (wil->radio_wdev == wil->p2p_wdev) + if (wdev == wil->p2p_wdev && wil->radio_wdev == wil->p2p_wdev) wil_p2p_stop_radio_operations(wil); else - wil_abort_scan(wil, true); + wil_abort_scan(vif, true); out: - mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->vif_mutex); mutex_unlock(&wil->mutex); } @@ -715,6 +891,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, struct cfg80211_connect_params *sme) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = ndev_to_vif(ndev); struct cfg80211_bss *bss; struct wmi_connect_cmd conn; const u8 *ssid_eid; @@ -723,11 +900,11 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, int rc = 0; enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS; - wil_dbg_misc(wil, "connect\n"); + wil_dbg_misc(wil, "connect, mid=%d\n", vif->mid); wil_print_connect_params(wil, sme); - if (test_bit(wil_status_fwconnecting, wil->status) || - test_bit(wil_status_fwconnected, wil->status)) + if (test_bit(wil_vif_fwconnecting, vif->status) || + test_bit(wil_vif_fwconnected, vif->status)) return -EALREADY; if (sme->ie_len > WMI_MAX_IE_LEN) { @@ -758,18 +935,18 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, rc = -ENOENT; goto out; } - wil->privacy = sme->privacy; - wil->pbss = sme->pbss; + vif->privacy = sme->privacy; + vif->pbss = sme->pbss; - if (wil->privacy) { + if (vif->privacy) { /* For secure assoc, remove old keys */ - rc = wmi_del_cipher_key(wil, 0, bss->bssid, + rc = wmi_del_cipher_key(vif, 0, bss->bssid, WMI_KEY_USE_PAIRWISE); if (rc) { wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) 
failed\n"); goto out; } - rc = wmi_del_cipher_key(wil, 0, bss->bssid, + rc = wmi_del_cipher_key(vif, 0, bss->bssid, WMI_KEY_USE_RX_GROUP); if (rc) { wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n"); @@ -781,7 +958,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, * elements. Send it also in case it's empty, to erase previously set * ies in FW. */ - rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie); + rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie); if (rc) goto out; @@ -799,7 +976,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, bss->capability); goto out; } - if (wil->privacy) { + if (vif->privacy) { if (rsn_eid) { /* regular secure connection */ conn.dot11_auth_mode = WMI_AUTH11_SHARED; conn.auth_mode = WMI_AUTH_WPA2_PSK; @@ -831,18 +1008,19 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, ether_addr_copy(conn.bssid, bss->bssid); ether_addr_copy(conn.dst_mac, bss->bssid); - set_bit(wil_status_fwconnecting, wil->status); + set_bit(wil_vif_fwconnecting, vif->status); - rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn)); + rc = wmi_send(wil, WMI_CONNECT_CMDID, vif->mid, &conn, sizeof(conn)); if (rc == 0) { netif_carrier_on(ndev); - wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS); - wil->bss = bss; + if (!wil_has_other_active_ifaces(wil, ndev, false, true)) + wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS); + vif->bss = bss; /* Connect can take lots of time */ - mod_timer(&wil->connect_timer, + mod_timer(&vif->connect_timer, jiffies + msecs_to_jiffies(5000)); } else { - clear_bit(wil_status_fwconnecting, wil->status); + clear_bit(wil_vif_fwconnecting, vif->status); } out: @@ -857,17 +1035,19 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy, { int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = ndev_to_vif(ndev); - wil_dbg_misc(wil, "disconnect: reason=%d\n", reason_code); + wil_dbg_misc(wil, "disconnect: reason=%d, mid=%d\n", + reason_code, vif->mid); - if (!(test_bit(wil_status_fwconnecting, wil->status) || - test_bit(wil_status_fwconnected, wil->status))) { + if (!(test_bit(wil_vif_fwconnecting, vif->status) || + test_bit(wil_vif_fwconnected, vif->status))) { wil_err(wil, "Disconnect was called while disconnected\n"); return 0; } - wil->locally_generated_disc = true; - rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0, + vif->locally_generated_disc = true; + rc = wmi_call(wil, WMI_DISCONNECT_CMDID, vif->mid, NULL, 0, WMI_DISCONNECT_EVENTID, NULL, 0, WIL6210_DISCONNECT_TO_MS); if (rc) @@ -903,6 +1083,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, const u8 *buf = params->buf; size_t len = params->len, total; struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = wdev_to_vif(wil, wdev); int rc; bool tx_status = false; struct ieee80211_mgmt *mgmt_frame = (void *)buf; @@ -919,7 +1100,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, * different from currently "listened" channel and fail if it is. 
*/ - wil_dbg_misc(wil, "mgmt_tx\n"); + wil_dbg_misc(wil, "mgmt_tx mid %d\n", vif->mid); wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true); @@ -940,7 +1121,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, cmd->len = cpu_to_le16(len); memcpy(cmd->payload, buf, len); - rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, total, + rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total, WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); if (rc == 0) tx_status = !evt.evt.status; @@ -962,10 +1143,10 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy, return 0; } -static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, +static enum wmi_key_usage wil_detect_key_usage(struct wireless_dev *wdev, bool pairwise) { - struct wireless_dev *wdev = wil_to_wdev(wil); + struct wil6210_priv *wil = wdev_to_wil(wdev); enum wmi_key_usage rc; if (pairwise) { @@ -993,7 +1174,7 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, } static struct wil_sta_info * -wil_find_sta_by_key_usage(struct wil6210_priv *wil, +wil_find_sta_by_key_usage(struct wil6210_priv *wil, u8 mid, enum wmi_key_usage key_usage, const u8 *mac_addr) { int cid = -EINVAL; @@ -1003,9 +1184,9 @@ wil_find_sta_by_key_usage(struct wil6210_priv *wil, /* supplicant provides Rx group key in STA mode with NULL MAC address */ if (mac_addr) - cid = wil_find_cid(wil, mac_addr); + cid = wil_find_cid(wil, mid, mac_addr); else if (key_usage == WMI_KEY_USE_RX_GROUP) - cid = wil_find_cid_by_idx(wil, 0); + cid = wil_find_cid_by_idx(wil, mid, 0); if (cid < 0) { wil_err(wil, "No CID for %pM %s\n", mac_addr, key_usage_str[key_usage]); @@ -1082,9 +1263,12 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, struct key_params *params) { int rc; + struct wil6210_vif *vif = ndev_to_vif(ndev); struct wil6210_priv *wil = wiphy_to_wil(wiphy); - enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); - struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage, + struct wireless_dev *wdev = vif_to_wdev(vif); + enum wmi_key_usage key_usage = wil_detect_key_usage(wdev, pairwise); + struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, vif->mid, + key_usage, mac_addr); if (!params) { @@ -1114,7 +1298,7 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, return -EINVAL; } - rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len, + rc = wmi_add_cipher_key(vif, key_index, mac_addr, params->key_len, params->key, key_usage); if (!rc) wil_set_crypto_rx(key_index, key_usage, cs, params); @@ -1127,9 +1311,12 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy, u8 key_index, bool pairwise, const u8 *mac_addr) { + struct wil6210_vif *vif = ndev_to_vif(ndev); struct wil6210_priv *wil = wiphy_to_wil(wiphy); - enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); - struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage, + struct wireless_dev *wdev = vif_to_wdev(vif); + enum wmi_key_usage key_usage = wil_detect_key_usage(wdev, pairwise); + struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, vif->mid, + key_usage, mac_addr); wil_dbg_misc(wil, "del_key: %pM %s[%d]\n", mac_addr, @@ -1142,7 +1329,7 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy, if (!IS_ERR_OR_NULL(cs)) wil_del_rx_key(key_index, key_usage, cs); - return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage); + return wmi_del_cipher_key(vif, key_index, mac_addr, key_usage); } /* Need to be present or wiphy_new() will WARN */ @@ -1179,10 
+1366,11 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy, u64 cookie) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = wdev_to_vif(wil, wdev); wil_dbg_misc(wil, "cancel_remain_on_channel\n"); - return wil_p2p_cancel_listen(wil, cookie); + return wil_p2p_cancel_listen(vif, cookie); } /** @@ -1275,11 +1463,10 @@ static void wil_print_bcon_data(struct cfg80211_beacon_data *b) } /* internal functions for device reset and starting AP */ -static int _wil_cfg80211_set_ies(struct wiphy *wiphy, +static int _wil_cfg80211_set_ies(struct wil6210_vif *vif, struct cfg80211_beacon_data *bcon) { int rc; - struct wil6210_priv *wil = wiphy_to_wil(wiphy); u16 len = 0, proberesp_len = 0; u8 *ies = NULL, *proberesp = NULL; @@ -1300,20 +1487,21 @@ static int _wil_cfg80211_set_ies(struct wiphy *wiphy, if (rc) goto out; - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, len, ies); + rc = wmi_set_ie(vif, WMI_FRAME_PROBE_RESP, len, ies); if (rc) goto out; if (bcon->assocresp_ies) - rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, + rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, bcon->assocresp_ies); else - rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, len, ies); + rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_RESP, len, ies); #if 0 /* to use beacon IE's, remove this #if 0 */ if (rc) goto out; - rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail); + rc = wmi_set_ie(vif, WMI_FRAME_BEACON, + bcon->tail_len, bcon->tail); #endif out: kfree(ies); @@ -1328,6 +1516,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, u8 hidden_ssid, u32 pbss) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = ndev_to_vif(ndev); int rc; struct wireless_dev *wdev = ndev->ieee80211_ptr; u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); @@ -1336,7 +1525,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, if (pbss) wmi_nettype = WMI_NETTYPE_P2P; - wil_dbg_misc(wil, "start_ap: is_go=%d\n", is_go); + wil_dbg_misc(wil, "start_ap: mid=%d, is_go=%d\n", vif->mid, is_go); if (is_go && !pbss) { wil_err(wil, "P2P GO must be in PBSS\n"); return -ENOTSUPP; @@ -1346,42 +1535,46 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, mutex_lock(&wil->mutex); - __wil_down(wil); - rc = __wil_up(wil); - if (rc) - goto out; + if (!wil_has_other_active_ifaces(wil, ndev, true, false)) { + __wil_down(wil); + rc = __wil_up(wil); + if (rc) + goto out; + } - rc = wmi_set_ssid(wil, ssid_len, ssid); + rc = wmi_set_ssid(vif, ssid_len, ssid); if (rc) goto out; - rc = _wil_cfg80211_set_ies(wiphy, bcon); + rc = _wil_cfg80211_set_ies(vif, bcon); if (rc) goto out; - wil->privacy = privacy; - wil->channel = chan; - wil->hidden_ssid = hidden_ssid; - wil->pbss = pbss; + vif->privacy = privacy; + vif->channel = chan; + vif->hidden_ssid = hidden_ssid; + vif->pbss = pbss; netif_carrier_on(ndev); - wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS); + if (!wil_has_other_active_ifaces(wil, ndev, false, true)) + wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS); - rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go); + rc = wmi_pcp_start(vif, bi, wmi_nettype, chan, hidden_ssid, is_go); if (rc) goto err_pcp_start; - rc = wil_bcast_init(wil); + rc = wil_bcast_init(vif); if (rc) goto err_bcast; goto out; /* success */ err_bcast: - wmi_pcp_stop(wil); + wmi_pcp_stop(vif); err_pcp_start: netif_carrier_off(ndev); - wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); + if (!wil_has_other_active_ifaces(wil, ndev, false, true)) + wil6210_bus_request(wil, 
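/* Sketch, not from this patch: the per-interface state this function
 * reads and writes through "vif" used to live directly in
 * wil6210_priv. The hunks above imply a wil6210_vif along these lines
 * (the real definition, in wil6210.h, carries more members):
 */
struct wil6210_vif {
	struct wireless_dev wdev;
	struct net_device *ndev;
	u8 mid;				/* MAC ID; 0 is the main interface */
	unsigned long status[1];	/* wil_vif_fwconnecting/fwconnected */
	struct cfg80211_bss *bss;	/* connect target in STA mode */
	u32 privacy;
	u8 channel;
	u8 hidden_ssid;
	u32 pbss;
	int bcast_vring;
	int ap_isolate;
	bool locally_generated_disc;
	struct cfg80211_scan_request *scan_request;
	struct timer_list scan_timer;
	struct timer_list connect_timer;
	struct work_struct disconnect_worker;
	struct work_struct probe_client_worker;
	struct list_head probe_client_pending;
	struct mutex probe_client_mutex;
	struct wil_p2p_info p2p;
};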
WIL_DEFAULT_BUS_REQUEST_KBPS); out: mutex_unlock(&wil->mutex); return rc; @@ -1392,10 +1585,11 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, struct cfg80211_beacon_data *bcon) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = ndev_to_vif(ndev); int rc; u32 privacy = 0; - wil_dbg_misc(wil, "change_beacon\n"); + wil_dbg_misc(wil, "change_beacon, mid=%d\n", vif->mid); wil_print_bcon_data(bcon); if (bcon->tail && @@ -1404,20 +1598,20 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, privacy = 1; /* in case privacy has changed, need to restart the AP */ - if (wil->privacy != privacy) { + if (vif->privacy != privacy) { struct wireless_dev *wdev = ndev->ieee80211_ptr; wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n", - wil->privacy, privacy); + vif->privacy, privacy); rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid, wdev->ssid_len, privacy, wdev->beacon_interval, - wil->channel, bcon, - wil->hidden_ssid, - wil->pbss); + vif->channel, bcon, + vif->hidden_ssid, + vif->pbss); } else { - rc = _wil_cfg80211_set_ies(wiphy, bcon); + rc = _wil_cfg80211_set_ies(vif, bcon); } return rc; @@ -1484,20 +1678,27 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = ndev_to_vif(ndev); + bool last; - wil_dbg_misc(wil, "stop_ap\n"); + wil_dbg_misc(wil, "stop_ap, mid=%d\n", vif->mid); netif_carrier_off(ndev); - wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); - wil_set_recovery_state(wil, fw_recovery_idle); - - set_bit(wil_status_resetting, wil->status); + last = !wil_has_other_active_ifaces(wil, ndev, false, true); + if (last) { + wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); + wil_set_recovery_state(wil, fw_recovery_idle); + set_bit(wil_status_resetting, wil->status); + } mutex_lock(&wil->mutex); - wmi_pcp_stop(wil); + wmi_pcp_stop(vif); - __wil_down(wil); + if (last) + __wil_down(wil); + else + wil_bcast_fini(vif); mutex_unlock(&wil->mutex); @@ -1509,9 +1710,11 @@ static int wil_cfg80211_add_station(struct wiphy *wiphy, const u8 *mac, struct station_parameters *params) { + struct wil6210_vif *vif = ndev_to_vif(dev); struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "add station %pM aid %d\n", mac, params->aid); + wil_dbg_misc(wil, "add station %pM aid %d mid %d\n", + mac, params->aid, vif->mid); if (!disable_ap_sme) { wil_err(wil, "not supported with AP SME enabled\n"); @@ -1523,20 +1726,21 @@ static int wil_cfg80211_add_station(struct wiphy *wiphy, return -EINVAL; } - return wmi_new_sta(wil, mac, params->aid); + return wmi_new_sta(vif, mac, params->aid); } static int wil_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, struct station_del_parameters *params) { + struct wil6210_vif *vif = ndev_to_vif(dev); struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "del_station: %pM, reason=%d\n", params->mac, - params->reason_code); + wil_dbg_misc(wil, "del_station: %pM, reason=%d mid=%d\n", + params->mac, params->reason_code, vif->mid); mutex_lock(&wil->mutex); - wil6210_disconnect(wil, params->mac, params->reason_code, false); + wil6210_disconnect(vif, params->mac, params->reason_code, false); mutex_unlock(&wil->mutex); return 0; @@ -1547,13 +1751,15 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy, const u8 *mac, struct station_parameters *params) { + struct wil6210_vif *vif = ndev_to_vif(dev); struct wil6210_priv *wil = wiphy_to_wil(wiphy); int authorize; int cid, 
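/* Sketch, not from this patch: wil_find_cid() now filters on the MID,
 * so lookups only match stations owned by the calling interface. A
 * plausible shape (the function lives in main.c, outside this hunk),
 * relying on priv_init() setting unused entries to U8_MAX:
 */
int wil_find_cid(struct wil6210_priv *wil, u8 mid, const u8 *mac)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
		if (wil->sta[i].mid == mid &&
		    wil->sta[i].status != wil_sta_unused &&
		    ether_addr_equal(wil->sta[i].addr, mac))
			return i;
	}
	return -ENOENT;
}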
i; struct vring_tx_data *txdata = NULL; - wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x\n", mac, - params->sta_flags_mask, params->sta_flags_set); + wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x mid %d\n", + mac, params->sta_flags_mask, params->sta_flags_set, + vif->mid); if (!disable_ap_sme) { wil_dbg_misc(wil, "not supported with AP SME enabled\n"); @@ -1563,7 +1769,7 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy, if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) return 0; - cid = wil_find_cid(wil, mac); + cid = wil_find_cid(wil, vif->mid, mac); if (cid < 0) { wil_err(wil, "station not found\n"); return -ENOLINK; @@ -1590,63 +1796,67 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy, /* probe_client handling */ static void wil_probe_client_handle(struct wil6210_priv *wil, + struct wil6210_vif *vif, struct wil_probe_client_req *req) { - struct net_device *ndev = wil_to_ndev(wil); + struct net_device *ndev = vif_to_ndev(vif); struct wil_sta_info *sta = &wil->sta[req->cid]; /* assume STA is alive if it is still connected, * else FW will disconnect it */ bool alive = (sta->status == wil_sta_connected); - cfg80211_probe_status(ndev, sta->addr, req->cookie, alive, GFP_KERNEL); + cfg80211_probe_status(ndev, sta->addr, req->cookie, alive, + 0, false, GFP_KERNEL); } -static struct list_head *next_probe_client(struct wil6210_priv *wil) +static struct list_head *next_probe_client(struct wil6210_vif *vif) { struct list_head *ret = NULL; - mutex_lock(&wil->probe_client_mutex); + mutex_lock(&vif->probe_client_mutex); - if (!list_empty(&wil->probe_client_pending)) { - ret = wil->probe_client_pending.next; + if (!list_empty(&vif->probe_client_pending)) { + ret = vif->probe_client_pending.next; list_del(ret); } - mutex_unlock(&wil->probe_client_mutex); + mutex_unlock(&vif->probe_client_mutex); return ret; } void wil_probe_client_worker(struct work_struct *work) { - struct wil6210_priv *wil = container_of(work, struct wil6210_priv, - probe_client_worker); + struct wil6210_vif *vif = container_of(work, struct wil6210_vif, + probe_client_worker); + struct wil6210_priv *wil = vif_to_wil(vif); struct wil_probe_client_req *req; struct list_head *lh; - while ((lh = next_probe_client(wil)) != NULL) { + while ((lh = next_probe_client(vif)) != NULL) { req = list_entry(lh, struct wil_probe_client_req, list); - wil_probe_client_handle(wil, req); + wil_probe_client_handle(wil, vif, req); kfree(req); } } -void wil_probe_client_flush(struct wil6210_priv *wil) +void wil_probe_client_flush(struct wil6210_vif *vif) { struct wil_probe_client_req *req, *t; + struct wil6210_priv *wil = vif_to_wil(vif); wil_dbg_misc(wil, "probe_client_flush\n"); - mutex_lock(&wil->probe_client_mutex); + mutex_lock(&vif->probe_client_mutex); - list_for_each_entry_safe(req, t, &wil->probe_client_pending, list) { + list_for_each_entry_safe(req, t, &vif->probe_client_pending, list) { list_del(&req->list); kfree(req); } - mutex_unlock(&wil->probe_client_mutex); + mutex_unlock(&vif->probe_client_mutex); } static int wil_cfg80211_probe_client(struct wiphy *wiphy, @@ -1654,10 +1864,12 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy, const u8 *peer, u64 *cookie) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = ndev_to_vif(dev); struct wil_probe_client_req *req; - int cid = wil_find_cid(wil, peer); + int cid = wil_find_cid(wil, vif->mid, peer); - wil_dbg_misc(wil, "probe_client: %pM => CID %d\n", peer, cid); + wil_dbg_misc(wil, "probe_client: %pM 
=> CID %d MID %d\n", + peer, cid, vif->mid); if (cid < 0) return -ENOLINK; @@ -1669,12 +1881,12 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy, req->cid = cid; req->cookie = cid; - mutex_lock(&wil->probe_client_mutex); - list_add_tail(&req->list, &wil->probe_client_pending); - mutex_unlock(&wil->probe_client_mutex); + mutex_lock(&vif->probe_client_mutex); + list_add_tail(&req->list, &vif->probe_client_pending); + mutex_unlock(&vif->probe_client_mutex); *cookie = req->cookie; - queue_work(wil->wq_service, &wil->probe_client_worker); + queue_work(wil->wq_service, &vif->probe_client_worker); return 0; } @@ -1683,11 +1895,12 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy, struct bss_parameters *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = ndev_to_vif(dev); if (params->ap_isolate >= 0) { - wil_dbg_misc(wil, "change_bss: ap_isolate %d => %d\n", - wil->ap_isolate, params->ap_isolate); - wil->ap_isolate = params->ap_isolate; + wil_dbg_misc(wil, "change_bss: ap_isolate MID %d, %d => %d\n", + vif->mid, vif->ap_isolate, params->ap_isolate); + vif->ap_isolate = params->ap_isolate; } return 0; @@ -1731,10 +1944,10 @@ static int wil_cfg80211_suspend(struct wiphy *wiphy, wil_dbg_pm(wil, "suspending\n"); mutex_lock(&wil->mutex); - mutex_lock(&wil->p2p_wdev_mutex); + mutex_lock(&wil->vif_mutex); wil_p2p_stop_radio_operations(wil); - wil_abort_scan(wil, true); - mutex_unlock(&wil->p2p_wdev_mutex); + wil_abort_scan_all_vifs(wil, true); + mutex_unlock(&wil->vif_mutex); mutex_unlock(&wil->mutex); out: @@ -1756,8 +1969,12 @@ wil_cfg80211_sched_scan_start(struct wiphy *wiphy, struct cfg80211_sched_scan_request *request) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = ndev_to_vif(dev); int i, rc; + if (vif->mid != 0) + return -EOPNOTSUPP; + wil_dbg_misc(wil, "sched scan start: n_ssids %d, ie_len %zu, flags 0x%x\n", request->n_ssids, request->ie_len, request->flags); @@ -1791,7 +2008,8 @@ wil_cfg80211_sched_scan_start(struct wiphy *wiphy, i, sp->interval, sp->iterations); } - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie); + rc = wmi_set_ie(vif, WMI_FRAME_PROBE_REQ, + request->ie_len, request->ie); if (rc) return rc; return wmi_start_sched_scan(wil, request); @@ -1802,8 +2020,12 @@ wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil6210_vif *vif = ndev_to_vif(dev); int rc; + if (vif->mid != 0) + return -EOPNOTSUPP; + rc = wmi_stop_sched_scan(wil); /* device would return error if it thinks PNO is already stopped. 
* ignore the return code so user space and driver gets back in-sync @@ -1892,57 +2114,132 @@ static void wil_wiphy_init(struct wiphy *wiphy) #endif } -struct wireless_dev *wil_cfg80211_init(struct device *dev) +int wil_cfg80211_iface_combinations_from_fw( + struct wil6210_priv *wil, const struct wil_fw_record_concurrency *conc) { - int rc = 0; - struct wireless_dev *wdev; + struct wiphy *wiphy = wil_to_wiphy(wil); + u32 total_limits = 0; + u16 n_combos; + const struct wil_fw_concurrency_combo *combo; + const struct wil_fw_concurrency_limit *limit; + struct ieee80211_iface_combination *iface_combinations; + struct ieee80211_iface_limit *iface_limit; + int i, j; + + if (wiphy->iface_combinations) { + wil_dbg_misc(wil, "iface_combinations already set, skipping\n"); + return 0; + } - dev_dbg(dev, "%s()\n", __func__); + combo = conc->combos; + n_combos = le16_to_cpu(conc->n_combos); + for (i = 0; i < n_combos; i++) { + total_limits += combo->n_limits; + limit = combo->limits + combo->n_limits; + combo = (struct wil_fw_concurrency_combo *)limit; + } - wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); - if (!wdev) - return ERR_PTR(-ENOMEM); + iface_combinations = + kzalloc(n_combos * sizeof(struct ieee80211_iface_combination) + + total_limits * sizeof(struct ieee80211_iface_limit), + GFP_KERNEL); + if (!iface_combinations) + return -ENOMEM; + iface_limit = (struct ieee80211_iface_limit *)(iface_combinations + + n_combos); + combo = conc->combos; + for (i = 0; i < n_combos; i++) { + iface_combinations[i].max_interfaces = combo->max_interfaces; + iface_combinations[i].num_different_channels = + combo->n_diff_channels; + iface_combinations[i].beacon_int_infra_match = + combo->same_bi; + iface_combinations[i].n_limits = combo->n_limits; + wil_dbg_misc(wil, + "iface_combination %d: max_if %d, num_ch %d, bi_match %d\n", + i, iface_combinations[i].max_interfaces, + iface_combinations[i].num_different_channels, + iface_combinations[i].beacon_int_infra_match); + limit = combo->limits; + for (j = 0; j < combo->n_limits; j++) { + iface_limit[j].max = le16_to_cpu(limit[j].max); + iface_limit[j].types = le16_to_cpu(limit[j].types); + wil_dbg_misc(wil, + "limit %d: max %d types 0x%x\n", j, + iface_limit[j].max, iface_limit[j].types); + } + iface_combinations[i].limits = iface_limit; + iface_limit += combo->n_limits; + limit += combo->n_limits; + combo = (struct wil_fw_concurrency_combo *)limit; + } - wdev->wiphy = wiphy_new(&wil_cfg80211_ops, - sizeof(struct wil6210_priv)); - if (!wdev->wiphy) { - rc = -ENOMEM; - goto out; + wil_dbg_misc(wil, "multiple VIFs supported, n_mids %d\n", conc->n_mids); + wil->max_vifs = conc->n_mids + 1; /* including main interface */ + if (wil->max_vifs > WIL_MAX_VIFS) { + wil_info(wil, "limited number of VIFs supported(%d, FW %d)\n", + WIL_MAX_VIFS, wil->max_vifs); + wil->max_vifs = WIL_MAX_VIFS; } + wiphy->n_iface_combinations = n_combos; + wiphy->iface_combinations = iface_combinations; + return 0; +} - set_wiphy_dev(wdev->wiphy, dev); - wil_wiphy_init(wdev->wiphy); +struct wil6210_priv *wil_cfg80211_init(struct device *dev) +{ + struct wiphy *wiphy; + struct wil6210_priv *wil; + struct ieee80211_channel *ch; - return wdev; + dev_dbg(dev, "%s()\n", __func__); -out: - kfree(wdev); + /* Note: the wireless_dev structure is no longer allocated here. + * Instead, it is allocated as part of the net_device structure + * for main interface and each VIF. 
+ */ + wiphy = wiphy_new(&wil_cfg80211_ops, sizeof(struct wil6210_priv)); + if (!wiphy) + return ERR_PTR(-ENOMEM); - return ERR_PTR(rc); + set_wiphy_dev(wiphy, dev); + wil_wiphy_init(wiphy); + + wil = wiphy_to_wil(wiphy); + wil->wiphy = wiphy; + + /* default monitor channel */ + ch = wiphy->bands[NL80211_BAND_60GHZ]->channels; + cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT); + + return wil; } -void wil_wdev_free(struct wil6210_priv *wil) +void wil_cfg80211_deinit(struct wil6210_priv *wil) { - struct wireless_dev *wdev = wil_to_wdev(wil); + struct wiphy *wiphy = wil_to_wiphy(wil); dev_dbg(wil_to_dev(wil), "%s()\n", __func__); - if (!wdev) + if (!wiphy) return; - wiphy_free(wdev->wiphy); - kfree(wdev); + kfree(wiphy->iface_combinations); + wiphy->iface_combinations = NULL; + + wiphy_free(wiphy); + /* do not access wil6210_priv after returning from here */ } void wil_p2p_wdev_free(struct wil6210_priv *wil) { struct wireless_dev *p2p_wdev; - mutex_lock(&wil->p2p_wdev_mutex); + mutex_lock(&wil->vif_mutex); p2p_wdev = wil->p2p_wdev; wil->p2p_wdev = NULL; - wil->radio_wdev = wil_to_wdev(wil); - mutex_unlock(&wil->p2p_wdev_mutex); + wil->radio_wdev = wil->main_ndev->ieee80211_ptr; + mutex_unlock(&wil->vif_mutex); if (p2p_wdev) { cfg80211_unregister_wdev(p2p_wdev); kfree(p2p_wdev); @@ -1970,6 +2267,7 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy, const void *data, int data_len) { struct wil6210_priv *wil = wdev_to_wil(wdev); + struct wil6210_vif *vif = wdev_to_vif(wil, wdev); int rc; struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1]; u16 sector_index; @@ -2026,8 +2324,8 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy, cmd.sector_type = sector_type; cmd.rf_modules_vec = rf_modules_vec & 0xFF; memset(&reply, 0, sizeof(reply)); - rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd), - WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID, + rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, vif->mid, + &cmd, sizeof(cmd), WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID, &reply, sizeof(reply), 500); if (rc) @@ -2089,6 +2387,7 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy, const void *data, int data_len) { struct wil6210_priv *wil = wdev_to_wil(wdev); + struct wil6210_vif *vif = wdev_to_vif(wil, wdev); int rc, tmp; struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1]; struct nlattr *tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1]; @@ -2183,8 +2482,8 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy, cmd.rf_modules_vec = rf_modules_vec & 0xFF; memset(&reply, 0, sizeof(reply)); - rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd), - WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID, + rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, vif->mid, + &cmd, sizeof(cmd), WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID, &reply, sizeof(reply), 500); if (rc) @@ -2197,6 +2496,7 @@ static int wil_rf_sector_get_selected(struct wiphy *wiphy, const void *data, int data_len) { struct wil6210_priv *wil = wdev_to_wil(wdev); + struct wil6210_vif *vif = wdev_to_vif(wil, wdev); int rc; struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1]; u8 sector_type, mac_addr[ETH_ALEN]; @@ -2230,13 +2530,13 @@ static int wil_rf_sector_get_selected(struct wiphy *wiphy, if (tb[QCA_ATTR_MAC_ADDR]) { ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR])); - cid = wil_find_cid(wil, mac_addr); + cid = wil_find_cid(wil, vif->mid, mac_addr); if (cid < 0) { wil_err(wil, "invalid MAC address %pM\n", mac_addr); return -ENOENT; } } else { - if (test_bit(wil_status_fwconnected, wil->status)) { + if 
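/* Sketch, not from this patch: the vif/ndev/wdev conversion helpers
 * used throughout are defined in wil6210.h, not shown here. Since a
 * VIF is the netdev private area and embeds its wireless_dev, they
 * are presumably trivial:
 */
static inline struct wil6210_vif *ndev_to_vif(struct net_device *ndev)
{
	return netdev_priv(ndev);
}

static inline struct net_device *vif_to_ndev(struct wil6210_vif *vif)
{
	return vif->ndev;
}

static inline struct wireless_dev *vif_to_wdev(struct wil6210_vif *vif)
{
	return &vif->wdev;
}

static inline struct wil6210_vif *wdev_to_vif(struct wil6210_priv *wil,
					      struct wireless_dev *wdev)
{
	/* the P2P device shares the main interface's VIF */
	if (wdev == wil->p2p_wdev)
		return ndev_to_vif(wil->main_ndev);
	return container_of(wdev, struct wil6210_vif, wdev);
}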
(test_bit(wil_vif_fwconnected, vif->status)) { wil_err(wil, "must specify MAC address when connected\n"); return -EINVAL; } @@ -2246,7 +2546,7 @@ static int wil_rf_sector_get_selected(struct wiphy *wiphy, cmd.cid = (u8)cid; cmd.sector_type = sector_type; memset(&reply, 0, sizeof(reply)); - rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID, + rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID, &reply, sizeof(reply), @@ -2279,7 +2579,7 @@ nla_put_failure: } static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil, - u16 sector_index, + u8 mid, u16 sector_index, u8 sector_type, u8 cid) { struct wmi_set_selected_rf_sector_index_cmd cmd; @@ -2294,7 +2594,7 @@ static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil, cmd.sector_type = sector_type; cmd.cid = (u8)cid; memset(&reply, 0, sizeof(reply)); - rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID, + rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID, mid, &cmd, sizeof(cmd), WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID, &reply, sizeof(reply), @@ -2309,6 +2609,7 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy, const void *data, int data_len) { struct wil6210_priv *wil = wdev_to_wil(wdev); + struct wil6210_vif *vif = wdev_to_vif(wil, wdev); int rc; struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1]; u16 sector_index; @@ -2348,7 +2649,7 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy, if (tb[QCA_ATTR_MAC_ADDR]) { ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR])); if (!is_broadcast_ether_addr(mac_addr)) { - cid = wil_find_cid(wil, mac_addr); + cid = wil_find_cid(wil, vif->mid, mac_addr); if (cid < 0) { wil_err(wil, "invalid MAC address %pM\n", mac_addr); @@ -2362,7 +2663,7 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy, cid = -1; } } else { - if (test_bit(wil_status_fwconnected, wil->status)) { + if (test_bit(wil_vif_fwconnected, vif->status)) { wil_err(wil, "must specify MAC address when connected\n"); return -EINVAL; } @@ -2370,17 +2671,20 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy, } if (cid >= 0) { - rc = wil_rf_sector_wmi_set_selected(wil, sector_index, + rc = wil_rf_sector_wmi_set_selected(wil, vif->mid, sector_index, sector_type, cid); } else { /* unlock all cids */ rc = wil_rf_sector_wmi_set_selected( - wil, WMI_INVALID_RF_SECTOR_INDEX, sector_type, - WIL_CID_ALL); + wil, vif->mid, WMI_INVALID_RF_SECTOR_INDEX, + sector_type, WIL_CID_ALL); if (rc == -EINVAL) { for (i = 0; i < WIL6210_MAX_CID; i++) { + if (wil->sta[i].mid != vif->mid) + continue; rc = wil_rf_sector_wmi_set_selected( - wil, WMI_INVALID_RF_SECTOR_INDEX, + wil, vif->mid, + WMI_INVALID_RF_SECTOR_INDEX, sector_type, i); /* the FW will silently ignore and return * success for unused cid, so abort the loop diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c index 217a4591bde4..a9befb971cc4 100644 --- a/drivers/net/wireless/ath/wil6210/debug.c +++ b/drivers/net/wireless/ath/wil6210/debug.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2013,2016 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -25,7 +26,7 @@ void __wil_err(struct wil6210_priv *wil, const char *fmt, ...) 
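/* Sketch, not from this patch: wil6210_priv is now the wiphy private
 * area (see wiphy_new(..., sizeof(struct wil6210_priv)) above), which
 * is why wil_cfg80211_deinit() must not touch wil after wiphy_free().
 * The conversions are then presumably:
 */
static inline struct wil6210_priv *wiphy_to_wil(struct wiphy *wiphy)
{
	return (struct wil6210_priv *)wiphy_priv(wiphy);
}

static inline struct wiphy *wil_to_wiphy(struct wil6210_priv *wil)
{
	return wil->wiphy;
}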
va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; - netdev_err(wil_to_ndev(wil), "%pV", &vaf); + netdev_err(wil->main_ndev, "%pV", &vaf); trace_wil6210_log_err(&vaf); va_end(args); } @@ -41,7 +42,7 @@ void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...) va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; - netdev_err(wil_to_ndev(wil), "%pV", &vaf); + netdev_err(wil->main_ndev, "%pV", &vaf); trace_wil6210_log_err(&vaf); va_end(args); } @@ -57,7 +58,7 @@ void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...) va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; - netdev_dbg(wil_to_ndev(wil), "%pV", &vaf); + netdev_dbg(wil->main_ndev, "%pV", &vaf); trace_wil6210_log_dbg(&vaf); va_end(args); } @@ -70,7 +71,7 @@ void __wil_info(struct wil6210_priv *wil, const char *fmt, ...) va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; - netdev_info(wil_to_ndev(wil), "%pV", &vaf); + netdev_info(wil->main_ndev, "%pV", &vaf); trace_wil6210_log_info(&vaf); va_end(args); } diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 4a4888246e8c..8c90b3111f0b 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -621,7 +622,7 @@ static ssize_t wil_write_file_reset(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { struct wil6210_priv *wil = file->private_data; - struct net_device *ndev = wil_to_ndev(wil); + struct net_device *ndev = wil->main_ndev; /** * BUG: @@ -716,27 +717,44 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf, if (rc < 2) return -EINVAL; - if (0 == strcmp(cmd, "add")) { - if (rc < 3) { - wil_err(wil, "BACK: add require at least 2 params\n"); + if ((strcmp(cmd, "add") == 0) || + (strcmp(cmd, "del_tx") == 0)) { + struct vring_tx_data *txdata; + + if (p1 < 0 || p1 >= WIL6210_MAX_TX_RINGS) { + wil_err(wil, "BACK: invalid ring id %d\n", p1); return -EINVAL; } - if (rc < 4) - p3 = 0; - wmi_addba(wil, p1, p2, p3); - } else if (0 == strcmp(cmd, "del_tx")) { - if (rc < 3) - p2 = WLAN_REASON_QSTA_LEAVE_QBSS; - wmi_delba_tx(wil, p1, p2); - } else if (0 == strcmp(cmd, "del_rx")) { + txdata = &wil->vring_tx_data[p1]; + if (strcmp(cmd, "add") == 0) { + if (rc < 3) { + wil_err(wil, "BACK: add require at least 2 params\n"); + return -EINVAL; + } + if (rc < 4) + p3 = 0; + wmi_addba(wil, txdata->mid, p1, p2, p3); + } else { + if (rc < 3) + p2 = WLAN_REASON_QSTA_LEAVE_QBSS; + wmi_delba_tx(wil, txdata->mid, p1, p2); + } + } else if (strcmp(cmd, "del_rx") == 0) { + struct wil_sta_info *sta; + if (rc < 3) { wil_err(wil, "BACK: del_rx require at least 2 params\n"); return -EINVAL; } + if (p1 < 0 || p1 >= WIL6210_MAX_CID) { + wil_err(wil, "BACK: invalid CID %d\n", p1); + return -EINVAL; + } if (rc < 4) p3 = WLAN_REASON_QSTA_LEAVE_QBSS; - wmi_delba_rx(wil, mk_cidxtid(p1, p2), p3); + sta = &wil->sta[p1]; + wmi_delba_rx(wil, sta->mid, mk_cidxtid(p1, p2), p3); } else { wil_err(wil, "BACK: Unrecognized command \"%s\"\n", cmd); return -EINVAL; @@ -855,7 +873,7 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, { struct wil6210_priv *wil = file->private_data; struct wiphy *wiphy = wil_to_wiphy(wil); - struct wireless_dev 
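/* Note: cmd/p1/p2/p3 come from parsing at the top of wil_write_back(),
 * outside this hunk, presumably something like
 *
 *	rc = sscanf(kbuf, "%8s %d %d %d", cmd, &p1, &p2, &p3);
 *
 * so "add <ring> <agg_wsize> [<timeout>]" starts a block-ack session
 * on a TX ring, while "del_tx <ring> [<reason>]" and
 * "del_rx <cid> <tid> [<reason>]" tear sessions down. The new bounds
 * checks reject ring and CID indexes whose MID could not be looked up
 * safely.
 */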
*wdev = wil_to_wdev(wil); + struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr; struct cfg80211_mgmt_tx_params params; int rc; void *frame; @@ -890,6 +908,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { struct wil6210_priv *wil = file->private_data; + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); struct wmi_cmd_hdr *wmi; void *cmd; int cmdlen = len - sizeof(struct wmi_cmd_hdr); @@ -912,7 +931,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf, cmd = (cmdlen > 0) ? &wmi[1] : NULL; cmdid = le16_to_cpu(wmi->command_id); - rc1 = wmi_send(wil, cmdid, cmd, cmdlen); + rc1 = wmi_send(wil, cmdid, vif->mid, cmd, cmdlen); kfree(wmi); wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1); @@ -1050,6 +1069,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) int rc; int i; struct wil6210_priv *wil = s->private; + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); struct wmi_notify_req_cmd cmd = { .interval_usec = 0, }; @@ -1062,7 +1082,8 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) u32 status; cmd.cid = i; - rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid, + &cmd, sizeof(cmd), WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20); /* if reply is all-0, ignore this CID */ @@ -1155,7 +1176,7 @@ static const struct file_operations fops_temp = { static int wil_freq_debugfs_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; - struct wireless_dev *wdev = wil_to_wdev(wil); + struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr; u16 freq = wdev->chandef.chan ? wdev->chandef.chan->center_freq : 0; seq_printf(s, "Freq = %d\n", freq); @@ -1185,6 +1206,8 @@ static int wil_link_debugfs_show(struct seq_file *s, void *data) for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { struct wil_sta_info *p = &wil->sta[i]; char *status = "unknown"; + struct wil6210_vif *vif; + u8 mid; switch (p->status) { case wil_sta_unused: @@ -1197,16 +1220,24 @@ static int wil_link_debugfs_show(struct seq_file *s, void *data) status = "connected"; break; } - seq_printf(s, "[%d] %pM %s\n", i, p->addr, status); + mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX; + seq_printf(s, "[%d][MID %d] %pM %s\n", + i, mid, p->addr, status); - if (p->status == wil_sta_connected) { - rc = wil_cid_fill_sinfo(wil, i, &sinfo); + if (p->status != wil_sta_connected) + continue; + + vif = (mid < wil->max_vifs) ? 
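/* Sketch, not from this patch: the extra argument threaded through all
 * WMI calls is the issuing MID; the updated prototypes in wil6210.h
 * (not shown) should read roughly:
 */
int wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid,
	     void *buf, u16 len);
int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid,
	     void *buf, u16 len, u16 reply_id,
	     void *reply, u8 reply_size, int to_msec);
/* debugfs hooks keep acting on the main interface by passing the MID
 * of ndev_to_vif(wil->main_ndev), i.e. 0.
 */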
wil->vifs[mid] : NULL; + if (vif) { + rc = wil_cid_fill_sinfo(vif, i, &sinfo); if (rc) return rc; seq_printf(s, " Tx_mcs = %d\n", sinfo.txrate.mcs); seq_printf(s, " Rx_mcs = %d\n", sinfo.rxrate.mcs); seq_printf(s, " SQ = %d\n", sinfo.signal); + } else { + seq_puts(s, " INVALID MID\n"); } } @@ -1229,7 +1260,7 @@ static const struct file_operations fops_link = { static int wil_info_debugfs_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; - struct net_device *ndev = wil_to_ndev(wil); + struct net_device *ndev = wil->main_ndev; int is_ac = power_supply_is_system_supplied(); int rx = atomic_xchg(&wil->isr_count_rx, 0); int tx = atomic_xchg(&wil->isr_count_tx, 0); @@ -1398,6 +1429,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) struct wil_sta_info *p = &wil->sta[i]; char *status = "unknown"; u8 aid = 0; + u8 mid; switch (p->status) { case wil_sta_unused: @@ -1411,7 +1443,9 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) aid = p->aid; break; } - seq_printf(s, "[%d] %pM %s AID %d\n", i, p->addr, status, aid); + mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX; + seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, p->addr, status, + mid, aid); if (p->status == wil_sta_connected) { spin_lock_bh(&p->tid_rx_lock); @@ -1461,6 +1495,42 @@ static const struct file_operations fops_sta = { .llseek = seq_lseek, }; +static int wil_mids_debugfs_show(struct seq_file *s, void *data) +{ + struct wil6210_priv *wil = s->private; + struct wil6210_vif *vif; + struct net_device *ndev; + int i; + + mutex_lock(&wil->vif_mutex); + for (i = 0; i < wil->max_vifs; i++) { + vif = wil->vifs[i]; + + if (vif) { + ndev = vif_to_ndev(vif); + seq_printf(s, "[%d] %pM %s\n", i, ndev->dev_addr, + ndev->name); + } else { + seq_printf(s, "[%d] unused\n", i); + } + } + mutex_unlock(&wil->vif_mutex); + + return 0; +} + +static int wil_mids_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, wil_mids_debugfs_show, inode->i_private); +} + +static const struct file_operations fops_mids = { + .open = wil_mids_seq_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, +}; + static ssize_t wil_read_file_led_cfg(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -1715,6 +1785,7 @@ static const struct { {"mbox", 0444, &fops_mbox}, {"vrings", 0444, &fops_vring}, {"stations", 0444, &fops_sta}, + {"mids", 0444, &fops_mids}, {"desc", 0444, &fops_txdesc}, {"bf", 0444, &fops_bf}, {"mem_val", 0644, &fops_memread}, @@ -1773,11 +1844,9 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil, /* fields in struct wil6210_priv */ static const struct dbg_off dbg_wil_off[] = { - WIL_FIELD(privacy, 0444, doff_u32), WIL_FIELD(status[0], 0644, doff_ulong), WIL_FIELD(hw_version, 0444, doff_x32), WIL_FIELD(recovery_count, 0444, doff_u32), - WIL_FIELD(ap_isolate, 0444, doff_u32), WIL_FIELD(discovery_mode, 0644, doff_u8), WIL_FIELD(chip_revision, 0444, doff_u8), WIL_FIELD(abft_len, 0644, doff_u8), diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c index 66200f616a37..e7ff41e623d2 100644 --- a/drivers/net/wireless/ath/wil6210/ethtool.c +++ b/drivers/net/wireless/ath/wil6210/ethtool.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2014,2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -74,12 +75,13 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *cp) { struct wil6210_priv *wil = ndev_to_wil(ndev); + struct wireless_dev *wdev = ndev->ieee80211_ptr; int ret; wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n", cp->rx_coalesce_usecs, cp->tx_coalesce_usecs); - if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { + if (wdev->iftype == NL80211_IFTYPE_MONITOR) { wil_dbg_misc(wil, "No IRQ coalescing in monitor mode\n"); return -EINVAL; } diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h index 2c7b24f61587..3e7a28045cab 100644 --- a/drivers/net/wireless/ath/wil6210/fw.h +++ b/drivers/net/wireless/ath/wil6210/fw.h @@ -14,6 +14,8 @@ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#ifndef __WIL_FW_H__ +#define __WIL_FW_H__ #define WIL_FW_SIGNATURE (0x36323130) /* '0126' */ #define WIL_FW_FMT_VERSION (1) /* format version driver supports */ @@ -71,7 +73,39 @@ struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */ struct wil_fw_record_comment_hdr hdr; /* capabilities (variable size), see enum wmi_fw_capability */ u8 capabilities[0]; -}; +} __packed; + +/* FW VIF concurrency encoded inside a comment record + * Format is similar to wiphy->iface_combinations + */ +#define WIL_FW_CONCURRENCY_MAGIC (0xfedccdef) +#define WIL_FW_CONCURRENCY_REC_VER 1 +struct wil_fw_concurrency_limit { + __le16 max; /* maximum number of interfaces of these types */ + __le16 types; /* interface types (bit mask of enum nl80211_iftype) */ +} __packed; + +struct wil_fw_concurrency_combo { + u8 n_limits; /* number of wil_fw_concurrency_limit entries */ + u8 max_interfaces; /* max number of concurrent interfaces allowed */ + u8 n_diff_channels; /* total number of different channels allowed */ + u8 same_bi; /* for APs, 1 if all APs must have same BI */ + /* keep last - concurrency limits, variable size by n_limits */ + struct wil_fw_concurrency_limit limits[0]; +} __packed; + +struct wil_fw_record_concurrency { /* type == wil_fw_type_comment */ + /* identifies concurrency record */ + __le32 magic; + /* structure version, currently always 1 */ + u8 version; + /* maximum number of supported MIDs _in addition_ to MID 0 */ + u8 n_mids; + /* number of concurrency combinations that follow */ + __le16 n_combos; + /* keep last - combinations, variable size by n_combos */ + struct wil_fw_concurrency_combo combos[0]; +} __packed; /* brd file info encoded inside a comment record */ #define WIL_BRD_FILE_MAGIC (0xabcddcbb) @@ -175,3 +209,5 @@ struct wil_fw_record_gateway_data4 { /* type == wil_fw_type_gateway_data4 */ __le32 command; struct wil_fw_data_gw4 data[0]; /* total size [data_size], see above */ } __packed; + +#endif /* __WIL_FW_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c index 914c0106e94b..718161b829c2 100644 --- a/drivers/net/wireless/ath/wil6210/fw_inc.c +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -136,8 +136,8 @@ fw_handle_capabilities(struct wil6210_priv *wil, const void *data, size_t capa_size; if (size < sizeof(*rec)) { - wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, - data, size, true); + wil_err_fw(wil, "capabilities record too short: %zu\n", size); + /* let the FW load anyway */ 
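/* Worked example (hypothetical values) of the record defined in fw.h
 * above: a FW advertising one extra MID and a single combination of
 * one AP plus one STA on one channel would encode
 *
 *	magic    = 0xfedccdef	(WIL_FW_CONCURRENCY_MAGIC)
 *	version  = 1
 *	n_mids   = 1		(in addition to MID 0)
 *	n_combos = 1
 *	combos[0]: n_limits = 2, max_interfaces = 2,
 *		   n_diff_channels = 1, same_bi = 1
 *	  limits[0]: max = 1, types = BIT(NL80211_IFTYPE_AP)
 *	  limits[1]: max = 1, types = BIT(NL80211_IFTYPE_STATION)
 *
 * for a total of 8 (header) + 4 (combo) + 2 * 4 (limits) = 20 bytes;
 * this is the variable-size layout that fw_handle_concurrency() below
 * and wil_cfg80211_iface_combinations_from_fw() above both walk.
 */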
return 0; } @@ -158,8 +158,7 @@ fw_handle_brd_file(struct wil6210_priv *wil, const void *data, const struct wil_fw_record_brd_file *rec = data; if (size < sizeof(*rec)) { - wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, - data, size, true); + wil_err_fw(wil, "brd_file record too short: %zu\n", size); return 0; } @@ -173,6 +172,44 @@ fw_handle_brd_file(struct wil6210_priv *wil, const void *data, } static int +fw_handle_concurrency(struct wil6210_priv *wil, const void *data, + size_t size) +{ + const struct wil_fw_record_concurrency *rec = data; + const struct wil_fw_concurrency_combo *combo; + const struct wil_fw_concurrency_limit *limit; + size_t remain, lsize; + int i, n_combos; + + if (size < sizeof(*rec)) { + wil_err_fw(wil, "concurrency record too short: %zu\n", size); + /* continue, let the FW load anyway */ + return 0; + } + + n_combos = le16_to_cpu(rec->n_combos); + remain = size - offsetof(struct wil_fw_record_concurrency, combos); + combo = rec->combos; + for (i = 0; i < n_combos; i++) { + if (remain < sizeof(*combo)) + goto out_short; + remain -= sizeof(*combo); + limit = combo->limits; + lsize = combo->n_limits * sizeof(*limit); + if (remain < lsize) + goto out_short; + remain -= lsize; + limit += combo->n_limits; + combo = (struct wil_fw_concurrency_combo *)limit; + } + + return wil_cfg80211_iface_combinations_from_fw(wil, rec); +out_short: + wil_err_fw(wil, "concurrency record truncated\n"); + return 0; +} + +static int fw_handle_comment(struct wil6210_priv *wil, const void *data, size_t size) { @@ -194,6 +231,13 @@ fw_handle_comment(struct wil6210_priv *wil, const void *data, wil_dbg_fw(wil, "magic is WIL_BRD_FILE_MAGIC\n"); rc = fw_handle_brd_file(wil, data, size); break; + case WIL_FW_CONCURRENCY_MAGIC: + wil_dbg_fw(wil, "magic is WIL_FW_CONCURRENCY_MAGIC\n"); + rc = fw_handle_concurrency(wil, data, size); + break; + default: + wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, + data, size, true); } return rc; diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 1835187ea075..84e9840c1752 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -127,7 +127,7 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil) void wil6210_unmask_irq_rx(struct wil6210_priv *wil) { - bool unmask_rx_htrsh = test_bit(wil_status_fwconnected, wil->status); + bool unmask_rx_htrsh = atomic_read(&wil->connected_vifs) > 0; wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC), unmask_rx_htrsh ? 
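/* Note: the RX high-threshold interrupt is now gated on the atomic
 * connected_vifs counter rather than one global status bit. The
 * matching increment is expected in the WMI connect-event path
 * (wmi.c, not part of this hunk), in the spirit of
 *
 *	if (!test_and_set_bit(wil_vif_fwconnected, vif->status))
 *		atomic_inc(&wil->connected_vifs);
 *
 * with the decrements visible in _wil6210_disconnect() below.
 */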
WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH); @@ -188,12 +188,14 @@ void wil_unmask_irq(struct wil6210_priv *wil) void wil_configure_interrupt_moderation(struct wil6210_priv *wil) { + struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr; + wil_dbg_irq(wil, "configure_interrupt_moderation\n"); /* disable interrupt moderation for monitor * to get better timestamp precision */ - if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) + if (wdev->iftype == NL80211_IFTYPE_MONITOR) return; /* Disable and clear tx counter before (re)configuration */ @@ -340,7 +342,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) static void wil_notify_fw_error(struct wil6210_priv *wil) { - struct device *dev = &wil_to_ndev(wil)->dev; + struct device *dev = &wil->main_ndev->dev; char *envp[3] = { [0] = "SOURCE=wil6210", [1] = "EVENT=FW_ERROR", diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 0c61a6c13991..a4b413e8d55a 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -160,24 +160,34 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src, } } -static void wil_disconnect_cid(struct wil6210_priv *wil, int cid, +static void wil_disconnect_cid(struct wil6210_vif *vif, int cid, u16 reason_code, bool from_event) __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) { uint i; - struct net_device *ndev = wil_to_ndev(wil); - struct wireless_dev *wdev = wil->wdev; + struct wil6210_priv *wil = vif_to_wil(vif); + struct net_device *ndev = vif_to_ndev(vif); + struct wireless_dev *wdev = vif_to_wdev(vif); struct wil_sta_info *sta = &wil->sta[cid]; might_sleep(); - wil_dbg_misc(wil, "disconnect_cid: CID %d, status %d\n", - cid, sta->status); + wil_dbg_misc(wil, "disconnect_cid: CID %d, MID %d, status %d\n", + cid, sta->mid, sta->status); /* inform upper/lower layers */ if (sta->status != wil_sta_unused) { + if (vif->mid != sta->mid) { + wil_err(wil, "STA MID mismatch with VIF MID(%d)\n", + vif->mid); + /* let FW override sta->mid but be more strict with + * user space requests + */ + if (!from_event) + return; + } if (!from_event) { bool del_sta = (wdev->iftype == NL80211_IFTYPE_AP) ? 
disable_ap_sme : false; - wmi_disconnect_sta(wil, sta->addr, reason_code, + wmi_disconnect_sta(vif, sta->addr, reason_code, true, del_sta); } @@ -191,6 +201,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) break; } sta->status = wil_sta_unused; + sta->mid = U8_MAX; } /* reorder buffers */ for (i = 0; i < WIL_STA_TID_NUM; i++) { @@ -216,28 +227,33 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) memset(&sta->stats, 0, sizeof(sta->stats)); } -static bool wil_is_connected(struct wil6210_priv *wil) +static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid) { int i; for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { - if (wil->sta[i].status == wil_sta_connected) + if (wil->sta[i].mid == mid && + wil->sta[i].status == wil_sta_connected) return true; } return false; } -static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, +static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid, u16 reason_code, bool from_event) { + struct wil6210_priv *wil = vif_to_wil(vif); int cid = -ENOENT; - struct net_device *ndev = wil_to_ndev(wil); - struct wireless_dev *wdev = wil->wdev; + struct net_device *ndev; + struct wireless_dev *wdev; - if (unlikely(!ndev)) + if (unlikely(!vif)) return; + ndev = vif_to_ndev(vif); + wdev = vif_to_wdev(vif); + might_sleep(); wil_info(wil, "bssid=%pM, reason=%d, ev%s\n", bssid, reason_code, from_event ? "+" : "-"); @@ -254,48 +270,51 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, */ if (bssid && !is_broadcast_ether_addr(bssid) && !ether_addr_equal_unaligned(ndev->dev_addr, bssid)) { - cid = wil_find_cid(wil, bssid); + cid = wil_find_cid(wil, vif->mid, bssid); wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n", bssid, cid, reason_code); if (cid >= 0) /* disconnect 1 peer */ - wil_disconnect_cid(wil, cid, reason_code, from_event); + wil_disconnect_cid(vif, cid, reason_code, from_event); } else { /* all */ wil_dbg_misc(wil, "Disconnect all\n"); for (cid = 0; cid < WIL6210_MAX_CID; cid++) - wil_disconnect_cid(wil, cid, reason_code, from_event); + wil_disconnect_cid(vif, cid, reason_code, from_event); } /* link state */ switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: - wil_bcast_fini(wil); - wil_update_net_queues_bh(wil, NULL, true); + wil_bcast_fini(vif); + wil_update_net_queues_bh(wil, vif, NULL, true); netif_carrier_off(ndev); - wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); + if (!wil_has_other_active_ifaces(wil, ndev, false, true)) + wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); - if (test_bit(wil_status_fwconnected, wil->status)) { - clear_bit(wil_status_fwconnected, wil->status); + if (test_and_clear_bit(wil_vif_fwconnected, vif->status)) { + atomic_dec(&wil->connected_vifs); cfg80211_disconnected(ndev, reason_code, NULL, 0, - wil->locally_generated_disc, + vif->locally_generated_disc, GFP_KERNEL); - wil->locally_generated_disc = false; - } else if (test_bit(wil_status_fwconnecting, wil->status)) { + vif->locally_generated_disc = false; + } else if (test_bit(wil_vif_fwconnecting, vif->status)) { cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); - wil->bss = NULL; + vif->bss = NULL; } - clear_bit(wil_status_fwconnecting, wil->status); + clear_bit(wil_vif_fwconnecting, vif->status); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: - if (!wil_is_connected(wil)) { - wil_update_net_queues_bh(wil, NULL, true); - clear_bit(wil_status_fwconnected, wil->status); + if 
(!wil_vif_is_connected(wil, vif->mid)) { + wil_update_net_queues_bh(wil, vif, NULL, true); + if (test_and_clear_bit(wil_vif_fwconnected, + vif->status)) + atomic_dec(&wil->connected_vifs); } else { - wil_update_net_queues_bh(wil, NULL, false); + wil_update_net_queues_bh(wil, vif, NULL, false); } break; default: @@ -303,26 +322,27 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, } } -static void wil_disconnect_worker(struct work_struct *work) +void wil_disconnect_worker(struct work_struct *work) { - struct wil6210_priv *wil = container_of(work, - struct wil6210_priv, disconnect_worker); - struct net_device *ndev = wil_to_ndev(wil); + struct wil6210_vif *vif = container_of(work, + struct wil6210_vif, disconnect_worker); + struct wil6210_priv *wil = vif_to_wil(vif); + struct net_device *ndev = vif_to_ndev(vif); int rc; struct { struct wmi_cmd_hdr wmi; struct wmi_disconnect_event evt; } __packed reply; - if (test_bit(wil_status_fwconnected, wil->status)) + if (test_bit(wil_vif_fwconnected, vif->status)) /* connect succeeded after all */ return; - if (!test_bit(wil_status_fwconnecting, wil->status)) + if (!test_bit(wil_vif_fwconnecting, vif->status)) /* already disconnected */ return; - rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0, + rc = wmi_call(wil, WMI_DISCONNECT_CMDID, vif->mid, NULL, 0, WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), WIL6210_DISCONNECT_TO_MS); if (rc) { @@ -330,35 +350,11 @@ static void wil_disconnect_worker(struct work_struct *work) return; } - wil_update_net_queues_bh(wil, NULL, true); + wil_update_net_queues_bh(wil, vif, NULL, true); netif_carrier_off(ndev); cfg80211_connect_result(ndev, NULL, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); - clear_bit(wil_status_fwconnecting, wil->status); -} - -static void wil_connect_timer_fn(struct timer_list *t) -{ - struct wil6210_priv *wil = from_timer(wil, t, connect_timer); - bool q; - - wil_err(wil, "Connect timeout detected, disconnect station\n"); - - /* reschedule to thread context - disconnect won't - * run from atomic context. - * queue on wmi_wq to prevent race with connect event. - */ - q = queue_work(wil->wmi_wq, &wil->disconnect_worker); - wil_dbg_wmi(wil, "queue_work of disconnect_worker -> %d\n", q); -} - -static void wil_scan_timer_fn(struct timer_list *t) -{ - struct wil6210_priv *wil = from_timer(wil, t, scan_timer); - - clear_bit(wil_status_fwready, wil->status); - wil_err(wil, "Scan timeout detected, start fw error recovery\n"); - wil_fw_error_recovery(wil); + clear_bit(wil_vif_fwconnecting, vif->status); } static int wil_wait_for_recovery(struct wil6210_priv *wil) @@ -394,12 +390,12 @@ static void wil_fw_error_worker(struct work_struct *work) { struct wil6210_priv *wil = container_of(work, struct wil6210_priv, fw_error_worker); - struct wireless_dev *wdev = wil->wdev; - struct net_device *ndev = wil_to_ndev(wil); + struct net_device *ndev = wil->main_ndev; + struct wireless_dev *wdev = ndev->ieee80211_ptr; wil_dbg_misc(wil, "fw error worker\n"); - if (!(ndev->flags & IFF_UP)) { + if (!ndev || !(ndev->flags & IFF_UP)) { wil_info(wil, "No recovery - interface is down\n"); return; } @@ -429,6 +425,10 @@ static void wil_fw_error_worker(struct work_struct *work) return; mutex_lock(&wil->mutex); + /* Needs adaptation for multiple VIFs + * need to go over all VIFs and consider the appropriate + * recovery. 
+ */ switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: @@ -461,8 +461,9 @@ static int wil_find_free_vring(struct wil6210_priv *wil) return -EINVAL; } -int wil_tx_init(struct wil6210_priv *wil, int cid) +int wil_tx_init(struct wil6210_vif *vif, int cid) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc = -EINVAL, ringid; if (cid < 0) { @@ -475,21 +476,22 @@ int wil_tx_init(struct wil6210_priv *wil, int cid) goto out; } - wil_dbg_wmi(wil, "Configure for connection CID %d vring %d\n", - cid, ringid); + wil_dbg_wmi(wil, "Configure for connection CID %d MID %d vring %d\n", + cid, vif->mid, ringid); - rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0); + rc = wil_vring_init_tx(vif, ringid, 1 << tx_ring_order, cid, 0); if (rc) - wil_err(wil, "wil_vring_init_tx for CID %d vring %d failed\n", - cid, ringid); + wil_err(wil, "init TX for CID %d MID %d vring %d failed\n", + cid, vif->mid, ringid); out: return rc; } -int wil_bcast_init(struct wil6210_priv *wil) +int wil_bcast_init(struct wil6210_vif *vif) { - int ri = wil->bcast_vring, rc; + struct wil6210_priv *wil = vif_to_wil(vif); + int ri = vif->bcast_vring, rc; if ((ri >= 0) && wil->vring_tx[ri].va) return 0; @@ -498,25 +500,38 @@ int wil_bcast_init(struct wil6210_priv *wil) if (ri < 0) return ri; - wil->bcast_vring = ri; - rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order); + vif->bcast_vring = ri; + rc = wil_vring_init_bcast(vif, ri, 1 << bcast_ring_order); if (rc) - wil->bcast_vring = -1; + vif->bcast_vring = -1; return rc; } -void wil_bcast_fini(struct wil6210_priv *wil) +void wil_bcast_fini(struct wil6210_vif *vif) { - int ri = wil->bcast_vring; + struct wil6210_priv *wil = vif_to_wil(vif); + int ri = vif->bcast_vring; if (ri < 0) return; - wil->bcast_vring = -1; + vif->bcast_vring = -1; wil_vring_fini_tx(wil, ri); } +void wil_bcast_fini_all(struct wil6210_priv *wil) +{ + int i; + struct wil6210_vif *vif; + + for (i = 0; i < wil->max_vifs; i++) { + vif = wil->vifs[i]; + if (vif) + wil_bcast_fini(vif); + } +} + int wil_priv_init(struct wil6210_priv *wil) { uint i; @@ -524,38 +539,29 @@ int wil_priv_init(struct wil6210_priv *wil) wil_dbg_misc(wil, "priv_init\n"); memset(wil->sta, 0, sizeof(wil->sta)); - for (i = 0; i < WIL6210_MAX_CID; i++) + for (i = 0; i < WIL6210_MAX_CID; i++) { spin_lock_init(&wil->sta[i].tid_rx_lock); + wil->sta[i].mid = U8_MAX; + } for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) spin_lock_init(&wil->vring_tx_data[i].lock); mutex_init(&wil->mutex); + mutex_init(&wil->vif_mutex); mutex_init(&wil->wmi_mutex); - mutex_init(&wil->probe_client_mutex); - mutex_init(&wil->p2p_wdev_mutex); mutex_init(&wil->halp.lock); init_completion(&wil->wmi_ready); init_completion(&wil->wmi_call); init_completion(&wil->halp.comp); - wil->bcast_vring = -1; - timer_setup(&wil->connect_timer, wil_connect_timer_fn, 0); - timer_setup(&wil->scan_timer, wil_scan_timer_fn, 0); - timer_setup(&wil->p2p.discovery_timer, wil_p2p_discovery_timer_fn, 0); - - INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker); INIT_WORK(&wil->wmi_event_worker, wmi_event_worker); INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker); - INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker); - INIT_WORK(&wil->p2p.delayed_listen_work, wil_p2p_delayed_listen_work); INIT_LIST_HEAD(&wil->pending_wmi_ev); - INIT_LIST_HEAD(&wil->probe_client_pending); spin_lock_init(&wil->wmi_ev_lock); spin_lock_init(&wil->net_queue_lock); - wil->net_queue_stopped = 1; init_waitqueue_head(&wil->wq); wil->wmi_wq = 
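/* Sketch, not from this patch: wil_connect_timer_fn() and
 * wil_scan_timer_fn() are deleted above together with their
 * timer_setup() calls; with connect_timer and scan_timer now VIF
 * members, the callbacks presumably move to the per-VIF init path
 * (netdev.c, not shown), along the lines of:
 */
static void wil_connect_timer_fn(struct timer_list *t)
{
	struct wil6210_vif *vif = from_timer(vif, t, connect_timer);
	struct wil6210_priv *wil = vif_to_wil(vif);
	bool q;

	wil_err(wil, "Connect timeout detected, disconnect station\n");
	/* disconnect must not run in atomic context, so reschedule to
	 * the WMI workqueue to avoid racing the connect event
	 */
	q = queue_work(wil->wmi_wq, &vif->disconnect_worker);
	wil_dbg_wmi(wil, "queue_work of disconnect_worker -> %d\n", q);
}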
create_singlethread_workqueue(WIL_NAME "_wmi"); @@ -582,6 +588,9 @@ int wil_priv_init(struct wil6210_priv *wil) memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); wil->vring_idle_trsh = 16; + wil->reply_mid = U8_MAX; + wil->max_vifs = 1; + return 0; out_wmi_wq: @@ -600,7 +609,7 @@ void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps) /** * wil6210_disconnect - disconnect one connection - * @wil: driver context + * @vif: virtual interface context * @bssid: peer to disconnect, NULL to disconnect all * @reason_code: Reason code for the Disassociation frame * @from_event: whether is invoked from FW event handler @@ -608,13 +617,15 @@ void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps) * Disconnect and release associated resources. If invoked not from the * FW event handler, issue WMI command(s) to trigger MAC disconnect. */ -void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, +void wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid, u16 reason_code, bool from_event) { + struct wil6210_priv *wil = vif_to_wil(vif); + wil_dbg_misc(wil, "disconnect\n"); - del_timer_sync(&wil->connect_timer); - _wil6210_disconnect(wil, bssid, reason_code, from_event); + del_timer_sync(&vif->connect_timer); + _wil6210_disconnect(vif, bssid, reason_code, from_event); } void wil_priv_deinit(struct wil6210_priv *wil) @@ -622,18 +633,8 @@ void wil_priv_deinit(struct wil6210_priv *wil) wil_dbg_misc(wil, "priv_deinit\n"); wil_set_recovery_state(wil, fw_recovery_idle); - del_timer_sync(&wil->scan_timer); - del_timer_sync(&wil->p2p.discovery_timer); - cancel_work_sync(&wil->disconnect_worker); cancel_work_sync(&wil->fw_error_worker); - cancel_work_sync(&wil->p2p.discovery_expired_work); - cancel_work_sync(&wil->p2p.delayed_listen_work); - mutex_lock(&wil->mutex); - wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); - mutex_unlock(&wil->mutex); wmi_event_flush(wil); - wil_probe_client_flush(wil); - cancel_work_sync(&wil->probe_client_worker); destroy_workqueue(wil->wq_service); destroy_workqueue(wil->wmi_wq); } @@ -715,7 +716,7 @@ static void wil_bl_prepare_halt(struct wil6210_priv *wil) offsetof(struct bl_dedicated_registers_v0, boot_loader_struct_version)); if (!tmp) { - wil_dbg_misc(wil, "old BL, skipping halt preperation\n"); + wil_dbg_misc(wil, "old BL, skipping halt preparation\n"); return; } @@ -943,7 +944,7 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) static int wil_get_bl_info(struct wil6210_priv *wil) { - struct net_device *ndev = wil_to_ndev(wil); + struct net_device *ndev = wil->main_ndev; struct wiphy *wiphy = wil_to_wiphy(wil); union { struct bl_dedicated_registers_v0 bl0; @@ -1035,7 +1036,7 @@ static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err) static int wil_get_otp_info(struct wil6210_priv *wil) { - struct net_device *ndev = wil_to_ndev(wil); + struct net_device *ndev = wil->main_ndev; struct wiphy *wiphy = wil_to_wiphy(wil); u8 mac[8]; @@ -1069,31 +1070,46 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil) return 0; } -void wil_abort_scan(struct wil6210_priv *wil, bool sync) +void wil_abort_scan(struct wil6210_vif *vif, bool sync) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; struct cfg80211_scan_info info = { .aborted = true, }; - lockdep_assert_held(&wil->p2p_wdev_mutex); + lockdep_assert_held(&wil->vif_mutex); - if (!wil->scan_request) + if (!vif->scan_request) return; - wil_dbg_misc(wil, "Abort scan_request 0x%p\n", wil->scan_request); - del_timer_sync(&wil->scan_timer); - 
mutex_unlock(&wil->p2p_wdev_mutex); - rc = wmi_abort_scan(wil); + wil_dbg_misc(wil, "Abort scan_request 0x%p\n", vif->scan_request); + del_timer_sync(&vif->scan_timer); + mutex_unlock(&wil->vif_mutex); + rc = wmi_abort_scan(vif); if (!rc && sync) - wait_event_interruptible_timeout(wil->wq, !wil->scan_request, + wait_event_interruptible_timeout(wil->wq, !vif->scan_request, msecs_to_jiffies( WAIT_FOR_SCAN_ABORT_MS)); - mutex_lock(&wil->p2p_wdev_mutex); - if (wil->scan_request) { - cfg80211_scan_done(wil->scan_request, &info); - wil->scan_request = NULL; + mutex_lock(&wil->vif_mutex); + if (vif->scan_request) { + cfg80211_scan_done(vif->scan_request, &info); + vif->scan_request = NULL; + } +} + +void wil_abort_scan_all_vifs(struct wil6210_priv *wil, bool sync) +{ + int i; + + lockdep_assert_held(&wil->vif_mutex); + + for (i = 0; i < wil->max_vifs; i++) { + struct wil6210_vif *vif = wil->vifs[i]; + + if (vif) + wil_abort_scan(vif, sync); } } @@ -1138,6 +1154,34 @@ static void wil_pre_fw_config(struct wil6210_priv *wil) } } +static int wil_restore_vifs(struct wil6210_priv *wil) +{ + struct wil6210_vif *vif; + struct net_device *ndev; + struct wireless_dev *wdev; + int i, rc; + + for (i = 0; i < wil->max_vifs; i++) { + vif = wil->vifs[i]; + if (!vif) + continue; + vif->ap_isolate = 0; + if (vif->mid) { + ndev = vif_to_ndev(vif); + wdev = vif_to_wdev(vif); + rc = wmi_port_allocate(wil, vif->mid, ndev->dev_addr, + wdev->iftype); + if (rc) { + wil_err(wil, "fail to restore VIF %d type %d, rc %d\n", + i, wdev->iftype, rc); + return rc; + } + } + } + + return 0; +} + /* * We reset all the structures, and we reset the UMAC. * After calling this routine, you're expected to reload @@ -1145,9 +1189,10 @@ static void wil_pre_fw_config(struct wil6210_priv *wil) */ int wil_reset(struct wil6210_priv *wil, bool load_fw) { - int rc; + int rc, i; unsigned long status_flags = BIT(wil_status_resetting); int no_flash; + struct wil6210_vif *vif; wil_dbg_misc(wil, "reset\n"); @@ -1158,7 +1203,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) static const u8 mac[ETH_ALEN] = { 0x00, 0xde, 0xad, 0x12, 0x34, 0x56, }; - struct net_device *ndev = wil_to_ndev(wil); + struct net_device *ndev = wil->main_ndev; ether_addr_copy(ndev->perm_addr, mac); ether_addr_copy(ndev->dev_addr, ndev->perm_addr); @@ -1196,17 +1241,23 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) goto out; } - cancel_work_sync(&wil->disconnect_worker); - wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); - wil_bcast_fini(wil); + mutex_lock(&wil->vif_mutex); + wil_abort_scan_all_vifs(wil, false); + mutex_unlock(&wil->vif_mutex); + + for (i = 0; i < wil->max_vifs; i++) { + vif = wil->vifs[i]; + if (vif) { + cancel_work_sync(&vif->disconnect_worker); + wil6210_disconnect(vif, NULL, + WLAN_REASON_DEAUTH_LEAVING, false); + } + } + wil_bcast_fini_all(wil); /* Disable device led before reset*/ wmi_led_cfg(wil, false); - mutex_lock(&wil->p2p_wdev_mutex); - wil_abort_scan(wil, false); - mutex_unlock(&wil->p2p_wdev_mutex); - /* prevent NAPI from being scheduled and prevent wmi commands */ mutex_lock(&wil->wmi_mutex); if (test_bit(wil_status_suspending, wil->status)) @@ -1276,7 +1327,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) } /* init after reset */ - wil->ap_isolate = 0; reinit_completion(&wil->wmi_ready); reinit_completion(&wil->wmi_call); reinit_completion(&wil->halp.comp); @@ -1299,6 +1349,12 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) return rc; } + rc = wil_restore_vifs(wil); + if (rc) { + 
wil_err(wil, "failed to restore vifs, rc %d\n", rc); + return rc; + } + wil_collect_fw_info(wil); if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT) @@ -1337,8 +1393,8 @@ void wil_fw_error_recovery(struct wil6210_priv *wil) int __wil_up(struct wil6210_priv *wil) { - struct net_device *ndev = wil_to_ndev(wil); - struct wireless_dev *wdev = wil->wdev; + struct net_device *ndev = wil->main_ndev; + struct wireless_dev *wdev = ndev->ieee80211_ptr; int rc; WARN_ON(!mutex_is_locked(&wil->mutex)); @@ -1420,10 +1476,10 @@ int __wil_down(struct wil6210_priv *wil) } wil_enable_irq(wil); - mutex_lock(&wil->p2p_wdev_mutex); + mutex_lock(&wil->vif_mutex); wil_p2p_stop_radio_operations(wil); - wil_abort_scan(wil, false); - mutex_unlock(&wil->p2p_wdev_mutex); + wil_abort_scan_all_vifs(wil, false); + mutex_unlock(&wil->vif_mutex); return wil_reset(wil, false); } @@ -1442,13 +1498,14 @@ int wil_down(struct wil6210_priv *wil) return rc; } -int wil_find_cid(struct wil6210_priv *wil, const u8 *mac) +int wil_find_cid(struct wil6210_priv *wil, u8 mid, const u8 *mac) { int i; int rc = -ENOENT; for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { - if ((wil->sta[i].status != wil_sta_unused) && + if (wil->sta[i].mid == mid && + wil->sta[i].status != wil_sta_unused && ether_addr_equal(wil->sta[i].addr, mac)) { rc = i; break; diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 7ba4e0af8f57..05e9408e7ea3 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -15,13 +16,41 @@ */ #include <linux/etherdevice.h> +#include <linux/rtnetlink.h> #include "wil6210.h" #include "txrx.h" +bool wil_has_other_active_ifaces(struct wil6210_priv *wil, + struct net_device *ndev, bool up, bool ok) +{ + int i; + struct wil6210_vif *vif; + struct net_device *ndev_i; + + for (i = 0; i < wil->max_vifs; i++) { + vif = wil->vifs[i]; + if (vif) { + ndev_i = vif_to_ndev(vif); + if (ndev_i != ndev) + if ((up && (ndev_i->flags & IFF_UP)) || + (ok && netif_carrier_ok(ndev_i))) + return true; + } + } + + return false; +} + +bool wil_has_active_ifaces(struct wil6210_priv *wil, bool up, bool ok) +{ + /* use NULL ndev argument to check all interfaces */ + return wil_has_other_active_ifaces(wil, NULL, up, ok); +} + static int wil_open(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); - int rc; + int rc = 0; wil_dbg_misc(wil, "open\n"); @@ -31,13 +60,16 @@ static int wil_open(struct net_device *ndev) return -EINVAL; } - rc = wil_pm_runtime_get(wil); - if (rc < 0) - return rc; + if (!wil_has_other_active_ifaces(wil, ndev, true, false)) { + wil_dbg_misc(wil, "open, first iface\n"); + rc = wil_pm_runtime_get(wil); + if (rc < 0) + return rc; - rc = wil_up(wil); - if (rc) - wil_pm_runtime_put(wil); + rc = wil_up(wil); + if (rc) + wil_pm_runtime_put(wil); + } return rc; } @@ -45,13 +77,16 @@ static int wil_open(struct net_device *ndev) static int wil_stop(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); - int rc; + int rc = 0; wil_dbg_misc(wil, "stop\n"); - rc = wil_down(wil); - if (!rc) - wil_pm_runtime_put(wil); + if (!wil_has_other_active_ifaces(wil, ndev, true, false)) { + wil_dbg_misc(wil, "stop, last iface\n"); + rc = wil_down(wil); 
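wil_open()/wil_stop() gate the shared radio on the first interface coming up and the last one going down; rather than keeping a counter, the driver re-derives the state each time via wil_has_other_active_ifaces(). The same invariant expressed with an explicit counter, as a minimal sketch (the active_cnt field is hypothetical, not part of this patch):

/* callers serialized by RTNL, as wil_open()/wil_stop() are */
static int wil_iface_up(struct wil6210_priv *wil)
{
	int rc = 0;

	/* 0 -> 1 transition: power up the shared radio */
	if (wil->active_cnt++ == 0) {
		rc = wil_pm_runtime_get(wil);
		if (rc < 0)
			goto undo;
		rc = wil_up(wil);
		if (rc) {
			wil_pm_runtime_put(wil);
			goto undo;
		}
	}
	return 0;

undo:
	wil->active_cnt--;
	return rc;
}

static int wil_iface_down(struct wil6210_priv *wil)
{
	int rc = 0;

	/* 1 -> 0 transition: the last user powers the radio down */
	if (--wil->active_cnt == 0) {
		rc = wil_down(wil);
		if (!rc)
			wil_pm_runtime_put(wil);
	}
	return rc;
}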
+ if (!rc) + wil_pm_runtime_put(wil); + } return rc; } @@ -96,11 +131,19 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget) for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { struct vring *vring = &wil->vring_tx[i]; struct vring_tx_data *txdata = &wil->vring_tx_data[i]; + struct wil6210_vif *vif; - if (!vring->va || !txdata->enabled) + if (!vring->va || !txdata->enabled || + txdata->mid >= wil->max_vifs) continue; - tx_done += wil_tx_complete(wil, i); + vif = wil->vifs[txdata->mid]; + if (unlikely(!vif)) { + wil_dbg_txrx(wil, "Invalid MID %d\n", txdata->mid); + continue; + } + + tx_done += wil_tx_complete(vif, i); } if (tx_done < budget) { @@ -121,44 +164,137 @@ static void wil_dev_setup(struct net_device *dev) dev->tx_queue_len = WIL_TX_Q_LEN_DEFAULT; } -void *wil_if_alloc(struct device *dev) +static void wil_vif_deinit(struct wil6210_vif *vif) { - struct net_device *ndev; - struct wireless_dev *wdev; - struct wil6210_priv *wil; - struct ieee80211_channel *ch; - int rc = 0; + del_timer_sync(&vif->scan_timer); + del_timer_sync(&vif->p2p.discovery_timer); + cancel_work_sync(&vif->disconnect_worker); + cancel_work_sync(&vif->p2p.discovery_expired_work); + cancel_work_sync(&vif->p2p.delayed_listen_work); + wil_probe_client_flush(vif); + cancel_work_sync(&vif->probe_client_worker); +} - wdev = wil_cfg80211_init(dev); - if (IS_ERR(wdev)) { - dev_err(dev, "wil_cfg80211_init failed\n"); - return wdev; - } +void wil_vif_free(struct wil6210_vif *vif) +{ + struct net_device *ndev = vif_to_ndev(vif); - wil = wdev_to_wil(wdev); - wil->wdev = wdev; - wil->radio_wdev = wdev; + wil_vif_deinit(vif); + free_netdev(ndev); +} - wil_dbg_misc(wil, "if_alloc\n"); +static void wil_ndev_destructor(struct net_device *ndev) +{ + struct wil6210_vif *vif = ndev_to_vif(ndev); - rc = wil_priv_init(wil); - if (rc) { - dev_err(dev, "wil_priv_init failed\n"); - goto out_wdev; + wil_vif_deinit(vif); +} + +static void wil_connect_timer_fn(struct timer_list *t) +{ + struct wil6210_vif *vif = from_timer(vif, t, connect_timer); + struct wil6210_priv *wil = vif_to_wil(vif); + bool q; + + wil_err(wil, "Connect timeout detected, disconnect station\n"); + + /* reschedule to thread context - disconnect won't + * run from atomic context. + * queue on wmi_wq to prevent race with connect event. 
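This comment describes the standard timer-to-workqueue deferral: the timer callback fires in atomic context, so the sleeping disconnect is pushed to a worker, and queueing it on wmi_wq serializes it against WMI connect-event handling. Reduced to its skeleton with generic names (a sketch, not driver code):

#include <linux/timer.h>
#include <linux/workqueue.h>

struct conn_ctx {
	struct timer_list timer;
	struct work_struct work;
	struct workqueue_struct *wq;	/* shared with the event handler */
};

static void conn_timeout_fn(struct timer_list *t)
{
	struct conn_ctx *ctx = from_timer(ctx, t, timer);

	/* atomic context: must not sleep, defer to the worker */
	queue_work(ctx->wq, &ctx->work);
}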
+ */ + q = queue_work(wil->wmi_wq, &vif->disconnect_worker); + wil_dbg_wmi(wil, "queue_work of disconnect_worker -> %d\n", q); +} + +static void wil_scan_timer_fn(struct timer_list *t) +{ + struct wil6210_vif *vif = from_timer(vif, t, scan_timer); + struct wil6210_priv *wil = vif_to_wil(vif); + + clear_bit(wil_status_fwready, wil->status); + wil_err(wil, "Scan timeout detected, start fw error recovery\n"); + wil_fw_error_recovery(wil); +} + +static void wil_p2p_discovery_timer_fn(struct timer_list *t) +{ + struct wil6210_vif *vif = from_timer(vif, t, p2p.discovery_timer); + struct wil6210_priv *wil = vif_to_wil(vif); + + wil_dbg_misc(wil, "p2p_discovery_timer_fn\n"); + + schedule_work(&vif->p2p.discovery_expired_work); +} + +static void wil_vif_init(struct wil6210_vif *vif) +{ + vif->bcast_vring = -1; + + mutex_init(&vif->probe_client_mutex); + + timer_setup(&vif->connect_timer, wil_connect_timer_fn, 0); + timer_setup(&vif->scan_timer, wil_scan_timer_fn, 0); + timer_setup(&vif->p2p.discovery_timer, wil_p2p_discovery_timer_fn, 0); + + INIT_WORK(&vif->probe_client_worker, wil_probe_client_worker); + INIT_WORK(&vif->disconnect_worker, wil_disconnect_worker); + INIT_WORK(&vif->p2p.delayed_listen_work, wil_p2p_delayed_listen_work); + + INIT_LIST_HEAD(&vif->probe_client_pending); + + vif->net_queue_stopped = 1; +} + +static u8 wil_vif_find_free_mid(struct wil6210_priv *wil) +{ + u8 i; + + for (i = 0; i < wil->max_vifs; i++) { + if (!wil->vifs[i]) + return i; } - wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */ - /* default monitor channel */ - ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels; - cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT); + return U8_MAX; +} + +struct wil6210_vif * +wil_vif_alloc(struct wil6210_priv *wil, const char *name, + unsigned char name_assign_type, enum nl80211_iftype iftype) +{ + struct net_device *ndev; + struct wireless_dev *wdev; + struct wil6210_vif *vif; + u8 mid; + + mid = wil_vif_find_free_mid(wil); + if (mid == U8_MAX) { + wil_err(wil, "no available virtual interface\n"); + return ERR_PTR(-EINVAL); + } - ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup); + ndev = alloc_netdev(sizeof(*vif), name, name_assign_type, + wil_dev_setup); if (!ndev) { - dev_err(dev, "alloc_netdev_mqs failed\n"); - rc = -ENOMEM; - goto out_priv; + dev_err(wil_to_dev(wil), "alloc_netdev failed\n"); + return ERR_PTR(-ENOMEM); + } + if (mid == 0) { + wil->main_ndev = ndev; + } else { + ndev->priv_destructor = wil_ndev_destructor; + ndev->needs_free_netdev = true; } + vif = ndev_to_vif(ndev); + vif->ndev = ndev; + vif->wil = wil; + vif->mid = mid; + wil_vif_init(vif); + + wdev = &vif->wdev; + wdev->wiphy = wil->wiphy; + wdev->iftype = iftype; + ndev->netdev_ops = &wil_netdev_ops; wil_set_ethtoolops(ndev); ndev->ieee80211_ptr = wdev; @@ -170,21 +306,53 @@ void *wil_if_alloc(struct device *dev) ndev->features |= ndev->hw_features; SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); wdev->netdev = ndev; + return vif; +} + +void *wil_if_alloc(struct device *dev) +{ + struct wil6210_priv *wil; + struct wil6210_vif *vif; + int rc = 0; + + wil = wil_cfg80211_init(dev); + if (IS_ERR(wil)) { + dev_err(dev, "wil_cfg80211_init failed\n"); + return wil; + } + + rc = wil_priv_init(wil); + if (rc) { + dev_err(dev, "wil_priv_init failed\n"); + goto out_cfg; + } + + wil_dbg_misc(wil, "if_alloc\n"); + + vif = wil_vif_alloc(wil, "wlan%d", NET_NAME_UNKNOWN, + NL80211_IFTYPE_STATION); + if (IS_ERR(vif)) { + dev_err(dev, "wil_vif_alloc failed\n"); + rc = -ENOMEM; + goto 
out_priv; + } + + wil->radio_wdev = vif_to_wdev(vif); return wil; - out_priv: +out_priv: wil_priv_deinit(wil); - out_wdev: - wil_wdev_free(wil); +out_cfg: + wil_cfg80211_deinit(wil); return ERR_PTR(rc); } void wil_if_free(struct wil6210_priv *wil) { - struct net_device *ndev = wil_to_ndev(wil); + struct net_device *ndev = wil->main_ndev; wil_dbg_misc(wil, "if_free\n"); @@ -193,17 +361,50 @@ void wil_if_free(struct wil6210_priv *wil) wil_priv_deinit(wil); - wil_to_ndev(wil) = NULL; + wil->main_ndev = NULL; + wil_ndev_destructor(ndev); free_netdev(ndev); - wil_wdev_free(wil); + wil_cfg80211_deinit(wil); +} + +int wil_vif_add(struct wil6210_priv *wil, struct wil6210_vif *vif) +{ + struct net_device *ndev = vif_to_ndev(vif); + struct wireless_dev *wdev = vif_to_wdev(vif); + bool any_active = wil_has_active_ifaces(wil, true, false); + int rc; + + ASSERT_RTNL(); + + if (wil->vifs[vif->mid]) { + dev_err(&ndev->dev, "VIF with mid %d already in use\n", + vif->mid); + return -EEXIST; + } + if (any_active && vif->mid != 0) { + rc = wmi_port_allocate(wil, vif->mid, ndev->dev_addr, + wdev->iftype); + if (rc) + return rc; + } + rc = register_netdevice(ndev); + if (rc < 0) { + dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc); + if (any_active && vif->mid != 0) + wmi_port_delete(wil, vif->mid); + return rc; + } + + wil->vifs[vif->mid] = vif; + return 0; } int wil_if_add(struct wil6210_priv *wil) { - struct wireless_dev *wdev = wil_to_wdev(wil); - struct wiphy *wiphy = wdev->wiphy; - struct net_device *ndev = wil_to_ndev(wil); + struct wiphy *wiphy = wil->wiphy; + struct net_device *ndev = wil->main_ndev; + struct wil6210_vif *vif = ndev_to_vif(ndev); int rc; wil_dbg_misc(wil, "entered"); @@ -216,33 +417,94 @@ int wil_if_add(struct wil6210_priv *wil) return rc; } - netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx, + init_dummy_netdev(&wil->napi_ndev); + netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx, WIL6210_NAPI_BUDGET); - netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx, + netif_tx_napi_add(&wil->napi_ndev, + &wil->napi_tx, wil6210_netdev_poll_tx, WIL6210_NAPI_BUDGET); - wil_update_net_queues_bh(wil, NULL, true); + wil_update_net_queues_bh(wil, vif, NULL, true); - rc = register_netdev(ndev); - if (rc < 0) { - dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc); + rtnl_lock(); + rc = wil_vif_add(wil, vif); + rtnl_unlock(); + if (rc < 0) goto out_wiphy; - } return 0; out_wiphy: - wiphy_unregister(wdev->wiphy); + wiphy_unregister(wiphy); return rc; } +void wil_vif_remove(struct wil6210_priv *wil, u8 mid) +{ + struct wil6210_vif *vif; + struct net_device *ndev; + bool any_active = wil_has_active_ifaces(wil, true, false); + + ASSERT_RTNL(); + if (mid >= wil->max_vifs) { + wil_err(wil, "invalid MID: %d\n", mid); + return; + } + + vif = wil->vifs[mid]; + if (!vif) { + wil_err(wil, "MID %d not registered\n", mid); + return; + } + + ndev = vif_to_ndev(vif); + /* during unregister_netdevice cfg80211_leave may perform operations + * such as stop AP, disconnect, so we only clear the VIF afterwards + */ + unregister_netdevice(ndev); + + mutex_lock(&wil->mutex); + wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING, false); + mutex_unlock(&wil->mutex); + + if (any_active && vif->mid != 0) + wmi_port_delete(wil, vif->mid); + + /* make sure no one is accessing the VIF before removing */ + mutex_lock(&wil->vif_mutex); + wil->vifs[mid] = NULL; + /* ensure NAPI code will see the NULL VIF */ + wmb(); + if (test_bit(wil_status_napi_en, wil->status)) { + 
napi_synchronize(&wil->napi_rx); + napi_synchronize(&wil->napi_tx); + } + mutex_unlock(&wil->vif_mutex); + + flush_work(&wil->wmi_event_worker); + del_timer_sync(&vif->connect_timer); + cancel_work_sync(&vif->disconnect_worker); + wil_probe_client_flush(vif); + cancel_work_sync(&vif->probe_client_worker); + /* for VIFs, ndev will be freed by destructor after RTNL is unlocked. + * the main interface will be freed in wil_if_free, we need to keep it + * a bit longer so logging macros will work. + */ +} + void wil_if_remove(struct wil6210_priv *wil) { - struct net_device *ndev = wil_to_ndev(wil); - struct wireless_dev *wdev = wil_to_wdev(wil); + struct net_device *ndev = wil->main_ndev; + struct wireless_dev *wdev = ndev->ieee80211_ptr; wil_dbg_misc(wil, "if_remove\n"); - unregister_netdev(ndev); + rtnl_lock(); + wil_vif_remove(wil, 0); + rtnl_unlock(); + + netif_napi_del(&wil->napi_tx); + netif_napi_del(&wil->napi_rx); + wiphy_unregister(wdev->wiphy); } diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c index 7dbee2c3e482..db087ea58ddf 100644 --- a/drivers/net/wireless/ath/wil6210/p2p.c +++ b/drivers/net/wireless/ath/wil6210/p2p.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -22,27 +23,28 @@ #define P2P_SEARCH_DURATION_MS 500 #define P2P_DEFAULT_BI 100 -static int wil_p2p_start_listen(struct wil6210_priv *wil) +static int wil_p2p_start_listen(struct wil6210_vif *vif) { - struct wil_p2p_info *p2p = &wil->p2p; + struct wil6210_priv *wil = vif_to_wil(vif); + struct wil_p2p_info *p2p = &vif->p2p; u8 channel = p2p->listen_chan.hw_value; int rc; lockdep_assert_held(&wil->mutex); - rc = wmi_p2p_cfg(wil, channel, P2P_DEFAULT_BI); + rc = wmi_p2p_cfg(vif, channel, P2P_DEFAULT_BI); if (rc) { wil_err(wil, "wmi_p2p_cfg failed\n"); goto out; } - rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID); + rc = wmi_set_ssid(vif, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID); if (rc) { wil_err(wil, "wmi_set_ssid failed\n"); goto out_stop; } - rc = wmi_start_listen(wil); + rc = wmi_start_listen(vif); if (rc) { wil_err(wil, "wmi_start_listen failed\n"); goto out_stop; @@ -53,7 +55,7 @@ static int wil_p2p_start_listen(struct wil6210_priv *wil) jiffies + msecs_to_jiffies(p2p->listen_duration)); out_stop: if (rc) - wmi_stop_discovery(wil); + wmi_stop_discovery(vif); out: return rc; @@ -65,20 +67,12 @@ bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request) (request->channels[0]->hw_value == P2P_DMG_SOCIAL_CHANNEL); } -void wil_p2p_discovery_timer_fn(struct timer_list *t) -{ - struct wil6210_priv *wil = from_timer(wil, t, p2p.discovery_timer); - - wil_dbg_misc(wil, "p2p_discovery_timer_fn\n"); - - schedule_work(&wil->p2p.discovery_expired_work); -} - -int wil_p2p_search(struct wil6210_priv *wil, +int wil_p2p_search(struct wil6210_vif *vif, struct cfg80211_scan_request *request) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; - struct wil_p2p_info *p2p = &wil->p2p; + struct wil_p2p_info *p2p = &vif->p2p; wil_dbg_misc(wil, "p2p_search: channel %d\n", P2P_DMG_SOCIAL_CHANNEL); @@ -90,20 +84,20 @@ int wil_p2p_search(struct wil6210_priv *wil, goto out; } - rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI); + rc = wmi_p2p_cfg(vif, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI); if (rc) { wil_err(wil, 
"wmi_p2p_cfg failed\n"); goto out; } - rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID); + rc = wmi_set_ssid(vif, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID); if (rc) { wil_err(wil, "wmi_set_ssid failed\n"); goto out_stop; } /* Set application IE to probe request and probe response */ - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, + rc = wmi_set_ie(vif, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie); if (rc) { wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n"); @@ -113,14 +107,14 @@ int wil_p2p_search(struct wil6210_priv *wil, /* supplicant doesn't provide Probe Response IEs. As a workaround - * re-use Probe Request IEs */ - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, + rc = wmi_set_ie(vif, WMI_FRAME_PROBE_RESP, request->ie_len, request->ie); if (rc) { wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n"); goto out_stop; } - rc = wmi_start_search(wil); + rc = wmi_start_search(vif); if (rc) { wil_err(wil, "wmi_start_search failed\n"); goto out_stop; @@ -133,7 +127,7 @@ int wil_p2p_search(struct wil6210_priv *wil, out_stop: if (rc) - wmi_stop_discovery(wil); + wmi_stop_discovery(vif); out: return rc; @@ -143,7 +137,8 @@ int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev, unsigned int duration, struct ieee80211_channel *chan, u64 *cookie) { - struct wil_p2p_info *p2p = &wil->p2p; + struct wil6210_vif *vif = wdev_to_vif(wil, wdev); + struct wil_p2p_info *p2p = &vif->p2p; int rc; if (!chan) @@ -163,23 +158,24 @@ int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev, *cookie = ++p2p->cookie; p2p->listen_duration = duration; - mutex_lock(&wil->p2p_wdev_mutex); - if (wil->scan_request) { + mutex_lock(&wil->vif_mutex); + if (vif->scan_request) { wil_dbg_misc(wil, "Delaying p2p listen until scan done\n"); p2p->pending_listen_wdev = wdev; p2p->discovery_started = 1; rc = 0; - mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->vif_mutex); goto out; } - mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->vif_mutex); - rc = wil_p2p_start_listen(wil); + rc = wil_p2p_start_listen(vif); if (rc) goto out; p2p->discovery_started = 1; - wil->radio_wdev = wdev; + if (vif->mid == 0) + wil->radio_wdev = wdev; cfg80211_ready_on_channel(wdev, *cookie, chan, duration, GFP_KERNEL); @@ -189,9 +185,9 @@ out: return rc; } -u8 wil_p2p_stop_discovery(struct wil6210_priv *wil) +u8 wil_p2p_stop_discovery(struct wil6210_vif *vif) { - struct wil_p2p_info *p2p = &wil->p2p; + struct wil_p2p_info *p2p = &vif->p2p; u8 started = p2p->discovery_started; if (p2p->discovery_started) { @@ -200,7 +196,7 @@ u8 wil_p2p_stop_discovery(struct wil6210_priv *wil) p2p->pending_listen_wdev = NULL; } else { del_timer_sync(&p2p->discovery_timer); - wmi_stop_discovery(wil); + wmi_stop_discovery(vif); } p2p->discovery_started = 0; } @@ -208,9 +204,10 @@ u8 wil_p2p_stop_discovery(struct wil6210_priv *wil) return started; } -int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie) +int wil_p2p_cancel_listen(struct wil6210_vif *vif, u64 cookie) { - struct wil_p2p_info *p2p = &wil->p2p; + struct wil6210_priv *wil = vif_to_wil(vif); + struct wil_p2p_info *p2p = &vif->p2p; u8 started; mutex_lock(&wil->mutex); @@ -222,7 +219,7 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie) return -ENOENT; } - started = wil_p2p_stop_discovery(wil); + started = wil_p2p_stop_discovery(vif); mutex_unlock(&wil->mutex); @@ -231,13 +228,14 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie) return -ENOENT; } - mutex_lock(&wil->p2p_wdev_mutex); - 
cfg80211_remain_on_channel_expired(wil->radio_wdev, + mutex_lock(&wil->vif_mutex); + cfg80211_remain_on_channel_expired(vif_to_radio_wdev(wil, vif), p2p->cookie, &p2p->listen_chan, GFP_KERNEL); - wil->radio_wdev = wil->wdev; - mutex_unlock(&wil->p2p_wdev_mutex); + if (vif->mid == 0) + wil->radio_wdev = wil->main_ndev->ieee80211_ptr; + mutex_unlock(&wil->vif_mutex); return 0; } @@ -245,40 +243,43 @@ void wil_p2p_listen_expired(struct work_struct *work) { struct wil_p2p_info *p2p = container_of(work, struct wil_p2p_info, discovery_expired_work); - struct wil6210_priv *wil = container_of(p2p, - struct wil6210_priv, p2p); + struct wil6210_vif *vif = container_of(p2p, + struct wil6210_vif, p2p); + struct wil6210_priv *wil = vif_to_wil(vif); u8 started; wil_dbg_misc(wil, "p2p_listen_expired\n"); mutex_lock(&wil->mutex); - started = wil_p2p_stop_discovery(wil); + started = wil_p2p_stop_discovery(vif); mutex_unlock(&wil->mutex); - if (started) { - mutex_lock(&wil->p2p_wdev_mutex); - cfg80211_remain_on_channel_expired(wil->radio_wdev, - p2p->cookie, - &p2p->listen_chan, - GFP_KERNEL); - wil->radio_wdev = wil->wdev; - mutex_unlock(&wil->p2p_wdev_mutex); - } + if (!started) + return; + mutex_lock(&wil->vif_mutex); + cfg80211_remain_on_channel_expired(vif_to_radio_wdev(wil, vif), + p2p->cookie, + &p2p->listen_chan, + GFP_KERNEL); + if (vif->mid == 0) + wil->radio_wdev = wil->main_ndev->ieee80211_ptr; + mutex_unlock(&wil->vif_mutex); } void wil_p2p_search_expired(struct work_struct *work) { struct wil_p2p_info *p2p = container_of(work, struct wil_p2p_info, discovery_expired_work); - struct wil6210_priv *wil = container_of(p2p, - struct wil6210_priv, p2p); + struct wil6210_vif *vif = container_of(p2p, + struct wil6210_vif, p2p); + struct wil6210_priv *wil = vif_to_wil(vif); u8 started; wil_dbg_misc(wil, "p2p_search_expired\n"); mutex_lock(&wil->mutex); - started = wil_p2p_stop_discovery(wil); + started = wil_p2p_stop_discovery(vif); mutex_unlock(&wil->mutex); if (started) { @@ -286,13 +287,15 @@ void wil_p2p_search_expired(struct work_struct *work) .aborted = false, }; - mutex_lock(&wil->p2p_wdev_mutex); - if (wil->scan_request) { - cfg80211_scan_done(wil->scan_request, &info); - wil->scan_request = NULL; - wil->radio_wdev = wil->wdev; + mutex_lock(&wil->vif_mutex); + if (vif->scan_request) { + cfg80211_scan_done(vif->scan_request, &info); + vif->scan_request = NULL; + if (vif->mid == 0) + wil->radio_wdev = + wil->main_ndev->ieee80211_ptr; } - mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->vif_mutex); } } @@ -300,8 +303,9 @@ void wil_p2p_delayed_listen_work(struct work_struct *work) { struct wil_p2p_info *p2p = container_of(work, struct wil_p2p_info, delayed_listen_work); - struct wil6210_priv *wil = container_of(p2p, - struct wil6210_priv, p2p); + struct wil6210_vif *vif = container_of(p2p, + struct wil6210_vif, p2p); + struct wil6210_priv *wil = vif_to_wil(vif); int rc; mutex_lock(&wil->mutex); @@ -310,31 +314,33 @@ void wil_p2p_delayed_listen_work(struct work_struct *work) if (!p2p->discovery_started || !p2p->pending_listen_wdev) goto out; - mutex_lock(&wil->p2p_wdev_mutex); - if (wil->scan_request) { + mutex_lock(&wil->vif_mutex); + if (vif->scan_request) { /* another scan started, wait again... 
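vif_to_radio_wdev(), used above, and the recurring if (vif->mid == 0) guards encode one convention: only the main MAC (MID 0) ever owns the shared radio_wdev, while secondary VIFs report on their own wdev. A plausible shape for that helper, inferred from these call sites (an assumption; its actual definition is outside this excerpt):

static inline struct wireless_dev *
vif_to_radio_wdev(struct wil6210_priv *wil, struct wil6210_vif *vif)
{
	/* radio-wide notifications (remain-on-channel expiry, scan done)
	 * go to whichever wdev currently owns the radio; only MID 0
	 * tracks that in wil->radio_wdev
	 */
	return vif->mid ? vif_to_wdev(vif) : wil->radio_wdev;
}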
*/ - mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->vif_mutex); goto out; } - mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->vif_mutex); - rc = wil_p2p_start_listen(wil); + rc = wil_p2p_start_listen(vif); - mutex_lock(&wil->p2p_wdev_mutex); + mutex_lock(&wil->vif_mutex); if (rc) { cfg80211_remain_on_channel_expired(p2p->pending_listen_wdev, p2p->cookie, &p2p->listen_chan, GFP_KERNEL); - wil->radio_wdev = wil->wdev; + if (vif->mid == 0) + wil->radio_wdev = wil->main_ndev->ieee80211_ptr; } else { cfg80211_ready_on_channel(p2p->pending_listen_wdev, p2p->cookie, &p2p->listen_chan, p2p->listen_duration, GFP_KERNEL); - wil->radio_wdev = p2p->pending_listen_wdev; + if (vif->mid == 0) + wil->radio_wdev = p2p->pending_listen_wdev; } p2p->pending_listen_wdev = NULL; - mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->vif_mutex); out: mutex_unlock(&wil->mutex); @@ -342,34 +348,35 @@ out: void wil_p2p_stop_radio_operations(struct wil6210_priv *wil) { - struct wil_p2p_info *p2p = &wil->p2p; + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); + struct wil_p2p_info *p2p = &vif->p2p; struct cfg80211_scan_info info = { .aborted = true, }; lockdep_assert_held(&wil->mutex); - lockdep_assert_held(&wil->p2p_wdev_mutex); + lockdep_assert_held(&wil->vif_mutex); if (wil->radio_wdev != wil->p2p_wdev) goto out; if (!p2p->discovery_started) { /* Regular scan on the p2p device */ - if (wil->scan_request && - wil->scan_request->wdev == wil->p2p_wdev) - wil_abort_scan(wil, true); + if (vif->scan_request && + vif->scan_request->wdev == wil->p2p_wdev) + wil_abort_scan(vif, true); goto out; } /* Search or listen on p2p device */ - mutex_unlock(&wil->p2p_wdev_mutex); - wil_p2p_stop_discovery(wil); - mutex_lock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->vif_mutex); + wil_p2p_stop_discovery(vif); + mutex_lock(&wil->vif_mutex); - if (wil->scan_request) { + if (vif->scan_request) { /* search */ - cfg80211_scan_done(wil->scan_request, &info); - wil->scan_request = NULL; + cfg80211_scan_done(vif->scan_request, &info); + vif->scan_request = NULL; } else { /* listen */ cfg80211_remain_on_channel_expired(wil->radio_wdev, @@ -379,5 +386,5 @@ void wil_p2p_stop_radio_operations(struct wil6210_priv *wil) } out: - wil->radio_wdev = wil->wdev; + wil->radio_wdev = wil->main_ndev->ieee80211_ptr; } diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 809092a49192..19cbc6add637 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -137,6 +137,20 @@ void wil_enable_irq(struct wil6210_priv *wil) enable_irq(wil->pdev->irq); } +static void wil_remove_all_additional_vifs(struct wil6210_priv *wil) +{ + struct wil6210_vif *vif; + int i; + + for (i = 1; i < wil->max_vifs; i++) { + vif = wil->vifs[i]; + if (vif) { + wil_vif_prepare_stop(vif); + wil_vif_remove(wil, vif->mid); + } + } +} + /* Bus ops */ static int wil_if_pcie_enable(struct wil6210_priv *wil) { @@ -148,10 +162,8 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) */ int msi_only = pdev->msi_enabled; bool _use_msi = use_msi; - bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY, - wil->fw_capabilities); - wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only); + wil_dbg_misc(wil, "if_pcie_enable\n"); pci_set_master(pdev); @@ -172,11 +184,9 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) if (rc) goto stop_master; - /* need reset here to obtain MAC or in case of WMI-only FW, full reset - * and fw loading takes place - */ + 
/* need reset here to obtain MAC */ mutex_lock(&wil->mutex); - rc = wil_reset(wil, wmi_only); + rc = wil_reset(wil, false); mutex_unlock(&wil->mutex); if (rc) goto release_irq; @@ -356,6 +366,18 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto bus_disable; } + /* in case of WMI-only FW, perform full reset and FW loading */ + if (test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities)) { + wil_dbg_misc(wil, "Loading WMI only FW\n"); + mutex_lock(&wil->mutex); + rc = wil_reset(wil, true); + mutex_unlock(&wil->mutex); + if (rc) { + wil_err(wil, "failed to load WMI only FW\n"); + goto if_remove; + } + } + if (IS_ENABLED(CONFIG_PM)) wil->pm_notify.notifier_call = wil6210_pm_notify; @@ -372,6 +394,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; +if_remove: + wil_if_remove(wil); bus_disable: wil_if_pcie_disable(wil); err_iounmap: @@ -402,6 +426,7 @@ static void wil_pcie_remove(struct pci_dev *pdev) wil6210_debugfs_remove(wil); rtnl_lock(); wil_p2p_wdev_free(wil); + wil_remove_all_additional_vifs(wil); rtnl_unlock(); wil_if_remove(wil); wil_if_pcie_disable(wil); @@ -425,12 +450,15 @@ static int wil6210_suspend(struct device *dev, bool is_runtime) int rc = 0; struct pci_dev *pdev = to_pci_dev(dev); struct wil6210_priv *wil = pci_get_drvdata(pdev); - struct net_device *ndev = wil_to_ndev(wil); - bool keep_radio_on = ndev->flags & IFF_UP && - wil->keep_radio_on_during_sleep; + bool keep_radio_on, active_ifaces; wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system"); + mutex_lock(&wil->vif_mutex); + active_ifaces = wil_has_active_ifaces(wil, true, false); + mutex_unlock(&wil->vif_mutex); + keep_radio_on = active_ifaces && wil->keep_radio_on_during_sleep; + rc = wil_can_suspend(wil, is_runtime); if (rc) goto out; @@ -457,12 +485,15 @@ static int wil6210_resume(struct device *dev, bool is_runtime) int rc = 0; struct pci_dev *pdev = to_pci_dev(dev); struct wil6210_priv *wil = pci_get_drvdata(pdev); - struct net_device *ndev = wil_to_ndev(wil); - bool keep_radio_on = ndev->flags & IFF_UP && - wil->keep_radio_on_during_sleep; + bool keep_radio_on, active_ifaces; wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system"); + mutex_lock(&wil->vif_mutex); + active_ifaces = wil_has_active_ifaces(wil, true, false); + mutex_unlock(&wil->vif_mutex); + keep_radio_on = active_ifaces && wil->keep_radio_on_during_sleep; + /* In case radio stays on, platform device will control * PCIe master */ diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index 0a96518a566f..ba81fb3ac96f 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2014,2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -20,13 +21,72 @@ #define WIL6210_AUTOSUSPEND_DELAY_MS (1000) +static void wil_pm_wake_connected_net_queues(struct wil6210_priv *wil) +{ + int i; + + mutex_lock(&wil->vif_mutex); + for (i = 0; i < wil->max_vifs; i++) { + struct wil6210_vif *vif = wil->vifs[i]; + + if (vif && test_bit(wil_vif_fwconnected, vif->status)) + wil_update_net_queues_bh(wil, vif, NULL, false); + } + mutex_unlock(&wil->vif_mutex); +} + +static void wil_pm_stop_all_net_queues(struct wil6210_priv *wil) +{ + int i; + + mutex_lock(&wil->vif_mutex); + for (i = 0; i < wil->max_vifs; i++) { + struct wil6210_vif *vif = wil->vifs[i]; + + if (vif) + wil_update_net_queues_bh(wil, vif, NULL, true); + } + mutex_unlock(&wil->vif_mutex); +} + +static bool +wil_can_suspend_vif(struct wil6210_priv *wil, struct wil6210_vif *vif, + bool is_runtime) +{ + struct wireless_dev *wdev = vif_to_wdev(vif); + + switch (wdev->iftype) { + case NL80211_IFTYPE_MONITOR: + wil_dbg_pm(wil, "Sniffer\n"); + return false; + + /* for STA-like interface, don't runtime suspend */ + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + if (test_bit(wil_vif_fwconnecting, vif->status)) { + wil_dbg_pm(wil, "Delay suspend when connecting\n"); + return false; + } + if (is_runtime) { + wil_dbg_pm(wil, "STA-like interface\n"); + return false; + } + break; + /* AP-like interface - can't suspend */ + default: + wil_dbg_pm(wil, "AP-like interface\n"); + return false; + } + + return true; +} + int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) { - int rc = 0; - struct wireless_dev *wdev = wil->wdev; - struct net_device *ndev = wil_to_ndev(wil); + int rc = 0, i; bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities); + bool active_ifaces; wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? 
"runtime" : "system"); @@ -40,7 +100,12 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) rc = -EBUSY; goto out; } - if (!(ndev->flags & IFF_UP)) { + + mutex_lock(&wil->vif_mutex); + active_ifaces = wil_has_active_ifaces(wil, true, false); + mutex_unlock(&wil->vif_mutex); + + if (!active_ifaces) { /* can always sleep when down */ wil_dbg_pm(wil, "Interface is down\n"); goto out; @@ -57,32 +122,19 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) } /* interface is running */ - switch (wdev->iftype) { - case NL80211_IFTYPE_MONITOR: - wil_dbg_pm(wil, "Sniffer\n"); - rc = -EBUSY; - goto out; - /* for STA-like interface, don't runtime suspend */ - case NL80211_IFTYPE_STATION: - case NL80211_IFTYPE_P2P_CLIENT: - if (test_bit(wil_status_fwconnecting, wil->status)) { - wil_dbg_pm(wil, "Delay suspend when connecting\n"); - rc = -EBUSY; - goto out; - } - /* Runtime pm not supported in case the interface is up */ - if (is_runtime) { - wil_dbg_pm(wil, "STA-like interface\n"); + mutex_lock(&wil->vif_mutex); + for (i = 0; i < wil->max_vifs; i++) { + struct wil6210_vif *vif = wil->vifs[i]; + + if (!vif) + continue; + if (!wil_can_suspend_vif(wil, vif, is_runtime)) { rc = -EBUSY; + mutex_unlock(&wil->vif_mutex); goto out; } - break; - /* AP-like interface - can't suspend */ - default: - wil_dbg_pm(wil, "AP-like interface\n"); - rc = -EBUSY; - break; } + mutex_unlock(&wil->vif_mutex); out: wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n", @@ -127,8 +179,7 @@ static int wil_resume_keep_radio_on(struct wil6210_priv *wil) } /* Wake all queues */ - if (test_bit(wil_status_fwconnected, wil->status)) - wil_update_net_queues_bh(wil, NULL, false); + wil_pm_wake_connected_net_queues(wil); out: if (rc) @@ -152,7 +203,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil) wil->suspend_stats.rejected_by_host++; return -EBUSY; } - wil_update_net_queues_bh(wil, NULL, true); + wil_pm_stop_all_net_queues(wil); if (!wil_is_tx_idle(wil)) { wil_dbg_pm(wil, "Pending TX data, reject suspend\n"); @@ -243,22 +294,20 @@ resume_after_fail: /* if resume succeeded, reject the suspend */ if (!rc) { rc = -EBUSY; - if (test_bit(wil_status_fwconnected, wil->status)) - wil_update_net_queues_bh(wil, NULL, false); + wil_pm_wake_connected_net_queues(wil); } return rc; reject_suspend: clear_bit(wil_status_suspending, wil->status); - if (test_bit(wil_status_fwconnected, wil->status)) - wil_update_net_queues_bh(wil, NULL, false); + wil_pm_wake_connected_net_queues(wil); return -EBUSY; } static int wil_suspend_radio_off(struct wil6210_priv *wil) { int rc = 0; - struct net_device *ndev = wil_to_ndev(wil); + bool active_ifaces; wil_dbg_pm(wil, "suspend radio off\n"); @@ -272,7 +321,11 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil) } /* if netif up, hardware is alive, shut it down */ - if (ndev->flags & IFF_UP) { + mutex_lock(&wil->vif_mutex); + active_ifaces = wil_has_active_ifaces(wil, true, false); + mutex_unlock(&wil->vif_mutex); + + if (active_ifaces) { rc = wil_down(wil); if (rc) { wil_err(wil, "wil_down : %d\n", rc); @@ -306,16 +359,19 @@ out: static int wil_resume_radio_off(struct wil6210_priv *wil) { int rc = 0; - struct net_device *ndev = wil_to_ndev(wil); + bool active_ifaces; wil_dbg_pm(wil, "Enabling PCIe IRQ\n"); wil_enable_irq(wil); - /* if netif up, bring hardware up + /* if any netif up, bring hardware up * During open(), IFF_UP set after actual device method * invocation. 
This prevents a recursive call to wil_up() * wil_status_suspended will be cleared in wil_reset */ - if (ndev->flags & IFF_UP) + mutex_lock(&wil->vif_mutex); + active_ifaces = wil_has_active_ifaces(wil, true, false); + mutex_unlock(&wil->vif_mutex); + if (active_ifaces) rc = wil_up(wil); else clear_bit(wil_status_suspended, wil->status); diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c index 4ea27b0bd278..c49f7988369e 100644 --- a/drivers/net/wireless/ath/wil6210/pmc.c +++ b/drivers/net/wireless/ath/wil6210/pmc.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -53,6 +54,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil, u32 i; struct pmc_ctx *pmc = &wil->pmc; struct device *dev = wil_to_dev(wil); + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); struct wmi_pmc_cmd pmc_cmd = {0}; int last_cmd_err = -ENOMEM; @@ -186,6 +188,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil, wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n"); pmc->last_cmd_status = wmi_send(wil, WMI_PMC_CMDID, + vif->mid, &pmc_cmd, sizeof(pmc_cmd)); if (pmc->last_cmd_status) { @@ -236,6 +239,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) { struct pmc_ctx *pmc = &wil->pmc; struct device *dev = wil_to_dev(wil); + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); struct wmi_pmc_cmd pmc_cmd = {0}; mutex_lock(&pmc->lock); @@ -254,8 +258,8 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n"); pmc_cmd.op = WMI_PMC_RELEASE; pmc->last_cmd_status = - wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd, - sizeof(pmc_cmd)); + wmi_send(wil, WMI_PMC_CMDID, vif->mid, + &pmc_cmd, sizeof(pmc_cmd)); if (pmc->last_cmd_status) { wil_err(wil, "WMI_PMC_CMD with RELEASE op failed, status %d", diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index a43cffcf1bbf..14dcb0698dee 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -40,11 +41,10 @@ static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq) return seq_sub(seq, r->ssn) % r->buf_size; } -static void wil_release_reorder_frame(struct wil6210_priv *wil, +static void wil_release_reorder_frame(struct net_device *ndev, struct wil_tid_ampdu_rx *r, int index) { - struct net_device *ndev = wil_to_ndev(wil); struct sk_buff *skb = r->reorder_buf[index]; if (!skb) @@ -59,7 +59,7 @@ no_frame: r->head_seq_num = seq_inc(r->head_seq_num); } -static void wil_release_reorder_frames(struct wil6210_priv *wil, +static void wil_release_reorder_frames(struct net_device *ndev, struct wil_tid_ampdu_rx *r, u16 hseq) { @@ -73,18 +73,18 @@ static void wil_release_reorder_frames(struct wil6210_priv *wil, */ while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) { index = reorder_index(r, r->head_seq_num); - wil_release_reorder_frame(wil, r, index); + wil_release_reorder_frame(ndev, r, index); } r->head_seq_num = hseq; } -static void wil_reorder_release(struct wil6210_priv *wil, +static void wil_reorder_release(struct net_device *ndev, struct wil_tid_ampdu_rx *r) { int index = reorder_index(r, r->head_seq_num); while (r->reorder_buf[index]) { - wil_release_reorder_frame(wil, r, index); + wil_release_reorder_frame(ndev, r, index); index = reorder_index(r, r->head_seq_num); } } @@ -93,7 +93,8 @@ static void wil_reorder_release(struct wil6210_priv *wil, void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb) __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) { - struct net_device *ndev = wil_to_ndev(wil); + struct wil6210_vif *vif; + struct net_device *ndev; struct vring_rx_desc *d = wil_skb_rxdesc(skb); int tid = wil_rxdesc_tid(d); int cid = wil_rxdesc_cid(d); @@ -108,6 +109,14 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n", mid, cid, tid, seq, mcast); + vif = wil->vifs[mid]; + if (unlikely(!vif)) { + wil_dbg_txrx(wil, "invalid VIF, mid %d\n", mid); + dev_kfree_skb(skb); + return; + } + ndev = vif_to_ndev(vif); + if (unlikely(mcast)) { wil_netif_rx_any(skb, ndev); return; @@ -168,7 +177,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) if (!seq_less(seq, r->head_seq_num + r->buf_size)) { hseq = seq_inc(seq_sub(seq, r->buf_size)); /* release stored frames up to new head to stack */ - wil_release_reorder_frames(wil, r, hseq); + wil_release_reorder_frames(ndev, r, hseq); } /* Now the new frame is always in the range of the reordering buffer */ @@ -199,16 +208,18 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) r->reorder_buf[index] = skb; r->reorder_time[index] = jiffies; r->stored_mpdu_num++; - wil_reorder_release(wil, r); + wil_reorder_release(ndev, r); out: spin_unlock(&sta->tid_rx_lock); } /* process BAR frame, called in NAPI context */ -void wil_rx_bar(struct wil6210_priv *wil, u8 cid, u8 tid, u16 seq) +void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif, + u8 cid, u8 tid, u16 seq) { struct wil_sta_info *sta = &wil->sta[cid]; + struct net_device *ndev = vif_to_ndev(vif); struct wil_tid_ampdu_rx *r; spin_lock(&sta->tid_rx_lock); @@ -223,9 +234,9 @@ void wil_rx_bar(struct wil6210_priv *wil, u8 cid, u8 tid, u16 seq) seq, r->head_seq_num); goto out; } - wil_dbg_txrx(wil, "BAR: CID %d TID %d Seq 0x%03x head 0x%03x\n", - cid, tid, seq, r->head_seq_num); - 
wil_release_reorder_frames(wil, r, seq); + wil_dbg_txrx(wil, "BAR: CID %d MID %d TID %d Seq 0x%03x head 0x%03x\n", + cid, vif->mid, tid, seq, r->head_seq_num); + wil_release_reorder_frames(ndev, r, seq); out: spin_unlock(&sta->tid_rx_lock); @@ -292,8 +303,8 @@ static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize) } /* Block Ack - Rx side (recipient) */ -int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid, - u8 dialog_token, __le16 ba_param_set, +int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, + u8 cidxtid, u8 dialog_token, __le16 ba_param_set, __le16 ba_timeout, __le16 ba_seq_ctrl) __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) { @@ -354,7 +365,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) } } - rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status, + rc = wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token, status, agg_amsdu, agg_wsize, agg_timeout); if (rc || (status != WLAN_STATUS_SUCCESS)) { wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc, @@ -393,7 +404,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize) goto out; } txdata->addba_in_progress = true; - rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout); + rc = wmi_addba(wil, txdata->mid, ringid, agg_wsize, agg_timeout); if (rc) { wil_err(wil, "wmi_addba failed, rc (%d)", rc); txdata->addba_in_progress = false; diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 16b8a4e5201f..b60b9fcaaebd 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -474,7 +475,8 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, struct vring *vring) { struct device *dev = wil_to_dev(wil); - struct net_device *ndev = wil_to_ndev(wil); + struct wil6210_vif *vif; + struct net_device *ndev; volatile struct vring_rx_desc *_d; struct vring_rx_desc *d; struct sk_buff *skb; @@ -483,7 +485,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen; u16 dmalen; u8 ftype; - int cid; + int cid, mid; int i; struct wil_net_stats *stats; @@ -520,6 +522,16 @@ again: (const void *)d, sizeof(*d), false); cid = wil_rxdesc_cid(d); + mid = wil_rxdesc_mid(d); + vif = wil->vifs[mid]; + + if (unlikely(!vif)) { + wil_dbg_txrx(wil, "skipped RX descriptor with invalid mid %d", + mid); + kfree_skb(skb); + goto again; + } + ndev = vif_to_ndev(vif); stats = &wil->sta[cid].stats; if (unlikely(dmalen > sz)) { @@ -553,7 +565,6 @@ again: ftype = wil_rxdesc_ftype(d) << 2; if (unlikely(ftype != IEEE80211_FTYPE_DATA)) { u8 fc1 = wil_rxdesc_fc1(d); - int mid = wil_rxdesc_mid(d); int tid = wil_rxdesc_tid(d); u16 seq = wil_rxdesc_seq(d); @@ -565,7 +576,7 @@ again: wil_dbg_txrx(wil, "BAR: MID %d CID %d TID %d Seq 0x%03x\n", mid, cid, tid, seq); - wil_rx_bar(wil, cid, tid, seq); + wil_rx_bar(wil, vif, cid, tid, seq); } else { /* print again all info. 
One can enable only this * without overhead for printing every Rx frame @@ -621,10 +632,15 @@ again: /** * allocate and fill up to @count buffers in rx ring * buffers posted at @swtail + * Note: we have a single RX queue for servicing all VIFs, but we + * allocate skbs with headroom according to main interface only. This + * means it will not work with monitor interface together with other VIFs. + * Currently we only support monitor interface on its own without other VIFs, + * and we will need to fix this code once we add support. */ static int wil_rx_refill(struct wil6210_priv *wil, int count) { - struct net_device *ndev = wil_to_ndev(wil); + struct net_device *ndev = wil->main_ndev; struct vring *v = &wil->vring_rx; u32 next_tail; int rc = 0; @@ -713,8 +729,9 @@ static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb) void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) { gro_result_t rc = GRO_NORMAL; + struct wil6210_vif *vif = ndev_to_vif(ndev); struct wil6210_priv *wil = ndev_to_wil(ndev); - struct wireless_dev *wdev = wil_to_wdev(wil); + struct wireless_dev *wdev = vif_to_wdev(vif); unsigned int len = skb->len; struct vring_rx_desc *d = wil_skb_rxdesc(skb); int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */ @@ -751,14 +768,15 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) goto stats; } - if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) { + if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) { if (mcast) { /* send multicast frames both to higher layers in * local net stack and back to the wireless medium */ xmit_skb = skb_copy(skb, GFP_ATOMIC); } else { - int xmit_cid = wil_find_cid(wil, eth->h_dest); + int xmit_cid = wil_find_cid(wil, vif->mid, + eth->h_dest); if (xmit_cid >= 0) { /* The destination station is associated to @@ -786,8 +804,8 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) } if (skb) { /* deliver to local stack */ - skb->protocol = eth_type_trans(skb, ndev); + skb->dev = ndev; rc = napi_gro_receive(&wil->napi_rx, skb); wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n", len, gro_res_str[rc]); @@ -815,7 +833,8 @@ stats: */ void wil_rx_handle(struct wil6210_priv *wil, int *quota) { - struct net_device *ndev = wil_to_ndev(wil); + struct net_device *ndev = wil->main_ndev; + struct wireless_dev *wdev = ndev->ieee80211_ptr; struct vring *v = &wil->vring_rx; struct sk_buff *skb; @@ -827,7 +846,8 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota) while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) { (*quota)--; - if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { + /* monitor is currently supported on main interface only */ + if (wdev->iftype == NL80211_IFTYPE_MONITOR) { skb->dev = ndev; skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -911,12 +931,14 @@ static inline void wil_tx_data_init(struct vring_tx_data *txdata) txdata->agg_timeout = 0; txdata->agg_amsdu = 0; txdata->addba_in_progress = false; + txdata->mid = U8_MAX; spin_unlock_bh(&txdata->lock); } -int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, +int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, int cid, int tid) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; struct wmi_vring_cfg_cmd cmd = { .action = cpu_to_le32(WMI_VRING_CMD_ADD), @@ -966,9 +988,9 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); - if (!wil->privacy) + if (!vif->privacy) 
txdata->dot1x_open = true; - rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); if (rc) goto out_free; @@ -982,6 +1004,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, spin_lock_bh(&txdata->lock); vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); + txdata->mid = vif->mid; txdata->enabled = 1; spin_unlock_bh(&txdata->lock); @@ -1003,8 +1026,9 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, return rc; } -int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) +int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; struct wmi_bcast_vring_cfg_cmd cmd = { .action = cpu_to_le32(WMI_VRING_CMD_ADD), @@ -1046,9 +1070,10 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); - if (!wil->privacy) + if (!vif->privacy) txdata->dot1x_open = true; - rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid, + &cmd, sizeof(cmd), WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); if (rc) goto out_free; @@ -1062,6 +1087,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) spin_lock_bh(&txdata->lock); vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); + txdata->mid = vif->mid; txdata->enabled = 1; spin_unlock_bh(&txdata->lock); @@ -1091,6 +1117,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id) spin_lock_bh(&txdata->lock); txdata->dot1x_open = false; + txdata->mid = U8_MAX; txdata->enabled = 0; /* no Tx can be in progress or start anew */ spin_unlock_bh(&txdata->lock); /* napi_synchronize waits for completion of the current NAPI but will @@ -1108,11 +1135,12 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id) } static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, + struct wil6210_vif *vif, struct sk_buff *skb) { int i; struct ethhdr *eth = (void *)skb->data; - int cid = wil_find_cid(wil, eth->h_dest); + int cid = wil_find_cid(wil, vif->mid, eth->h_dest); if (cid < 0) return NULL; @@ -1142,10 +1170,11 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, return NULL; } -static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, - struct sk_buff *skb); +static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, + struct vring *vring, struct sk_buff *skb); static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, + struct wil6210_vif *vif, struct sk_buff *skb) { struct vring *v; @@ -1160,7 +1189,7 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { v = &wil->vring_tx[i]; txdata = &wil->vring_tx_data[i]; - if (!v->va || !txdata->enabled) + if (!v->va || !txdata->enabled || txdata->mid != vif->mid) continue; cid = wil->vring2cid_tid[i][0]; @@ -1193,11 +1222,12 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, * - for PBSS */ static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil, + struct wil6210_vif *vif, struct sk_buff *skb) { struct vring *v; struct vring_tx_data *txdata; - int i = wil->bcast_vring; + int i = vif->bcast_vring; if (i < 0) return NULL; @@ -1222,6 +1252,7 @@ static void wil_set_da_for_vring(struct wil6210_priv *wil, } static struct vring *wil_find_tx_bcast_2(struct wil6210_priv 
*wil, + struct wil6210_vif *vif, struct sk_buff *skb) { struct vring *v, *v2; @@ -1230,13 +1261,13 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil, u8 cid; struct ethhdr *eth = (void *)skb->data; char *src = eth->h_source; - struct vring_tx_data *txdata; + struct vring_tx_data *txdata, *txdata2; /* find 1-st vring eligible for data */ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { v = &wil->vring_tx[i]; txdata = &wil->vring_tx_data[i]; - if (!v->va || !txdata->enabled) + if (!v->va || !txdata->enabled || txdata->mid != vif->mid) continue; cid = wil->vring2cid_tid[i][0]; @@ -1264,7 +1295,8 @@ found: /* find other active vrings and duplicate skb for each */ for (i++; i < WIL6210_MAX_TX_RINGS; i++) { v2 = &wil->vring_tx[i]; - if (!v2->va) + txdata2 = &wil->vring_tx_data[i]; + if (!v2->va || txdata2->mid != vif->mid) continue; cid = wil->vring2cid_tid[i][0]; if (cid >= WIL6210_MAX_CID) /* skip BCAST */ @@ -1280,7 +1312,7 @@ found: if (skb2) { wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i); wil_set_da_for_vring(wil, skb2, i); - wil_tx_vring(wil, v2, skb2); + wil_tx_vring(wil, vif, v2, skb2); } else { wil_err(wil, "skb_copy failed\n"); } @@ -1417,8 +1449,8 @@ static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d) DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS; } -static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring, - struct sk_buff *skb) +static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, + struct vring *vring, struct sk_buff *skb) { struct device *dev = wil_to_dev(wil); @@ -1710,8 +1742,8 @@ err_exit: return rc; } -static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, - struct sk_buff *skb) +static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, + struct vring *vring, struct sk_buff *skb) { struct device *dev = wil_to_dev(wil); struct vring_tx_desc dd, *d = &dd; @@ -1725,7 +1757,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, uint i = swhead; dma_addr_t pa; int used; - bool mcast = (vring_index == wil->bcast_vring); + bool mcast = (vring_index == vif->bcast_vring); uint len = skb_headlen(skb); wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len, @@ -1860,8 +1892,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, return -EINVAL; } -static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, - struct sk_buff *skb) +static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, + struct vring *vring, struct sk_buff *skb) { int vring_index = vring - wil->vring_tx; struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; @@ -1879,7 +1911,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, } rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring) - (wil, vring, skb); + (wil, vif, vring, skb); spin_unlock(&txdata->lock); @@ -1888,6 +1920,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, /** * Check status of tx vrings and stop/wake net queues if needed + * It will start/stop net queues of a specific VIF net_device. * * This function does one of two checks: * In case check_stop is true, will check if net queues need to be stopped. If @@ -1903,28 +1936,32 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, * availability and modified vring has high descriptor availability.
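 *
 * A condensed sketch of the resulting per-VIF hysteresis (illustrative
 * only, not part of this change; all_other_vif_vrings_not_low is a
 * hypothetical name for the loop below over rings whose txdata->mid
 * matches vif->mid):
 *
 *	if (check_stop && (!vring || wil_vring_avail_low(vring))) {
 *		netif_tx_stop_all_queues(vif_to_ndev(vif));
 *		vif->net_queue_stopped = true;
 *	} else if (!check_stop && all_other_vif_vrings_not_low &&
 *		   (!vring || wil_vring_avail_high(vring))) {
 *		netif_tx_wake_all_queues(vif_to_ndev(vif));
 *		vif->net_queue_stopped = false;
 *	}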
*/ static inline void __wil_update_net_queues(struct wil6210_priv *wil, + struct wil6210_vif *vif, struct vring *vring, bool check_stop) { int i; + if (unlikely(!vif)) + return; + if (vring) - wil_dbg_txrx(wil, "vring %d, check_stop=%d, stopped=%d", - (int)(vring - wil->vring_tx), check_stop, - wil->net_queue_stopped); + wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d", + (int)(vring - wil->vring_tx), vif->mid, check_stop, + vif->net_queue_stopped); else - wil_dbg_txrx(wil, "check_stop=%d, stopped=%d", - check_stop, wil->net_queue_stopped); + wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d", + check_stop, vif->mid, vif->net_queue_stopped); - if (check_stop == wil->net_queue_stopped) + if (check_stop == vif->net_queue_stopped) /* net queues already in desired state */ return; if (check_stop) { if (!vring || unlikely(wil_vring_avail_low(vring))) { /* not enough room in the vring */ - netif_tx_stop_all_queues(wil_to_ndev(wil)); - wil->net_queue_stopped = true; + netif_tx_stop_all_queues(vif_to_ndev(vif)); + vif->net_queue_stopped = true; wil_dbg_txrx(wil, "netif_tx_stop called\n"); } return; @@ -1940,7 +1977,8 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil, struct vring *cur_vring = &wil->vring_tx[i]; struct vring_tx_data *txdata = &wil->vring_tx_data[i]; - if (!cur_vring->va || !txdata->enabled || cur_vring == vring) + if (txdata->mid != vif->mid || !cur_vring->va || + !txdata->enabled || cur_vring == vring) continue; if (wil_vring_avail_low(cur_vring)) { @@ -1953,30 +1991,31 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil, if (!vring || wil_vring_avail_high(vring)) { /* enough room in the vring */ wil_dbg_txrx(wil, "calling netif_tx_wake\n"); - netif_tx_wake_all_queues(wil_to_ndev(wil)); - wil->net_queue_stopped = false; + netif_tx_wake_all_queues(vif_to_ndev(vif)); + vif->net_queue_stopped = false; } } -void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring, - bool check_stop) +void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif, + struct vring *vring, bool check_stop) { spin_lock(&wil->net_queue_lock); - __wil_update_net_queues(wil, vring, check_stop); + __wil_update_net_queues(wil, vif, vring, check_stop); spin_unlock(&wil->net_queue_lock); } -void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring, - bool check_stop) +void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif, + struct vring *vring, bool check_stop) { spin_lock_bh(&wil->net_queue_lock); - __wil_update_net_queues(wil, vring, check_stop); + __wil_update_net_queues(wil, vif, vring, check_stop); spin_unlock_bh(&wil->net_queue_lock); } netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) { - struct wil6210_priv *wil = ndev_to_wil(ndev); + struct wil6210_vif *vif = ndev_to_vif(ndev); + struct wil6210_priv *wil = vif_to_wil(vif); struct ethhdr *eth = (void *)skb->data; bool bcast = is_multicast_ether_addr(eth->h_dest); struct vring *vring; @@ -1991,49 +2030,50 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) } goto drop; } - if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) { - wil_dbg_ratelimited(wil, "FW not connected, packet dropped\n"); + if (unlikely(!test_bit(wil_vif_fwconnected, vif->status))) { + wil_dbg_ratelimited(wil, + "VIF not connected, packet dropped\n"); goto drop; } - if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) { + if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_MONITOR)) { wil_err(wil, 
"Xmit in monitor mode not supported\n"); goto drop; } pr_once_fw = false; /* find vring */ - if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss) { + if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) { /* in STA mode (ESS), all to same VRING (to AP) */ - vring = wil_find_tx_vring_sta(wil, skb); + vring = wil_find_tx_vring_sta(wil, vif, skb); } else if (bcast) { - if (wil->pbss) + if (vif->pbss) /* in pbss, no bcast VRING - duplicate skb in * all stations VRINGs */ - vring = wil_find_tx_bcast_2(wil, skb); - else if (wil->wdev->iftype == NL80211_IFTYPE_AP) + vring = wil_find_tx_bcast_2(wil, vif, skb); + else if (vif->wdev.iftype == NL80211_IFTYPE_AP) /* AP has a dedicated bcast VRING */ - vring = wil_find_tx_bcast_1(wil, skb); + vring = wil_find_tx_bcast_1(wil, vif, skb); else /* unexpected combination, fallback to duplicating * the skb in all stations VRINGs */ - vring = wil_find_tx_bcast_2(wil, skb); + vring = wil_find_tx_bcast_2(wil, vif, skb); } else { /* unicast, find specific VRING by dest. address */ - vring = wil_find_tx_ucast(wil, skb); + vring = wil_find_tx_ucast(wil, vif, skb); } if (unlikely(!vring)) { wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); goto drop; } /* set up vring entry */ - rc = wil_tx_vring(wil, vring, skb); + rc = wil_tx_vring(wil, vif, vring, skb); switch (rc) { case 0: /* shall we stop net queues? */ - wil_update_net_queues_bh(wil, vring, true); + wil_update_net_queues_bh(wil, vif, vring, true); /* statistics will be updated on the tx_complete */ dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -2072,9 +2112,10 @@ static inline void wil_consume_skb(struct sk_buff *skb, bool acked) * * Safe to call from IRQ */ -int wil_tx_complete(struct wil6210_priv *wil, int ringid) +int wil_tx_complete(struct wil6210_vif *vif, int ringid) { - struct net_device *ndev = wil_to_ndev(wil); + struct wil6210_priv *wil = vif_to_wil(vif); + struct net_device *ndev = vif_to_ndev(vif); struct device *dev = wil_to_dev(wil); struct vring *vring = &wil->vring_tx[ringid]; struct vring_tx_data *txdata = &wil->vring_tx_data[ringid]; @@ -2184,7 +2225,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) /* shall we wake net queues? */ if (done) - wil_update_net_queues(wil, vring, false); + wil_update_net_queues(wil, vif, vring, false); return done; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index fcdffaa8251b..5f07717acc2c 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -63,7 +64,9 @@ static inline void wil_desc_addr_set(struct vring_dma_addr *addr, * [dword 1] * bit 0.. 
3 : pkt_mode:4 * bit 4 : pkt_mode_en:1 - * bit 5..14 : reserved0:10 + * bit 5 : mac_id_en:1 + * bit 6..7 : mac_id:2 + * bit 8..14 : reserved0:7 * bit 15 : ack_policy_en:1 * bit 16..19 : dst_index:4 * bit 20 : dst_index_en:1 @@ -132,6 +135,14 @@ struct vring_tx_mac { #define MAC_CFG_DESC_TX_1_PKT_MODE_EN_LEN 1 #define MAC_CFG_DESC_TX_1_PKT_MODE_EN_MSK 0x10 +#define MAC_CFG_DESC_TX_1_MAC_ID_EN_POS 5 +#define MAC_CFG_DESC_TX_1_MAC_ID_EN_LEN 1 +#define MAC_CFG_DESC_TX_1_MAC_ID_EN_MSK 0x20 + +#define MAC_CFG_DESC_TX_1_MAC_ID_POS 6 +#define MAC_CFG_DESC_TX_1_MAC_ID_LEN 2 +#define MAC_CFG_DESC_TX_1_MAC_ID_MSK 0xc0 + #define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_POS 15 #define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_LEN 1 #define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_MSK 0x8000 @@ -304,7 +315,7 @@ enum { * bit 0.. 3 : tid:4 The QoS (b3-0) TID Field * bit 4.. 6 : cid:3 The Source index that was found during parsing the TA. * This field is used to define the source of the packet - * bit 7 : reserved:1 + * bit 7 : MAC_id_valid:1, 1 if MAC virtual number is valid. * bit 8.. 9 : mid:2 The MAC virtual number * bit 10..11 : frame_type:2 : The FC (b3-2) - MPDU Type * (management, data, control and extension) @@ -395,6 +406,7 @@ struct vring_rx_mac { #define RX_DMA_D0_CMD_DMA_EOP BIT(8) #define RX_DMA_D0_CMD_DMA_RT BIT(9) /* always 1 */ #define RX_DMA_D0_CMD_DMA_IT BIT(10) /* interrupt */ +#define RX_MAC_D0_MAC_ID_VALID BIT(7) /* Error field */ #define RX_DMA_ERROR_FCS BIT(0) @@ -451,7 +463,8 @@ static inline int wil_rxdesc_cid(struct vring_rx_desc *d) static inline int wil_rxdesc_mid(struct vring_rx_desc *d) { - return WIL_GET_BITS(d->mac.d0, 8, 9); + return (d->mac.d0 & RX_MAC_D0_MAC_ID_VALID) ? + WIL_GET_BITS(d->mac.d0, 8, 9) : 0; } static inline int wil_rxdesc_ftype(struct vring_rx_desc *d) @@ -517,7 +530,8 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb) void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev); void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb); -void wil_rx_bar(struct wil6210_priv *wil, u8 cid, u8 tid, u16 seq); +void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif, + u8 cid, u8 tid, u16 seq); struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, int size, u16 ssn); void wil_tid_ampdu_rx_free(struct wil6210_priv *wil, diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 0df2aada6659..f9c5155025bc 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -26,6 +26,7 @@ #include <linux/types.h> #include "wmi.h" #include "wil_platform.h" +#include "fw.h" extern bool no_fw_recovery; extern unsigned int mtu_max; @@ -49,6 +50,11 @@ extern bool disable_ap_sme; #define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */ #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */ +/* maximum number of virtual interfaces the driver supports + * (including the main interface) + */ +#define WIL_MAX_VIFS 4 + /** * extract bits [@b0:@b1] (inclusive) from the value @x * it should be @b0 <= @b1, or result is incorrect @@ -463,13 +469,12 @@ struct vring_tx_data { u16 agg_timeout; u8 agg_amsdu; bool addba_in_progress; /* if set, agg_xxx is for request in progress */ + u8 mid; spinlock_t lock; }; enum { /* for wil6210_priv.status */ wil_status_fwready = 0, /* FW operational */ - wil_status_fwconnecting, - wil_status_fwconnected, wil_status_dontscan, wil_status_mbox_ready, /* MBOX structures ready */ wil_status_irqen, /* interrupts enabled - 
for debug */ @@ -541,7 +546,6 @@ struct wil_tid_crypto_rx { struct wil_p2p_info { struct ieee80211_channel listen_chan; u8 discovery_started; - u8 p2p_dev_started; u64 cookie; struct wireless_dev *pending_listen_wdev; unsigned int listen_duration; @@ -584,6 +588,7 @@ struct wil_net_stats { */ struct wil_sta_info { u8 addr[ETH_ALEN]; + u8 mid; enum wil_sta_status status; struct wil_net_stats stats; /* Rx BACK */ @@ -669,10 +674,44 @@ extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST]; extern u8 led_id; extern u8 led_polarity; +enum wil6210_vif_status { + wil_vif_fwconnecting, + wil_vif_fwconnected, + wil_vif_status_last /* keep last */ +}; + +struct wil6210_vif { + struct wireless_dev wdev; + struct net_device *ndev; + struct wil6210_priv *wil; + u8 mid; + DECLARE_BITMAP(status, wil_vif_status_last); + u32 privacy; /* secure connection? */ + u16 channel; /* relevant in AP mode */ + u8 hidden_ssid; /* relevant in AP mode */ + u32 ap_isolate; /* no intra-BSS communication */ + bool pbss; + int bcast_vring; + struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */ + int locally_generated_disc; /* relevant in STA mode */ + struct timer_list connect_timer; + struct work_struct disconnect_worker; + /* scan */ + struct cfg80211_scan_request *scan_request; + struct timer_list scan_timer; /* detect scan timeout */ + struct wil_p2p_info p2p; + /* keep alive */ + struct list_head probe_client_pending; + struct mutex probe_client_mutex; /* protect @probe_client_pending */ + struct work_struct probe_client_worker; + int net_queue_stopped; /* netif_tx_stop_all_queues invoked */ +}; + struct wil6210_priv { struct pci_dev *pdev; u32 bar_size; - struct wireless_dev *wdev; + struct wiphy *wiphy; + struct net_device *main_ndev; void __iomem *csr; DECLARE_BITMAP(status, wil_status_last); u8 fw_version[ETHTOOL_FWVERS_LEN]; @@ -686,21 +725,18 @@ struct wil6210_priv { DECLARE_BITMAP(hw_capa, hw_capa_last); DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX); DECLARE_BITMAP(platform_capa, WIL_PLATFORM_CAPA_MAX); - u8 n_mids; /* number of additional MIDs as reported by FW */ u32 recovery_count; /* num of FW recovery attempts in a short time */ u32 recovery_state; /* FW recovery state machine */ unsigned long last_fw_recovery; /* jiffies of last fw recovery */ wait_queue_head_t wq; /* for all wait_event() use */ + u8 max_vifs; /* maximum number of interfaces, including main */ + struct wil6210_vif *vifs[WIL_MAX_VIFS]; + struct mutex vif_mutex; /* protects access to VIF entries */ + atomic_t connected_vifs; /* profile */ struct cfg80211_chan_def monitor_chandef; u32 monitor_flags; - u32 privacy; /* secure connection? 
*/ - u8 hidden_ssid; /* relevant in AP mode */ - u16 channel; /* relevant in AP mode */ int sinfo_gen; - u32 ap_isolate; /* no intra-BSS communication */ - struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */ - int locally_generated_disc; /* relevant in STA mode */ /* interrupt moderation */ u32 tx_max_burst_duration; u32 tx_interframe_timeout; @@ -715,15 +751,13 @@ struct wil6210_priv { struct completion wmi_call; u16 wmi_seq; u16 reply_id; /**< wait for this WMI event */ + u8 reply_mid; void *reply_buf; u16 reply_size; struct workqueue_struct *wmi_wq; /* for deferred calls */ struct work_struct wmi_event_worker; struct workqueue_struct *wq_service; - struct work_struct disconnect_worker; struct work_struct fw_error_worker; /* for FW error recovery */ - struct timer_list connect_timer; - struct timer_list scan_timer; /* detect scan timeout */ struct list_head pending_wmi_ev; /* * protect pending_wmi_ev @@ -732,13 +766,10 @@ struct wil6210_priv { */ spinlock_t wmi_ev_lock; spinlock_t net_queue_lock; /* guarding stop/wake netif queue */ - int net_queue_stopped; /* netif_tx_stop_all_queues invoked */ struct napi_struct napi_rx; struct napi_struct napi_tx; - /* keep alive */ - struct list_head probe_client_pending; - struct mutex probe_client_mutex; /* protect @probe_client_pending */ - struct work_struct probe_client_worker; + struct net_device napi_ndev; /* dummy net_device serving all VIFs */ + /* DMA related */ struct vring vring_rx; unsigned int rx_buf_len; @@ -746,11 +777,8 @@ struct wil6210_priv { struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS]; u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */ struct wil_sta_info sta[WIL6210_MAX_CID]; - int bcast_vring; u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */ u32 dma_addr_size; /* indicates dma addr size */ - /* scan */ - struct cfg80211_scan_request *scan_request; struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */ /* statistics */ @@ -770,13 +798,10 @@ struct wil6210_priv { struct pmc_ctx pmc; - bool pbss; - - struct wil_p2p_info p2p; + u8 p2p_dev_started; /* P2P_DEVICE vif */ struct wireless_dev *p2p_wdev; - struct mutex p2p_wdev_mutex; /* protect @p2p_wdev and @scan_request */ struct wireless_dev *radio_wdev; /* High Access Latency Policy voting */ @@ -798,13 +823,35 @@ struct wil6210_priv { u32 iccm_base; }; -#define wil_to_wiphy(i) (i->wdev->wiphy) +#define wil_to_wiphy(i) (i->wiphy) #define wil_to_dev(i) (wiphy_dev(wil_to_wiphy(i))) #define wiphy_to_wil(w) (struct wil6210_priv *)(wiphy_priv(w)) -#define wil_to_wdev(i) (i->wdev) #define wdev_to_wil(w) (struct wil6210_priv *)(wdev_priv(w)) -#define wil_to_ndev(i) (wil_to_wdev(i)->netdev) #define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr)) +#define ndev_to_vif(n) (struct wil6210_vif *)(netdev_priv(n)) +#define vif_to_wil(v) (v->wil) +#define vif_to_ndev(v) (v->ndev) +#define vif_to_wdev(v) (&v->wdev) + +static inline struct wil6210_vif *wdev_to_vif(struct wil6210_priv *wil, + struct wireless_dev *wdev) +{ + /* main interface is shared with P2P device */ + if (wdev == wil->p2p_wdev) + return ndev_to_vif(wil->main_ndev); + else + return container_of(wdev, struct wil6210_vif, wdev); +} + +static inline struct wireless_dev * +vif_to_radio_wdev(struct wil6210_priv *wil, struct wil6210_vif *vif) +{ + /* main interface is shared with P2P device */ + if (vif->mid) + return vif_to_wdev(vif); + else + return wil->radio_wdev; +} __printf(2, 3) void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...); @@ -817,7 
+864,7 @@ void __wil_info(struct wil6210_priv *wil, const char *fmt, ...); __printf(2, 3) void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...); #define wil_dbg(wil, fmt, arg...) do { \ - netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \ + netdev_dbg(wil->main_ndev, fmt, ##arg); \ wil_dbg_trace(wil, fmt, ##arg); \ } while (0) @@ -900,9 +947,18 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src, void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src, size_t count); +struct wil6210_vif * +wil_vif_alloc(struct wil6210_priv *wil, const char *name, + unsigned char name_assign_type, enum nl80211_iftype iftype); +void wil_vif_free(struct wil6210_vif *vif); void *wil_if_alloc(struct device *dev); +bool wil_has_other_active_ifaces(struct wil6210_priv *wil, + struct net_device *ndev, bool up, bool ok); +bool wil_has_active_ifaces(struct wil6210_priv *wil, bool up, bool ok); void wil_if_free(struct wil6210_priv *wil); +int wil_vif_add(struct wil6210_priv *wil, struct wil6210_vif *vif); int wil_if_add(struct wil6210_priv *wil); +void wil_vif_remove(struct wil6210_priv *wil, u8 mid); void wil_if_remove(struct wil6210_priv *wil); int wil_priv_init(struct wil6210_priv *wil); void wil_priv_deinit(struct wil6210_priv *wil); @@ -918,7 +974,7 @@ int wil_down(struct wil6210_priv *wil); int __wil_down(struct wil6210_priv *wil); void wil_refresh_fw_capabilities(struct wil6210_priv *wil); void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r); -int wil_find_cid(struct wil6210_priv *wil, const u8 *mac); +int wil_find_cid(struct wil6210_priv *wil, u8 mid, const u8 *mac); void wil_set_ethtoolops(struct net_device *ndev); struct fw_map *wil_find_fw_mapping(const char *section); @@ -927,40 +983,45 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr); void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr); int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr, struct wil6210_mbox_hdr *hdr); -int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len); +int wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len); void wmi_recv_cmd(struct wil6210_priv *wil); -int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, +int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len, u16 reply_id, void *reply, u8 reply_size, int to_msec); void wmi_event_worker(struct work_struct *work); void wmi_event_flush(struct wil6210_priv *wil); -int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid); -int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid); +int wmi_set_ssid(struct wil6210_vif *vif, u8 ssid_len, const void *ssid); +int wmi_get_ssid(struct wil6210_vif *vif, u8 *ssid_len, void *ssid); int wmi_set_channel(struct wil6210_priv *wil, int channel); int wmi_get_channel(struct wil6210_priv *wil, int *channel); -int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, +int wmi_del_cipher_key(struct wil6210_vif *vif, u8 key_index, const void *mac_addr, int key_usage); -int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, +int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index, const void *mac_addr, int key_len, const void *key, int key_usage); int wmi_echo(struct wil6210_priv *wil); -int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie); +int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie); int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); int wmi_rxon(struct wil6210_priv *wil, bool 
on); int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); -int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, +int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason, bool full_disconnect, bool del_sta); -int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout); -int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason); -int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason); -int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token, +int wmi_addba(struct wil6210_priv *wil, u8 mid, + u8 ringid, u8 size, u16 timeout); +int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason); +int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason); +int wmi_addba_rx_resp(struct wil6210_priv *wil, + u8 mid, u8 cid, u8 tid, u8 token, u16 status, bool amsdu, u16 agg_wsize, u16 timeout); int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile); int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short); int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short); -int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid); -int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid, - u8 dialog_token, __le16 ba_param_set, +int wmi_new_sta(struct wil6210_vif *vif, const u8 *mac, u8 aid); +int wmi_port_allocate(struct wil6210_priv *wil, u8 mid, + const u8 *mac, enum nl80211_iftype iftype); +int wmi_port_delete(struct wil6210_priv *wil, u8 mid); +int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, + u8 cidxtid, u8 dialog_token, __le16 ba_param_set, __le16 ba_timeout, __le16 ba_seq_ctrl); int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize); @@ -976,28 +1037,31 @@ void wil6210_mask_halp(struct wil6210_priv *wil); /* P2P */ bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request); -void wil_p2p_discovery_timer_fn(struct timer_list *t); -int wil_p2p_search(struct wil6210_priv *wil, +int wil_p2p_search(struct wil6210_vif *vif, struct cfg80211_scan_request *request); int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev, unsigned int duration, struct ieee80211_channel *chan, u64 *cookie); -u8 wil_p2p_stop_discovery(struct wil6210_priv *wil); -int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie); +u8 wil_p2p_stop_discovery(struct wil6210_vif *vif); +int wil_p2p_cancel_listen(struct wil6210_vif *vif, u64 cookie); void wil_p2p_listen_expired(struct work_struct *work); void wil_p2p_search_expired(struct work_struct *work); void wil_p2p_stop_radio_operations(struct wil6210_priv *wil); void wil_p2p_delayed_listen_work(struct work_struct *work); /* WMI for P2P */ -int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi); -int wmi_start_listen(struct wil6210_priv *wil); -int wmi_start_search(struct wil6210_priv *wil); -int wmi_stop_discovery(struct wil6210_priv *wil); +int wmi_p2p_cfg(struct wil6210_vif *vif, int channel, int bi); +int wmi_start_listen(struct wil6210_vif *vif); +int wmi_start_search(struct wil6210_vif *vif); +int wmi_stop_discovery(struct wil6210_vif *vif); int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie); +int wil_cfg80211_iface_combinations_from_fw( + struct wil6210_priv *wil, + const struct wil_fw_record_concurrency *conc); +int wil_vif_prepare_stop(struct wil6210_vif *vif); #if defined(CONFIG_WIL6210_DEBUGFS) int wil6210_debugfs_init(struct wil6210_priv *wil); @@ 
-1007,44 +1071,47 @@ static inline int wil6210_debugfs_init(struct wil6210_priv *wil) { return 0; } static inline void wil6210_debugfs_remove(struct wil6210_priv *wil) {} #endif -int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, +int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, struct station_info *sinfo); -struct wireless_dev *wil_cfg80211_init(struct device *dev); -void wil_wdev_free(struct wil6210_priv *wil); +struct wil6210_priv *wil_cfg80211_init(struct device *dev); +void wil_cfg80211_deinit(struct wil6210_priv *wil); void wil_p2p_wdev_free(struct wil6210_priv *wil); int wmi_set_mac_address(struct wil6210_priv *wil, void *addr); -int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, - u8 chan, u8 hidden_ssid, u8 is_go); -int wmi_pcp_stop(struct wil6210_priv *wil); +int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype, u8 chan, + u8 hidden_ssid, u8 is_go); +int wmi_pcp_stop(struct wil6210_vif *vif); int wmi_led_cfg(struct wil6210_priv *wil, bool enable); -int wmi_abort_scan(struct wil6210_priv *wil); -void wil_abort_scan(struct wil6210_priv *wil, bool sync); +int wmi_abort_scan(struct wil6210_vif *vif); +void wil_abort_scan(struct wil6210_vif *vif, bool sync); +void wil_abort_scan_all_vifs(struct wil6210_priv *wil, bool sync); void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps); -void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, +void wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid, u16 reason_code, bool from_event); -void wil_probe_client_flush(struct wil6210_priv *wil); +void wil_probe_client_flush(struct wil6210_vif *vif); void wil_probe_client_worker(struct work_struct *work); +void wil_disconnect_worker(struct work_struct *work); int wil_rx_init(struct wil6210_priv *wil, u16 size); void wil_rx_fini(struct wil6210_priv *wil); /* TX API */ -int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, +int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, int cid, int tid); void wil_vring_fini_tx(struct wil6210_priv *wil, int id); -int wil_tx_init(struct wil6210_priv *wil, int cid); -int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size); -int wil_bcast_init(struct wil6210_priv *wil); -void wil_bcast_fini(struct wil6210_priv *wil); - -void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring, - bool should_stop); -void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring, - bool check_stop); +int wil_tx_init(struct wil6210_vif *vif, int cid); +int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size); +int wil_bcast_init(struct wil6210_vif *vif); +void wil_bcast_fini(struct wil6210_vif *vif); +void wil_bcast_fini_all(struct wil6210_priv *wil); + +void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif, + struct vring *vring, bool should_stop); +void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif, + struct vring *vring, bool check_stop); netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev); -int wil_tx_complete(struct wil6210_priv *wil, int ringid); +int wil_tx_complete(struct wil6210_vif *vif, int ringid); void wil6210_unmask_irq_tx(struct wil6210_priv *wil); /* RX API */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index b31e2514f8c2..a3dda9a97c1f 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -341,6 +341,10 @@ static const char *cmdid2name(u16 cmdid) return 
"WMI_GET_PCP_CHANNEL_CMD"; case WMI_P2P_CFG_CMDID: return "WMI_P2P_CFG_CMD"; + case WMI_PORT_ALLOCATE_CMDID: + return "WMI_PORT_ALLOCATE_CMD"; + case WMI_PORT_DELETE_CMDID: + return "WMI_PORT_DELETE_CMD"; case WMI_START_LISTEN_CMDID: return "WMI_START_LISTEN_CMD"; case WMI_START_SEARCH_CMDID: @@ -479,6 +483,10 @@ static const char *eventid2name(u16 eventid) return "WMI_GET_PCP_CHANNEL_EVENT"; case WMI_P2P_CFG_DONE_EVENTID: return "WMI_P2P_CFG_DONE_EVENT"; + case WMI_PORT_ALLOCATED_EVENTID: + return "WMI_PORT_ALLOCATED_EVENT"; + case WMI_PORT_DELETED_EVENTID: + return "WMI_PORT_DELETED_EVENT"; case WMI_LISTEN_STARTED_EVENTID: return "WMI_LISTEN_STARTED_EVENT"; case WMI_SEARCH_STARTED_EVENTID: @@ -516,7 +524,8 @@ static const char *eventid2name(u16 eventid) } } -static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) +static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, + void *buf, u16 len) { struct { struct wil6210_mbox_hdr hdr; @@ -528,7 +537,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) .len = cpu_to_le16(sizeof(cmd.wmi) + len), }, .wmi = { - .mid = 0, + .mid = mid, .command_id = cpu_to_le16(cmdid), }, }; @@ -612,8 +621,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) } cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq); /* set command */ - wil_dbg_wmi(wil, "sending %s (0x%04x) [%d]\n", - cmdid2name(cmdid), cmdid, len); + wil_dbg_wmi(wil, "sending %s (0x%04x) [%d] mid %d\n", + cmdid2name(cmdid), cmdid, len, mid); wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd, sizeof(cmd), true); wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf, @@ -637,31 +646,34 @@ out: return rc; } -int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) +int wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len) { int rc; mutex_lock(&wil->wmi_mutex); - rc = __wmi_send(wil, cmdid, buf, len); + rc = __wmi_send(wil, cmdid, mid, buf, len); mutex_unlock(&wil->wmi_mutex); return rc; } /*=== Event handlers ===*/ -static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) +static void wmi_evt_ready(struct wil6210_vif *vif, int id, void *d, int len) { - struct wireless_dev *wdev = wil->wdev; + struct wil6210_priv *wil = vif_to_wil(vif); + struct wiphy *wiphy = wil_to_wiphy(wil); struct wmi_ready_event *evt = d; - wil->n_mids = evt->numof_additional_mids; - wil_info(wil, "FW ver. 
%s(SW %d); MAC %pM; %d MID's\n", wil->fw_version, le32_to_cpu(evt->sw_version), - evt->mac, wil->n_mids); + evt->mac, evt->numof_additional_mids); + if (evt->numof_additional_mids + 1 < wil->max_vifs) { + wil_err(wil, "FW does not support enough MIDs (need %d)", + wil->max_vifs - 1); + return; /* FW load will fail after timeout */ + } /* ignore MAC address, we already have it from the boot loader */ - strlcpy(wdev->wiphy->fw_version, wil->fw_version, - sizeof(wdev->wiphy->fw_version)); + strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version)); if (len > offsetof(struct wmi_ready_event, rfc_read_calib_result)) { wil_dbg_wmi(wil, "rfc calibration result %d\n", @@ -674,8 +686,9 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) complete(&wil->wmi_ready); } -static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) +static void wmi_evt_rx_mgmt(struct wil6210_vif *vif, int id, void *d, int len) { + struct wil6210_priv *wil = vif_to_wil(vif); struct wmi_rx_mgmt_packet_event *data = d; struct wiphy *wiphy = wil_to_wiphy(wil); struct ieee80211_mgmt *rx_mgmt_frame = @@ -753,14 +766,14 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) wil_err(wil, "cfg80211_inform_bss_frame() failed\n"); } } else { - mutex_lock(&wil->p2p_wdev_mutex); - cfg80211_rx_mgmt(wil->radio_wdev, freq, signal, + mutex_lock(&wil->vif_mutex); + cfg80211_rx_mgmt(vif_to_radio_wdev(wil, vif), freq, signal, (void *)rx_mgmt_frame, d_len, 0); - mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->vif_mutex); } } -static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) +static void wmi_evt_tx_mgmt(struct wil6210_vif *vif, int id, void *d, int len) { struct wmi_tx_mgmt_packet_event *data = d; struct ieee80211_mgmt *mgmt_frame = @@ -771,11 +784,13 @@ static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) flen, true); } -static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id, +static void wmi_evt_scan_complete(struct wil6210_vif *vif, int id, void *d, int len) { - mutex_lock(&wil->p2p_wdev_mutex); - if (wil->scan_request) { + struct wil6210_priv *wil = vif_to_wil(vif); + + mutex_lock(&wil->vif_mutex); + if (vif->scan_request) { struct wmi_scan_complete_event *data = d; int status = le32_to_cpu(data->status); struct cfg80211_scan_info info = { @@ -785,26 +800,28 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id, wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", status); wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n", - wil->scan_request, info.aborted); - del_timer_sync(&wil->scan_timer); - cfg80211_scan_done(wil->scan_request, &info); - wil->radio_wdev = wil->wdev; - wil->scan_request = NULL; + vif->scan_request, info.aborted); + del_timer_sync(&vif->scan_timer); + cfg80211_scan_done(vif->scan_request, &info); + if (vif->mid == 0) + wil->radio_wdev = wil->main_ndev->ieee80211_ptr; + vif->scan_request = NULL; wake_up_interruptible(&wil->wq); - if (wil->p2p.pending_listen_wdev) { + if (vif->p2p.pending_listen_wdev) { wil_dbg_misc(wil, "Scheduling delayed listen\n"); - schedule_work(&wil->p2p.delayed_listen_work); + schedule_work(&vif->p2p.delayed_listen_work); } } else { wil_err(wil, "SCAN_COMPLETE while not scanning\n"); } - mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->vif_mutex); } -static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) +static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len) 
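/* Illustrative note, not part of this patch: with the per-VIF status
 * bitmap replacing the device-wide wil_status_fwconnecting/fwconnected
 * bits, "connected" bookkeeping in this handler becomes
 *
 *	if (!test_and_set_bit(wil_vif_fwconnected, vif->status))
 *		atomic_inc(&wil->connected_vifs);
 *
 * so wil->connected_vifs counts connected interfaces rather than
 * stations, and is presumably decremented on the matching disconnect
 * path (not shown in this hunk).
 */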
{ - struct net_device *ndev = wil_to_ndev(wil); - struct wireless_dev *wdev = wil->wdev; + struct wil6210_priv *wil = vif_to_wil(vif); + struct net_device *ndev = vif_to_ndev(vif); + struct wireless_dev *wdev = vif_to_wdev(vif); struct wmi_connect_event *evt = d; int ch; /* channel number */ struct station_info sinfo; @@ -869,12 +886,12 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) if ((wdev->iftype == NL80211_IFTYPE_STATION) || (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { - if (!test_bit(wil_status_fwconnecting, wil->status)) { + if (!test_bit(wil_vif_fwconnecting, vif->status)) { wil_err(wil, "Not in connecting state\n"); mutex_unlock(&wil->mutex); return; } - del_timer_sync(&wil->connect_timer); + del_timer_sync(&vif->connect_timer); } else if ((wdev->iftype == NL80211_IFTYPE_AP) || (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { if (wil->sta[evt->cid].status != wil_sta_unused) { @@ -886,13 +903,14 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid); + wil->sta[evt->cid].mid = vif->mid; wil->sta[evt->cid].status = wil_sta_conn_pending; - rc = wil_tx_init(wil, evt->cid); + rc = wil_tx_init(vif, evt->cid); if (rc) { wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n", evt->cid, rc); - wmi_disconnect_sta(wil, wil->sta[evt->cid].addr, + wmi_disconnect_sta(vif, wil->sta[evt->cid].addr, WLAN_REASON_UNSPECIFIED, false, false); } else { wil_info(wil, "successful connection to CID %d\n", evt->cid); @@ -912,14 +930,14 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } else { struct wiphy *wiphy = wil_to_wiphy(wil); - cfg80211_ref_bss(wiphy, wil->bss); - cfg80211_connect_bss(ndev, evt->bssid, wil->bss, + cfg80211_ref_bss(wiphy, vif->bss); + cfg80211_connect_bss(ndev, evt->bssid, vif->bss, assoc_req_ie, assoc_req_ielen, assoc_resp_ie, assoc_resp_ielen, WLAN_STATUS_SUCCESS, GFP_KERNEL, NL80211_TIMEOUT_UNSPECIFIED); } - wil->bss = NULL; + vif->bss = NULL; } else if ((wdev->iftype == NL80211_IFTYPE_AP) || (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { if (rc) { @@ -947,19 +965,23 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) wil->sta[evt->cid].status = wil_sta_connected; wil->sta[evt->cid].aid = evt->aid; - set_bit(wil_status_fwconnected, wil->status); - wil_update_net_queues_bh(wil, NULL, false); + if (!test_and_set_bit(wil_vif_fwconnected, vif->status)) + atomic_inc(&wil->connected_vifs); + wil_update_net_queues_bh(wil, vif, NULL, false); out: - if (rc) + if (rc) { wil->sta[evt->cid].status = wil_sta_unused; - clear_bit(wil_status_fwconnecting, wil->status); + wil->sta[evt->cid].mid = U8_MAX; + } + clear_bit(wil_vif_fwconnecting, vif->status); mutex_unlock(&wil->mutex); } -static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, +static void wmi_evt_disconnect(struct wil6210_vif *vif, int id, void *d, int len) { + struct wil6210_priv *wil = vif_to_wil(vif); struct wmi_disconnect_event *evt = d; u16 reason_code = le16_to_cpu(evt->protocol_reason_status); @@ -976,7 +998,7 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, } mutex_lock(&wil->mutex); - wil6210_disconnect(wil, evt->bssid, reason_code, true); + wil6210_disconnect(vif, evt->bssid, reason_code, true); mutex_unlock(&wil->mutex); } @@ -984,10 +1006,10 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, * Firmware reports EAPOL frame using WME event. 
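 *
 * A sketch of the rebuilt frame, assuming the usual EAPOL-over-Ethernet
 * layout (the exact field copies are elided from this hunk; the sz
 * computed below is eapol_len + ETH_HLEN):
 *
 *	ether_addr_copy(eth->h_dest, ndev->dev_addr);
 *	ether_addr_copy(eth->h_source, evt->src_mac);
 *	eth->h_proto = htons(ETH_P_PAE);
 *	memcpy(eth + 1, evt->eapol, eapol_len);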
* Reconstruct Ethernet frame and deliver it via normal Rx */ -static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id, - void *d, int len) +static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len) { - struct net_device *ndev = wil_to_ndev(wil); + struct wil6210_priv *wil = vif_to_wil(vif); + struct net_device *ndev = vif_to_ndev(vif); struct wmi_eapol_rx_event *evt = d; u16 eapol_len = le16_to_cpu(evt->eapol_len); int sz = eapol_len + ETH_HLEN; @@ -996,10 +1018,10 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id, int cid; struct wil_net_stats *stats = NULL; - wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len, - evt->src_mac); + wil_dbg_wmi(wil, "EAPOL len %d from %pM MID %d\n", eapol_len, + evt->src_mac, vif->mid); - cid = wil_find_cid(wil, evt->src_mac); + cid = wil_find_cid(wil, vif->mid, evt->src_mac); if (cid >= 0) stats = &wil->sta[cid].stats; @@ -1034,13 +1056,14 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id, } } -static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len) +static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len) { + struct wil6210_priv *wil = vif_to_wil(vif); struct wmi_vring_en_event *evt = d; u8 vri = evt->vring_index; - struct wireless_dev *wdev = wil_to_wdev(wil); + struct wireless_dev *wdev = vif_to_wdev(vif); - wil_dbg_wmi(wil, "Enable vring %d\n", vri); + wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid); if (vri >= ARRAY_SIZE(wil->vring_tx)) { wil_err(wil, "Enable for invalid vring %d\n", vri); @@ -1052,15 +1075,16 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len) * wil_cfg80211_change_station() */ wil->vring_tx_data[vri].dot1x_open = true; - if (vri == wil->bcast_vring) /* no BA for bcast */ + if (vri == vif->bcast_vring) /* no BA for bcast */ return; if (agg_wsize >= 0) wil_addba_tx_request(wil, vri, agg_wsize); } -static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d, - int len) +static void wmi_evt_ba_status(struct wil6210_vif *vif, int id, + void *d, int len) { + struct wil6210_priv *wil = vif_to_wil(vif); struct wmi_ba_status_event *evt = d; struct vring_tx_data *txdata; @@ -1089,19 +1113,21 @@ static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d, txdata->addba_in_progress = false; } -static void wmi_evt_addba_rx_req(struct wil6210_priv *wil, int id, void *d, - int len) +static void wmi_evt_addba_rx_req(struct wil6210_vif *vif, int id, + void *d, int len) { + struct wil6210_priv *wil = vif_to_wil(vif); struct wmi_rcp_addba_req_event *evt = d; - wil_addba_rx_request(wil, evt->cidxtid, evt->dialog_token, + wil_addba_rx_request(wil, vif->mid, evt->cidxtid, evt->dialog_token, evt->ba_param_set, evt->ba_timeout, evt->ba_seq_ctrl); } -static void wmi_evt_delba(struct wil6210_priv *wil, int id, void *d, int len) +static void wmi_evt_delba(struct wil6210_vif *vif, int id, void *d, int len) __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) { + struct wil6210_priv *wil = vif_to_wil(vif); struct wmi_delba_event *evt = d; u8 cid, tid; u16 reason = __le16_to_cpu(evt->reason); @@ -1110,8 +1136,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) might_sleep(); parse_cidxtid(evt->cidxtid, &cid, &tid); - wil_dbg_wmi(wil, "DELBA CID %d TID %d from %s reason %d\n", - cid, tid, + wil_dbg_wmi(wil, "DELBA MID %d CID %d TID %d from %s reason %d\n", + vif->mid, cid, tid, evt->from_initiator ? 
"originator" : "recipient", reason); if (!evt->from_initiator) { @@ -1148,8 +1174,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) } static void -wmi_evt_sched_scan_result(struct wil6210_priv *wil, int id, void *d, int len) +wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len) { + struct wil6210_priv *wil = vif_to_wil(vif); struct wmi_sched_scan_result_event *data = d; struct wiphy *wiphy = wil_to_wiphy(wil); struct ieee80211_mgmt *rx_mgmt_frame = @@ -1220,15 +1247,17 @@ wmi_evt_sched_scan_result(struct wil6210_priv *wil, int id, void *d, int len) * Some events are ignored for purpose; and need not be interpreted as * "unhandled events" */ -static void wmi_evt_ignore(struct wil6210_priv *wil, int id, void *d, int len) +static void wmi_evt_ignore(struct wil6210_vif *vif, int id, void *d, int len) { + struct wil6210_priv *wil = vif_to_wil(vif); + wil_dbg_wmi(wil, "Ignore event 0x%04x len %d\n", id, len); } static const struct { int eventid; - void (*handler)(struct wil6210_priv *wil, int eventid, - void *data, int data_len); + void (*handler)(struct wil6210_vif *vif, + int eventid, void *data, int data_len); } wmi_evt_handlers[] = { {WMI_READY_EVENTID, wmi_evt_ready}, {WMI_FW_READY_EVENTID, wmi_evt_ignore}, @@ -1325,6 +1354,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) (len >= sizeof(struct wmi_cmd_hdr))) { struct wmi_cmd_hdr *wmi = &evt->event.wmi; u16 id = le16_to_cpu(wmi->command_id); + u8 mid = wmi->mid; u32 tstamp = le32_to_cpu(wmi->fw_timestamp); if (test_bit(wil_status_resuming, wil->status)) { if (id == WMI_TRAFFIC_RESUME_EVENTID) @@ -1336,7 +1366,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil) id); } spin_lock_irqsave(&wil->wmi_ev_lock, flags); - if (wil->reply_id && wil->reply_id == id) { + if (wil->reply_id && wil->reply_id == id && + wil->reply_mid == mid) { if (wil->reply_buf) { memcpy(wil->reply_buf, wmi, min(len, wil->reply_size)); @@ -1384,7 +1415,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) n - num_immed_reply, num_immed_reply); } -int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, +int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len, u16 reply_id, void *reply, u8 reply_size, int to_msec) { int rc; @@ -1394,12 +1425,13 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, spin_lock(&wil->wmi_ev_lock); wil->reply_id = reply_id; + wil->reply_mid = mid; wil->reply_buf = reply; wil->reply_size = reply_size; reinit_completion(&wil->wmi_call); spin_unlock(&wil->wmi_ev_lock); - rc = __wmi_send(wil, cmdid, buf, len); + rc = __wmi_send(wil, cmdid, mid, buf, len); if (rc) goto out; @@ -1419,6 +1451,7 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, out: spin_lock(&wil->wmi_ev_lock); wil->reply_id = 0; + wil->reply_mid = U8_MAX; wil->reply_buf = NULL; wil->reply_size = 0; spin_unlock(&wil->wmi_ev_lock); @@ -1430,27 +1463,31 @@ out: int wmi_echo(struct wil6210_priv *wil) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); struct wmi_echo_cmd cmd = { .value = cpu_to_le32(0x12345678), }; - return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd), + return wmi_call(wil, WMI_ECHO_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_ECHO_RSP_EVENTID, NULL, 0, 50); } int wmi_set_mac_address(struct wil6210_priv *wil, void *addr) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); struct wmi_set_mac_address_cmd cmd; ether_addr_copy(cmd.mac, addr); wil_dbg_wmi(wil, "Set MAC %pM\n", addr); - return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd)); + 
return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, vif->mid, + &cmd, sizeof(cmd)); } int wmi_led_cfg(struct wil6210_priv *wil, bool enable) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); int rc = 0; struct wmi_led_cfg_cmd cmd = { .led_mode = enable, @@ -1487,7 +1524,7 @@ int wmi_led_cfg(struct wil6210_priv *wil, bool enable) "%s led %d\n", enable ? "enabling" : "disabling", led_id); - rc = wmi_call(wil, WMI_LED_CFG_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_LED_CFG_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_LED_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); if (rc) @@ -1503,9 +1540,10 @@ out: return rc; } -int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, - u8 chan, u8 hidden_ssid, u8 is_go) +int wmi_pcp_start(struct wil6210_vif *vif, + int bi, u8 wmi_nettype, u8 chan, u8 hidden_ssid, u8 is_go) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; struct wmi_pcp_start_cmd cmd = { @@ -1524,7 +1562,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, struct wmi_pcp_started_event evt; } __packed reply; - if (!wil->privacy) + if (!vif->privacy) cmd.disable_sec = 1; if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) || @@ -1546,7 +1584,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, * Processing time may be huge, in case of secure AP it takes about * 3500ms for FW to start AP */ - rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_PCP_START_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 5000); if (rc) return rc; @@ -1561,20 +1599,22 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, return rc; } -int wmi_pcp_stop(struct wil6210_priv *wil) +int wmi_pcp_stop(struct wil6210_vif *vif) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; rc = wmi_led_cfg(wil, false); if (rc) return rc; - return wmi_call(wil, WMI_PCP_STOP_CMDID, NULL, 0, + return wmi_call(wil, WMI_PCP_STOP_CMDID, vif->mid, NULL, 0, WMI_PCP_STOPPED_EVENTID, NULL, 0, 20); } -int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid) +int wmi_set_ssid(struct wil6210_vif *vif, u8 ssid_len, const void *ssid) { + struct wil6210_priv *wil = vif_to_wil(vif); struct wmi_set_ssid_cmd cmd = { .ssid_len = cpu_to_le32(ssid_len), }; @@ -1584,11 +1624,12 @@ int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid) memcpy(cmd.ssid, ssid, ssid_len); - return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd)); + return wmi_send(wil, WMI_SET_SSID_CMDID, vif->mid, &cmd, sizeof(cmd)); } -int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid) +int wmi_get_ssid(struct wil6210_vif *vif, u8 *ssid_len, void *ssid) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; struct { struct wmi_cmd_hdr wmi; @@ -1596,8 +1637,8 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid) } __packed reply; int len; /* reply.cmd.ssid_len in CPU order */ - rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID, - &reply, sizeof(reply), 20); + rc = wmi_call(wil, WMI_GET_SSID_CMDID, vif->mid, NULL, 0, + WMI_GET_SSID_EVENTID, &reply, sizeof(reply), 20); if (rc) return rc; @@ -1613,22 +1654,25 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid) int wmi_set_channel(struct wil6210_priv *wil, int channel) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); struct wmi_set_pcp_channel_cmd cmd = { .channel = channel - 1, }; - return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, &cmd, sizeof(cmd)); + return wmi_send(wil, 
WMI_SET_PCP_CHANNEL_CMDID, vif->mid, + &cmd, sizeof(cmd)); } int wmi_get_channel(struct wil6210_priv *wil, int *channel) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); int rc; struct { struct wmi_cmd_hdr wmi; struct wmi_set_pcp_channel_cmd cmd; } __packed reply; - rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0, + rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, vif->mid, NULL, 0, WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20); if (rc) return rc; @@ -1641,8 +1685,9 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel) return 0; } -int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi) +int wmi_p2p_cfg(struct wil6210_vif *vif, int channel, int bi) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; struct wmi_p2p_cfg_cmd cmd = { .discovery_mode = WMI_DISCOVERY_MODE_PEER2PEER, @@ -1656,7 +1701,7 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi) wil_dbg_wmi(wil, "sending WMI_P2P_CFG_CMDID\n"); - rc = wmi_call(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_P2P_CFG_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_P2P_CFG_DONE_EVENTID, &reply, sizeof(reply), 300); if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) { wil_err(wil, "P2P_CFG failed. status %d\n", reply.evt.status); @@ -1666,8 +1711,9 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi) return rc; } -int wmi_start_listen(struct wil6210_priv *wil) +int wmi_start_listen(struct wil6210_vif *vif) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; struct { struct wmi_cmd_hdr wmi; @@ -1676,7 +1722,7 @@ int wmi_start_listen(struct wil6210_priv *wil) wil_dbg_wmi(wil, "sending WMI_START_LISTEN_CMDID\n"); - rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0, + rc = wmi_call(wil, WMI_START_LISTEN_CMDID, vif->mid, NULL, 0, WMI_LISTEN_STARTED_EVENTID, &reply, sizeof(reply), 300); if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) { wil_err(wil, "device failed to start listen. status %d\n", @@ -1687,8 +1733,9 @@ int wmi_start_listen(struct wil6210_priv *wil) return rc; } -int wmi_start_search(struct wil6210_priv *wil) +int wmi_start_search(struct wil6210_vif *vif) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; struct { struct wmi_cmd_hdr wmi; @@ -1697,7 +1744,7 @@ int wmi_start_search(struct wil6210_priv *wil) wil_dbg_wmi(wil, "sending WMI_START_SEARCH_CMDID\n"); - rc = wmi_call(wil, WMI_START_SEARCH_CMDID, NULL, 0, + rc = wmi_call(wil, WMI_START_SEARCH_CMDID, vif->mid, NULL, 0, WMI_SEARCH_STARTED_EVENTID, &reply, sizeof(reply), 300); if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) { wil_err(wil, "device failed to start search. 
status %d\n", @@ -1708,13 +1755,14 @@ int wmi_start_search(struct wil6210_priv *wil) return rc; } -int wmi_stop_discovery(struct wil6210_priv *wil) +int wmi_stop_discovery(struct wil6210_vif *vif) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; wil_dbg_wmi(wil, "sending WMI_DISCOVERY_STOP_CMDID\n"); - rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0, + rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, vif->mid, NULL, 0, WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 100); if (rc) @@ -1723,9 +1771,10 @@ int wmi_stop_discovery(struct wil6210_priv *wil) return rc; } -int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, +int wmi_del_cipher_key(struct wil6210_vif *vif, u8 key_index, const void *mac_addr, int key_usage) { + struct wil6210_priv *wil = vif_to_wil(vif); struct wmi_delete_cipher_key_cmd cmd = { .key_index = key_index, }; @@ -1733,13 +1782,15 @@ int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, if (mac_addr) memcpy(cmd.mac, mac_addr, WMI_MAC_LEN); - return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd)); + return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, vif->mid, + &cmd, sizeof(cmd)); } -int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, +int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index, const void *mac_addr, int key_len, const void *key, int key_usage) { + struct wil6210_priv *wil = vif_to_wil(vif); struct wmi_add_cipher_key_cmd cmd = { .key_index = key_index, .key_usage = key_usage, @@ -1753,11 +1804,13 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, if (mac_addr) memcpy(cmd.mac, mac_addr, WMI_MAC_LEN); - return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd)); + return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, vif->mid, + &cmd, sizeof(cmd)); } -int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) +int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie) { + struct wil6210_priv *wil = vif_to_wil(vif); static const char *const names[] = { [WMI_FRAME_BEACON] = "BEACON", [WMI_FRAME_PROBE_REQ] = "PROBE_REQ", @@ -1786,7 +1839,7 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) /* BUG: FW API define ieLen as u8. Will fix FW */ cmd->ie_len = cpu_to_le16(ie_len); memcpy(cmd->ie_info, ie, ie_len); - rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len); + rc = wmi_send(wil, WMI_SET_APPIE_CMDID, vif->mid, cmd, len); kfree(cmd); out: if (rc) { @@ -1808,6 +1861,7 @@ out: */ int wmi_rxon(struct wil6210_priv *wil, bool on) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); int rc; struct { struct wmi_cmd_hdr wmi; @@ -1817,13 +1871,13 @@ int wmi_rxon(struct wil6210_priv *wil, bool on) wil_info(wil, "(%s)\n", on ? 
"on" : "off"); if (on) { - rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0, + rc = wmi_call(wil, WMI_START_LISTEN_CMDID, vif->mid, NULL, 0, WMI_LISTEN_STARTED_EVENTID, &reply, sizeof(reply), 100); if ((rc == 0) && (reply.evt.status != WMI_FW_STATUS_SUCCESS)) rc = -EINVAL; } else { - rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0, + rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, vif->mid, NULL, 0, WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 20); } @@ -1832,8 +1886,9 @@ int wmi_rxon(struct wil6210_priv *wil, bool on) int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) { - struct wireless_dev *wdev = wil->wdev; - struct net_device *ndev = wil_to_ndev(wil); + struct net_device *ndev = wil->main_ndev; + struct wireless_dev *wdev = ndev->ieee80211_ptr; + struct wil6210_vif *vif = ndev_to_vif(ndev); struct wmi_cfg_rx_chain_cmd cmd = { .action = WMI_RX_CHAIN_ADD, .rx_sw_ring = { @@ -1877,7 +1932,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK; /* typical time for secure PCP is 840ms */ - rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000); if (rc) return rc; @@ -1895,6 +1950,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); int rc; struct wmi_temp_sense_cmd cmd = { .measure_baseband_en = cpu_to_le32(!!t_bb), @@ -1906,7 +1962,7 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) struct wmi_temp_sense_done_event evt; } __packed reply; - rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100); if (rc) return rc; @@ -1919,9 +1975,10 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) return 0; } -int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, +int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason, bool full_disconnect, bool del_sta) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; u16 reason_code; struct wmi_disconnect_sta_cmd disc_sta_cmd = { @@ -1937,16 +1994,17 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason); - wil->locally_generated_disc = true; + vif->locally_generated_disc = true; if (del_sta) { ether_addr_copy(del_sta_cmd.dst_mac, mac); - rc = wmi_call(wil, WMI_DEL_STA_CMDID, &del_sta_cmd, + rc = wmi_call(wil, WMI_DEL_STA_CMDID, vif->mid, &del_sta_cmd, sizeof(del_sta_cmd), WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000); } else { ether_addr_copy(disc_sta_cmd.dst_mac, mac); - rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &disc_sta_cmd, - sizeof(disc_sta_cmd), WMI_DISCONNECT_EVENTID, + rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, vif->mid, + &disc_sta_cmd, sizeof(disc_sta_cmd), + WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000); } /* failure to disconnect in reasonable time treated as FW error */ @@ -1967,12 +2025,13 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, reply.evt.disconnect_reason); wil->sinfo_gen++; - wil6210_disconnect(wil, reply.evt.bssid, reason_code, true); + wil6210_disconnect(vif, reply.evt.bssid, reason_code, true); } return 0; } -int wmi_addba(struct wil6210_priv *wil, u8 ringid, 
u8 size, u16 timeout) +int wmi_addba(struct wil6210_priv *wil, u8 mid, + u8 ringid, u8 size, u16 timeout) { struct wmi_vring_ba_en_cmd cmd = { .ringid = ringid, @@ -1984,10 +2043,10 @@ int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout) wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size, timeout); - return wmi_send(wil, WMI_VRING_BA_EN_CMDID, &cmd, sizeof(cmd)); + return wmi_send(wil, WMI_VRING_BA_EN_CMDID, mid, &cmd, sizeof(cmd)); } -int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason) +int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason) { struct wmi_vring_ba_dis_cmd cmd = { .ringid = ringid, @@ -1996,10 +2055,10 @@ int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason) wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason); - return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, &cmd, sizeof(cmd)); + return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd)); } -int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason) +int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason) { struct wmi_rcp_delba_cmd cmd = { .cidxtid = cidxtid, @@ -2009,10 +2068,11 @@ int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason) wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cidxtid & 0xf, (cidxtid >> 4) & 0xf, reason); - return wmi_send(wil, WMI_RCP_DELBA_CMDID, &cmd, sizeof(cmd)); + return wmi_send(wil, WMI_RCP_DELBA_CMDID, mid, &cmd, sizeof(cmd)); } -int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token, +int wmi_addba_rx_resp(struct wil6210_priv *wil, + u8 mid, u8 cid, u8 tid, u8 token, u16 status, bool amsdu, u16 agg_wsize, u16 timeout) { int rc; @@ -2035,10 +2095,11 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token, } __packed reply; wil_dbg_wmi(wil, - "ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s\n", - cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-"); + "ADDBA response for MID %d CID %d TID %d size %d timeout %d status %d AMSDU%s\n", + mid, cid, tid, agg_wsize, + timeout, status, amsdu ? 
"+" : "-"); - rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, mid, &cmd, sizeof(cmd), WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), 100); if (rc) @@ -2056,6 +2117,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token, int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); int rc; struct wmi_ps_dev_profile_cfg_cmd cmd = { .ps_profile = ps_profile, @@ -2070,7 +2132,8 @@ int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil, reply.evt.status = cpu_to_le32(WMI_PS_CFG_CMD_STATUS_ERROR); - rc = wmi_call(wil, WMI_PS_DEV_PROFILE_CFG_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_PS_DEV_PROFILE_CFG_CMDID, vif->mid, + &cmd, sizeof(cmd), WMI_PS_DEV_PROFILE_CFG_EVENTID, &reply, sizeof(reply), 100); if (rc) @@ -2089,6 +2152,7 @@ int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil, int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); int rc; struct wmi_set_mgmt_retry_limit_cmd cmd = { .mgmt_retry_limit = retry_short, @@ -2105,7 +2169,8 @@ int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short) reply.evt.status = WMI_FW_STATUS_FAILURE; - rc = wmi_call(wil, WMI_SET_MGMT_RETRY_LIMIT_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_SET_MGMT_RETRY_LIMIT_CMDID, vif->mid, + &cmd, sizeof(cmd), WMI_SET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply), 100); if (rc) @@ -2122,6 +2187,7 @@ int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short) int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); int rc; struct { struct wmi_cmd_hdr wmi; @@ -2134,7 +2200,7 @@ int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short) return -ENOTSUPP; reply.evt.mgmt_retry_limit = 0; - rc = wmi_call(wil, WMI_GET_MGMT_RETRY_LIMIT_CMDID, NULL, 0, + rc = wmi_call(wil, WMI_GET_MGMT_RETRY_LIMIT_CMDID, vif->mid, NULL, 0, WMI_GET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply), 100); if (rc) @@ -2146,21 +2212,23 @@ int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short) return 0; } -int wmi_abort_scan(struct wil6210_priv *wil) +int wmi_abort_scan(struct wil6210_vif *vif) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; wil_dbg_wmi(wil, "sending WMI_ABORT_SCAN_CMDID\n"); - rc = wmi_send(wil, WMI_ABORT_SCAN_CMDID, NULL, 0); + rc = wmi_send(wil, WMI_ABORT_SCAN_CMDID, vif->mid, NULL, 0); if (rc) wil_err(wil, "Failed to abort scan (%d)\n", rc); return rc; } -int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid) +int wmi_new_sta(struct wil6210_vif *vif, const u8 *mac, u8 aid) { + struct wil6210_priv *wil = vif_to_wil(vif); int rc; struct wmi_new_sta_cmd cmd = { .aid = aid, @@ -2170,7 +2238,7 @@ int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid) ether_addr_copy(cmd.dst_mac, mac); - rc = wmi_send(wil, WMI_NEW_STA_CMDID, &cmd, sizeof(cmd)); + rc = wmi_send(wil, WMI_NEW_STA_CMDID, vif->mid, &cmd, sizeof(cmd)); if (rc) wil_err(wil, "Failed to send new sta (%d)\n", rc); @@ -2206,6 +2274,7 @@ static const char *suspend_status2name(u8 status) int wmi_suspend(struct wil6210_priv *wil) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); int rc; struct wmi_traffic_suspend_cmd cmd = { .wakeup_trigger = wil->wakeup_trigger, @@ -2221,7 +2290,8 @@ int wmi_suspend(struct wil6210_priv *wil) reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE; - rc = 
wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, vif->mid, + &cmd, sizeof(cmd), WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply), suspend_to); if (rc) { @@ -2289,6 +2359,7 @@ static void resume_triggers2string(u32 triggers, char *string, int str_size) int wmi_resume(struct wil6210_priv *wil) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); int rc; char string[100]; struct { @@ -2299,7 +2370,7 @@ int wmi_resume(struct wil6210_priv *wil) reply.evt.status = WMI_TRAFFIC_RESUME_FAILED; reply.evt.resume_triggers = WMI_RESUME_TRIGGER_UNKNOWN; - rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0, + rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, vif->mid, NULL, 0, WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply), WIL_WAIT_FOR_SUSPEND_RESUME_COMP); if (rc) @@ -2313,14 +2384,100 @@ int wmi_resume(struct wil6210_priv *wil) return reply.evt.status; } -static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id, +int wmi_port_allocate(struct wil6210_priv *wil, u8 mid, + const u8 *mac, enum nl80211_iftype iftype) +{ + int rc; + struct wmi_port_allocate_cmd cmd = { + .mid = mid, + }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_port_allocated_event evt; + } __packed reply; + + wil_dbg_misc(wil, "port allocate, mid %d iftype %d, mac %pM\n", + mid, iftype, mac); + + ether_addr_copy(cmd.mac, mac); + switch (iftype) { + case NL80211_IFTYPE_STATION: + cmd.port_role = WMI_PORT_STA; + break; + case NL80211_IFTYPE_AP: + cmd.port_role = WMI_PORT_AP; + break; + case NL80211_IFTYPE_P2P_CLIENT: + cmd.port_role = WMI_PORT_P2P_CLIENT; + break; + case NL80211_IFTYPE_P2P_GO: + cmd.port_role = WMI_PORT_P2P_GO; + break; + /* what about monitor??? */ + default: + wil_err(wil, "unsupported iftype: %d\n", iftype); + return -EINVAL; + } + + reply.evt.status = WMI_FW_STATUS_FAILURE; + + rc = wmi_call(wil, WMI_PORT_ALLOCATE_CMDID, mid, + &cmd, sizeof(cmd), + WMI_PORT_ALLOCATED_EVENTID, &reply, + sizeof(reply), 300); + if (rc) { + wil_err(wil, "failed to allocate port, status %d\n", rc); + return rc; + } + if (reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "WMI_PORT_ALLOCATE returned status %d\n", + reply.evt.status); + return -EINVAL; + } + + return 0; +} + +int wmi_port_delete(struct wil6210_priv *wil, u8 mid) +{ + int rc; + struct wmi_port_delete_cmd cmd = { + .mid = mid, + }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_port_deleted_event evt; + } __packed reply; + + wil_dbg_misc(wil, "port delete, mid %d\n", mid); + + reply.evt.status = WMI_FW_STATUS_FAILURE; + + rc = wmi_call(wil, WMI_PORT_DELETE_CMDID, mid, + &cmd, sizeof(cmd), + WMI_PORT_DELETED_EVENTID, &reply, + sizeof(reply), 2000); + if (rc) { + wil_err(wil, "failed to delete port, status %d\n", rc); + return rc; + } + if (reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "WMI_PORT_DELETE returned status %d\n", + reply.evt.status); + return -EINVAL; + } + + return 0; +} + +static bool wmi_evt_call_handler(struct wil6210_vif *vif, int id, void *d, int len) { uint i; for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) { if (wmi_evt_handlers[i].eventid == id) { - wmi_evt_handlers[i].handler(wil, id, d, len); + wmi_evt_handlers[i].handler(vif, id, d, len); return true; } } @@ -2332,19 +2489,39 @@ static void wmi_event_handle(struct wil6210_priv *wil, struct wil6210_mbox_hdr *hdr) { u16 len = le16_to_cpu(hdr->len); + struct wil6210_vif *vif; if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) && (len >= sizeof(struct wmi_cmd_hdr))) { struct wmi_cmd_hdr *wmi = (void 
*)(&hdr[1]); void *evt_data = (void *)(&wmi[1]); u16 id = le16_to_cpu(wmi->command_id); + u8 mid = wmi->mid; + + wil_dbg_wmi(wil, "Handle %s (0x%04x) (reply_id 0x%04x,%d)\n", + eventid2name(id), id, wil->reply_id, + wil->reply_mid); + + if (mid == MID_BROADCAST) + mid = 0; + if (mid >= wil->max_vifs) { + wil_dbg_wmi(wil, "invalid mid %d, event skipped\n", + mid); + return; + } + vif = wil->vifs[mid]; + if (!vif) { + wil_dbg_wmi(wil, "event for empty VIF(%d), skipped\n", + mid); + return; + } - wil_dbg_wmi(wil, "Handle %s (0x%04x) (reply_id 0x%04x)\n", - eventid2name(id), id, wil->reply_id); /* check if someone waits for this event */ - if (wil->reply_id && wil->reply_id == id) { + if (wil->reply_id && wil->reply_id == id && + wil->reply_mid == mid) { WARN_ON(wil->reply_buf); - wmi_evt_call_handler(wil, id, evt_data, + + wmi_evt_call_handler(vif, id, evt_data, len - sizeof(*wmi)); wil_dbg_wmi(wil, "event_handle: Complete WMI 0x%04x\n", id); @@ -2353,7 +2530,7 @@ static void wmi_event_handle(struct wil6210_priv *wil, } /* unsolicited event */ /* search for handler */ - if (!wmi_evt_call_handler(wil, id, evt_data, + if (!wmi_evt_call_handler(vif, id, evt_data, len - sizeof(*wmi))) { wil_info(wil, "Unhandled event 0x%04x\n", id); } @@ -2523,6 +2700,7 @@ wmi_sched_scan_set_plans(struct wil6210_priv *wil, int wmi_start_sched_scan(struct wil6210_priv *wil, struct cfg80211_sched_scan_request *request) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); int rc; struct wmi_start_sched_scan_cmd cmd = { .min_rssi_threshold = S8_MIN, @@ -2549,7 +2727,8 @@ int wmi_start_sched_scan(struct wil6210_priv *wil, reply.evt.result = WMI_PNO_REJECT; - rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, &cmd, sizeof(cmd), + rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, vif->mid, + &cmd, sizeof(cmd), WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); if (rc) @@ -2566,6 +2745,7 @@ int wmi_start_sched_scan(struct wil6210_priv *wil, int wmi_stop_sched_scan(struct wil6210_priv *wil) { + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); int rc; struct { struct wmi_cmd_hdr wmi; @@ -2577,7 +2757,7 @@ int wmi_stop_sched_scan(struct wil6210_priv *wil) reply.evt.result = WMI_PNO_REJECT; - rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, NULL, 0, + rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, vif->mid, NULL, 0, WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); if (rc) diff --git a/drivers/net/wireless/atmel/Kconfig b/drivers/net/wireless/atmel/Kconfig index a43cfd163254..3e684f8c1f93 100644 --- a/drivers/net/wireless/atmel/Kconfig +++ b/drivers/net/wireless/atmel/Kconfig @@ -5,8 +5,8 @@ config WLAN_VENDOR_ATMEL If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. if WLAN_VENDOR_ATMEL diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index c9dd5e44c9c6..d122386c382b 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -3861,7 +3861,7 @@ static int reset_atmel_card(struct net_device *dev) set all the Mib values which matter in the card to match their settings in the atmel_private structure. 
Some of these - can be altered on the fly, but many (WEP, infrastucture or ad-hoc) + can be altered on the fly, but many (WEP, infrastructure or ad-hoc) can only be changed by tearing down the world and coming back through here. diff --git a/drivers/net/wireless/broadcom/Kconfig b/drivers/net/wireless/broadcom/Kconfig index d3651ceb5046..eebe2864835f 100644 --- a/drivers/net/wireless/broadcom/Kconfig +++ b/drivers/net/wireless/broadcom/Kconfig @@ -5,8 +5,8 @@ config WLAN_VENDOR_BROADCOM If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. if WLAN_VENDOR_BROADCOM diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c index 2d3a5dd07a3f..1068a2a4494c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c @@ -445,6 +445,11 @@ brcmf_proto_bcdc_init_done(struct brcmf_pub *drvr) return 0; } +static void brcmf_proto_bcdc_debugfs_create(struct brcmf_pub *drvr) +{ + brcmf_fws_debugfs_create(drvr); +} + int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { struct brcmf_bcdc *bcdc; @@ -472,6 +477,7 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) drvr->proto->del_if = brcmf_proto_bcdc_del_if; drvr->proto->reset_if = brcmf_proto_bcdc_reset_if; drvr->proto->init_done = brcmf_proto_bcdc_init_done; + drvr->proto->debugfs_create = brcmf_proto_bcdc_debugfs_create; drvr->proto->pd = bcdc; drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c index 03aae6bc1838..372363a6e752 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c @@ -462,7 +462,7 @@ static void brcmf_btcoex_dhcp_end(struct brcmf_btcoex_info *btci) int brcmf_btcoex_set_mode(struct brcmf_cfg80211_vif *vif, enum brcmf_btcoex_mode mode, u16 duration) { - struct brcmf_cfg80211_info *cfg = wiphy_priv(vif->wdev.wiphy); + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy); struct brcmf_btcoex_info *btci = cfg->btcoex; struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index 0b76a615708e..27e693e93f21 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -88,7 +88,7 @@ struct brcmf_bus_ops { void (*wowl_config)(struct device *dev, bool enabled); size_t (*get_ramsize)(struct device *dev); int (*get_memdump)(struct device *dev, void *data, size_t len); - int (*get_fwname)(struct device *dev, uint chip, uint chiprev, + int (*get_fwname)(struct device *dev, const char *ext, unsigned char *fw_name); }; @@ -140,6 +140,7 @@ struct brcmf_bus_stats { * @always_use_fws_queue: bus wants use queue also when fwsignal is inactive. * @wowl_supported: is wowl supported by bus driver. * @chiprev: revision of the dongle chip. + * @msgbuf: msgbuf protocol parameters provided by bus layer. 
*/ struct brcmf_bus { union { @@ -228,10 +229,10 @@ int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len) } static inline -int brcmf_bus_get_fwname(struct brcmf_bus *bus, uint chip, uint chiprev, +int brcmf_bus_get_fwname(struct brcmf_bus *bus, const char *ext, unsigned char *fw_name) { - return bus->ops->get_fwname(bus->dev, chip, chiprev, fw_name); + return bus->ops->get_fwname(bus->dev, ext, fw_name); } /* @@ -253,7 +254,6 @@ void brcmf_dev_reset(struct device *dev); /* Configure the "global" bus state used by upper layers */ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state); -int brcmf_bus_started(struct device *dev); s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len); void brcmf_bus_add_txhdrlen(struct device *dev, uint len); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 15fa00d79fc6..89b86251910e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -753,7 +753,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, static int brcmf_cfg80211_del_ap_iface(struct wiphy *wiphy, struct wireless_dev *wdev) { - struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct net_device *ndev = wdev->netdev; struct brcmf_if *ifp = netdev_priv(ndev); int ret; @@ -786,7 +786,7 @@ err_unarm: static int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev) { - struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct net_device *ndev = wdev->netdev; if (ndev && ndev == cfg_to_ndev(cfg)) @@ -831,7 +831,7 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type, struct vif_params *params) { - struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_cfg80211_vif *vif = ifp->vif; s32 infra = 0; @@ -2127,17 +2127,15 @@ static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, s32 *dbm) { - struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); - struct net_device *ndev = cfg_to_ndev(cfg); - struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev); s32 qdbm = 0; s32 err; brcmf_dbg(TRACE, "Enter\n"); - if (!check_vif_up(ifp->vif)) + if (!check_vif_up(vif)) return -EIO; - err = brcmf_fil_iovar_int_get(ifp, "qtxpower", &qdbm); + err = brcmf_fil_iovar_int_get(vif->ifp, "qtxpower", &qdbm); if (err) { brcmf_err("error (%d)\n", err); goto done; @@ -3358,7 +3356,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy, struct cfg80211_sched_scan_request *req) { struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); brcmf_dbg(SCAN, "Enter: n_match_sets=%d n_ssids=%d\n", req->n_match_sets, req->n_ssids); @@ -5124,6 +5122,9 @@ static int brcmf_cfg80211_set_pmk(struct wiphy *wiphy, struct net_device *dev, if (WARN_ON(ifp->vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_1X)) return -EINVAL; + if (conf->pmk_len > BRCMF_WSEC_MAX_PSK_LEN) + return -ERANGE; + return brcmf_set_pmk(ifp, conf->pmk, conf->pmk_len); } @@ -5187,6 +5188,12 @@ static struct cfg80211_ops brcmf_cfg80211_ops = { .del_pmk = 
brcmf_cfg80211_del_pmk, }; +struct cfg80211_ops *brcmf_cfg80211_get_ops(void) +{ + return kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops), + GFP_KERNEL); +} + struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, enum nl80211_iftype type) { @@ -5894,7 +5901,7 @@ static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel, static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap[]) { - struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0); struct ieee80211_supported_band *band; struct ieee80211_channel *channel; struct wiphy *wiphy; @@ -6009,7 +6016,7 @@ fail_pbuf: static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) { - struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0); struct ieee80211_supported_band *band; struct brcmf_fil_bwcap_le band_bwcap; struct brcmf_chanspec_list *list; @@ -6194,10 +6201,10 @@ static void brcmf_update_vht_cap(struct ieee80211_supported_band *band, } } -static int brcmf_setup_wiphybands(struct wiphy *wiphy) +static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg) { - struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); - struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0); + struct wiphy *wiphy; u32 nmode = 0; u32 vhtmode = 0; u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT }; @@ -6791,8 +6798,8 @@ static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2], static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *req) { - struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); - struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0); struct brcmf_fil_country_le ccreq; s32 err; int i; @@ -6802,7 +6809,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, return; /* ignore non-ISO3166 country codes */ - for (i = 0; i < sizeof(req->alpha2); i++) + for (i = 0; i < 2; i++) if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') { brcmf_err("not an ISO3166 code (0x%02x 0x%02x)\n", req->alpha2[0], req->alpha2[1]); @@ -6827,7 +6834,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, brcmf_err("Firmware rejected country setting\n"); return; } - brcmf_setup_wiphybands(wiphy); + brcmf_setup_wiphybands(cfg); } static void brcmf_free_wiphy(struct wiphy *wiphy) @@ -6854,17 +6861,15 @@ static void brcmf_free_wiphy(struct wiphy *wiphy) if (wiphy->wowlan != &brcmf_wowlan_support) kfree(wiphy->wowlan); #endif - wiphy_free(wiphy); } struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, - struct device *busdev, + struct cfg80211_ops *ops, bool p2pdev_forced) { + struct wiphy *wiphy = drvr->wiphy; struct net_device *ndev = brcmf_get_ifp(drvr, 0)->ndev; struct brcmf_cfg80211_info *cfg; - struct wiphy *wiphy; - struct cfg80211_ops *ops; struct brcmf_cfg80211_vif *vif; struct brcmf_if *ifp; s32 err = 0; @@ -6876,26 +6881,13 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, return NULL; } - ops = kmemdup(&brcmf_cfg80211_ops, sizeof(*ops), GFP_KERNEL); - if (!ops) - return NULL; - - ifp = netdev_priv(ndev); -#ifdef CONFIG_PM - if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK)) - ops->set_rekey_data = brcmf_cfg80211_set_rekey_data; -#endif - wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info)); - if (!wiphy) { 
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) { brcmf_err("Could not allocate wiphy device\n"); - goto ops_out; + return NULL; } - memcpy(wiphy->perm_addr, drvr->mac, ETH_ALEN); - set_wiphy_dev(wiphy, busdev); - cfg = wiphy_priv(wiphy); cfg->wiphy = wiphy; - cfg->ops = ops; cfg->pub = drvr; init_vif_event(&cfg->vif_event); INIT_LIST_HEAD(&cfg->vif_list); @@ -6904,6 +6896,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, if (IS_ERR(vif)) goto wiphy_out; + ifp = netdev_priv(ndev); vif->ifp = ifp; vif->wdev.netdev = ndev; ndev->ieee80211_ptr = &vif->wdev; @@ -6930,6 +6923,11 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, if (err < 0) goto priv_out; + /* regulatory notifier below needs access to cfg so + * assign it now. + */ + drvr->config = cfg; + brcmf_dbg(INFO, "Registering custom regulatory\n"); wiphy->reg_notifier = brcmf_cfg80211_reg_notifier; wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; @@ -6943,13 +6941,17 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, cap = &wiphy->bands[NL80211_BAND_2GHZ]->ht_cap.cap; *cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; } +#ifdef CONFIG_PM + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK)) + ops->set_rekey_data = brcmf_cfg80211_set_rekey_data; +#endif err = wiphy_register(wiphy); if (err < 0) { brcmf_err("Could not register wiphy device (%d)\n", err); goto priv_out; } - err = brcmf_setup_wiphybands(wiphy); + err = brcmf_setup_wiphybands(cfg); if (err) { brcmf_err("Setting wiphy bands failed (%d)\n", err); goto wiphy_unreg_out; @@ -6966,12 +6968,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, else *cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; } - /* p2p might require that "if-events" get processed by fweh. So - * activate the already registered event handlers now and activate - * the rest when initialization has completed. drvr->config needs to - * be assigned before activating events. 
- */ - drvr->config = cfg; + err = brcmf_fweh_activate_events(ifp); if (err) { brcmf_err("FWEH activation failed (%d)\n", err); @@ -7039,8 +7036,7 @@ priv_out: ifp->vif = NULL; wiphy_out: brcmf_free_wiphy(wiphy); -ops_out: - kfree(ops); + kfree(cfg); return NULL; } @@ -7055,4 +7051,5 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg) kfree(cfg->ops); wl_deinit_priv(cfg); brcmf_free_wiphy(cfg->wiphy); + kfree(cfg); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index b5b5f0f10b63..a4aec0004e4f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -355,20 +355,24 @@ static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg) static inline struct brcmf_cfg80211_info *wiphy_to_cfg(struct wiphy *w) { - return (struct brcmf_cfg80211_info *)(wiphy_priv(w)); + struct brcmf_pub *drvr = wiphy_priv(w); + return drvr->config; } static inline struct brcmf_cfg80211_info *wdev_to_cfg(struct wireless_dev *wd) { - return (struct brcmf_cfg80211_info *)(wdev_priv(wd)); + return wiphy_to_cfg(wd->wiphy); +} + +static inline struct brcmf_cfg80211_vif *wdev_to_vif(struct wireless_dev *wdev) +{ + return container_of(wdev, struct brcmf_cfg80211_vif, wdev); } static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg) { - struct brcmf_cfg80211_vif *vif; - vif = list_first_entry(&cfg->vif_list, struct brcmf_cfg80211_vif, list); - return vif->wdev.netdev; + return brcmf_get_ifp(cfg->pub, 0)->ndev; } static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev) @@ -395,11 +399,12 @@ brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg) } struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, - struct device *busdev, + struct cfg80211_ops *ops, bool p2pdev_forced); void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg); s32 brcmf_cfg80211_up(struct net_device *ndev); s32 brcmf_cfg80211_down(struct net_device *ndev); +struct cfg80211_ops *brcmf_cfg80211_get_ops(void); enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp); struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index f7b30ce2300d..3b829fed8631 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -464,12 +464,12 @@ static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset, ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL); } -static char *brcmf_chip_name(uint chipid, char *buf, uint len) +char *brcmf_chip_name(u32 id, u32 rev, char *buf, uint len) { const char *fmt; - fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x"; - snprintf(buf, len, fmt, chipid); + fmt = ((id > 0xa000) || (id < 0x4000)) ? "BCM%d/%u" : "BCM%x/%u"; + snprintf(buf, len, fmt, id, rev); return buf; } @@ -924,10 +924,10 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci) ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT; socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT; - brcmf_chip_name(ci->pub.chip, ci->pub.name, sizeof(ci->pub.name)); - brcmf_dbg(INFO, "found %s chip: BCM%s, rev=%d\n", - socitype == SOCI_SB ? 
"SB" : "AXI", ci->pub.name, - ci->pub.chiprev); + brcmf_chip_name(ci->pub.chip, ci->pub.chiprev, + ci->pub.name, sizeof(ci->pub.name)); + brcmf_dbg(INFO, "found %s chip: %s\n", + socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name); if (socitype == SOCI_SB) { if (ci->pub.chip != BRCM_CC_4329_CHIP_ID) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h index dd0ec3eba6a9..0ae3b33bab62 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h @@ -45,7 +45,7 @@ struct brcmf_chip { u32 rambase; u32 ramsize; u32 srsize; - char name[8]; + char name[12]; }; /** @@ -93,5 +93,6 @@ void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset, void brcmf_chip_set_passive(struct brcmf_chip *ci); bool brcmf_chip_set_active(struct brcmf_chip *ci, u32 rstvec); bool brcmf_chip_sr_capable(struct brcmf_chip *pub); +char *brcmf_chip_name(u32 chipid, u32 chiprev, char *buf, uint len); #endif /* BRCMF_AXIDMP_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 9be0b051066a..105b8774fca9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -30,6 +30,7 @@ #include "common.h" #include "of.h" #include "firmware.h" +#include "chip.h" MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver."); @@ -51,7 +52,7 @@ MODULE_PARM_DESC(txglomsz, "Maximum tx packet chain size [SDIO]"); /* Debug level configuration. See debug.h for bits, sysfs modifiable */ int brcmf_msg_level; -module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR); +module_param_named(debug, brcmf_msg_level, int, 0600); MODULE_PARM_DESC(debug, "Level of debug output"); static int brcmf_p2p_enable; @@ -64,7 +65,7 @@ MODULE_PARM_DESC(feature_disable, "Disable features"); static char brcmf_firmware_path[BRCMF_FW_ALTPATH_LEN]; module_param_string(alternative_fw_path, brcmf_firmware_path, - BRCMF_FW_ALTPATH_LEN, S_IRUSR); + BRCMF_FW_ALTPATH_LEN, 0400); MODULE_PARM_DESC(alternative_fw_path, "Alternative firmware path"); static int brcmf_fcmode; @@ -72,9 +73,13 @@ module_param_named(fcmode, brcmf_fcmode, int, 0); MODULE_PARM_DESC(fcmode, "Mode of firmware signalled flow control"); static int brcmf_roamoff; -module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR); +module_param_named(roamoff, brcmf_roamoff, int, 0400); MODULE_PARM_DESC(roamoff, "Do not use internal roaming engine"); +static int brcmf_iapp_enable; +module_param_named(iapp, brcmf_iapp_enable, int, 0); +MODULE_PARM_DESC(iapp, "Enable partial support for the obsoleted Inter-Access Point Protocol"); + #ifdef DEBUG /* always succeed brcmf_bus_started() */ static int brcmf_ignore_probe_fail; @@ -124,43 +129,9 @@ static int brcmf_c_download(struct brcmf_if *ifp, u16 flag, return err; } -static int brcmf_c_get_clm_name(struct brcmf_if *ifp, u8 *clm_name) -{ - struct brcmf_bus *bus = ifp->drvr->bus_if; - struct brcmf_rev_info *ri = &ifp->drvr->revinfo; - u8 fw_name[BRCMF_FW_NAME_LEN]; - u8 *ptr; - size_t len; - s32 err; - - memset(fw_name, 0, BRCMF_FW_NAME_LEN); - err = brcmf_bus_get_fwname(bus, ri->chipnum, ri->chiprev, fw_name); - if (err) { - brcmf_err("get firmware name failed (%d)\n", err); - goto done; - } - - /* generate CLM blob file name */ - ptr = strrchr(fw_name, '.'); - if (!ptr) { - err = -ENOENT; - goto 
done; - } - - len = ptr - fw_name + 1; - if (len + strlen(".clm_blob") > BRCMF_FW_NAME_LEN) { - err = -E2BIG; - } else { - strlcpy(clm_name, fw_name, len); - strlcat(clm_name, ".clm_blob", BRCMF_FW_NAME_LEN); - } -done: - return err; -} - static int brcmf_c_process_clm_blob(struct brcmf_if *ifp) { - struct device *dev = ifp->drvr->bus_if->dev; + struct brcmf_bus *bus = ifp->drvr->bus_if; struct brcmf_dload_data_le *chunk_buf; const struct firmware *clm = NULL; u8 clm_name[BRCMF_FW_NAME_LEN]; @@ -173,16 +144,16 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp) brcmf_dbg(TRACE, "Enter\n"); - memset(clm_name, 0, BRCMF_FW_NAME_LEN); - err = brcmf_c_get_clm_name(ifp, clm_name); + memset(clm_name, 0, sizeof(clm_name)); + err = brcmf_bus_get_fwname(bus, ".clm_blob", clm_name); if (err) { brcmf_err("get CLM blob file name failed (%d)\n", err); return err; } - err = request_firmware(&clm, clm_name, dev); + err = request_firmware(&clm, clm_name, bus->dev); if (err) { - brcmf_info("no clm_blob available(err=%d), device may have limited channels available\n", + brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n", err); return 0; } @@ -234,6 +205,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) { s8 eventmask[BRCMF_EVENTING_MASK_LEN]; u8 buf[BRCMF_DCMD_SMLEN]; + struct brcmf_bus *bus; struct brcmf_rev_info_le revinfo; struct brcmf_rev_info *ri; char *clmver; @@ -247,18 +219,21 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) brcmf_err("Retrieving cur_etheraddr failed, %d\n", err); goto done; } + memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN); memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac)); + bus = ifp->drvr->bus_if; + ri = &ifp->drvr->revinfo; + err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_REVINFO, &revinfo, sizeof(revinfo)); - ri = &ifp->drvr->revinfo; if (err < 0) { brcmf_err("retrieving revision info failed, %d\n", err); + strlcpy(ri->chipname, "UNKNOWN", sizeof(ri->chipname)); } else { ri->vendorid = le32_to_cpu(revinfo.vendorid); ri->deviceid = le32_to_cpu(revinfo.deviceid); ri->radiorev = le32_to_cpu(revinfo.radiorev); - ri->chiprev = le32_to_cpu(revinfo.chiprev); ri->corerev = le32_to_cpu(revinfo.corerev); ri->boardid = le32_to_cpu(revinfo.boardid); ri->boardvendor = le32_to_cpu(revinfo.boardvendor); @@ -266,15 +241,24 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) ri->driverrev = le32_to_cpu(revinfo.driverrev); ri->ucoderev = le32_to_cpu(revinfo.ucoderev); ri->bus = le32_to_cpu(revinfo.bus); - ri->chipnum = le32_to_cpu(revinfo.chipnum); ri->phytype = le32_to_cpu(revinfo.phytype); ri->phyrev = le32_to_cpu(revinfo.phyrev); ri->anarev = le32_to_cpu(revinfo.anarev); ri->chippkg = le32_to_cpu(revinfo.chippkg); ri->nvramrev = le32_to_cpu(revinfo.nvramrev); + + /* use revinfo if not known yet */ + if (!bus->chip) { + bus->chip = le32_to_cpu(revinfo.chipnum); + bus->chiprev = le32_to_cpu(revinfo.chiprev); + } } ri->result = err; + if (bus->chip) + brcmf_chip_name(bus->chip, bus->chiprev, + ri->chipname, sizeof(ri->chipname)); + /* Do any CLM downloading */ err = brcmf_c_process_clm_blob(ifp); if (err < 0) { @@ -295,7 +279,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) strsep(&ptr, "\n"); /* Print fw version info */ - brcmf_info("Firmware version = %s\n", buf); + brcmf_info("Firmware: %s %s\n", ri->chipname, buf); /* locate firmware version number for ethtool */ ptr = strrchr(buf, ' ') + 1; @@ -365,9 +349,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) /* Enable tx beamforming, errors can be ignored (not 
supported) */ (void)brcmf_fil_iovar_int_set(ifp, "txbf", 1); - - /* do bus specific preinit here */ - err = brcmf_bus_preinit(ifp->drvr->bus_if); done: return err; } @@ -441,6 +422,7 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev, settings->feature_disable = brcmf_feature_disable; settings->fcmode = brcmf_fcmode; settings->roamoff = !!brcmf_roamoff; + settings->iapp = !!brcmf_iapp_enable; #ifdef DEBUG settings->ignore_probe_fail = !!brcmf_ignore_probe_fail; #endif @@ -514,9 +496,6 @@ static int __init brcmfmac_module_init(void) { int err; - /* Initialize debug system first */ - brcmf_debugfs_init(); - /* Get the platform data (if available) for our devices */ err = platform_driver_probe(&brcmf_pd, brcmf_common_pd_probe); if (err == -ENODEV) @@ -528,7 +507,6 @@ static int __init brcmfmac_module_init(void) /* Continue the initialization by registering the different busses */ err = brcmf_core_init(); if (err) { - brcmf_debugfs_exit(); if (brcmfmac_pdata) platform_driver_unregister(&brcmf_pd); } @@ -541,7 +519,6 @@ static void __exit brcmfmac_module_exit(void) brcmf_core_exit(); if (brcmfmac_pdata) platform_driver_unregister(&brcmf_pd); - brcmf_debugfs_exit(); } module_init(brcmfmac_module_init); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index a62f8e70b320..ef914619e8e1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -58,6 +58,7 @@ struct brcmf_mp_device { unsigned int feature_disable; int fcmode; bool roamoff; + bool iapp; bool ignore_probe_fail; struct brcmfmac_pd_cc *country_codes; union { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 930e423f83a8..8d4511eaa9b9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -230,6 +230,37 @@ static void brcmf_netdev_set_multicast_list(struct net_device *ndev) schedule_work(&ifp->multicast_work); } +/** + * brcmf_skb_is_iapp - checks if skb is an IAPP packet + * + * @skb: skb to check + */ +static bool brcmf_skb_is_iapp(struct sk_buff *skb) +{ + static const u8 iapp_l2_update_packet[6] __aligned(2) = { + 0x00, 0x01, 0xaf, 0x81, 0x01, 0x00, + }; + unsigned char *eth_data; +#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + const u16 *a, *b; +#endif + + if (skb->len - skb->mac_len != 6 || + !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) + return false; + + eth_data = skb_mac_header(skb) + ETH_HLEN; +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return !(((*(const u32 *)eth_data) ^ (*(const u32 *)iapp_l2_update_packet)) | + ((*(const u16 *)(eth_data + 4)) ^ (*(const u16 *)(iapp_l2_update_packet + 4)))); +#else + a = (const u16 *)eth_data; + b = (const u16 *)iapp_l2_update_packet; + + return !((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])); +#endif +} + static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev) { @@ -250,6 +281,23 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, goto done; } + /* Some recent Broadcom's firmwares disassociate STA when they receive + * an 802.11f ADD frame. This behavior can lead to a local DoS security + * issue. Attacker may trigger disassociation of any STA by sending a + * proper Ethernet frame to the wireless interface. 
+ * + * Moreover this feature may break AP interfaces in some specific + * setups. This applies e.g. to the bridge with hairpin mode enabled and + * IFLA_BRPORT_MCAST_TO_UCAST set. IAPP packet generated by a firmware + * will get passed back to the wireless interface and cause immediate + * disassociation of a just-connected STA. + */ + if (!drvr->settings->iapp && brcmf_skb_is_iapp(skb)) { + dev_kfree_skb(skb); + ret = -EINVAL; + goto done; + } + /* Make sure there's enough writeable headroom */ if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) { head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0); @@ -325,6 +373,15 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp, void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb) { + /* Most of Broadcom's firmwares send 802.11f ADD frame every time a new + * STA connects to the AP interface. This is an obsoleted standard most + * users don't use, so don't pass these frames up unless requested. + */ + if (!ifp->drvr->settings->iapp && brcmf_skb_is_iapp(skb)) { + brcmu_pkt_buf_free_skb(skb); + return; + } + if (skb->pkt_type == PACKET_MULTICAST) ifp->ndev->stats.multicast++; @@ -914,55 +971,6 @@ static int brcmf_inet6addr_changed(struct notifier_block *nb, } #endif -int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) -{ - struct brcmf_pub *drvr = NULL; - int ret = 0; - int i; - - brcmf_dbg(TRACE, "Enter\n"); - - /* Allocate primary brcmf_info */ - drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC); - if (!drvr) - return -ENOMEM; - - for (i = 0; i < ARRAY_SIZE(drvr->if2bss); i++) - drvr->if2bss[i] = BRCMF_BSSIDX_INVALID; - - mutex_init(&drvr->proto_block); - - /* Link to bus module */ - drvr->hdrlen = 0; - drvr->bus_if = dev_get_drvdata(dev); - drvr->bus_if->drvr = drvr; - drvr->settings = settings; - - /* attach debug facilities */ - brcmf_debug_attach(drvr); - - /* Attach and link in the protocol */ - ret = brcmf_proto_attach(drvr); - if (ret != 0) { - brcmf_err("brcmf_prot_attach failed\n"); - goto fail; - } - - /* Attach to events important for core code */ - brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG, - brcmf_psm_watchdog_notify); - - /* attach firmware event handler */ - brcmf_fweh_attach(drvr); - - return ret; - -fail: - brcmf_detach(dev); - - return ret; -} - static int brcmf_revinfo_read(struct seq_file *s, void *data) { struct brcmf_bus *bus_if = dev_get_drvdata(s->private); @@ -973,8 +981,7 @@ static int brcmf_revinfo_read(struct seq_file *s, void *data) seq_printf(s, "vendorid: 0x%04x\n", ri->vendorid); seq_printf(s, "deviceid: 0x%04x\n", ri->deviceid); seq_printf(s, "radiorev: %s\n", brcmu_dotrev_str(ri->radiorev, drev)); - seq_printf(s, "chipnum: %u (%x)\n", ri->chipnum, ri->chipnum); - seq_printf(s, "chiprev: %u\n", ri->chiprev); + seq_printf(s, "chip: %s\n", ri->chipname); seq_printf(s, "chippkg: %u\n", ri->chippkg); seq_printf(s, "corerev: %u\n", ri->corerev); seq_printf(s, "boardid: 0x%04x\n", ri->boardid); @@ -993,11 +1000,10 @@ static int brcmf_revinfo_read(struct seq_file *s, void *data) return 0; } -int brcmf_bus_started(struct device *dev) +static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops) { int ret = -1; - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_pub *drvr = bus_if->drvr; + struct brcmf_bus *bus_if = drvr->bus_if; struct brcmf_if *ifp; struct brcmf_if *p2p_ifp; @@ -1013,20 +1019,16 @@ int brcmf_bus_started(struct device *dev) /* signal bus ready */ brcmf_bus_change_state(bus_if, BRCMF_BUS_UP); + /* do bus specific preinit 
here */ + ret = brcmf_bus_preinit(bus_if); + if (ret < 0) + goto fail; + /* Bus is ready, do any initialization */ ret = brcmf_c_preinit_dcmds(ifp); if (ret < 0) goto fail; - brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read); - - /* assure we have chipid before feature attach */ - if (!bus_if->chip) { - bus_if->chip = drvr->revinfo.chipnum; - bus_if->chiprev = drvr->revinfo.chiprev; - brcmf_dbg(INFO, "firmware revinfo: chip %x (%d) rev %d\n", - bus_if->chip, bus_if->chip, bus_if->chiprev); - } brcmf_feat_attach(drvr); ret = brcmf_proto_init_done(drvr); @@ -1035,7 +1037,7 @@ int brcmf_bus_started(struct device *dev) brcmf_proto_add_if(drvr, ifp); - drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev, + drvr->config = brcmf_cfg80211_attach(drvr, ops, drvr->settings->p2p_enable); if (drvr->config == NULL) { ret = -ENOMEM; @@ -1069,6 +1071,11 @@ int brcmf_bus_started(struct device *dev) #endif #endif /* CONFIG_INET */ + /* populate debugfs */ + brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read); + brcmf_feat_debugfs_create(drvr); + brcmf_proto_debugfs_create(drvr); + return 0; fail: @@ -1088,6 +1095,69 @@ fail: return ret; } +int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) +{ + struct wiphy *wiphy; + struct cfg80211_ops *ops; + struct brcmf_pub *drvr = NULL; + int ret = 0; + int i; + + brcmf_dbg(TRACE, "Enter\n"); + + ops = brcmf_cfg80211_get_ops(); + if (!ops) + return -ENOMEM; + + wiphy = wiphy_new(ops, sizeof(*drvr)); + if (!wiphy) + return -ENOMEM; + + set_wiphy_dev(wiphy, dev); + drvr = wiphy_priv(wiphy); + drvr->wiphy = wiphy; + + for (i = 0; i < ARRAY_SIZE(drvr->if2bss); i++) + drvr->if2bss[i] = BRCMF_BSSIDX_INVALID; + + mutex_init(&drvr->proto_block); + + /* Link to bus module */ + drvr->hdrlen = 0; + drvr->bus_if = dev_get_drvdata(dev); + drvr->bus_if->drvr = drvr; + drvr->settings = settings; + + /* Attach and link in the protocol */ + ret = brcmf_proto_attach(drvr); + if (ret != 0) { + brcmf_err("brcmf_prot_attach failed\n"); + goto fail; + } + + /* Attach to events important for core code */ + brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG, + brcmf_psm_watchdog_notify); + + /* attach firmware event handler */ + brcmf_fweh_attach(drvr); + + ret = brcmf_bus_started(drvr, ops); + if (ret != 0) { + brcmf_err("dongle is not responding: err=%d\n", ret); + goto fail; + } + + drvr->config->ops = ops; + return 0; + +fail: + brcmf_detach(dev); + kfree(ops); + + return ret; +} + void brcmf_bus_add_txhdrlen(struct device *dev, uint len) { struct brcmf_bus *bus_if = dev_get_drvdata(dev); @@ -1141,14 +1211,14 @@ void brcmf_detach(struct device *dev) brcmf_remove_interface(drvr->iflist[i], false); brcmf_cfg80211_detach(drvr->config); + drvr->config = NULL; brcmf_bus_stop(drvr->bus_if); brcmf_proto_detach(drvr); - brcmf_debug_detach(drvr); bus_if->drvr = NULL; - kfree(drvr); + wiphy_free(drvr->wiphy); } s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len) @@ -1185,6 +1255,12 @@ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state) int ifidx; brcmf_dbg(TRACE, "%d -> %d\n", bus->state, state); + + if (!drvr) { + brcmf_dbg(INFO, "ignoring transition, bus not attached yet\n"); + return; + } + bus->state = state; if (state == BRCMF_BUS_UP) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index df8a1ecb9924..401f50458686 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -87,7 +87,6 @@ struct brcmf_rev_info { u32 vendorid; u32 deviceid; u32 radiorev; - u32 chiprev; u32 corerev; u32 boardid; u32 boardvendor; @@ -95,7 +94,7 @@ struct brcmf_rev_info { u32 driverrev; u32 ucoderev; u32 bus; - u32 chipnum; + char chipname[12]; u32 phytype; u32 phyrev; u32 anarev; @@ -108,6 +107,7 @@ struct brcmf_pub { /* Linkage pointers */ struct brcmf_bus *bus_if; struct brcmf_proto *proto; + struct wiphy *wiphy; struct brcmf_cfg80211_info *config; /* Internal brcmf items */ @@ -181,6 +181,7 @@ enum brcmf_netif_stop_reason { * @netif_stop_lock: spinlock for update netif_stop from multiple sources. * @pend_8021x_cnt: tracks outstanding number of 802.1x frames. * @pend_8021x_wait: used for signalling change in count. + * @fwil_fwerr: flag indicating fwil layer should return firmware error codes. */ struct brcmf_if { struct brcmf_pub *drvr; @@ -198,6 +199,7 @@ struct brcmf_if { wait_queue_head_t pend_8021x_wait; struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES]; u8 ipv6addr_idx; + bool fwil_fwerr; }; int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c index 2d3e5e263a32..504832084eca 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c @@ -25,8 +25,6 @@ #include "fweh.h" #include "debug.h" -static struct dentry *root_folder; - int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data, size_t len) { @@ -54,44 +52,9 @@ int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data, return 0; } -void brcmf_debugfs_init(void) -{ - root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (IS_ERR(root_folder)) - root_folder = NULL; -} - -void brcmf_debugfs_exit(void) -{ - if (!root_folder) - return; - - debugfs_remove_recursive(root_folder); - root_folder = NULL; -} - -int brcmf_debug_attach(struct brcmf_pub *drvr) -{ - struct device *dev = drvr->bus_if->dev; - - if (!root_folder) - return -ENODEV; - - drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder); - return PTR_ERR_OR_ZERO(drvr->dbgfs_dir); -} - -void brcmf_debug_detach(struct brcmf_pub *drvr) -{ - brcmf_fweh_unregister(drvr, BRCMF_E_PSM_WATCHDOG); - - if (!IS_ERR_OR_NULL(drvr->dbgfs_dir)) - debugfs_remove_recursive(drvr->dbgfs_dir); -} - struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr) { - return drvr->dbgfs_dir; + return drvr->wiphy->debugfsdir; } int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn, @@ -99,7 +62,8 @@ int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn, { struct dentry *e; + WARN(!drvr->wiphy->debugfsdir, "wiphy not (yet) registered\n"); e = debugfs_create_devm_seqfile(drvr->bus_if->dev, fn, - drvr->dbgfs_dir, read_fn); + drvr->wiphy->debugfsdir, read_fn); return PTR_ERR_OR_ZERO(e); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h index 35919d9e8e13..cfed0626bf5a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h @@ -113,29 +113,12 @@ extern int brcmf_msg_level; struct brcmf_bus; struct brcmf_pub; #ifdef DEBUG -void brcmf_debugfs_init(void); -void brcmf_debugfs_exit(void); -int brcmf_debug_attach(struct brcmf_pub *drvr); -void brcmf_debug_detach(struct brcmf_pub *drvr); struct dentry 
*brcmf_debugfs_get_devdir(struct brcmf_pub *drvr); int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn, int (*read_fn)(struct seq_file *seq, void *data)); int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data, size_t len); #else -static inline void brcmf_debugfs_init(void) -{ -} -static inline void brcmf_debugfs_exit(void) -{ -} -static inline int brcmf_debug_attach(struct brcmf_pub *drvr) -{ - return 0; -} -static inline void brcmf_debug_detach(struct brcmf_pub *drvr) -{ -} static inline int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn, int (*read_fn)(struct seq_file *seq, void *data)) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 47de35a33853..876731c57bf5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -104,6 +104,9 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp, u32 data; int err; + /* we need to know firmware error */ + ifp->fwil_fwerr = true; + err = brcmf_fil_iovar_int_get(ifp, name, &data); if (err == 0) { brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); @@ -112,6 +115,8 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp, brcmf_dbg(TRACE, "%s feature check failed: %d\n", brcmf_feat_names[id], err); } + + ifp->fwil_fwerr = false; } static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, @@ -120,6 +125,9 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, { int err; + /* we need to know firmware error */ + ifp->fwil_fwerr = true; + err = brcmf_fil_iovar_data_set(ifp, name, data, len); if (err != -BRCMF_FW_UNSUPPORTED) { brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); @@ -128,6 +136,8 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, brcmf_dbg(TRACE, "%s feature check failed: %d\n", brcmf_feat_names[id], err); } + + ifp->fwil_fwerr = false; } #define MAX_CAPS_BUFFER_SIZE 512 @@ -218,7 +228,10 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) /* no quirks */ break; } +} +void brcmf_feat_debugfs_create(struct brcmf_pub *drvr) +{ brcmf_debugfs_add_entry(drvr, "features", brcmf_feat_debugfs_read); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h index 1ab4f1617112..d1193825e559 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h @@ -90,6 +90,13 @@ enum brcmf_feat_quirk { void brcmf_feat_attach(struct brcmf_pub *drvr); /** + * brcmf_feat_debugfs_create() - create debugfs entries. + * + * @drvr: driver instance. + */ +void brcmf_feat_debugfs_create(struct brcmf_pub *drvr); + +/** * brcmf_feat_is_enabled() - query feature. * * @ifp: interface instance. 
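The feature.c hunks above introduce a small probing pattern worth spelling out: ifp->fwil_fwerr is raised around each capability iovar so the fwil layer reports the firmware's own error code, which lets an unsupported iovar (-BRCMF_FW_UNSUPPORTED) be told apart from a bus or transport failure. A minimal C sketch of that pattern follows; it reuses names visible in this diff (ifp->fwil_fwerr, brcmf_fil_iovar_int_get), while brcmf_feat_probe itself and the drvr->feat_flags bitmap are illustrative assumptions, not the exact upstream helpers.

/* Sketch only, not upstream code: probe one iovar and latch a feature
 * bit if the firmware knows it. ifp->fwil_fwerr (the new brcmf_if field
 * above) tells the fwil layer to return the firmware's error code, so
 * -BRCMF_FW_UNSUPPORTED means "iovar unknown to this firmware" rather
 * than a generic failure. drvr->feat_flags as a per-device feature
 * bitmap is an assumption of this sketch.
 */
static void brcmf_feat_probe(struct brcmf_if *ifp, enum brcmf_feat_id id,
			     char *name)
{
	u32 data;
	int err;

	ifp->fwil_fwerr = true;		/* request firmware error codes */
	err = brcmf_fil_iovar_int_get(ifp, name, &data);
	ifp->fwil_fwerr = false;	/* restore normal error mapping */

	if (err == 0)
		ifp->drvr->feat_flags |= BIT(id);	/* firmware supports it */
}

Clearing the flag immediately after the query keeps ordinary runtime iovars on the usual error mapping; only deliberate capability probes ever see raw firmware errors.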
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 091b52979e03..9277f4c2bfeb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -25,6 +25,7 @@ #include "firmware.h" #include "core.h" #include "common.h" +#include "chip.h" #define BRCMF_FW_MAX_NVRAM_SIZE 64000 #define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */ @@ -437,18 +438,31 @@ void brcmf_fw_nvram_free(void *nvram) struct brcmf_fw { struct device *dev; - u16 flags; - const struct firmware *code; - const char *nvram_name; - u16 domain_nr; - u16 bus_nr; - void (*done)(struct device *dev, int err, const struct firmware *fw, - void *nvram_image, u32 nvram_len); + struct brcmf_fw_request *req; + u32 curpos; + void (*done)(struct device *dev, int err, struct brcmf_fw_request *req); }; +static void brcmf_fw_request_done(const struct firmware *fw, void *ctx); + +static void brcmf_fw_free_request(struct brcmf_fw_request *req) +{ + struct brcmf_fw_item *item; + int i; + + for (i = 0, item = &req->items[0]; i < req->n_items; i++, item++) { + if (item->type == BRCMF_FW_TYPE_BINARY) + release_firmware(item->binary); + else if (item->type == BRCMF_FW_TYPE_NVRAM) + brcmf_fw_nvram_free(item->nv_data.data); + } + kfree(req); +} + static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) { struct brcmf_fw *fwctx = ctx; + struct brcmf_fw_item *cur; u32 nvram_length = 0; void *nvram = NULL; u8 *data = NULL; @@ -456,83 +470,150 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) bool raw_nvram; brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); + + cur = &fwctx->req->items[fwctx->curpos]; + if (fw && fw->data) { data = (u8 *)fw->data; data_len = fw->size; raw_nvram = false; } else { data = bcm47xx_nvram_get_contents(&data_len); - if (!data && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) + if (!data && !(cur->flags & BRCMF_FW_REQF_OPTIONAL)) goto fail; raw_nvram = true; } if (data) nvram = brcmf_fw_nvram_strip(data, data_len, &nvram_length, - fwctx->domain_nr, fwctx->bus_nr); + fwctx->req->domain_nr, + fwctx->req->bus_nr); if (raw_nvram) bcm47xx_nvram_release_contents(data); release_firmware(fw); - if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) + if (!nvram && !(cur->flags & BRCMF_FW_REQF_OPTIONAL)) goto fail; - fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length); - kfree(fwctx); + brcmf_dbg(TRACE, "nvram %p len %d\n", nvram, nvram_length); + cur->nv_data.data = nvram; + cur->nv_data.len = nvram_length; return; fail: brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); - release_firmware(fwctx->code); - fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0); + fwctx->done(fwctx->dev, -ENOENT, NULL); + brcmf_fw_free_request(fwctx->req); kfree(fwctx); } -static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) +static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async) +{ + struct brcmf_fw_item *cur; + const struct firmware *fw = NULL; + int ret; + + cur = &fwctx->req->items[fwctx->curpos]; + + brcmf_dbg(TRACE, "%srequest for %s\n", async ? 
"async " : "", + cur->path); + + if (async) + ret = request_firmware_nowait(THIS_MODULE, true, cur->path, + fwctx->dev, GFP_KERNEL, fwctx, + brcmf_fw_request_done); + else + ret = request_firmware(&fw, cur->path, fwctx->dev); + + if (ret < 0) { + brcmf_fw_request_done(NULL, fwctx); + } else if (!async && fw) { + brcmf_dbg(TRACE, "firmware %s %sfound\n", cur->path, + fw ? "" : "not "); + if (cur->type == BRCMF_FW_TYPE_BINARY) + cur->binary = fw; + else if (cur->type == BRCMF_FW_TYPE_NVRAM) + brcmf_fw_request_nvram_done(fw, fwctx); + else + release_firmware(fw); + + return -EAGAIN; + } + return 0; +} + +static void brcmf_fw_request_done(const struct firmware *fw, void *ctx) { struct brcmf_fw *fwctx = ctx; + struct brcmf_fw_item *cur; int ret = 0; - brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); - if (!fw) { + cur = &fwctx->req->items[fwctx->curpos]; + + brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path, + fw ? "" : "not "); + + if (fw) { + if (cur->type == BRCMF_FW_TYPE_BINARY) + cur->binary = fw; + else if (cur->type == BRCMF_FW_TYPE_NVRAM) + brcmf_fw_request_nvram_done(fw, fwctx); + else + release_firmware(fw); + } else if (cur->type == BRCMF_FW_TYPE_NVRAM) { + brcmf_fw_request_nvram_done(NULL, fwctx); + } else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL)) { ret = -ENOENT; goto fail; } - /* only requested code so done here */ - if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) - goto done; - fwctx->code = fw; - ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name, - fwctx->dev, GFP_KERNEL, fwctx, - brcmf_fw_request_nvram_done); + do { + if (++fwctx->curpos == fwctx->req->n_items) { + ret = 0; + goto done; + } + + ret = brcmf_fw_request_next_item(fwctx, false); + } while (ret == -EAGAIN); - /* pass NULL to nvram callback for bcm47xx fallback */ - if (ret) - brcmf_fw_request_nvram_done(NULL, fwctx); return; fail: - brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); + brcmf_dbg(TRACE, "failed err=%d: dev=%s, fw=%s\n", ret, + dev_name(fwctx->dev), cur->path); + brcmf_fw_free_request(fwctx->req); + fwctx->req = NULL; done: - fwctx->done(fwctx->dev, ret, fw, NULL, 0); + fwctx->done(fwctx->dev, ret, fwctx->req); kfree(fwctx); } -int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, - const char *code, const char *nvram, - void (*fw_cb)(struct device *dev, int err, - const struct firmware *fw, - void *nvram_image, u32 nvram_len), - u16 domain_nr, u16 bus_nr) +static bool brcmf_fw_request_is_valid(struct brcmf_fw_request *req) +{ + struct brcmf_fw_item *item; + int i; + + if (!req->n_items) + return false; + + for (i = 0, item = &req->items[0]; i < req->n_items; i++, item++) { + if (!item->path) + return false; + } + return true; +} + +int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, + void (*fw_cb)(struct device *dev, int err, + struct brcmf_fw_request *req)) { struct brcmf_fw *fwctx; brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev)); - if (!fw_cb || !code) + if (!fw_cb) return -EINVAL; - if ((flags & BRCMF_FW_REQUEST_NVRAM) && !nvram) + if (!brcmf_fw_request_is_valid(req)) return -EINVAL; fwctx = kzalloc(sizeof(*fwctx), GFP_KERNEL); @@ -540,35 +621,25 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, return -ENOMEM; fwctx->dev = dev; - fwctx->flags = flags; + fwctx->req = req; fwctx->done = fw_cb; - if (flags & BRCMF_FW_REQUEST_NVRAM) - fwctx->nvram_name = nvram; - fwctx->domain_nr = domain_nr; - fwctx->bus_nr = bus_nr; - - return request_firmware_nowait(THIS_MODULE, true, code, dev, - GFP_KERNEL, fwctx, - 
brcmf_fw_request_code_done); -} -int brcmf_fw_get_firmwares(struct device *dev, u16 flags, - const char *code, const char *nvram, - void (*fw_cb)(struct device *dev, int err, - const struct firmware *fw, - void *nvram_image, u32 nvram_len)) -{ - return brcmf_fw_get_firmwares_pcie(dev, flags, code, nvram, fw_cb, 0, - 0); + brcmf_fw_request_next_item(fwctx, true); + return 0; } -int brcmf_fw_map_chip_to_name(u32 chip, u32 chiprev, - struct brcmf_firmware_mapping mapping_table[], - u32 table_size, char fw_name[BRCMF_FW_NAME_LEN], - char nvram_name[BRCMF_FW_NAME_LEN]) +struct brcmf_fw_request * +brcmf_fw_alloc_request(u32 chip, u32 chiprev, + struct brcmf_firmware_mapping mapping_table[], + u32 table_size, struct brcmf_fw_name *fwnames, + u32 n_fwnames) { - u32 i; + struct brcmf_fw_request *fwreq; + char chipname[12]; + const char *mp_path; + u32 i, j; char end; + size_t reqsz; for (i = 0; i < table_size; i++) { if (mapping_table[i].chipid == chip && @@ -578,32 +649,41 @@ int brcmf_fw_map_chip_to_name(u32 chip, u32 chiprev, if (i == table_size) { brcmf_err("Unknown chipid %d [%d]\n", chip, chiprev); - return -ENODEV; + return NULL; } - /* check if firmware path is provided by module parameter */ - if (brcmf_mp_global.firmware_path[0] != '\0') { - strlcpy(fw_name, brcmf_mp_global.firmware_path, - BRCMF_FW_NAME_LEN); - if ((nvram_name) && (mapping_table[i].nvram)) - strlcpy(nvram_name, brcmf_mp_global.firmware_path, + reqsz = sizeof(*fwreq) + n_fwnames * sizeof(struct brcmf_fw_item); + fwreq = kzalloc(reqsz, GFP_KERNEL); + if (!fwreq) + return NULL; + + brcmf_chip_name(chip, chiprev, chipname, sizeof(chipname)); + + brcmf_info("using %s for chip %s\n", + mapping_table[i].fw_base, chipname); + + mp_path = brcmf_mp_global.firmware_path; + end = mp_path[strlen(mp_path) - 1]; + fwreq->n_items = n_fwnames; + + for (j = 0; j < n_fwnames; j++) { + fwreq->items[j].path = fwnames[j].path; + /* check if firmware path is provided by module parameter */ + if (brcmf_mp_global.firmware_path[0] != '\0') { + strlcpy(fwnames[j].path, mp_path, BRCMF_FW_NAME_LEN); - end = brcmf_mp_global.firmware_path[ - strlen(brcmf_mp_global.firmware_path) - 1]; - if (end != '/') { - strlcat(fw_name, "/", BRCMF_FW_NAME_LEN); - if ((nvram_name) && (mapping_table[i].nvram)) - strlcat(nvram_name, "/", BRCMF_FW_NAME_LEN); + if (end != '/') { + strlcat(fwnames[j].path, "/", + BRCMF_FW_NAME_LEN); + } } + strlcat(fwnames[j].path, mapping_table[i].fw_base, + BRCMF_FW_NAME_LEN); + strlcat(fwnames[j].path, fwnames[j].extension, + BRCMF_FW_NAME_LEN); + fwreq->items[j].path = fwnames[j].path; } - strlcat(fw_name, mapping_table[i].fw, BRCMF_FW_NAME_LEN); - if ((nvram_name) && (mapping_table[i].nvram)) - strlcat(nvram_name, mapping_table[i].nvram, BRCMF_FW_NAME_LEN); - brcmf_info("using %s for chip %#08x(%d) rev %#08x\n", - fw_name, chip, chip, chiprev); - - return 0; + return fwreq; } - diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h index 8fa4b7e1ab3d..79a21095c349 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h @@ -16,10 +16,7 @@ #ifndef BRCMFMAC_FIRMWARE_H #define BRCMFMAC_FIRMWARE_H -#define BRCMF_FW_REQUEST 0x000F -#define BRCMF_FW_REQUEST_NVRAM 0x0001 -#define BRCMF_FW_REQ_FLAGS 0x00F0 -#define BRCMF_FW_REQ_NV_OPTIONAL 0x0010 +#define BRCMF_FW_REQF_OPTIONAL 0x0001 #define BRCMF_FW_NAME_LEN 320 @@ -38,49 +35,62 @@ struct brcmf_firmware_mapping { u32 chipid; u32 
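The firmware.c rewrite above turns the fixed code-then-NVRAM sequence into a walk over req->items: brcmf_fw_request_done() advances curpos and keeps calling brcmf_fw_request_next_item() while it returns -EAGAIN (meaning the item was satisfied synchronously), and stops once an asynchronous request is genuinely in flight or the list is exhausted. A minimal userspace sketch of that control flow; fetch_item() and the -11 sentinel are illustrative stand-ins, not driver API:

#include <stdio.h>

#define N_ITEMS 3
#define SYNC_DONE (-11) /* stands in for -EAGAIN: item completed synchronously */

/* Pretend fetch: even slots complete synchronously, odd slots "go async". */
static int fetch_item(int idx)
{
    if (idx % 2 == 0) {
        printf("item %d fetched synchronously\n", idx);
        return SYNC_DONE; /* caller must advance to the next item */
    }
    printf("item %d queued asynchronously\n", idx);
    return 0; /* the completion callback resumes the walk later */
}

/* Mirrors the do/while loop in brcmf_fw_request_done(). */
static void request_done(int *curpos)
{
    int ret;

    do {
        if (++(*curpos) == N_ITEMS) {
            printf("all items done\n");
            return;
        }
        ret = fetch_item(*curpos);
    } while (ret == SYNC_DONE);
}

int main(void)
{
    int curpos = -1; /* as if we are entering the walk for the first time */

    request_done(&curpos);
    return 0;
}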
revmask; - const char *fw; - const char *nvram; + const char *fw_base; }; -#define BRCMF_FW_NVRAM_DEF(fw_nvram_name, fw, nvram) \ -static const char BRCM_ ## fw_nvram_name ## _FIRMWARE_NAME[] = \ - BRCMF_FW_DEFAULT_PATH fw; \ -static const char BRCM_ ## fw_nvram_name ## _NVRAM_NAME[] = \ - BRCMF_FW_DEFAULT_PATH nvram; \ -MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw); - -#define BRCMF_FW_DEF(fw_name, fw) \ -static const char BRCM_ ## fw_name ## _FIRMWARE_NAME[] = \ - BRCMF_FW_DEFAULT_PATH fw; \ -MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw) \ - -#define BRCMF_FW_NVRAM_ENTRY(chipid, mask, name) \ - { chipid, mask, \ - BRCM_ ## name ## _FIRMWARE_NAME, BRCM_ ## name ## _NVRAM_NAME } +#define BRCMF_FW_DEF(fw_name, fw_base) \ +static const char BRCM_ ## fw_name ## _FIRMWARE_BASENAME[] = \ + BRCMF_FW_DEFAULT_PATH fw_base; \ +MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw_base ".bin") #define BRCMF_FW_ENTRY(chipid, mask, name) \ - { chipid, mask, BRCM_ ## name ## _FIRMWARE_NAME, NULL } + { chipid, mask, BRCM_ ## name ## _FIRMWARE_BASENAME } -int brcmf_fw_map_chip_to_name(u32 chip, u32 chiprev, - struct brcmf_firmware_mapping mapping_table[], - u32 table_size, char fw_name[BRCMF_FW_NAME_LEN], - char nvram_name[BRCMF_FW_NAME_LEN]); void brcmf_fw_nvram_free(void *nvram); + +enum brcmf_fw_type { + BRCMF_FW_TYPE_BINARY, + BRCMF_FW_TYPE_NVRAM +}; + +struct brcmf_fw_item { + const char *path; + enum brcmf_fw_type type; + u16 flags; + union { + const struct firmware *binary; + struct { + void *data; + u32 len; + } nv_data; + }; +}; + +struct brcmf_fw_request { + u16 domain_nr; + u16 bus_nr; + u32 n_items; + struct brcmf_fw_item items[0]; +}; + +struct brcmf_fw_name { + const char *extension; + char *path; +}; + +struct brcmf_fw_request * +brcmf_fw_alloc_request(u32 chip, u32 chiprev, + struct brcmf_firmware_mapping mapping_table[], + u32 table_size, struct brcmf_fw_name *fwnames, + u32 n_fwnames); + /* * Request firmware(s) asynchronously. When the asynchronous request * fails it will not use the callback, but call device_release_driver() * instead which will call the driver .remove() callback. 
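The new data model keeps everything in one allocation: brcmf_fw_request ends in a zero-length items[0] array (the older kernel spelling of a C99 flexible array member), and each brcmf_fw_item overlays the firmware blob and the parsed NVRAM in a union. A userspace approximation of the allocation and path assembly done by brcmf_fw_alloc_request(), with calloc/snprintf standing in for kzalloc and strlcpy/strlcat:

#include <stdio.h>
#include <stdlib.h>

#define NAME_LEN 320 /* same bound as BRCMF_FW_NAME_LEN */

struct item {
    char path[NAME_LEN];
};

struct request {
    unsigned int n_items;
    struct item items[]; /* flexible array member, as in brcmf_fw_request */
};

static struct request *alloc_request(const char *dir, const char *base,
                                     const char *const ext[], unsigned int n)
{
    struct request *req;

    /* one allocation: header plus n trailing item slots */
    req = calloc(1, sizeof(*req) + n * sizeof(struct item));
    if (!req)
        return NULL;

    req->n_items = n;
    for (unsigned int i = 0; i < n; i++) {
        /* optional override directory, then per-chip basename, then extension;
         * the driver additionally checks for a trailing '/' in the override */
        snprintf(req->items[i].path, NAME_LEN, "%s%s%s%s",
                 dir ? dir : "", dir ? "/" : "", base, ext[i]);
    }
    return req;
}

int main(void)
{
    const char *ext[] = { ".bin", ".txt" };
    struct request *req = alloc_request(NULL, "brcmfmac4356-pcie", ext, 2);

    if (!req)
        return 1;
    for (unsigned int i = 0; i < req->n_items; i++)
        printf("item %u: %s\n", i, req->items[i].path);
    free(req);
    return 0;
}

With firmware names now kept as basenames, the reworked BRCMF_FW_DEF both defines BRCM_<name>_FIRMWARE_BASENAME and advertises the ".bin" flavour via MODULE_FIRMWARE(). A compilable sketch of the same token pasting and string-literal concatenation, assuming the in-tree default path of "brcm/":

#include <stdio.h>

#define FW_DEFAULT_PATH "brcm/" /* assumed: the in-tree BRCMF_FW_DEFAULT_PATH */

/* Same shape as BRCMF_FW_DEF: paste the identifier, concatenate the literals. */
#define FW_DEF(name, base) \
static const char BRCM_ ## name ## _FIRMWARE_BASENAME[] = FW_DEFAULT_PATH base

FW_DEF(4356, "brcmfmac4356-pcie");

int main(void)
{
    /* MODULE_FIRMWARE() in the real macro advertises the ".bin" flavour */
    printf("basename: %s\n", BRCM_4356_FIRMWARE_BASENAME);
    printf("module alias: %s.bin\n", BRCM_4356_FIRMWARE_BASENAME);
    return 0;
}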
*/ -int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, - const char *code, const char *nvram, - void (*fw_cb)(struct device *dev, int err, - const struct firmware *fw, - void *nvram_image, u32 nvram_len), - u16 domain_nr, u16 bus_nr); -int brcmf_fw_get_firmwares(struct device *dev, u16 flags, - const char *code, const char *nvram, +int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, void (*fw_cb)(struct device *dev, int err, - const struct firmware *fw, - void *nvram_image, u32 nvram_len)); + struct brcmf_fw_request *req)); #endif /* BRCMFMAC_FIRMWARE_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c index f2cfdd3b2bf1..802d7cb73b80 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c @@ -124,13 +124,15 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set) data, len, &fwerr); if (err) { - brcmf_dbg(FIL, "Failed: %s (%d)\n", - brcmf_fil_get_errstr((u32)(-err)), err); + brcmf_dbg(FIL, "Failed: error=%d\n", err); } else if (fwerr < 0) { brcmf_dbg(FIL, "Firmware error: %s (%d)\n", brcmf_fil_get_errstr((u32)(-fwerr)), fwerr); err = -EBADE; } + if (ifp->fwil_fwerr) + return fwerr; + return err; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index f59642b2c935..f3cbf78c8899 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -2399,10 +2399,6 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr) brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT, BRCMF_FWS_PSQ_LEN); - /* create debugfs file for statistics */ - brcmf_debugfs_add_entry(drvr, "fws_stats", - brcmf_debugfs_fws_stats_read); - brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n", fws->fw_signals ? 
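The fwil.c hunk gives callers a way to see raw firmware status: normally a firmware-side failure is collapsed into -EBADE, but with ifp->fwil_fwerr set the untranslated code is returned instead, which is useful when probing for optional features. A toy model of the two error domains (all names here are illustrative, not driver API):

#include <stdio.h>

#define EBADE_LOCAL 52 /* stand-in for the kernel's -EBADE */

/* returns the firmware status code; transport error goes through *err */
static int do_cmd(int want_fw_fail, int *err)
{
    *err = 0;                      /* transport OK */
    return want_fw_fail ? -23 : 0; /* firmware-specific status code */
}

/* Mirrors brcmf_fil_cmd_data(): the firmware error is surfaced raw or folded. */
static int fil_cmd(int raw_fwerr, int want_fw_fail)
{
    int err, fwerr;

    fwerr = do_cmd(want_fw_fail, &err);
    if (!err && fwerr < 0)
        err = -EBADE_LOCAL;        /* default: collapse into one errno */

    if (raw_fwerr)
        return fwerr;              /* the ifp->fwil_fwerr behaviour */
    return err;
}

int main(void)
{
    printf("folded: %d\n", fil_cmd(0, 1)); /* -52 */
    printf("raw:    %d\n", fil_cmd(1, 1)); /* -23 */
    return 0;
}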
"enabled" : "disabled", tlv); return fws; @@ -2429,6 +2425,13 @@ void brcmf_fws_detach(struct brcmf_fws_info *fws) kfree(fws); } +void brcmf_fws_debugfs_create(struct brcmf_pub *drvr) +{ + /* create debugfs file for statistics */ + brcmf_debugfs_add_entry(drvr, "fws_stats", + brcmf_debugfs_fws_stats_read); +} + bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws) { return !fws->avoid_queueing; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h index ba07bd972002..4e6835766d5d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h @@ -20,6 +20,7 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr); void brcmf_fws_detach(struct brcmf_fws_info *fws); +void brcmf_fws_debugfs_create(struct brcmf_pub *drvr); bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws); bool brcmf_fws_fc_active(struct brcmf_fws_info *fws); void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index e212a791a072..49d37ad96958 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -1418,6 +1418,11 @@ static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data) } #endif +static void brcmf_msgbuf_debugfs_create(struct brcmf_pub *drvr) +{ + brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read); +} + int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) { struct brcmf_bus_msgbuf *if_msgbuf; @@ -1472,6 +1477,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) drvr->proto->delete_peer = brcmf_msgbuf_delete_peer; drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer; drvr->proto->rxreorder = brcmf_msgbuf_rxreorder; + drvr->proto->debugfs_create = brcmf_msgbuf_debugfs_create; drvr->proto->pd = msgbuf; init_waitqueue_head(&msgbuf->ioctl_resp_wait); @@ -1525,8 +1531,6 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) spin_lock_init(&msgbuf->flowring_work_lock); INIT_LIST_HEAD(&msgbuf->work_queue); - brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read); - return 0; fail: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 2ee54133efa1..bcef208a81a5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -462,25 +462,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac) * @dev_addr: optional device address. * * P2P needs mac addresses for P2P device and interface. If no device - * address it specified, these are derived from the primary net device, ie. - * the permanent ethernet address of the device. + * address it specified, these are derived from a random ethernet + * address. */ static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr) { - struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; - bool local_admin = false; + bool random_addr = false; - if (!dev_addr || is_zero_ether_addr(dev_addr)) { - dev_addr = pri_ifp->mac_addr; - local_admin = true; - } + if (!dev_addr || is_zero_ether_addr(dev_addr)) + random_addr = true; - /* Generate the P2P Device Address. 
This consists of the device's - * primary MAC address with the locally administered bit set. + /* Generate the P2P Device Address obtaining a random ethernet + * address with the locally administered bit set. */ - memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); - if (local_admin) - p2p->dev_addr[0] |= 0x02; + if (random_addr) + eth_random_addr(p2p->dev_addr); + else + memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); /* Generate the P2P Interface Address. If the discovery and connection * BSSCFGs need to simultaneously co-exist, then this address must be @@ -2229,7 +2227,7 @@ fail: */ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) { - struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_p2p_info *p2p = &cfg->p2p; struct brcmf_cfg80211_vif *vif; enum nl80211_iftype iftype; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 8752707557bf..091c191ce259 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -46,36 +46,36 @@ enum brcmf_pcie_state { BRCMFMAC_PCIE_STATE_UP }; -BRCMF_FW_NVRAM_DEF(43602, "brcmfmac43602-pcie.bin", "brcmfmac43602-pcie.txt"); -BRCMF_FW_NVRAM_DEF(4350, "brcmfmac4350-pcie.bin", "brcmfmac4350-pcie.txt"); -BRCMF_FW_NVRAM_DEF(4350C, "brcmfmac4350c2-pcie.bin", "brcmfmac4350c2-pcie.txt"); -BRCMF_FW_NVRAM_DEF(4356, "brcmfmac4356-pcie.bin", "brcmfmac4356-pcie.txt"); -BRCMF_FW_NVRAM_DEF(43570, "brcmfmac43570-pcie.bin", "brcmfmac43570-pcie.txt"); -BRCMF_FW_NVRAM_DEF(4358, "brcmfmac4358-pcie.bin", "brcmfmac4358-pcie.txt"); -BRCMF_FW_NVRAM_DEF(4359, "brcmfmac4359-pcie.bin", "brcmfmac4359-pcie.txt"); -BRCMF_FW_NVRAM_DEF(4365B, "brcmfmac4365b-pcie.bin", "brcmfmac4365b-pcie.txt"); -BRCMF_FW_NVRAM_DEF(4365C, "brcmfmac4365c-pcie.bin", "brcmfmac4365c-pcie.txt"); -BRCMF_FW_NVRAM_DEF(4366B, "brcmfmac4366b-pcie.bin", "brcmfmac4366b-pcie.txt"); -BRCMF_FW_NVRAM_DEF(4366C, "brcmfmac4366c-pcie.bin", "brcmfmac4366c-pcie.txt"); -BRCMF_FW_NVRAM_DEF(4371, "brcmfmac4371-pcie.bin", "brcmfmac4371-pcie.txt"); +BRCMF_FW_DEF(43602, "brcmfmac43602-pcie"); +BRCMF_FW_DEF(4350, "brcmfmac4350-pcie"); +BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie"); +BRCMF_FW_DEF(4356, "brcmfmac4356-pcie"); +BRCMF_FW_DEF(43570, "brcmfmac43570-pcie"); +BRCMF_FW_DEF(4358, "brcmfmac4358-pcie"); +BRCMF_FW_DEF(4359, "brcmfmac4359-pcie"); +BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie"); +BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie"); +BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie"); +BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie"); +BRCMF_FW_DEF(4371, "brcmfmac4371-pcie"); static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B), - 
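The P2P device address above is now seeded from eth_random_addr() instead of the permanent MAC. That helper fills ETH_ALEN random bytes, clears the multicast bit and sets the locally-administered bit; a userspace equivalent of its semantics (the kernel uses get_random_bytes() rather than rand()):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ETH_ALEN 6

/* Userspace equivalent of eth_random_addr(). */
static void random_ether_addr(unsigned char addr[ETH_ALEN])
{
    for (int i = 0; i < ETH_ALEN; i++)
        addr[i] = rand() & 0xff;
    addr[0] &= 0xfe; /* clear multicast bit */
    addr[0] |= 0x02; /* set locally administered bit */
}

int main(void)
{
    unsigned char mac[ETH_ALEN];

    srand((unsigned)time(NULL));
    random_ether_addr(mac);
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0;
}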
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371), + BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602), + BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C), + BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C), + BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350), + BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C), + BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), + BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570), + BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570), + BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570), + BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358), + BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359), + BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B), + BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C), + BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B), + BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C), + BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371), }; #define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */ @@ -1350,23 +1350,24 @@ static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len) return 0; } -static int brcmf_pcie_get_fwname(struct device *dev, u32 chip, u32 chiprev, - u8 *fw_name) +static +int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name) { struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; - struct brcmf_pciedev_info *devinfo = buspub->devinfo; - int ret = 0; - - if (devinfo->fw_name[0] != '\0') - strlcpy(fw_name, devinfo->fw_name, BRCMF_FW_NAME_LEN); - else - ret = brcmf_fw_map_chip_to_name(chip, chiprev, - brcmf_pcie_fwnames, - ARRAY_SIZE(brcmf_pcie_fwnames), - fw_name, NULL); - - return ret; + struct brcmf_fw_request *fwreq; + struct brcmf_fw_name fwnames[] = { + { ext, fw_name }, + }; + + fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev, + brcmf_pcie_fwnames, + ARRAY_SIZE(brcmf_pcie_fwnames), + fwnames, ARRAY_SIZE(fwnames)); + if (!fwreq) + return -ENOMEM; + + kfree(fwreq); + return 0; } static const struct brcmf_bus_ops brcmf_pcie_bus_ops = { @@ -1581,24 +1582,6 @@ static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo) } -static int brcmf_pcie_attach_bus(struct brcmf_pciedev_info *devinfo) -{ - int ret; - - /* Attach to the common driver interface */ - ret = brcmf_attach(&devinfo->pdev->dev, devinfo->settings); - if (ret) { - brcmf_err("brcmf_attach failed\n"); - } else { - ret = brcmf_bus_started(&devinfo->pdev->dev); - if (ret) - brcmf_err("dongle is not responding\n"); - } - - return ret; -} - - static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr) { u32 ret_addr; @@ -1669,15 +1652,19 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = { .write32 = brcmf_pcie_buscore_write32, }; +#define BRCMF_PCIE_FW_CODE 0 +#define BRCMF_PCIE_FW_NVRAM 1 + static void brcmf_pcie_setup(struct device *dev, int ret, - const struct firmware *fw, - void *nvram, u32 nvram_len) + struct brcmf_fw_request *fwreq) { + const struct firmware *fw; + void *nvram; struct brcmf_bus *bus; struct brcmf_pciedev *pcie_bus_dev; struct brcmf_pciedev_info *devinfo; struct brcmf_commonring **flowrings; - u32 i; + u32 i, nvram_len; /* check firmware loading result */ if (ret) @@ -1688,6 +1675,11 @@ static void 
brcmf_pcie_setup(struct device *dev, int ret, devinfo = pcie_bus_dev->devinfo; brcmf_pcie_attach(devinfo); + fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary; + nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data; + nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len; + kfree(fwreq); + /* Some of the firmwares have the size of the memory of the device * defined inside the firmware. This is because part of the memory in * the device is shared and the division is determined by FW. Parse @@ -1735,7 +1727,7 @@ static void brcmf_pcie_setup(struct device *dev, int ret, init_waitqueue_head(&devinfo->mbdata_resp_wait); brcmf_pcie_intr_enable(devinfo); - if (brcmf_pcie_attach_bus(devinfo) == 0) + if (brcmf_attach(&devinfo->pdev->dev, devinfo->settings) == 0) return; brcmf_pcie_bus_console_read(devinfo); @@ -1744,20 +1736,41 @@ fail: device_release_driver(dev); } +static struct brcmf_fw_request * +brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo) +{ + struct brcmf_fw_request *fwreq; + struct brcmf_fw_name fwnames[] = { + { ".bin", devinfo->fw_name }, + { ".txt", devinfo->nvram_name }, + }; + + fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev, + brcmf_pcie_fwnames, + ARRAY_SIZE(brcmf_pcie_fwnames), + fwnames, ARRAY_SIZE(fwnames)); + if (!fwreq) + return NULL; + + fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY; + fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM; + fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL; + fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus); + fwreq->bus_nr = devinfo->pdev->bus->number; + + return fwreq; +} + static int brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int ret; + struct brcmf_fw_request *fwreq; struct brcmf_pciedev_info *devinfo; struct brcmf_pciedev *pcie_bus_dev; struct brcmf_bus *bus; - u16 domain_nr; - u16 bus_nr; - domain_nr = pci_domain_nr(pdev->bus) + 1; - bus_nr = pdev->bus->number; - brcmf_dbg(PCIE, "Enter %x:%x (%d/%d)\n", pdev->vendor, pdev->device, - domain_nr, bus_nr); + brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device); ret = -ENOMEM; devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL); @@ -1811,19 +1824,19 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot); dev_set_drvdata(&pdev->dev, bus); - ret = brcmf_fw_map_chip_to_name(devinfo->ci->chip, devinfo->ci->chiprev, - brcmf_pcie_fwnames, - ARRAY_SIZE(brcmf_pcie_fwnames), - devinfo->fw_name, devinfo->nvram_name); - if (ret) + fwreq = brcmf_pcie_prepare_fw_request(devinfo); + if (!fwreq) { + ret = -ENOMEM; goto fail_bus; + } + + ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup); + if (ret < 0) { + kfree(fwreq); + goto fail_bus; + } + return 0; - ret = brcmf_fw_get_firmwares_pcie(bus->dev, BRCMF_FW_REQUEST_NVRAM | - BRCMF_FW_REQ_NV_OPTIONAL, - devinfo->fw_name, devinfo->nvram_name, - brcmf_pcie_setup, domain_nr, bus_nr); - if (ret == 0) - return 0; fail_bus: kfree(bus->msgbuf); kfree(bus); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c index d26ff219ef66..c5ff551ec659 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c @@ -54,7 +54,8 @@ int brcmf_proto_attach(struct brcmf_pub *drvr) if (!proto->tx_queue_data || (proto->hdrpull == NULL) || (proto->query_dcmd == NULL) || (proto->set_dcmd == NULL) || (proto->configure_addr_mode == NULL)
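brcmf_pcie_setup() above shows the consuming side of the new request: on completion the callback owns the request, pulls results out by fixed slot (BRCMF_PCIE_FW_CODE / BRCMF_PCIE_FW_NVRAM) through the item union, and frees it. A simplified consumer with local stand-ins for the kernel types:

#include <stdio.h>
#include <stdlib.h>

#define FW_CODE  0 /* fixed slots, like BRCMF_PCIE_FW_CODE / _NVRAM */
#define FW_NVRAM 1

struct item {
    union {
        const void *binary;                          /* firmware blob */
        struct { void *data; unsigned int len; } nv; /* parsed NVRAM */
    };
};

struct request {
    unsigned int n_items;
    struct item items[];
};

/* Mirrors the top of brcmf_pcie_setup(): unpack by index, then free. */
static void setup_done(struct request *req)
{
    const void *fw = req->items[FW_CODE].binary;
    void *nvram = req->items[FW_NVRAM].nv.data;
    unsigned int nvram_len = req->items[FW_NVRAM].nv.len;

    printf("code %s, nvram %s (%u bytes)\n", fw ? "present" : "missing",
           nvram ? "present" : "absent", nvram_len);
    free(req); /* the completion callback owns and releases the request */
}

int main(void)
{
    static const char blob[] = "firmware";
    struct request *req = calloc(1, sizeof(*req) + 2 * sizeof(struct item));

    if (!req)
        return 1;
    req->n_items = 2;
    req->items[FW_CODE].binary = blob;
    setup_done(req); /* NVRAM slot left empty, as when the optional .txt is absent */
    return 0;
}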
|| - (proto->delete_peer == NULL) || (proto->add_tdls_peer == NULL)) { + (proto->delete_peer == NULL) || (proto->add_tdls_peer == NULL) || + (proto->debugfs_create == NULL)) { brcmf_err("Not all proto handlers have been installed\n"); goto fail; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h index 8a8e08f09ea0..d3c3b9a815ad 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h @@ -48,6 +48,7 @@ struct brcmf_proto { void (*del_if)(struct brcmf_if *ifp); void (*reset_if)(struct brcmf_if *ifp); int (*init_done)(struct brcmf_pub *drvr); + void (*debugfs_create)(struct brcmf_pub *drvr); void *pd; }; @@ -156,4 +157,10 @@ brcmf_proto_init_done(struct brcmf_pub *drvr) return drvr->proto->init_done(drvr); } +static inline void +brcmf_proto_debugfs_create(struct brcmf_pub *drvr) +{ + drvr->proto->debugfs_create(drvr); +} + #endif /* BRCMFMAC_PROTO_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 08686147b59d..1037df7297bb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -600,47 +600,44 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = { {4, 0x1} }; -BRCMF_FW_NVRAM_DEF(43143, "brcmfmac43143-sdio.bin", "brcmfmac43143-sdio.txt"); -BRCMF_FW_NVRAM_DEF(43241B0, "brcmfmac43241b0-sdio.bin", - "brcmfmac43241b0-sdio.txt"); -BRCMF_FW_NVRAM_DEF(43241B4, "brcmfmac43241b4-sdio.bin", - "brcmfmac43241b4-sdio.txt"); -BRCMF_FW_NVRAM_DEF(43241B5, "brcmfmac43241b5-sdio.bin", - "brcmfmac43241b5-sdio.txt"); -BRCMF_FW_NVRAM_DEF(4329, "brcmfmac4329-sdio.bin", "brcmfmac4329-sdio.txt"); -BRCMF_FW_NVRAM_DEF(4330, "brcmfmac4330-sdio.bin", "brcmfmac4330-sdio.txt"); -BRCMF_FW_NVRAM_DEF(4334, "brcmfmac4334-sdio.bin", "brcmfmac4334-sdio.txt"); -BRCMF_FW_NVRAM_DEF(43340, "brcmfmac43340-sdio.bin", "brcmfmac43340-sdio.txt"); -BRCMF_FW_NVRAM_DEF(4335, "brcmfmac4335-sdio.bin", "brcmfmac4335-sdio.txt"); -BRCMF_FW_NVRAM_DEF(43362, "brcmfmac43362-sdio.bin", "brcmfmac43362-sdio.txt"); -BRCMF_FW_NVRAM_DEF(4339, "brcmfmac4339-sdio.bin", "brcmfmac4339-sdio.txt"); -BRCMF_FW_NVRAM_DEF(43430A0, "brcmfmac43430a0-sdio.bin", "brcmfmac43430a0-sdio.txt"); +BRCMF_FW_DEF(43143, "brcmfmac43143-sdio"); +BRCMF_FW_DEF(43241B0, "brcmfmac43241b0-sdio"); +BRCMF_FW_DEF(43241B4, "brcmfmac43241b4-sdio"); +BRCMF_FW_DEF(43241B5, "brcmfmac43241b5-sdio"); +BRCMF_FW_DEF(4329, "brcmfmac4329-sdio"); +BRCMF_FW_DEF(4330, "brcmfmac4330-sdio"); +BRCMF_FW_DEF(4334, "brcmfmac4334-sdio"); +BRCMF_FW_DEF(43340, "brcmfmac43340-sdio"); +BRCMF_FW_DEF(4335, "brcmfmac4335-sdio"); +BRCMF_FW_DEF(43362, "brcmfmac43362-sdio"); +BRCMF_FW_DEF(4339, "brcmfmac4339-sdio"); +BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio"); /* Note the names are not postfixed with a1 for backward compatibility */ -BRCMF_FW_NVRAM_DEF(43430A1, "brcmfmac43430-sdio.bin", "brcmfmac43430-sdio.txt"); -BRCMF_FW_NVRAM_DEF(43455, "brcmfmac43455-sdio.bin", "brcmfmac43455-sdio.txt"); -BRCMF_FW_NVRAM_DEF(4354, "brcmfmac4354-sdio.bin", "brcmfmac4354-sdio.txt"); -BRCMF_FW_NVRAM_DEF(4356, "brcmfmac4356-sdio.bin", "brcmfmac4356-sdio.txt"); -BRCMF_FW_NVRAM_DEF(4373, "brcmfmac4373-sdio.bin", "brcmfmac4373-sdio.txt"); +BRCMF_FW_DEF(43430A1, "brcmfmac43430-sdio"); +BRCMF_FW_DEF(43455, "brcmfmac43455-sdio"); +BRCMF_FW_DEF(4354, "brcmfmac4354-sdio"); +BRCMF_FW_DEF(4356, 
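These proto.c/proto.h hunks route debugfs registration through a new mandatory debugfs_create() handler, checked in brcmf_proto_attach() alongside the other callbacks, so the core decides when the per-protocol entries appear. The dispatch itself is a plain ops-table indirection; a compact sketch with deliberately simplified types:

#include <stdio.h>

struct pub; /* opaque driver state */

struct proto_ops {
    void (*debugfs_create)(struct pub *drvr);
};

struct pub {
    const struct proto_ops *proto;
};

static void msgbuf_debugfs_create(struct pub *drvr)
{
    /* the real code calls brcmf_debugfs_add_entry(drvr, "msgbuf_stats", ...) */
    (void)drvr;
    printf("registering msgbuf_stats\n");
}

static const struct proto_ops msgbuf_ops = {
    .debugfs_create = msgbuf_debugfs_create,
};

/* Mirrors the brcmf_proto_debugfs_create() inline added to proto.h. */
static void proto_debugfs_create(struct pub *drvr)
{
    drvr->proto->debugfs_create(drvr);
}

int main(void)
{
    struct pub drvr = { .proto = &msgbuf_ops };

    proto_debugfs_create(&drvr); /* core triggers per-protocol registration */
    return 0;
}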
"brcmfmac4356-sdio"); +BRCMF_FW_DEF(4373, "brcmfmac4373-sdio"); static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43241_CHIP_ID, 0x0000001F, 43241B0), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43241_CHIP_ID, 0x00000020, 43241B4), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, 43241B5), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, 4329), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, 4330), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, 4334), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, 43340), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43341_CHIP_ID, 0xFFFFFFFF, 43340), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, 4335), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, 43362), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000001, 43430A0), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), - BRCMF_FW_NVRAM_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373) + BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), + BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x0000001F, 43241B0), + BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x00000020, 43241B4), + BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, 43241B5), + BRCMF_FW_ENTRY(BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, 4329), + BRCMF_FW_ENTRY(BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, 4330), + BRCMF_FW_ENTRY(BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, 4334), + BRCMF_FW_ENTRY(BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, 43340), + BRCMF_FW_ENTRY(BRCM_CC_43341_CHIP_ID, 0xFFFFFFFF, 43340), + BRCMF_FW_ENTRY(BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, 4335), + BRCMF_FW_ENTRY(BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, 43362), + BRCMF_FW_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339), + BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000001, 43430A0), + BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1), + BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455), + BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354), + BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), + BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373) }; static void pkt_align(struct sk_buff *p, int len, int align) @@ -1706,8 +1703,7 @@ brcmf_sdio_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) u8 *buf = NULL, *rbuf; int sdret; - brcmf_dbg(TRACE, "Enter\n"); - + brcmf_dbg(SDIO, "Enter\n"); if (bus->rxblen) buf = vzalloc(bus->rxblen); if (!buf) @@ -1810,7 +1806,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new; u8 head_read = 0; - brcmf_dbg(TRACE, "Enter\n"); + brcmf_dbg(SDIO, "Enter\n"); /* Not finished unless we encounter no more frames indication */ bus->rxpending = true; @@ -2345,7 +2341,7 @@ static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len) struct brcmf_sdio_hdrinfo hd_info = {0}; int ret; - brcmf_dbg(TRACE, "Enter\n"); + brcmf_dbg(SDIO, "Enter\n"); /* Back the pointer to make room for bus header */ frame -= bus->tx_hdrlen; @@ -2521,7 +2517,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) uint framecnt; /* Temporary counter of tx/rx frames */ int err = 0; - brcmf_dbg(TRACE, "Enter\n"); + brcmf_dbg(SDIO, "Enter\n"); sdio_claim_host(bus->sdiodev->func1); @@ -2606,7 
+2602,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) /* Would be active due to wake-wlan in gSPI */ if (intstatus & I_CHIPACTIVE) { - brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n"); + brcmf_dbg(SDIO, "Dongle reports CHIPACTIVE\n"); intstatus &= ~I_CHIPACTIVE; } @@ -3411,6 +3407,20 @@ static int brcmf_sdio_bus_preinit(struct device *dev) u32 value; int err; + /* maxctl provided by common layer */ + if (WARN_ON(!bus_if->maxctl)) + return -EINVAL; + + /* Allocate control receive buffer */ + bus_if->maxctl += bus->roundup; + value = roundup((bus_if->maxctl + SDPCM_HDRLEN), ALIGNMENT); + value += bus->head_align; + bus->rxbuf = kmalloc(value, GFP_ATOMIC); + if (bus->rxbuf) + bus->rxblen = value; + + brcmf_sdio_debugfs_create(bus); + /* the commands below use the terms tx and rx from * a device perspective, ie. bus:txglom affects the * bus transfers from device to host. @@ -3990,22 +4000,24 @@ brcmf_sdio_watchdog(struct timer_list *t) } } -static int brcmf_sdio_get_fwname(struct device *dev, u32 chip, u32 chiprev, - u8 *fw_name) +static +int brcmf_sdio_get_fwname(struct device *dev, const char *ext, u8 *fw_name) { struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; - int ret = 0; - - if (sdiodev->fw_name[0] != '\0') - strlcpy(fw_name, sdiodev->fw_name, BRCMF_FW_NAME_LEN); - else - ret = brcmf_fw_map_chip_to_name(chip, chiprev, - brcmf_sdio_fwnames, - ARRAY_SIZE(brcmf_sdio_fwnames), - fw_name, NULL); + struct brcmf_fw_request *fwreq; + struct brcmf_fw_name fwnames[] = { + { ext, fw_name }, + }; + + fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev, + brcmf_sdio_fwnames, + ARRAY_SIZE(brcmf_sdio_fwnames), + fwnames, ARRAY_SIZE(fwnames)); + if (!fwreq) + return -ENOMEM; - return ret; + kfree(fwreq); + return 0; } static const struct brcmf_bus_ops brcmf_sdio_bus_ops = { @@ -4021,15 +4033,19 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = { .get_fwname = brcmf_sdio_get_fwname, }; +#define BRCMF_SDIO_FW_CODE 0 +#define BRCMF_SDIO_FW_NVRAM 1 + static void brcmf_sdio_firmware_callback(struct device *dev, int err, - const struct firmware *code, - void *nvram, u32 nvram_len) + struct brcmf_fw_request *fwreq) { struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; - struct brcmf_sdio *bus = sdiodev->bus; - struct brcmf_sdio_dev *sdiod = bus->sdiodev; + struct brcmf_sdio_dev *sdiod = bus_if->bus_priv.sdio; + struct brcmf_sdio *bus = sdiod->bus; struct brcmf_core *core = bus->sdio_core; + const struct firmware *code; + void *nvram; + u32 nvram_len; u8 saveclk; brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err); @@ -4037,8 +4053,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, if (err) goto fail; - if (!bus_if->drvr) - return; + code = fwreq->items[BRCMF_SDIO_FW_CODE].binary; + nvram = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.data; + nvram_len = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.len; + kfree(fwreq); /* try to download image and nvram to the dongle */ bus->alp_only = true; @@ -4051,7 +4069,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, bus->sdcnt.tickcnt = 0; brcmf_sdio_wd_timer(bus, true); - sdio_claim_host(sdiodev->func1); + sdio_claim_host(sdiod->func1); /* Make sure backplane clock is on, needed to generate F2 interrupt */ brcmf_sdio_clkctl(bus, CLK_AVAIL, false); @@ -4059,9 +4077,9 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, goto release; /* Force clocks on backplane 
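The brcmf_sdio_bus_preinit() hunk above now sizes the control receive buffer itself: maxctl from the common layer plus the bus roundup, aligned up with the kernel's roundup() macro and padded by head_align. The arithmetic in standalone form; all of the constant values here are made up for the demonstration:

#include <stdio.h>

/* same definition as the kernel's roundup(): next multiple of y at or above x */
#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
    unsigned int maxctl = 1500;  /* hypothetical value from the common layer */
    unsigned int bus_roundup = 64;
    unsigned int hdrlen = 12;    /* SDPCM_HDRLEN stand-in */
    unsigned int alignment = 4;  /* ALIGNMENT stand-in */
    unsigned int head_align = 4; /* bus->head_align stand-in */
    unsigned int value;

    maxctl += bus_roundup;
    value = ROUNDUP(maxctl + hdrlen, alignment);
    value += head_align;
    printf("rx buffer size: %u bytes\n", value); /* 1580 with these numbers */
    return 0;
}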
to be sure F2 interrupt propagates */ - saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err); + saveclk = brcmf_sdiod_readb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR, &err); if (!err) { - brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR, (saveclk | SBSDIO_FORCE_HT), &err); } if (err) { @@ -4073,7 +4091,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailboxdata), SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT, NULL); - err = sdio_enable_func(sdiodev->func2); + err = sdio_enable_func(sdiod->func2); brcmf_dbg(INFO, "enable F2: err=%d\n", err); @@ -4085,10 +4103,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, bus->hostintmask, NULL); - brcmf_sdiod_writeb(sdiodev, SBSDIO_WATERMARK, 8, &err); + brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, 8, &err); } else { /* Disable F2 again */ - sdio_disable_func(sdiodev->func2); + sdio_disable_func(sdiod->func2); goto release; } @@ -4096,7 +4114,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, brcmf_sdio_sr_init(bus); } else { /* Restore previous clock setting */ - brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); } @@ -4104,7 +4122,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, /* Allow full data communication using DPC from now on. */ brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA); - err = brcmf_sdiod_intr_register(sdiodev); + err = brcmf_sdiod_intr_register(sdiod); if (err != 0) brcmf_err("intr register failed:%d\n", err); } @@ -4113,28 +4131,60 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, if (err != 0) brcmf_sdio_clkctl(bus, CLK_NONE, false); - sdio_release_host(sdiodev->func1); + sdio_release_host(sdiod->func1); + + /* Assign bus interface call back */ + sdiod->bus_if->dev = sdiod->dev; + sdiod->bus_if->ops = &brcmf_sdio_bus_ops; + sdiod->bus_if->chip = bus->ci->chip; + sdiod->bus_if->chiprev = bus->ci->chiprev; - err = brcmf_bus_started(dev); + /* Attach to the common layer, reserve hdr space */ + err = brcmf_attach(sdiod->dev, sdiod->settings); if (err != 0) { - brcmf_err("dongle is not responding\n"); + brcmf_err("brcmf_attach failed\n"); goto fail; } + + /* ready */ return; release: - sdio_release_host(sdiodev->func1); + sdio_release_host(sdiod->func1); fail: brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); - device_release_driver(&sdiodev->func2->dev); + device_release_driver(&sdiod->func2->dev); device_release_driver(dev); } +static struct brcmf_fw_request * +brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus) +{ + struct brcmf_fw_request *fwreq; + struct brcmf_fw_name fwnames[] = { + { ".bin", bus->sdiodev->fw_name }, + { ".txt", bus->sdiodev->nvram_name }, + }; + + fwreq = brcmf_fw_alloc_request(bus->ci->chip, bus->ci->chiprev, + brcmf_sdio_fwnames, + ARRAY_SIZE(brcmf_sdio_fwnames), + fwnames, ARRAY_SIZE(fwnames)); + if (!fwreq) + return NULL; + + fwreq->items[BRCMF_SDIO_FW_CODE].type = BRCMF_FW_TYPE_BINARY; + fwreq->items[BRCMF_SDIO_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM; + + return fwreq; +} + struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) { int ret; struct brcmf_sdio *bus; struct workqueue_struct *wq; + struct brcmf_fw_request *fwreq; brcmf_dbg(TRACE, "Enter\n"); @@ -4188,39 +4238,13 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) bus->dpc_triggered = false; 
bus->dpc_running = false; - /* Assign bus interface call back */ - bus->sdiodev->bus_if->dev = bus->sdiodev->dev; - bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops; - bus->sdiodev->bus_if->chip = bus->ci->chip; - bus->sdiodev->bus_if->chiprev = bus->ci->chiprev; - /* default sdio bus header length for tx packet */ bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN; - /* Attach to the common layer, reserve hdr space */ - ret = brcmf_attach(bus->sdiodev->dev, bus->sdiodev->settings); - if (ret != 0) { - brcmf_err("brcmf_attach failed\n"); - goto fail; - } - /* Query the F2 block size, set roundup accordingly */ bus->blocksize = bus->sdiodev->func2->cur_blksize; bus->roundup = min(max_roundup, bus->blocksize); - /* Allocate buffers */ - if (bus->sdiodev->bus_if->maxctl) { - bus->sdiodev->bus_if->maxctl += bus->roundup; - bus->rxblen = - roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN), - ALIGNMENT) + bus->head_align; - bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC); - if (!(bus->rxbuf)) { - brcmf_err("rxbuf allocation failed\n"); - goto fail; - } - } - sdio_claim_host(bus->sdiodev->func1); /* Disable F2 to clear any intermediate frame state on the dongle */ @@ -4241,21 +4265,19 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) /* SR state */ bus->sr_enabled = false; - brcmf_sdio_debugfs_create(bus); brcmf_dbg(INFO, "completed!!\n"); - ret = brcmf_fw_map_chip_to_name(bus->ci->chip, bus->ci->chiprev, - brcmf_sdio_fwnames, - ARRAY_SIZE(brcmf_sdio_fwnames), - sdiodev->fw_name, sdiodev->nvram_name); - if (ret) + fwreq = brcmf_sdio_prepare_fw_request(bus); + if (!fwreq) { + ret = -ENOMEM; goto fail; + } - ret = brcmf_fw_get_firmwares(sdiodev->dev, BRCMF_FW_REQUEST_NVRAM, - sdiodev->fw_name, sdiodev->nvram_name, + ret = brcmf_fw_get_firmwares(sdiodev->dev, fwreq, brcmf_sdio_firmware_callback); if (ret != 0) { brcmf_err("async firmware request failed: %d\n", ret); + kfree(fwreq); goto fail; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index b27170c12482..a0873adcc01c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -46,11 +46,11 @@ #define BRCMF_USB_CBCTL_READ 1 #define BRCMF_USB_MAX_PKT_SIZE 1600 -BRCMF_FW_DEF(43143, "brcmfmac43143.bin"); -BRCMF_FW_DEF(43236B, "brcmfmac43236b.bin"); -BRCMF_FW_DEF(43242A, "brcmfmac43242a.bin"); -BRCMF_FW_DEF(43569, "brcmfmac43569.bin"); -BRCMF_FW_DEF(4373, "brcmfmac4373.bin"); +BRCMF_FW_DEF(43143, "brcmfmac43143"); +BRCMF_FW_DEF(43236B, "brcmfmac43236b"); +BRCMF_FW_DEF(43242A, "brcmfmac43242a"); +BRCMF_FW_DEF(43569, "brcmfmac43569"); +BRCMF_FW_DEF(4373, "brcmfmac4373"); static struct brcmf_firmware_mapping brcmf_usb_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), @@ -1128,69 +1128,53 @@ static void brcmf_usb_wowl_config(struct device *dev, bool enabled) device_set_wakeup_enable(devinfo->dev, false); } -static int brcmf_usb_get_fwname(struct device *dev, u32 chip, u32 chiprev, - u8 *fw_name) +static +int brcmf_usb_get_fwname(struct device *dev, const char *ext, u8 *fw_name) { - struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); - int ret = 0; - - if (devinfo->fw_name[0] != '\0') - strlcpy(fw_name, devinfo->fw_name, BRCMF_FW_NAME_LEN); - else - ret = brcmf_fw_map_chip_to_name(chip, chiprev, - brcmf_usb_fwnames, - ARRAY_SIZE(brcmf_usb_fwnames), - fw_name, NULL); + struct brcmf_bus *bus = dev_get_drvdata(dev); + struct brcmf_fw_request *fwreq; + 
struct brcmf_fw_name fwnames[] = { + { ext, fw_name }, + }; + + fwreq = brcmf_fw_alloc_request(bus->chip, bus->chiprev, + brcmf_usb_fwnames, + ARRAY_SIZE(brcmf_usb_fwnames), + fwnames, ARRAY_SIZE(fwnames)); + if (!fwreq) + return -ENOMEM; - return ret; + kfree(fwreq); + return 0; } static const struct brcmf_bus_ops brcmf_usb_bus_ops = { - .txdata = brcmf_usb_tx, + .preinit = brcmf_usb_up, .stop = brcmf_usb_down, + .txdata = brcmf_usb_tx, .txctl = brcmf_usb_tx_ctlpkt, .rxctl = brcmf_usb_rx_ctlpkt, .wowl_config = brcmf_usb_wowl_config, .get_fwname = brcmf_usb_get_fwname, }; -static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo) -{ - int ret; - - /* Attach to the common driver interface */ - ret = brcmf_attach(devinfo->dev, devinfo->settings); - if (ret) { - brcmf_err("brcmf_attach failed\n"); - return ret; - } - - ret = brcmf_usb_up(devinfo->dev); - if (ret) - goto fail; - - ret = brcmf_bus_started(devinfo->dev); - if (ret) - goto fail; - - return 0; -fail: - brcmf_detach(devinfo->dev); - return ret; -} +#define BRCMF_USB_FW_CODE 0 static void brcmf_usb_probe_phase2(struct device *dev, int ret, - const struct firmware *fw, - void *nvram, u32 nvlen) + struct brcmf_fw_request *fwreq) { struct brcmf_bus *bus = dev_get_drvdata(dev); struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo; + const struct firmware *fw; if (ret) goto error; brcmf_dbg(USB, "Start fw downloading\n"); + fw = fwreq->items[BRCMF_USB_FW_CODE].binary; + kfree(fwreq); + ret = check_file(fw->data); if (ret < 0) { brcmf_err("invalid firmware\n"); @@ -1206,7 +1190,8 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret, if (ret) goto error; - ret = brcmf_usb_bus_setup(devinfo); + /* Attach to the common driver interface */ + ret = brcmf_attach(devinfo->dev, devinfo->settings); if (ret) goto error; @@ -1218,11 +1203,33 @@ error: device_release_driver(dev); } +static struct brcmf_fw_request * +brcmf_usb_prepare_fw_request(struct brcmf_usbdev_info *devinfo) +{ + struct brcmf_fw_request *fwreq; + struct brcmf_fw_name fwnames[] = { + { ".bin", devinfo->fw_name }, + }; + + fwreq = brcmf_fw_alloc_request(devinfo->bus_pub.devid, + devinfo->bus_pub.chiprev, + brcmf_usb_fwnames, + ARRAY_SIZE(brcmf_usb_fwnames), + fwnames, ARRAY_SIZE(fwnames)); + if (!fwreq) + return NULL; + + fwreq->items[BRCMF_USB_FW_CODE].type = BRCMF_FW_TYPE_BINARY; + + return fwreq; +} + static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) { struct brcmf_bus *bus = NULL; struct brcmf_usbdev *bus_pub = NULL; struct device *dev = devinfo->dev; + struct brcmf_fw_request *fwreq; int ret; brcmf_dbg(USB, "Enter\n"); @@ -1256,7 +1263,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) } if (!brcmf_usb_dlneeded(devinfo)) { - ret = brcmf_usb_bus_setup(devinfo); + ret = brcmf_attach(devinfo->dev, devinfo->settings); if (ret) goto fail; /* we are done */ @@ -1266,18 +1273,17 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) bus->chip = bus_pub->devid; bus->chiprev = bus_pub->chiprev; - ret = brcmf_fw_map_chip_to_name(bus_pub->devid, bus_pub->chiprev, - brcmf_usb_fwnames, - ARRAY_SIZE(brcmf_usb_fwnames), - devinfo->fw_name, NULL); - if (ret) + fwreq = brcmf_usb_prepare_fw_request(devinfo); + if (!fwreq) { + ret = -ENOMEM; goto fail; + } /* request firmware here */ - ret = brcmf_fw_get_firmwares(dev, 0, devinfo->fw_name, NULL, - brcmf_usb_probe_phase2); + ret = brcmf_fw_get_firmwares(dev, fwreq, brcmf_usb_probe_phase2); if (ret) { brcmf_err("firmware request failed: %d\n", ret); + kfree(fwreq); goto 
fail; } @@ -1459,7 +1465,7 @@ static int brcmf_usb_resume(struct usb_interface *intf) brcmf_dbg(USB, "Enter\n"); if (!devinfo->wowl_enabled) - return brcmf_usb_bus_setup(devinfo); + return brcmf_attach(devinfo->dev, devinfo->settings); devinfo->bus_pub.state = BRCMFMAC_USB_STATE_UP; brcmf_usb_rx_fill_all(devinfo); @@ -1470,11 +1476,20 @@ static int brcmf_usb_reset_resume(struct usb_interface *intf) { struct usb_device *usb = interface_to_usbdev(intf); struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev); + struct brcmf_fw_request *fwreq; + int ret; brcmf_dbg(USB, "Enter\n"); - return brcmf_fw_get_firmwares(&usb->dev, 0, devinfo->fw_name, NULL, - brcmf_usb_probe_phase2); + fwreq = brcmf_usb_prepare_fw_request(devinfo); + if (!fwreq) + return -ENOMEM; + + ret = brcmf_fw_get_firmwares(&usb->dev, fwreq, brcmf_usb_probe_phase2); + if (ret < 0) + kfree(fwreq); + + return ret; } #define BRCMF_USB_DEVICE(dev_id) \ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c index 3a03287fa912..db783e94f929 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c @@ -652,7 +652,6 @@ static void brcms_reg_apply_radar_flags(struct wiphy *wiphy) */ if (!(ch->flags & IEEE80211_CHAN_DISABLED)) ch->flags |= IEEE80211_CHAN_RADAR | - IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_IR; } } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c index 7a1fbb2e3a71..2fe1f6863278 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c @@ -214,7 +214,7 @@ brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn, entry->read = read_fn; entry->drvr = drvr; - dentry = debugfs_create_file(fn, S_IRUGO, dentry, entry, + dentry = debugfs_create_file(fn, 0444, dentry, entry, &brcms_debugfs_def_ops); return PTR_ERR_OR_ZERO(dentry); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index ddfdfe177e24..ecc89e718b9c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -108,7 +108,7 @@ MODULE_DEVICE_TABLE(bcma, brcms_coreid_table); * flags are specified by the BRCM_DL_* macros in * drivers/net/wireless/brcm80211/include/defs.h. 
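A pattern repeated across the PCIe, SDIO and USB hunks: brcmf_fw_get_firmwares() takes ownership of the request only on success, so every caller kfree()s it on error, while the completion callback frees it otherwise. Reduced to its essentials:

#include <stdio.h>
#include <stdlib.h>

struct request { int dummy; };

static void callback(struct request *req)
{
    /* success path: the callback consumes and frees the request */
    printf("callback consumed request\n");
    free(req);
}

static int get_firmwares(struct request *req, int fail,
                         void (*cb)(struct request *))
{
    if (fail)
        return -1; /* ownership stays with the caller */
    cb(req);       /* ownership transferred */
    return 0;
}

int main(void)
{
    struct request *req = calloc(1, sizeof(*req));

    if (!req)
        return 1;
    if (get_firmwares(req, /*fail=*/1, callback) < 0) {
        free(req); /* mirrors the kfree(fwreq) in the error paths above */
        printf("caller freed request on error\n");
    }
    return 0;
}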
*/ -module_param_named(debug, brcm_msg_level, uint, S_IRUGO | S_IWUSR); +module_param_named(debug, brcm_msg_level, uint, 0644); #endif static struct ieee80211_channel brcms_2ghz_chantable[] = { @@ -1563,7 +1563,7 @@ void brcms_free_timer(struct brcms_timer *t) } /* - * precondition: perimeter lock has been acquired + * precondition: no locking required */ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx) { @@ -1578,7 +1578,7 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx) if (le32_to_cpu(hdr->idx) == idx) { pdata = wl->fw.fw_bin[i]->data + le32_to_cpu(hdr->offset); - *pbuf = kmemdup(pdata, len, GFP_ATOMIC); + *pbuf = kmemdup(pdata, len, GFP_KERNEL); if (*pbuf == NULL) goto fail; diff --git a/drivers/net/wireless/cisco/Kconfig b/drivers/net/wireless/cisco/Kconfig index b22567dff893..e210ee8aa63b 100644 --- a/drivers/net/wireless/cisco/Kconfig +++ b/drivers/net/wireless/cisco/Kconfig @@ -5,8 +5,8 @@ config WLAN_VENDOR_CISCO If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. if WLAN_VENDOR_CISCO @@ -33,7 +33,7 @@ config AIRO config AIRO_CS tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" - depends on CFG80211 && PCMCIA && (BROKEN || !M32R) + depends on CFG80211 && PCMCIA select WIRELESS_EXT select WEXT_SPY select WEXT_PRIV diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 54201c02fdb8..ce0fbf83285f 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -4519,21 +4519,21 @@ static int setup_proc_entry( struct net_device *dev, proc_set_user(apriv->proc_entry, proc_kuid, proc_kgid); /* Setup the StatsDelta */ - entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm, + entry = proc_create_data("StatsDelta", 0444 & proc_perm, apriv->proc_entry, &proc_statsdelta_ops, dev); if (!entry) goto fail; proc_set_user(entry, proc_kuid, proc_kgid); /* Setup the Stats */ - entry = proc_create_data("Stats", S_IRUGO & proc_perm, + entry = proc_create_data("Stats", 0444 & proc_perm, apriv->proc_entry, &proc_stats_ops, dev); if (!entry) goto fail; proc_set_user(entry, proc_kuid, proc_kgid); /* Setup the Status */ - entry = proc_create_data("Status", S_IRUGO & proc_perm, + entry = proc_create_data("Status", 0444 & proc_perm, apriv->proc_entry, &proc_status_ops, dev); if (!entry) goto fail; diff --git a/drivers/net/wireless/intel/Kconfig b/drivers/net/wireless/intel/Kconfig index 5b14f2f64a8a..6fdc14b08b8e 100644 --- a/drivers/net/wireless/intel/Kconfig +++ b/drivers/net/wireless/intel/Kconfig @@ -5,8 +5,8 @@ config WLAN_VENDOR_INTEL If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. 
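From here on the section is dominated by the mechanical conversion from S_I* permission macros to octal literals; the underlying bit values make each mapping easy to verify:

#include <stdio.h>
#include <sys/stat.h> /* POSIX permission bits; the kernel's S_IRUGO is S_IRUSR|S_IRGRP|S_IROTH */

int main(void)
{
    printf("S_IRUGO           = %04o\n", (unsigned)(S_IRUSR | S_IRGRP | S_IROTH));           /* 0444 */
    printf("S_IWUSR | S_IRUGO = %04o\n", (unsigned)(S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH)); /* 0644 */
    printf("S_IWUSR | S_IRUSR = %04o\n", (unsigned)(S_IWUSR | S_IRUSR));                     /* 0600 */
    printf("S_IRUSR           = %04o\n", (unsigned)S_IRUSR);                                 /* 0400 */
    printf("S_IWUSR           = %04o\n", (unsigned)S_IWUSR);                                 /* 0200 */
    return 0;
}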
if WLAN_VENDOR_INTEL diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 19c442cb93e4..236b52423506 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -3538,7 +3538,7 @@ static ssize_t show_pci(struct device *d, struct device_attribute *attr, return out - buf; } -static DEVICE_ATTR(pci, S_IRUGO, show_pci, NULL); +static DEVICE_ATTR(pci, 0444, show_pci, NULL); static ssize_t show_cfg(struct device *d, struct device_attribute *attr, char *buf) @@ -3547,7 +3547,7 @@ static ssize_t show_cfg(struct device *d, struct device_attribute *attr, return sprintf(buf, "0x%08x\n", (int)p->config); } -static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL); +static DEVICE_ATTR(cfg, 0444, show_cfg, NULL); static ssize_t show_status(struct device *d, struct device_attribute *attr, char *buf) @@ -3556,7 +3556,7 @@ static ssize_t show_status(struct device *d, struct device_attribute *attr, return sprintf(buf, "0x%08x\n", (int)p->status); } -static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); +static DEVICE_ATTR(status, 0444, show_status, NULL); static ssize_t show_capability(struct device *d, struct device_attribute *attr, char *buf) @@ -3565,7 +3565,7 @@ static ssize_t show_capability(struct device *d, struct device_attribute *attr, return sprintf(buf, "0x%08x\n", (int)p->capability); } -static DEVICE_ATTR(capability, S_IRUGO, show_capability, NULL); +static DEVICE_ATTR(capability, 0444, show_capability, NULL); #define IPW2100_REG(x) { IPW_ ##x, #x } static const struct { @@ -3822,7 +3822,7 @@ static ssize_t show_registers(struct device *d, struct device_attribute *attr, return out - buf; } -static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL); +static DEVICE_ATTR(registers, 0444, show_registers, NULL); static ssize_t show_hardware(struct device *d, struct device_attribute *attr, char *buf) @@ -3863,7 +3863,7 @@ static ssize_t show_hardware(struct device *d, struct device_attribute *attr, return out - buf; } -static DEVICE_ATTR(hardware, S_IRUGO, show_hardware, NULL); +static DEVICE_ATTR(hardware, 0444, show_hardware, NULL); static ssize_t show_memory(struct device *d, struct device_attribute *attr, char *buf) @@ -3957,7 +3957,7 @@ static ssize_t store_memory(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(memory, S_IWUSR | S_IRUGO, show_memory, store_memory); +static DEVICE_ATTR(memory, 0644, show_memory, store_memory); static ssize_t show_ordinals(struct device *d, struct device_attribute *attr, char *buf) @@ -3993,7 +3993,7 @@ static ssize_t show_ordinals(struct device *d, struct device_attribute *attr, return len; } -static DEVICE_ATTR(ordinals, S_IRUGO, show_ordinals, NULL); +static DEVICE_ATTR(ordinals, 0444, show_ordinals, NULL); static ssize_t show_stats(struct device *d, struct device_attribute *attr, char *buf) @@ -4014,7 +4014,7 @@ static ssize_t show_stats(struct device *d, struct device_attribute *attr, return out - buf; } -static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL); +static DEVICE_ATTR(stats, 0444, show_stats, NULL); static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode) { @@ -4112,7 +4112,7 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr, return len; } -static DEVICE_ATTR(internals, S_IRUGO, show_internals, NULL); +static DEVICE_ATTR(internals, 0444, show_internals, NULL); static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr, char *buf) @@ -4157,7 +4157,7 
@@ static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr, return out - buf; } -static DEVICE_ATTR(bssinfo, S_IRUGO, show_bssinfo, NULL); +static DEVICE_ATTR(bssinfo, 0444, show_bssinfo, NULL); #ifdef CONFIG_IPW2100_DEBUG static ssize_t debug_level_show(struct device_driver *d, char *buf) @@ -4216,8 +4216,7 @@ static ssize_t store_fatal_error(struct device *d, return count; } -static DEVICE_ATTR(fatal_error, S_IWUSR | S_IRUGO, show_fatal_error, - store_fatal_error); +static DEVICE_ATTR(fatal_error, 0644, show_fatal_error, store_fatal_error); static ssize_t show_scan_age(struct device *d, struct device_attribute *attr, char *buf) @@ -4250,7 +4249,7 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, return strnlen(buf, count); } -static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age); +static DEVICE_ATTR(scan_age, 0644, show_scan_age, store_scan_age); static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr, char *buf) @@ -4304,7 +4303,7 @@ static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); +static DEVICE_ATTR(rf_kill, 0644, show_rf_kill, store_rf_kill); static struct attribute *ipw2100_sysfs_entries[] = { &dev_attr_hardware.attr, diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 8da87496cb58..87a5e414c2f7 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -1303,7 +1303,7 @@ static ssize_t show_event_log(struct device *d, return len; } -static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL); +static DEVICE_ATTR(event_log, 0444, show_event_log, NULL); static ssize_t show_error(struct device *d, struct device_attribute *attr, char *buf) @@ -1351,7 +1351,7 @@ static ssize_t clear_error(struct device *d, return count; } -static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error); +static DEVICE_ATTR(error, 0644, show_error, clear_error); static ssize_t show_cmd_log(struct device *d, struct device_attribute *attr, char *buf) @@ -1378,7 +1378,7 @@ static ssize_t show_cmd_log(struct device *d, return len; } -static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL); +static DEVICE_ATTR(cmd_log, 0444, show_cmd_log, NULL); #ifdef CONFIG_IPW2200_PROMISCUOUS static void ipw_prom_free(struct ipw_priv *priv); @@ -1443,8 +1443,7 @@ static ssize_t show_rtap_iface(struct device *d, } } -static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface, - store_rtap_iface); +static DEVICE_ATTR(rtap_iface, 0600, show_rtap_iface, store_rtap_iface); static ssize_t store_rtap_filter(struct device *d, struct device_attribute *attr, @@ -1475,8 +1474,7 @@ static ssize_t show_rtap_filter(struct device *d, priv->prom_priv ? 
priv->prom_priv->filter : 0); } -static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter, - store_rtap_filter); +static DEVICE_ATTR(rtap_filter, 0600, show_rtap_filter, store_rtap_filter); #endif static ssize_t show_scan_age(struct device *d, struct device_attribute *attr, @@ -1520,7 +1518,7 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, return len; } -static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age); +static DEVICE_ATTR(scan_age, 0644, show_scan_age, store_scan_age); static ssize_t show_led(struct device *d, struct device_attribute *attr, char *buf) @@ -1553,7 +1551,7 @@ static ssize_t store_led(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led); +static DEVICE_ATTR(led, 0644, show_led, store_led); static ssize_t show_status(struct device *d, struct device_attribute *attr, char *buf) @@ -1562,7 +1560,7 @@ static ssize_t show_status(struct device *d, return sprintf(buf, "0x%08x\n", (int)p->status); } -static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); +static DEVICE_ATTR(status, 0444, show_status, NULL); static ssize_t show_cfg(struct device *d, struct device_attribute *attr, char *buf) @@ -1571,7 +1569,7 @@ static ssize_t show_cfg(struct device *d, struct device_attribute *attr, return sprintf(buf, "0x%08x\n", (int)p->config); } -static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL); +static DEVICE_ATTR(cfg, 0444, show_cfg, NULL); static ssize_t show_nic_type(struct device *d, struct device_attribute *attr, char *buf) @@ -1580,7 +1578,7 @@ static ssize_t show_nic_type(struct device *d, return sprintf(buf, "TYPE: %d\n", priv->nic_type); } -static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL); +static DEVICE_ATTR(nic_type, 0444, show_nic_type, NULL); static ssize_t show_ucode_version(struct device *d, struct device_attribute *attr, char *buf) @@ -1594,7 +1592,7 @@ static ssize_t show_ucode_version(struct device *d, return sprintf(buf, "0x%08x\n", tmp); } -static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL); +static DEVICE_ATTR(ucode_version, 0644, show_ucode_version, NULL); static ssize_t show_rtc(struct device *d, struct device_attribute *attr, char *buf) @@ -1608,7 +1606,7 @@ static ssize_t show_rtc(struct device *d, struct device_attribute *attr, return sprintf(buf, "0x%08x\n", tmp); } -static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL); +static DEVICE_ATTR(rtc, 0644, show_rtc, NULL); /* * Add a device attribute to view/control the delay between eeprom @@ -1630,8 +1628,7 @@ static ssize_t store_eeprom_delay(struct device *d, return strnlen(buf, count); } -static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO, - show_eeprom_delay, store_eeprom_delay); +static DEVICE_ATTR(eeprom_delay, 0644, show_eeprom_delay, store_eeprom_delay); static ssize_t show_command_event_reg(struct device *d, struct device_attribute *attr, char *buf) @@ -1654,7 +1651,7 @@ static ssize_t store_command_event_reg(struct device *d, return strnlen(buf, count); } -static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO, +static DEVICE_ATTR(command_event_reg, 0644, show_command_event_reg, store_command_event_reg); static ssize_t show_mem_gpio_reg(struct device *d, @@ -1678,8 +1675,7 @@ static ssize_t store_mem_gpio_reg(struct device *d, return strnlen(buf, count); } -static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO, - show_mem_gpio_reg, store_mem_gpio_reg); +static DEVICE_ATTR(mem_gpio_reg, 0644, show_mem_gpio_reg, 
store_mem_gpio_reg); static ssize_t show_indirect_dword(struct device *d, struct device_attribute *attr, char *buf) @@ -1705,7 +1701,7 @@ static ssize_t store_indirect_dword(struct device *d, return strnlen(buf, count); } -static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO, +static DEVICE_ATTR(indirect_dword, 0644, show_indirect_dword, store_indirect_dword); static ssize_t show_indirect_byte(struct device *d, @@ -1732,7 +1728,7 @@ static ssize_t store_indirect_byte(struct device *d, return strnlen(buf, count); } -static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO, +static DEVICE_ATTR(indirect_byte, 0644, show_indirect_byte, store_indirect_byte); static ssize_t show_direct_dword(struct device *d, @@ -1759,8 +1755,7 @@ static ssize_t store_direct_dword(struct device *d, return strnlen(buf, count); } -static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO, - show_direct_dword, store_direct_dword); +static DEVICE_ATTR(direct_dword, 0644, show_direct_dword, store_direct_dword); static int rf_kill_active(struct ipw_priv *priv) { @@ -1831,7 +1826,7 @@ static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); +static DEVICE_ATTR(rf_kill, 0644, show_rf_kill, store_rf_kill); static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr, char *buf) @@ -1884,8 +1879,7 @@ static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan, - store_speed_scan); +static DEVICE_ATTR(speed_scan, 0644, show_speed_scan, store_speed_scan); static ssize_t show_net_stats(struct device *d, struct device_attribute *attr, char *buf) @@ -1906,8 +1900,7 @@ static ssize_t store_net_stats(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO, - show_net_stats, store_net_stats); +static DEVICE_ATTR(net_stats, 0644, show_net_stats, store_net_stats); static ssize_t show_channels(struct device *d, struct device_attribute *attr, @@ -1953,7 +1946,7 @@ static ssize_t show_channels(struct device *d, return len; } -static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); +static DEVICE_ATTR(channels, 0400, show_channels, NULL); static void notify_wx_assoc_event(struct ipw_priv *priv) { diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_module.c b/drivers/net/wireless/intel/ipw2x00/libipw_module.c index c58c5b2dcce5..f00d45f54c76 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_module.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_module.c @@ -276,7 +276,7 @@ static int __init libipw_init(void) " proc directory\n"); return -EIO; } - e = proc_create("debug_level", S_IRUGO | S_IWUSR, libipw_proc, + e = proc_create("debug_level", 0644, libipw_proc, &debug_level_proc_fops); if (!e) { remove_proc_entry(DRV_PROCNAME, init_net.proc_net); diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 4b53ebf00c7f..62a9794f952b 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -3122,7 +3122,7 @@ il3945_store_debug_level(struct device *d, struct device_attribute *attr, return strnlen(buf, count); } -static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level, +static DEVICE_ATTR(debug_level, 0644, il3945_show_debug_level, il3945_store_debug_level); #endif /* CONFIG_IWLEGACY_DEBUG */ @@ -3139,7 +3139,7 @@ 
il3945_show_temperature(struct device *d, struct device_attribute *attr, return sprintf(buf, "%d\n", il3945_hw_get_temperature(il)); } -static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL); +static DEVICE_ATTR(temperature, 0444, il3945_show_temperature, NULL); static ssize_t il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf) @@ -3165,8 +3165,7 @@ il3945_store_tx_power(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power, - il3945_store_tx_power); +static DEVICE_ATTR(tx_power, 0644, il3945_show_tx_power, il3945_store_tx_power); static ssize_t il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf) @@ -3199,8 +3198,7 @@ il3945_store_flags(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags, - il3945_store_flags); +static DEVICE_ATTR(flags, 0644, il3945_show_flags, il3945_store_flags); static ssize_t il3945_show_filter_flags(struct device *d, struct device_attribute *attr, @@ -3235,7 +3233,7 @@ il3945_store_filter_flags(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags, +static DEVICE_ATTR(filter_flags, 0644, il3945_show_filter_flags, il3945_store_filter_flags); static ssize_t @@ -3306,7 +3304,7 @@ il3945_store_measurement(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement, +static DEVICE_ATTR(measurement, 0600, il3945_show_measurement, il3945_store_measurement); static ssize_t @@ -3330,7 +3328,7 @@ il3945_show_retry_rate(struct device *d, struct device_attribute *attr, return sprintf(buf, "%d", il->retry_rate); } -static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate, +static DEVICE_ATTR(retry_rate, 0600, il3945_show_retry_rate, il3945_store_retry_rate); static ssize_t @@ -3340,7 +3338,7 @@ il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf) return 0; } -static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL); +static DEVICE_ATTR(channels, 0400, il3945_show_channels, NULL); static ssize_t il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf) @@ -3377,8 +3375,7 @@ il3945_store_antenna(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna, - il3945_store_antenna); +static DEVICE_ATTR(antenna, 0644, il3945_show_antenna, il3945_store_antenna); static ssize_t il3945_show_status(struct device *d, struct device_attribute *attr, char *buf) @@ -3389,7 +3386,7 @@ il3945_show_status(struct device *d, struct device_attribute *attr, char *buf) return sprintf(buf, "0x%08x\n", (int)il->status); } -static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL); +static DEVICE_ATTR(status, 0444, il3945_show_status, NULL); static ssize_t il3945_dump_error_log(struct device *d, struct device_attribute *attr, @@ -3404,7 +3401,7 @@ il3945_dump_error_log(struct device *d, struct device_attribute *attr, return strnlen(buf, count); } -static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log); +static DEVICE_ATTR(dump_errors, 0200, NULL, il3945_dump_error_log); /***************************************************************************** * @@ -3943,18 +3940,18 @@ il3945_exit(void) 
MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX)); -module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO); +module_param_named(antenna, il3945_mod_params.antenna, int, 0444); MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); -module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO); +module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, 0444); MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])"); module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int, - S_IRUGO); + 0444); MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)"); #ifdef CONFIG_IWLEGACY_DEBUG -module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR); +module_param_named(debug, il_debug_level, uint, 0644); MODULE_PARM_DESC(debug, "debug output mask"); #endif -module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO); +module_param_named(fw_restart, il3945_mod_params.restart_fw, int, 0444); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); module_exit(il3945_exit); diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index de63f2518f23..562e94870a9c 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -4591,7 +4591,7 @@ il4965_store_debug_level(struct device *d, struct device_attribute *attr, return strnlen(buf, count); } -static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level, +static DEVICE_ATTR(debug_level, 0644, il4965_show_debug_level, il4965_store_debug_level); #endif /* CONFIG_IWLEGACY_DEBUG */ @@ -4608,7 +4608,7 @@ il4965_show_temperature(struct device *d, struct device_attribute *attr, return sprintf(buf, "%d\n", il->temperature); } -static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL); +static DEVICE_ATTR(temperature, 0444, il4965_show_temperature, NULL); static ssize_t il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf) @@ -4642,7 +4642,7 @@ il4965_store_tx_power(struct device *d, struct device_attribute *attr, return ret; } -static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power, +static DEVICE_ATTR(tx_power, 0644, il4965_show_tx_power, il4965_store_tx_power); static struct attribute *il_sysfs_entries[] = { @@ -6859,18 +6859,17 @@ module_exit(il4965_exit); module_init(il4965_init); #ifdef CONFIG_IWLEGACY_DEBUG -module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR); +module_param_named(debug, il_debug_level, uint, 0644); MODULE_PARM_DESC(debug, "debug output mask"); #endif -module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO); +module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, 0444); MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); -module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO); +module_param_named(queues_num, il4965_mod_params.num_of_queues, int, 0444); MODULE_PARM_DESC(queues_num, "number of hw queues."); -module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO); +module_param_named(11n_disable, il4965_mod_params.disable_11n, int, 0444); MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); -module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int, - S_IRUGO); +module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int, 0444); MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size 
(default 0 [disabled])"); -module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO); +module_param_named(fw_restart, il4965_mod_params.restart_fw, int, 0444); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c index 365a4187fc37..54ff83829afb 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c @@ -2768,16 +2768,16 @@ il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir) { struct il_lq_sta *lq_sta = il_sta; lq_sta->rs_sta_dbgfs_scale_table_file = - debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, + debugfs_create_file("rate_scale_table", 0600, dir, lq_sta, &rs_sta_dbgfs_scale_table_ops); lq_sta->rs_sta_dbgfs_stats_table_file = - debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta, + debugfs_create_file("rate_stats_table", 0400, dir, lq_sta, &rs_sta_dbgfs_stats_table_ops); lq_sta->rs_sta_dbgfs_rate_scale_data_file = - debugfs_create_file("rate_scale_data", S_IRUSR, dir, lq_sta, + debugfs_create_file("rate_scale_data", 0400, dir, lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = - debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, + debugfs_create_u8("tx_agg_tid_enable", 0600, dir, &lq_sta->tx_agg_tid_en); } diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 558bb16bfd46..063e19ced7c8 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -435,7 +435,7 @@ EXPORT_SYMBOL(il_send_cmd_pdu_async); /* default: IL_LED_BLINK(0) using blinking idx table */ static int led_mode; -module_param(led_mode, int, S_IRUGO); +module_param(led_mode, int, 0444); MODULE_PARM_DESC(led_mode, "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking"); @@ -3372,7 +3372,7 @@ MODULE_LICENSE("GPL"); * default: bt_coex_active = true (BT_COEX_ENABLE) */ static bool bt_coex_active = true; -module_param(bt_coex_active, bool, S_IRUGO); +module_param(bt_coex_active, bool, 0444); MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); u32 il_debug_level; diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c index 6fc6b7ff9849..d76073def677 100644 --- a/drivers/net/wireless/intel/iwlegacy/debug.c +++ b/drivers/net/wireless/intel/iwlegacy/debug.c @@ -135,16 +135,14 @@ EXPORT_SYMBOL(il_update_stats); #define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ struct dentry *__tmp; \ - __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ - parent, ptr); \ + __tmp = debugfs_create_bool(#name, 0600, parent, ptr); \ if (IS_ERR(__tmp) || !__tmp) \ goto err; \ } while (0) #define DEBUGFS_ADD_X32(name, parent, ptr) do { \ struct dentry *__tmp; \ - __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \ - parent, ptr); \ + __tmp = debugfs_create_x32(#name, 0600, parent, ptr); \ if (IS_ERR(__tmp) || !__tmp) \ goto err; \ } while (0) @@ -1365,35 +1363,35 @@ il_dbgfs_register(struct il_priv *il, const char *name) if (!dir_debug) goto err; - DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); - 
DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(nvm, dir_data, 0400); + DEBUGFS_ADD_FILE(sram, dir_data, 0600); + DEBUGFS_ADD_FILE(stations, dir_data, 0400); + DEBUGFS_ADD_FILE(channels, dir_data, 0400); + DEBUGFS_ADD_FILE(status, dir_data, 0400); + DEBUGFS_ADD_FILE(interrupt, dir_data, 0600); + DEBUGFS_ADD_FILE(qos, dir_data, 0400); + DEBUGFS_ADD_FILE(disable_ht40, dir_data, 0600); + DEBUGFS_ADD_FILE(rx_stats, dir_debug, 0400); + DEBUGFS_ADD_FILE(tx_stats, dir_debug, 0400); + DEBUGFS_ADD_FILE(rx_queue, dir_debug, 0400); + DEBUGFS_ADD_FILE(tx_queue, dir_debug, 0400); + DEBUGFS_ADD_FILE(power_save_status, dir_debug, 0400); + DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, 0200); + DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, 0200); + DEBUGFS_ADD_FILE(fh_reg, dir_debug, 0400); + DEBUGFS_ADD_FILE(missed_beacon, dir_debug, 0200); + DEBUGFS_ADD_FILE(force_reset, dir_debug, 0600); + DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, 0400); + DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, 0400); + DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, 0400); if (il->cfg->sensitivity_calib_by_driver) - DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(sensitivity, dir_debug, 0400); if (il->cfg->chain_noise_calib_by_driver) - DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(chain_noise, dir_debug, 0400); + DEBUGFS_ADD_FILE(rxon_flags, dir_debug, 0200); + DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, 0200); + DEBUGFS_ADD_FILE(wd_timeout, dir_debug, 0200); if (il->cfg->sensitivity_calib_by_driver) DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &il->disable_sens_cal); diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index c5f2ddf9b0fe..e5a2fc738ac3 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -91,7 +91,6 @@ config IWLWIFI_BCAST_FILTERING config IWLWIFI_PCIE_RTPM bool "Enable runtime power management mode for PCIe devices" depends on IWLMVM && PM && EXPERT - default false help Say Y here to enable runtime power management for PCIe devices. 
If enabled, the device will go into low power mode diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 48f6f80eb24b..dffd9df782b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,6 +20,7 @@ * BSD LICENSE * * Copyright(c) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -55,7 +57,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 36 +#define IWL_22000_UCODE_API_MAX 38 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 24 diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index 90a1d14cf7d2..e1c869a1f8cc 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,6 +20,7 @@ * BSD LICENSE * * Copyright(c) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -53,9 +55,10 @@ #include <linux/stringify.h> #include "iwl-config.h" #include "iwl-agn-hw.h" +#include "fw/file.h" /* Highest firmware API version supported */ -#define IWL9000_UCODE_API_MAX 36 +#define IWL9000_UCODE_API_MAX 38 /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 30 @@ -265,6 +268,67 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = { .integrated = true, .soc_latency = 5000, }; + +const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { + .name = "Intel(R) Dual Band Wireless AC 9460", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + +const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = { + .name = "Intel(R) Dual Band Wireless AC 9461", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + +const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = { + .name = "Intel(R) Dual Band Wireless AC 9462", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + 
.nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + +const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = { + .name = "Intel(R) Dual Band Wireless AC 9560", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c index 482ac8fdc67b..096a07c5a33f 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c @@ -48,16 +48,14 @@ #define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ struct dentry *__tmp; \ - __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ - parent, ptr); \ + __tmp = debugfs_create_bool(#name, 0600, parent, ptr); \ if (IS_ERR(__tmp) || !__tmp) \ goto err; \ } while (0) #define DEBUGFS_ADD_X32(name, parent, ptr) do { \ struct dentry *__tmp; \ - __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \ - parent, ptr); \ + __tmp = debugfs_create_x32(#name, 0600, parent, ptr); \ if (IS_ERR(__tmp) || !__tmp) \ goto err; \ } while (0) @@ -2370,48 +2368,48 @@ int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir) if (!dir_debug) goto err; - DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(rx_handlers, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR); - - DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(rf_reset, dir_debug, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR); - DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); - 
DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR); - DEBUGFS_ADD_FILE(fw_restart, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(nvm, dir_data, 0400); + DEBUGFS_ADD_FILE(sram, dir_data, 0600); + DEBUGFS_ADD_FILE(wowlan_sram, dir_data, 0400); + DEBUGFS_ADD_FILE(stations, dir_data, 0400); + DEBUGFS_ADD_FILE(channels, dir_data, 0400); + DEBUGFS_ADD_FILE(status, dir_data, 0400); + DEBUGFS_ADD_FILE(rx_handlers, dir_data, 0600); + DEBUGFS_ADD_FILE(qos, dir_data, 0400); + DEBUGFS_ADD_FILE(sleep_level_override, dir_data, 0600); + DEBUGFS_ADD_FILE(current_sleep_command, dir_data, 0400); + DEBUGFS_ADD_FILE(thermal_throttling, dir_data, 0400); + DEBUGFS_ADD_FILE(disable_ht40, dir_data, 0600); + DEBUGFS_ADD_FILE(temperature, dir_data, 0400); + + DEBUGFS_ADD_FILE(power_save_status, dir_debug, 0400); + DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, 0200); + DEBUGFS_ADD_FILE(missed_beacon, dir_debug, 0200); + DEBUGFS_ADD_FILE(plcp_delta, dir_debug, 0600); + DEBUGFS_ADD_FILE(rf_reset, dir_debug, 0600); + DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, 0400); + DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, 0400); + DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, 0400); + DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, 0200); + DEBUGFS_ADD_FILE(protection_mode, dir_debug, 0600); + DEBUGFS_ADD_FILE(sensitivity, dir_debug, 0400); + DEBUGFS_ADD_FILE(chain_noise, dir_debug, 0400); + DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, 0600); + DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, 0400); + DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, 0400); + DEBUGFS_ADD_FILE(rxon_flags, dir_debug, 0200); + DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, 0200); + DEBUGFS_ADD_FILE(echo_test, dir_debug, 0200); + DEBUGFS_ADD_FILE(fw_restart, dir_debug, 0200); #ifdef CONFIG_IWLWIFI_DEBUG - DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(log_event, dir_debug, 0600); #endif if (iwl_advanced_bt_coexist(priv)) - DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(bt_traffic, dir_debug, 0400); /* Calibrations disabled/enabled status*/ - DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(calib_disabled, dir_rf, 0600); /* * Create a symlink with mac80211. 
This is not very robust, as it does diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index d11d72615de2..e68254e12764 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1651,12 +1651,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv) priv->status, table.valid); } - trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, - table.data1, table.data2, table.line, - table.blink2, table.ilink1, table.ilink2, - table.bcon_time, table.gp1, table.gp2, - table.gp3, table.ucode_ver, table.hw_ver, - 0, table.brd_ver); + trace_iwlwifi_dev_ucode_error(trans->dev, &table, 0, table.brd_ver); IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); IWL_ERR(priv, "0x%08X | uPc\n", table.pc); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index ddcd8c2d66cd..98050d7be411 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -3276,17 +3276,17 @@ static void rs_add_debugfs(void *priv, void *priv_sta, { struct iwl_lq_sta *lq_sta = priv_sta; lq_sta->rs_sta_dbgfs_scale_table_file = - debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, - lq_sta, &rs_sta_dbgfs_scale_table_ops); + debugfs_create_file("rate_scale_table", 0600, dir, + lq_sta, &rs_sta_dbgfs_scale_table_ops); lq_sta->rs_sta_dbgfs_stats_table_file = - debugfs_create_file("rate_stats_table", S_IRUSR, dir, - lq_sta, &rs_sta_dbgfs_stats_table_ops); + debugfs_create_file("rate_stats_table", 0400, dir, + lq_sta, &rs_sta_dbgfs_stats_table_ops); lq_sta->rs_sta_dbgfs_rate_scale_data_file = - debugfs_create_file("rate_scale_data", S_IRUSR, dir, - lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); + debugfs_create_file("rate_scale_data", 0400, dir, + lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = - debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, - &lq_sta->tx_agg_tid_en); + debugfs_create_u8("tx_agg_tid_enable", 0600, dir, + &lq_sta->tx_agg_tid_en); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 3fd07bc80f54..37c57bcbfb4a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -311,6 +313,17 @@ struct iwl_mcc_update_resp_v1 { } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */ /** + * enum iwl_geo_information - geographic information. + * @GEO_NO_INFO: no special info for this geo profile. + * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params + * for the 5 GHz band. 
+ */ +enum iwl_geo_information { + GEO_NO_INFO = 0, + GEO_WMM_ETSI_5GHZ_INFO = BIT(0), +}; + +/** * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD. * Contains the new channel control profile map, if changed, and the new MCC * (mobile country code). @@ -320,7 +333,8 @@ struct iwl_mcc_update_resp_v1 { * @cap: capabilities for all channels which matches the MCC * @source_id: the MCC source, see iwl_mcc_source * @time: time elapsed from the MCC test start (in 30 seconds TU) - * @reserved: reserved. + * @geo_info: geographic specific profile information + * see &enum iwl_geo_information. * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 * channels, depending on platform) * @channels: channel control data map, DWORD for each channel. Only the first @@ -332,10 +346,10 @@ struct iwl_mcc_update_resp { u8 cap; u8 source_id; __le16 time; - __le16 reserved; + __le16 geo_info; __le32 n_channels; __le32 channels[0]; -} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_2 */ +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */ /** * struct iwl_mcc_chub_notif - chub notifies of mcc change diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 3bfc657f6b42..7af3a0f51b77 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -30,6 +30,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -579,8 +580,23 @@ enum iwl_umac_scan_general_flags { IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8), IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), + /* Extended dwell is obsolete when adaptive dwell is used, making this + * bit reusable. Hence, probe request defer is used only when adaptive + * dwell is supported. */ + IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP = BIT(10), IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11), IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13), + IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME = BIT(14), + IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE = BIT(15), +}; + +/** + * enum iwl_umac_scan_general_flags2 - UMAC scan general flags #2 + * @IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete + * notification per channel or not.
+ */ +enum iwl_umac_scan_general_flags2 { + IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0), }; /** @@ -629,6 +645,18 @@ struct iwl_scan_req_umac_tail { } __packed; /** + * struct iwl_scan_umac_chan_param + * @flags: channel flags &enum iwl_scan_channel_flags + * @count: num of channels in scan request + * @reserved: for future use and alignment + */ +struct iwl_scan_umac_chan_param { + u8 flags; + u8 count; + __le16 reserved; +} __packed; /*SCAN_CHANNEL_PARAMS_API_S_VER_1 */ + +/** * struct iwl_scan_req_umac * @flags: &enum iwl_umac_scan_flags * @uid: scan id, &enum iwl_umac_scan_uid_offsets @@ -636,23 +664,24 @@ struct iwl_scan_req_umac_tail { * @general_flags: &enum iwl_umac_scan_general_flags * @scan_start_mac_id: report the scan start TSF time according to this mac TSF * @extended_dwell: dwell time for channels 1, 6 and 11 - * @active_dwell: dwell time for active scan - * @passive_dwell: dwell time for passive scan + * @active_dwell: dwell time for active scan per LMAC + * @passive_dwell: dwell time for passive scan per LMAC * @fragmented_dwell: dwell time for fragmented passive scan * @adwell_default_n_aps: for adaptive dwell the default number of APs * per channel * @adwell_default_n_aps_social: for adaptive dwell the default * number of APs per social (1,6,11) channel + * @general_flags2: &enum iwl_umac_scan_general_flags2 * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added * to total scan time * @max_out_time: max out of serving channel time, per LMAC - for CDB there * are 2 LMACs * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs * @scan_priority: scan internal prioritization &enum iwl_scan_priority - * @channel_flags: &enum iwl_scan_channel_flags - * @n_channels: num of channels in scan request + * @num_of_fragments: Number of fragments needed for full coverage per band. + * Relevant only for fragmented scan. 
+ * @channel: &struct iwl_scan_umac_chan_param * @reserved: for future use and alignment - * @reserved2: for future use and alignment * @reserved3: for future use and alignment * @data: &struct iwl_scan_channel_cfg_umac and * &struct iwl_scan_req_umac_tail @@ -673,10 +702,7 @@ struct iwl_scan_req_umac { __le32 max_out_time; __le32 suspend_time; __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ - u8 channel_flags; - u8 n_channels; - __le16 reserved2; + struct iwl_scan_umac_chan_param channel; u8 data[]; } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ struct { @@ -687,10 +713,7 @@ struct iwl_scan_req_umac { __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ - u8 channel_flags; - u8 n_channels; - __le16 reserved2; + struct iwl_scan_umac_chan_param channel; u8 data[]; } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */ struct { @@ -704,16 +727,30 @@ struct iwl_scan_req_umac { __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ - u8 channel_flags; - u8 n_channels; - __le16 reserved2; + struct iwl_scan_umac_chan_param channel; u8 data[]; } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */ + struct { + u8 active_dwell[SCAN_TWO_LMACS]; + u8 reserved2; + u8 adwell_default_n_aps; + u8 adwell_default_n_aps_social; + u8 general_flags2; + __le16 adwell_max_budget; + __le32 max_out_time[SCAN_TWO_LMACS]; + __le32 suspend_time[SCAN_TWO_LMACS]; + __le32 scan_priority; + u8 passive_dwell[SCAN_TWO_LMACS]; + u8 num_of_fragments[SCAN_TWO_LMACS]; + struct iwl_scan_umac_chan_param channel; + u8 data[]; + } v8; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_8 */ }; } __packed; -#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE_V7 (sizeof(struct iwl_scan_req_umac) - \ + 4 * sizeof(u8)) #define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \ 2 * sizeof(u8) - sizeof(__le16)) #define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h index 3721a3ed358b..f824bebceb06 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -211,7 +211,7 @@ enum { * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. - * @T2_V2_START_IMMEDIATELY: start time event immediately + * @TE_V2_START_IMMEDIATELY: start time event immediately * @TE_V2_DEP_OTHER: depends on another time event * @TE_V2_DEP_TSF: depends on a specific time * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC @@ -230,7 +230,7 @@ enum iwl_time_event_policy { TE_V2_NOTIF_HOST_FRAG_END = BIT(5), TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), - T2_V2_START_IMMEDIATELY = BIT(11), + TE_V2_START_IMMEDIATELY = BIT(11), /* placement characteristics */ TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 67aefc8fc9ac..fa283285fcbe 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -8,6 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. 
All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -66,6 +68,7 @@ #include "iwl-drv.h" #include "runtime.h" #include "dbg.h" +#include "debugfs.h" #include "iwl-io.h" #include "iwl-prph.h" #include "iwl-csr.h" @@ -942,7 +945,6 @@ dump_trans_data: out: iwl_fw_free_dump_desc(fwrt); - fwrt->dump.trig = NULL; clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); IWL_DEBUG_INFO(fwrt, "WRT dump done\n"); } @@ -1006,6 +1008,12 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, { struct iwl_fw_dump_desc *desc; + if (trigger && trigger->flags & IWL_FW_DBG_FORCE_RESTART) { + IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig); + iwl_force_nmi(fwrt->trans); + return 0; + } + desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); if (!desc) return -ENOMEM; @@ -1079,6 +1087,9 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id) IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n", fwrt->dump.conf); + /* start default config marker cmd for syncing logs */ + iwl_fw_trigger_timestamp(fwrt, 1); + /* Send all HCMDs for configuring the FW debug */ ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd; for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) { @@ -1112,6 +1123,14 @@ void iwl_fw_error_dump_wk(struct work_struct *work) fwrt->ops->dump_start(fwrt->ops_ctx)) return; + if (fwrt->ops && fwrt->ops->fw_running && + !fwrt->ops->fw_running(fwrt->ops_ctx)) { + IWL_ERR(fwrt, "Firmware not running - cannot dump error\n"); + iwl_fw_free_dump_desc(fwrt); + clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); + goto out; + } + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { /* stop recording */ iwl_fw_dbg_stop_recording(fwrt); @@ -1145,7 +1164,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work) iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl); } } - +out: if (fwrt->ops && fwrt->ops->dump_end) fwrt->ops->dump_end(fwrt->ops_ctx); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 223fb77a3aa9..72259bff9922 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -8,6 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -91,6 +93,7 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) if (fwrt->dump.desc != &iwl_dump_desc_assert) kfree(fwrt->dump.desc); fwrt->dump.desc = NULL; + fwrt->dump.trig = NULL; } void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index e2ded29a145d..8f005cd69559 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -157,6 +157,20 @@ static void iwl_fw_timestamp_marker_wk(struct work_struct *work) ret, jiffies_to_msecs(delay) / 1000); } +void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay) +{ + IWL_INFO(fwrt, + "starting timestamp_marker trigger with delay: %us\n", + delay); + + iwl_fw_cancel_timestamp(fwrt); + + fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000); + + schedule_delayed_work(&fwrt->timestamp.wk, + round_jiffies_relative(fwrt->timestamp.delay)); +} + static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt, char *buf, size_t count, loff_t *ppos) @@ -168,16 +182,8 @@ static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt, if (ret < 0) return ret; - IWL_INFO(fwrt, - "starting timestamp_marker trigger with delay: %us\n", - delay); + iwl_fw_trigger_timestamp(fwrt, delay); - iwl_fw_cancel_timestamp(fwrt); - - fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000); - - schedule_delayed_work(&fwrt->timestamp.wk, - round_jiffies_relative(fwrt->timestamp.delay)); return count; } @@ -187,7 +193,7 @@ int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir) { INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk); - FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, S_IWUSR); + FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200); return 0; err: IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h index e57ff92a68ae..d93f6a4bb22d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h @@ -75,6 +75,22 @@ static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) cancel_delayed_work_sync(&fwrt->timestamp.wk); } +static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) +{ + cancel_delayed_work_sync(&fwrt->timestamp.wk); +} + +static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) +{ + if (!fwrt->timestamp.delay) + return; + + schedule_delayed_work(&fwrt->timestamp.wk, + round_jiffies_relative(fwrt->timestamp.delay)); +} + +void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay); + #else static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir) @@ -84,4 +100,11 @@ static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} +static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {} + +static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} + +static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, + u32 delay) {} + #endif /* CONFIG_IWLWIFI_DEBUGFS */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 1a05d506ac9a..9b2805e1e3b1 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -250,6 +250,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * indicating low latency direction. * @IWL_UCODE_TLV_API_DEPRECATE_TTAK: RX status flag TTAK ok (bit 7) is * deprecated. + * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2: This ucode supports version 8 + * of scan request: SCAN_REQUEST_CMD_UMAC_API_S_VER_8 * * @NUM_IWL_UCODE_TLV_API: number of bits used */ @@ -265,10 +267,12 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31, /* API Set 1 */ IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32, + IWL_UCODE_TLV_API_OCE = (__force iwl_ucode_tlv_api_t)33, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34, IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35, IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38, IWL_UCODE_TLV_API_DEPRECATE_TTAK = (__force iwl_ucode_tlv_api_t)41, + IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2 = (__force iwl_ucode_tlv_api_t)42, NUM_IWL_UCODE_TLV_API #ifdef __CHECKER__ @@ -441,6 +445,7 @@ enum iwl_fw_phy_cfg { FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS, FW_PHY_CFG_RX_CHAIN_POS = 20, FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS, + FW_PHY_CFG_SHARED_CLK = BIT(31), }; #define IWL_UCODE_MAX_CS 1 @@ -616,6 +621,14 @@ enum iwl_fw_dbg_trigger_mode { }; /** + * enum iwl_fw_dbg_trigger_flags - the flags supported by wrt triggers + * @IWL_FW_DBG_FORCE_RESTART: force a firmware restart + */ +enum iwl_fw_dbg_trigger_flags { + IWL_FW_DBG_FORCE_RESTART = BIT(0), +}; + +/** * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger * @IWL_FW_DBG_CONF_VIF_ANY: any vif type * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode @@ -651,6 +664,7 @@ enum iwl_fw_dbg_trigger_vif_type { * @occurrences: number of occurrences. 0 means the trigger will never fire. * @trig_dis_ms: the time, in milliseconds, after an occurrence of this * trigger in which another occurrence should be ignored. 
+ * @flags: &enum iwl_fw_dbg_trigger_flags */ struct iwl_fw_dbg_trigger_tlv { __le32 id; @@ -661,7 +675,8 @@ struct iwl_fw_dbg_trigger_tlv { u8 start_conf_id; __le16 occurrences; __le16 trig_dis_ms; - __le16 reserved[3]; + u8 flags; + u8 reserved[5]; u8 data[0]; } __packed; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index c39fe84bb4c4..2efac307909e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -77,8 +77,14 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, } IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); -void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt) +void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt) { - iwl_fw_cancel_timestamp(fwrt); + iwl_fw_suspend_timestamp(fwrt); } -IWL_EXPORT_SYMBOL(iwl_fw_runtime_exit); +IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend); + +void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt) +{ + iwl_fw_resume_timestamp(fwrt); +} +IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index e25c049f980f..3fb940ebd74a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,6 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -68,6 +70,7 @@ struct iwl_fw_runtime_ops { int (*dump_start)(void *ctx); void (*dump_end)(void *ctx); + bool (*fw_running)(void *ctx); }; #define MAX_NUM_LMAC 2 @@ -150,6 +153,10 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt); +void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); + +void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt); + static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type cur_fw_img) { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 258d439bb0a9..f0f5636dd3ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -398,6 +398,7 @@ struct iwl_cfg { u8 ucode_api_max; u8 ucode_api_min; u32 min_umac_error_event_table; + u32 extra_phy_cfg_flags; }; /* @@ -477,6 +478,10 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc; extern const struct iwl_cfg iwl9461_2ac_cfg_soc; extern const struct iwl_cfg iwl9462_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_2ac_cfg_soc; +extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl22000_2ac_cfg_hr; extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwl22000_2ac_cfg_jf; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h index 9518a82f44c2..27e3e4e96aa2 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h @@ -126,14 +126,11 @@ TRACE_EVENT(iwlwifi_dev_tx, __entry->framelen, __entry->skbaddr) ); +struct iwl_error_event_table; TRACE_EVENT(iwlwifi_dev_ucode_error, - TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low, - u32 data1, u32 data2, u32 line, u32 blink2, u32 ilink1, - u32 ilink2, u32 bcon_time, u32 gp1, u32 gp2, u32 rev_type, - u32 major, u32 minor, u32 hw_ver, u32 brd_ver), - TP_ARGS(dev, desc, tsf_low, data1, data2, line, - blink2, ilink1, ilink2, bcon_time, gp1, gp2, - rev_type, major, minor, hw_ver, brd_ver), + TP_PROTO(const struct device *dev, const struct iwl_error_event_table *table, + u32 hw_ver, u32 brd_ver), + TP_ARGS(dev, table, hw_ver, brd_ver), TP_STRUCT__entry( DEV_ENTRY __field(u32, desc) @@ -155,20 +152,20 @@ TRACE_EVENT(iwlwifi_dev_ucode_error, ), TP_fast_assign( DEV_ASSIGN; - __entry->desc = desc; - __entry->tsf_low = tsf_low; - __entry->data1 = data1; - __entry->data2 = data2; - __entry->line = line; - __entry->blink2 = blink2; - __entry->ilink1 = ilink1; - __entry->ilink2 = ilink2; - __entry->bcon_time = bcon_time; - __entry->gp1 = gp1; - __entry->gp2 = gp2; - __entry->rev_type = rev_type; - __entry->major = major; - __entry->minor = minor; + __entry->desc = table->error_id; + __entry->tsf_low = table->tsf_low; + __entry->data1 = table->data1; + __entry->data2 = table->data2; + __entry->line = table->line; + __entry->blink2 = table->blink2; + __entry->ilink1 = table->ilink1; + __entry->ilink2 = table->ilink2; + __entry->bcon_time = table->bcon_time; + __entry->gp1 = table->gp1; + __entry->gp2 = table->gp2; + __entry->rev_type = table->gp3; + __entry->major = table->ucode_ver; + __entry->minor = table->hw_ver; __entry->hw_ver = hw_ver; __entry->brd_ver = brd_ver; ), diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c index 50510fb6ab8c..6aa719865a58 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c @@ -30,6 +30,7 @@ #ifndef __CHECKER__ #include "iwl-trans.h" +#include "dvm/commands.h" #define CREATE_TRACE_POINTS #include "iwl-devtrace.h" diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 9c4a7f648a44..aa2d5c14e202 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1768,41 +1768,36 @@ static void __exit iwl_drv_exit(void) module_exit(iwl_drv_exit); #ifdef CONFIG_IWLWIFI_DEBUG -module_param_named(debug, iwlwifi_mod_params.debug_level, uint, - S_IRUGO | S_IWUSR); +module_param_named(debug, iwlwifi_mod_params.debug_level, uint, 0644); MODULE_PARM_DESC(debug, "debug output mask"); #endif -module_param_named(swcrypto, iwlwifi_mod_params.swcrypto, int, S_IRUGO); +module_param_named(swcrypto, iwlwifi_mod_params.swcrypto, int, 0444); MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); -module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO); +module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, 0444); MODULE_PARM_DESC(11n_disable, "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); -module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, - int, S_IRUGO); +module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444); MODULE_PARM_DESC(amsdu_size, "amsdu size 0: 12K for 
multi Rx queue devices, 4K for other devices 1:4K 2:8K 3:12K (default 0)"); -module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, S_IRUGO); +module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)"); module_param_named(antenna_coupling, iwlwifi_mod_params.antenna_coupling, - int, S_IRUGO); + int, 0444); MODULE_PARM_DESC(antenna_coupling, "specify antenna coupling in dB (default: 0 dB)"); -module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO); +module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444); MODULE_PARM_DESC(nvm_file, "NVM file name"); -module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable, - bool, S_IRUGO); +module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable, bool, 0444); MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)"); -module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, - bool, S_IRUGO); +module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444); MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)"); -module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, - uint, S_IRUGO | S_IWUSR); +module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644); MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)"); @@ -1823,31 +1818,27 @@ MODULE_PARM_DESC(uapsd_disable, * default: bt_coex_active = true (BT_COEX_ENABLE) */ module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active, - bool, S_IRUGO); + bool, 0444); MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)"); -module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, S_IRUGO); +module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, 0444); MODULE_PARM_DESC(led_mode, "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)"); -module_param_named(power_save, iwlwifi_mod_params.power_save, - bool, S_IRUGO); +module_param_named(power_save, iwlwifi_mod_params.power_save, bool, 0444); MODULE_PARM_DESC(power_save, "enable WiFi power management (default: disable)"); -module_param_named(power_level, iwlwifi_mod_params.power_level, - int, S_IRUGO); +module_param_named(power_level, iwlwifi_mod_params.power_level, int, 0444); MODULE_PARM_DESC(power_level, "default power save level (range from 1 - 5, default: 1)"); -module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, S_IRUGO); +module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, 0444); MODULE_PARM_DESC(fw_monitor, "firmware monitor - to debug FW (default: false - needs lots of memory)"); -module_param_named(d0i3_timeout, iwlwifi_mod_params.d0i3_timeout, - uint, S_IRUGO); +module_param_named(d0i3_timeout, iwlwifi_mod_params.d0i3_timeout, uint, 0444); MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)"); -module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, - S_IRUGO); +module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444); MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 976640fed334..96b52a275ee3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -110,6 +110,8 @@ #define IWL_MVM_SW_TX_CSUM_OFFLOAD 0 
#define IWL_MVM_HW_CSUM_DISABLE 0 #define IWL_MVM_PARSE_NVM 0 +#define IWL_MVM_ADWELL_ENABLE 1 +#define IWL_MVM_ADWELL_MAX_BUDGET 0 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 0e6cf39285f4..2efe9b099556 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1098,6 +1098,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) /* make sure the d0i3 exit work is not pending */ flush_work(&mvm->d0i3_exit_work); + iwl_fw_runtime_suspend(&mvm->fwrt); + ret = iwl_trans_suspend(trans); if (ret) return ret; @@ -2012,6 +2014,8 @@ int iwl_mvm_resume(struct ieee80211_hw *hw) mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + iwl_fw_runtime_resume(&mvm->fwrt); + return ret; } @@ -2038,6 +2042,8 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; + iwl_fw_runtime_suspend(&mvm->fwrt); + /* start pseudo D3 */ rtnl_lock(); err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); @@ -2098,6 +2104,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) __iwl_mvm_resume(mvm, true); rtnl_unlock(); + iwl_fw_runtime_resume(&mvm->fwrt); + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; iwl_abort_notification_waits(&mvm->notif_wait); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 4228fac77f41..f7fcf700196b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -1276,7 +1276,6 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; - bool prev; u8 value; int ret; @@ -1287,9 +1286,7 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, return -EINVAL; mutex_lock(&mvm->mutex); - prev = iwl_mvm_vif_low_latency(mvmvif); - mvmvif->low_latency_dbgfs = value; - iwl_mvm_update_low_latency(mvm, vif, prev); + iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS); mutex_unlock(&mvm->mutex); return count; @@ -1306,9 +1303,9 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file, len = scnprintf(buf, sizeof(buf) - 1, "traffic=%d\ndbgfs=%d\nvcmd=%d\n", - mvmvif->low_latency_traffic, - mvmvif->low_latency_dbgfs, - mvmvif->low_latency_vcmd); + !!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC), + !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS), + !!(mvmvif->low_latency & LOW_LATENCY_VCMD)); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -1506,44 +1503,36 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM && ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) || (vif->type == NL80211_IFTYPE_STATION && vif->p2p))) - MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR | - S_IRUSR); - - MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); - 
MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, - mvmvif->dbgfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, 0600); + + MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400); if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) - MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) && !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) { if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP) MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params, - mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); + mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); + 0600); MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); + 0600); MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); + 0600); MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir, - S_IRUSR | S_IWUSR); + 0600); MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir, - S_IRUSR); + 0400); } /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index a7892c1254a2..0e6401cd7ccc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -1281,9 +1283,6 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, { int ret; - if (!iwl_mvm_firmware_running(mvm)) - return -EIO; - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE); if (ret) return ret; @@ -1915,7 +1914,7 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); if (iwl_mvm_has_tlc_offload(mvm)) - MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, S_IRUSR); + MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400); return; err: @@ -1931,48 +1930,45 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) mvm->debugfs_dir = dbgfs_dir; - MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR); - MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, - S_IWUSR | S_IRUSR); - MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(force_ctkill, dbgfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, - S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR); - MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, - S_IWUSR | S_IRUSR); - MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR); - MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR); - MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(force_ctkill, dbgfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200); + 
MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200); #ifdef CONFIG_ACPI - MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400); #endif if (!debugfs_create_bool("enable_scan_iteration_notif", - S_IRUSR | S_IWUSR, + 0600, mvm->debugfs_dir, &mvm->scan_iter_notif_enabled)) goto err; - if (!debugfs_create_bool("drop_bcn_ap_mode", S_IRUSR | S_IWUSR, + if (!debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir, &mvm->drop_bcn_ap_mode)) goto err; @@ -1983,50 +1979,49 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) if (!bcast_dir) goto err; - if (!debugfs_create_bool("override", S_IRUSR | S_IWUSR, - bcast_dir, - &mvm->dbgfs_bcast_filtering.override)) + if (!debugfs_create_bool("override", 0600, + bcast_dir, + &mvm->dbgfs_bcast_filtering.override)) goto err; MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters, - bcast_dir, S_IWUSR | S_IRUSR); + bcast_dir, 0600); MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs, - bcast_dir, S_IWUSR | S_IRUSR); + bcast_dir, 0600); } #endif #ifdef CONFIG_PM_SLEEP - MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR); - MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR); - if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR, + MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400); + if (!debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir, &mvm->d3_wake_sysassert)) goto err; - if (!debugfs_create_u32("last_netdetect_scans", S_IRUSR, + if (!debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir, &mvm->last_netdetect_scans)) goto err; #endif - if (!debugfs_create_u8("ps_disabled", S_IRUSR, + if (!debugfs_create_u8("ps_disabled", 0400, mvm->debugfs_dir, &mvm->ps_disabled)) goto err; - if (!debugfs_create_blob("nvm_hw", S_IRUSR, - mvm->debugfs_dir, &mvm->nvm_hw_blob)) + if (!debugfs_create_blob("nvm_hw", 0400, + mvm->debugfs_dir, &mvm->nvm_hw_blob)) goto err; - if (!debugfs_create_blob("nvm_sw", S_IRUSR, - mvm->debugfs_dir, &mvm->nvm_sw_blob)) + if (!debugfs_create_blob("nvm_sw", 0400, + mvm->debugfs_dir, &mvm->nvm_sw_blob)) goto err; - if (!debugfs_create_blob("nvm_calib", S_IRUSR, - mvm->debugfs_dir, &mvm->nvm_calib_blob)) + if (!debugfs_create_blob("nvm_calib", 0400, + mvm->debugfs_dir, &mvm->nvm_calib_blob)) goto err; - if (!debugfs_create_blob("nvm_prod", S_IRUSR, - mvm->debugfs_dir, &mvm->nvm_prod_blob)) + if (!debugfs_create_blob("nvm_prod", 0400, + mvm->debugfs_dir, &mvm->nvm_prod_blob)) goto err; - if (!debugfs_create_blob("nvm_phy_sku", S_IRUSR, + if (!debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir, &mvm->nvm_phy_sku_blob)) goto err; - debugfs_create_file("mem", S_IRUSR | S_IWUSR, dbgfs_dir, mvm, - &iwl_dbgfs_mem_ops); + debugfs_create_file("mem", 0600, 
dbgfs_dir, mvm, &iwl_dbgfs_mem_ops); /* * Create a symlink with mac80211. It will be removed when mac80211 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 0920be637b57..3c59109bea20 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -433,6 +433,10 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) /* Set parameters */ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); + + /* set extra PHY configuration flags from the device's cfg */ + phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->cfg->extra_phy_cfg_flags); + phy_cfg_cmd.calib_control.event_trigger = mvm->fw->default_calib[ucode_type].event_trigger; phy_cfg_cmd.calib_control.flow_trigger = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 2f22e14e00fe..8ba16fc24e3a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -438,7 +438,8 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) } /* Allocate the CAB queue for softAP and GO interfaces */ - if (vif->type == NL80211_IFTYPE_AP) { + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { /* * For TVQM this will be overwritten later with the FW assigned * queue value (when queue is enabled). diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 8aed40a8bc38..51b30424575b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -420,6 +421,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); + ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); if (iwl_mvm_has_tlc_offload(mvm)) { ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); @@ -660,6 +662,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) NL80211_EXT_FEATURE_SET_SCAN_DWELL); } + if (iwl_mvm_is_oce_supported(mvm)) { + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP); + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME); + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); + } + mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; #ifdef CONFIG_PM_SLEEP @@ -2106,15 +2119,40 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, if (ret) goto out_remove; - ret = iwl_mvm_add_mcast_sta(mvm, vif); - if (ret) - goto out_unbind; - - /* Send the bcast station. At this stage the TBTT and DTIM time events - * are added and applied to the scheduler */ - ret = iwl_mvm_send_add_bcast_sta(mvm, vif); - if (ret) - goto out_rm_mcast; + /* + * This is not very nice, but the simplest: + * For older FWs adding the mcast sta before the bcast station may + * cause assert 0x2b00.
+ * This is fixed in later FW, so make the order of addition depend on + * the TLV + */ + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { + ret = iwl_mvm_add_mcast_sta(mvm, vif); + if (ret) + goto out_unbind; + /* + * Send the bcast station. At this stage the TBTT and DTIM time + * events are added and applied to the scheduler + */ + ret = iwl_mvm_send_add_bcast_sta(mvm, vif); + if (ret) { + iwl_mvm_rm_mcast_sta(mvm, vif); + goto out_unbind; + } + } else { + /* + * Send the bcast station. At this stage the TBTT and DTIM time + * events are added and applied to the scheduler + */ + ret = iwl_mvm_send_add_bcast_sta(mvm, vif); + if (ret) + goto out_unbind; + ret = iwl_mvm_add_mcast_sta(mvm, vif); + if (ret) { + iwl_mvm_send_rm_bcast_sta(mvm, vif); + goto out_unbind; + } + } /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; @@ -2144,7 +2182,6 @@ out_quota_failed: iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; iwl_mvm_send_rm_bcast_sta(mvm, vif); -out_rm_mcast: iwl_mvm_rm_mcast_sta(mvm, vif); out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); @@ -2682,6 +2719,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + false); + ret = 0; } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { @@ -2775,9 +2816,6 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; - if (WARN_ON_ONCE(vif->bss_conf.assoc)) - return; - /* * iwl_mvm_protect_session() reads directly from the device * (the system time), so make sure it is available. @@ -3465,6 +3503,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, ret = 0; goto out; case NL80211_IFTYPE_STATION: + mvmvif->csa_bcn_pending = false; break; case NL80211_IFTYPE_MONITOR: /* always disable PS when a monitor interface is active */ @@ -3508,7 +3547,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, } if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { - u32 duration = 2 * vif->bss_conf.beacon_int; + u32 duration = 3 * vif->bss_conf.beacon_int; /* iwl_mvm_protect_session() reads directly from the * device (the system time), so make sure it is * available. @@ -3521,6 +3560,7 @@ /* Protect the session to make sure we hear the first * beacon on the new channel. */
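For reference, the TLV-gated ordering in iwl_mvm_start_ap_ibss above reduces to the following shape. This is a simplified sketch, not the patch itself: the wrapper name is hypothetical and the error labels are collapsed into plain returns, with the first-added station rolled back when the second add fails.

static int add_ap_ibss_stations(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	bool sta_type_tlv = fw_has_api(&mvm->fw->ucode_capa,
				       IWL_UCODE_TLV_API_STA_TYPE);
	int ret;

	if (sta_type_tlv) {
		/* newer FW: mcast station first, bcast station second */
		ret = iwl_mvm_add_mcast_sta(mvm, vif);
		if (ret)
			return ret;
		ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
		if (ret)
			iwl_mvm_rm_mcast_sta(mvm, vif);	/* roll back */
	} else {
		/* older FW asserts (0x2b00) on the order above */
		ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
		if (ret)
			return ret;
		ret = iwl_mvm_add_mcast_sta(mvm, vif);
		if (ret)
			iwl_mvm_send_rm_bcast_sta(mvm, vif);	/* roll back */
	}
	return ret;
}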
+ mvmvif->csa_bcn_pending = true; iwl_mvm_protect_session(mvm, vif, duration, duration, vif->bss_conf.beacon_int / 2, true); @@ -3959,6 +3999,7 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_STATION) { struct iwl_mvm_sta *mvmsta; + mvmvif->csa_bcn_pending = false; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 2d28e0804218..d2cf751db68d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -90,6 +90,7 @@ #include "fw/runtime.h" #include "fw/dbg.h" #include "fw/acpi.h" +#include "fw/debugfs.h" #define IWL_MVM_MAX_ADDRESSES 5 /* RSSI offset for WkP */ @@ -299,6 +300,18 @@ enum iwl_bt_force_ant_mode { }; /** +* enum iwl_mvm_low_latency_cause - low latency set causes +* @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected +* @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs +* @LOW_LATENCY_VCMD: low latency mode set from vendor command +*/ +enum iwl_mvm_low_latency_cause { + LOW_LATENCY_TRAFFIC = BIT(0), + LOW_LATENCY_DEBUGFS = BIT(1), + LOW_LATENCY_VCMD = BIT(2), +}; + +/** * struct iwl_mvm_vif_bf_data - beacon filtering related data * @bf_enabled: indicates if beacon filtering is enabled * @ba_enabled: indicates if beacon abort is enabled @@ -334,9 +347,8 @@ struct iwl_mvm_vif_bf_data { * @pm_enabled - Indicate if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. - * @low_latency_traffic: indicates low latency traffic was detected - * @low_latency_dbgfs: low latency mode set from debugfs - * @low_latency_vcmd: low latency mode set from vendor command + * @low_latency: indicates low latency is set, see + * enum &iwl_mvm_low_latency_cause for causes. * @ps_disabled: indicates that this interface requires PS to be disabled * @queue_params: QoS params for this MAC * @bcast_sta: station used for broadcast packets. Used by the following @@ -366,7 +378,7 @@ struct iwl_mvm_vif { bool ap_ibss_active; bool pm_enabled; bool monitor_active; - bool low_latency_traffic, low_latency_dbgfs, low_latency_vcmd; + u8 low_latency; bool ps_disabled; struct iwl_mvm_vif_bf_data bf_data; @@ -437,6 +449,9 @@ struct iwl_mvm_vif { bool csa_failed; u16 csa_target_freq; + /* Indicates that we are waiting for a beacon on a new channel */ + bool csa_bcn_pending; + /* TCP Checksum Offload */ netdev_features_t features; }; @@ -1151,6 +1166,18 @@ static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_API_ADAPTIVE_DWELL); } +static inline bool iwl_mvm_is_adaptive_dwell_v2_supported(struct iwl_mvm *mvm) +{ + return fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2); +} + +static inline bool iwl_mvm_is_oce_supported(struct iwl_mvm *mvm) +{ + /* OCE should never be enabled for LMAC scan FWs */ + return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_OCE); +} + static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) { /* For now we only use this mode to differentiate between @@ -1740,7 +1767,8 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm); /* Low latency */ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool value); + bool low_latency, + enum iwl_mvm_low_latency_cause cause); /* get SystemLowLatencyMode - only needed for beacon threshold? */
bool iwl_mvm_low_latency(struct iwl_mvm *mvm); /* get VMACLowLatencyMode */ @@ -1756,9 +1784,17 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) * binding, so this has no real impact. For now, just return * the current desired low-latency state. */ - return mvmvif->low_latency_dbgfs || - mvmvif->low_latency_traffic || - mvmvif->low_latency_vcmd; + return mvmvif->low_latency; +} + +static inline +void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set, + enum iwl_mvm_low_latency_cause cause) +{ + if (set) + mvmvif->low_latency |= cause; + else + mvmvif->low_latency &= ~cause; } /* hw scheduler queue config */ @@ -1783,6 +1819,7 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { + iwl_fw_cancel_timestamp(&mvm->fwrt); iwl_free_fw_paging(&mvm->fwrt); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); iwl_fw_dump_conf_clear(&mvm->fwrt); }
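The three per-cause booleans collapse into one bitmask, so a vif stays in low-latency mode while any cause remains set. A self-contained sketch of the same pattern; the constant and helper names here are illustrative, the patch itself uses enum iwl_mvm_low_latency_cause and iwl_mvm_vif_set_low_latency:

#include <linux/bitops.h>
#include <linux/types.h>

#define LL_TRAFFIC	BIT(0)	/* low latency traffic detected */
#define LL_DEBUGFS	BIT(1)	/* set from debugfs */
#define LL_VCMD		BIT(2)	/* set from vendor command */

static void ll_set_cause(u8 *ll, bool set, u8 cause)
{
	if (set)
		*ll |= cause;	/* record this cause */
	else
		*ll &= ~cause;	/* clear only this cause */
}

static bool ll_active(u8 ll)
{
	return ll != 0;		/* low latency while any cause is set */
}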
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 5d525a0023dc..224bfa1bcf53 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -102,14 +104,14 @@ struct iwl_mvm_mod_params iwlmvm_mod_params = { /* rest of fields are 0 by default */ }; -module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO); +module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444); MODULE_PARM_DESC(init_dbg, "set to true to debug an ASSERT in INIT fw (default: false)"); -module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO); +module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444); MODULE_PARM_DESC(power_scheme, "power management scheme: 1-active, 2-balanced, 3-low power, default: 2"); module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect, - bool, S_IRUGO); + bool, 0444); MODULE_PARM_DESC(tfd_q_hang_detect, "TFD queues hang detection (default: true)"); @@ -552,9 +554,15 @@ static void iwl_mvm_fwrt_dump_end(void *ctx) iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); } +static bool iwl_mvm_fwrt_fw_running(void *ctx) +{ + return iwl_mvm_firmware_running(ctx); +} + static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { .dump_start = iwl_mvm_fwrt_dump_start, .dump_end = iwl_mvm_fwrt_dump_end, + .fw_running = iwl_mvm_fwrt_fw_running, }; static struct iwl_op_mode * @@ -802,7 +810,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); out_free: - iwl_fw_runtime_exit(&mvm->fwrt); iwl_fw_flush_dump(&mvm->fwrt); if (iwlmvm_mod_params.init_dbg) @@ -843,7 +850,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) kfree(mvm->d3_resume_sram); #endif - iwl_fw_runtime_exit(&mvm->fwrt); iwl_trans_op_mode_leave(mvm->trans); iwl_phy_db_free(mvm->phy_db); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 305cd56bf746..7f5434b34d0d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -34,6 +30,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -286,6 +283,20 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) return; ctxt->ref--; + + /* + * Move unused phy's to a default channel. When the phy is moved, the + * fw will clean up the immediate quiet bit if it was previously set, + * otherwise we might not be able to reuse this phy.
+ */ + if (ctxt->ref == 0) { + struct ieee80211_channel *chan; + struct cfg80211_chan_def chandef; + + chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0]; + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); + iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1); + } } static void iwl_mvm_binding_iterator(void *_data, u8 *mac, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 55d1274c6092..fb5745660509 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -234,13 +234,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) struct iwl_mvm_sta *mvmsta; struct iwl_lq_sta_rs_fw *lq_sta; + rcu_read_lock(); + notif = (void *)pkt->data; mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, notif->sta_id); if (!mvmsta) { IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", notif->sta_id); - return; + goto out; } lq_sta = &mvmsta->lq_sta.rs_fw; @@ -251,6 +253,8 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n", lq_sta->last_rate_n_flags); } +out: + rcu_read_unlock(); } void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 60abb0084ee5..5d776ec1840f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -2684,7 +2684,8 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, enum nl80211_band band, - struct rs_rate *rate) + struct rs_rate *rate, + bool init) { int i, nentries; unsigned long active_rate; @@ -2738,14 +2739,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, */ if (sta->vht_cap.vht_supported && best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { - switch (sta->bandwidth) { - case IEEE80211_STA_RX_BW_160: - case IEEE80211_STA_RX_BW_80: - case IEEE80211_STA_RX_BW_40: + /* + * In AP mode, when a new station associates, rs is initialized + * immediately upon association completion, before the phy + * context is updated with the association parameters, so the + * sta bandwidth might be wider than the phy context allows. + * To avoid this issue, always initialize rs with 20mhz + * bandwidth rate, and after authorization, when the phy context + * is already up-to-date, re-init rs with the correct bw. + */ + u32 bw = init ? 
RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta); + + switch (bw) { + case RATE_MCS_CHAN_WIDTH_40: + case RATE_MCS_CHAN_WIDTH_80: + case RATE_MCS_CHAN_WIDTH_160: initial_rates = rs_optimal_rates_vht; nentries = ARRAY_SIZE(rs_optimal_rates_vht); break; - case IEEE80211_STA_RX_BW_20: + case RATE_MCS_CHAN_WIDTH_20: initial_rates = rs_optimal_rates_vht_20mhz; nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); break; @@ -2756,7 +2768,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, active_rate = lq_sta->active_siso_rate; rate->type = LQ_VHT_SISO; - rate->bw = rs_bw_from_sta_bw(sta); + rate->bw = bw; } else if (sta->ht_cap.ht_supported && best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { initial_rates = rs_optimal_rates_ht; @@ -2839,7 +2851,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, tbl = &(lq_sta->lq_info[active_tbl]); rate = &tbl->rate; - rs_get_initial_rate(mvm, sta, lq_sta, band, rate); + rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init); rs_init_optimal_rate(mvm, sta, lq_sta); WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B, @@ -3998,18 +4010,18 @@ static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta, if (!mvmsta->vif) return; - debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, + debugfs_create_file("rate_scale_table", 0600, dir, lq_sta, &rs_sta_dbgfs_scale_table_ops); - debugfs_create_file("rate_stats_table", S_IRUSR, dir, + debugfs_create_file("rate_stats_table", 0400, dir, lq_sta, &rs_sta_dbgfs_stats_table_ops); - debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir, + debugfs_create_file("drv_tx_stats", 0600, dir, lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops); - debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, + debugfs_create_u8("tx_agg_tid_enable", 0600, dir, &lq_sta->tx_agg_tid_en); - debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir, + debugfs_create_u8("reduced_tpc", 0600, dir, &lq_sta->pers.dbg_fixed_txp_reduction); - MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600); return; err: IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index a3f7c1bf3cc8..4a4ccfd11e5b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -71,6 +71,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); struct iwl_mvm_key_pn *ptk_pn; + int res; u8 tid, keyidx; u8 pn[IEEE80211_CCMP_PN_LEN]; u8 *extiv; @@ -127,12 +128,13 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, pn[4] = extiv[1]; pn[5] = extiv[0]; - if (memcmp(pn, ptk_pn->q[queue].pn[tid], - IEEE80211_CCMP_PN_LEN) <= 0) + res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN); + if (res < 0) + return -1; + if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN)) return -1; - if (!(stats->flag & RX_FLAG_AMSDU_MORE)) - memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); + memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); stats->flag |= RX_FLAG_PN_VALIDATED; return 0; @@ -314,28 +316,21 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, } /* - * returns true if a packet outside BA session is a duplicate and - * should be dropped + * returns true if a packet is a duplicate and should be dropped. 
+ * Updates AMSDU PN tracking info */ -static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, - struct ieee80211_rx_status *rx_status, - struct ieee80211_hdr *hdr, - struct iwl_rx_mpdu_desc *desc) +static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, + struct ieee80211_rx_status *rx_status, + struct ieee80211_hdr *hdr, + struct iwl_rx_mpdu_desc *desc) { struct iwl_mvm_sta *mvm_sta; struct iwl_mvm_rxq_dup_data *dup_data; - u8 baid, tid, sub_frame_idx; + u8 tid, sub_frame_idx; if (WARN_ON(IS_ERR_OR_NULL(sta))) return false; - baid = (le32_to_cpu(desc->reorder_data) & - IWL_RX_MPDU_REORDER_BAID_MASK) >> - IWL_RX_MPDU_REORDER_BAID_SHIFT; - - if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) - return false; - mvm_sta = iwl_mvm_sta_from_mac80211(sta); dup_data = &mvm_sta->dup_data[queue]; @@ -365,6 +360,12 @@ static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, dup_data->last_sub_frame[tid] >= sub_frame_idx)) return true; + /* Allow same PN as the first subframe for following subframes */ + if (dup_data->last_seq[tid] == hdr->seq_ctrl && + sub_frame_idx > dup_data->last_sub_frame[tid] && + desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) + rx_status->flag |= RX_FLAG_ALLOW_SAME_PN; + dup_data->last_seq[tid] = hdr->seq_ctrl; dup_data->last_sub_frame[tid] = sub_frame_idx; @@ -830,6 +831,16 @@ out: rcu_read_unlock(); } +static void iwl_mvm_flip_address(u8 *addr) +{ + int i; + u8 mac_addr[ETH_ALEN]; + + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = addr[ETH_ALEN - i - 1]; + ether_addr_copy(addr, mac_addr); +} + void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { @@ -971,7 +982,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_csum(sta, skb, desc); - if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) { + if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) { kfree_skb(skb); goto out; } @@ -984,21 +995,16 @@ */ if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) && !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) { - int i; u8 *qc = ieee80211_get_qos_ctl(hdr); - u8 mac_addr[ETH_ALEN]; *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; - for (i = 0; i < ETH_ALEN; i++) - mac_addr[i] = hdr->addr3[ETH_ALEN - i - 1]; - ether_addr_copy(hdr->addr3, mac_addr); + if (mvm->trans->cfg->device_family == + IWL_DEVICE_FAMILY_9000) { + iwl_mvm_flip_address(hdr->addr3); - if (ieee80211_has_a4(hdr->frame_control)) { - for (i = 0; i < ETH_ALEN; i++) - mac_addr[i] = - hdr->addr4[ETH_ALEN - i - 1]; - ether_addr_copy(hdr->addr4, mac_addr); + if (ieee80211_has_a4(hdr->frame_control)) + iwl_mvm_flip_address(hdr->addr4); } } if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 356b16f40e78..b31f0ffbbbf0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -35,6 +35,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved.
* * Redistribution and use in source and binary forms, with or without @@ -85,6 +86,17 @@ enum iwl_mvm_traffic_load { #define IWL_SCAN_DWELL_PASSIVE 110 #define IWL_SCAN_DWELL_FRAGMENTED 44 #define IWL_SCAN_DWELL_EXTENDED 90 +#define IWL_SCAN_NUM_OF_FRAGS 3 + + +/* adaptive dwell max budget time [TU] for full scan */ +#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300 +/* adaptive dwell max budget time [TU] for directed scan */ +#define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100 +/* adaptive dwell default APs number */ +#define IWL_SCAN_ADWELL_DEFAULT_N_APS 2 +/* adaptive dwell default APs number in social channels (1, 6, 11) */ +#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10 struct iwl_mvm_scan_timing_params { u32 suspend_time; @@ -134,6 +146,9 @@ static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; + if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) + return (void *)&cmd->v8.data; + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) return (void *)&cmd->v7.data; @@ -143,6 +158,23 @@ static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) return (void *)&cmd->v1.data; } +static inline struct iwl_scan_umac_chan_param * +iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm) +{ + struct iwl_scan_req_umac *cmd = mvm->scan_cmd; + + if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) + return &cmd->v8.channel; + + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) + return &cmd->v7.channel; + + if (iwl_mvm_has_new_tx_api(mvm)) + return &cmd->v6.channel; + + return &cmd->v1.channel; +} + static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) { if (mvm->scan_rx_ant != ANT_NONE) @@ -1113,66 +1145,92 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_umac *cmd, struct iwl_mvm_scan_params *params) { - struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type]; + struct iwl_mvm_scan_timing_params *timing, *hb_timing; + u8 active_dwell, passive_dwell; - if (iwl_mvm_is_regular_scan(params)) - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); - else - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); + timing = &scan_timing[params->type]; + active_dwell = params->measurement_dwell ? + params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE; + passive_dwell = params->measurement_dwell ? 
+ params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE; if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { - if (params->measurement_dwell) { - cmd->v7.active_dwell = params->measurement_dwell; - cmd->v7.passive_dwell = params->measurement_dwell; - } else { - cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE; - cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE; - } - cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + cmd->v7.adwell_default_n_aps_social = + IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL; + cmd->v7.adwell_default_n_aps = + IWL_SCAN_ADWELL_DEFAULT_N_APS; + + /* if custom max budget was configured with debugfs */ + if (IWL_MVM_ADWELL_MAX_BUDGET) + cmd->v7.adwell_max_budget = + cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); + else if (params->ssids && params->ssids[0].ssid_len) + cmd->v7.adwell_max_budget = + cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); + else + cmd->v7.adwell_max_budget = + cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN); cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->max_out_time); cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->suspend_time); + if (iwl_mvm_is_cdb_supported(mvm)) { + hb_timing = &scan_timing[params->type]; + cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] = - cpu_to_le32(timing->max_out_time); + cpu_to_le32(hb_timing->max_out_time); cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] = - cpu_to_le32(timing->suspend_time); + cpu_to_le32(hb_timing->suspend_time); } - return; - } - - if (params->measurement_dwell) { - cmd->v1.active_dwell = params->measurement_dwell; - cmd->v1.passive_dwell = params->measurement_dwell; - cmd->v1.extended_dwell = params->measurement_dwell; + if (!iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) { + cmd->v7.active_dwell = active_dwell; + cmd->v7.passive_dwell = passive_dwell; + cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + } else { + cmd->v8.active_dwell[SCAN_LB_LMAC_IDX] = active_dwell; + cmd->v8.passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell; + if (iwl_mvm_is_cdb_supported(mvm)) { + cmd->v8.active_dwell[SCAN_HB_LMAC_IDX] = + active_dwell; + cmd->v8.passive_dwell[SCAN_HB_LMAC_IDX] = + passive_dwell; + } + } } else { - cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE; - cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE; - cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED; - } - cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + cmd->v1.extended_dwell = params->measurement_dwell ? 
+ params->measurement_dwell : IWL_SCAN_DWELL_EXTENDED; + cmd->v1.active_dwell = active_dwell; + cmd->v1.passive_dwell = passive_dwell; + cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; - if (iwl_mvm_has_new_tx_api(mvm)) { - cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); - cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] = - cpu_to_le32(timing->max_out_time); - cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] = - cpu_to_le32(timing->suspend_time); if (iwl_mvm_is_cdb_supported(mvm)) { + hb_timing = &scan_timing[params->type]; + cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] = - cpu_to_le32(timing->max_out_time); + cpu_to_le32(hb_timing->max_out_time); cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] = + cpu_to_le32(hb_timing->suspend_time); + } + + if (iwl_mvm_has_new_tx_api(mvm)) { + cmd->v6.scan_priority = + cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->max_out_time); + cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->suspend_time); + } else { + cmd->v1.scan_priority = + cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + cmd->v1.max_out_time = + cpu_to_le32(timing->max_out_time); + cmd->v1.suspend_time = cpu_to_le32(timing->suspend_time); } - } else { - cmd->v1.max_out_time = cpu_to_le32(timing->max_out_time); - cmd->v1.suspend_time = cpu_to_le32(timing->suspend_time); - cmd->v1.scan_priority = - cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); } } @@ -1234,11 +1292,39 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; + if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE && + vif->type != NL80211_IFTYPE_P2P_DEVICE) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL; + + /* + * Extended dwell is relevant only for the low band to start with, as + * it is being used for social channels only (1, 6, 11), so we can + * check only the scan type on the low band, also for CDB. + */ if (iwl_mvm_is_regular_scan(params) && vif->type != NL80211_IFTYPE_P2P_DEVICE && - params->type != IWL_SCAN_TYPE_FRAGMENTED) + params->type != IWL_SCAN_TYPE_FRAGMENTED && + !iwl_mvm_is_adaptive_dwell_supported(mvm) && + !iwl_mvm_is_oce_supported(mvm)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL; + if (iwl_mvm_is_oce_supported(mvm)) { + if ((params->flags & + NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE)) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE; + /* Since IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL and + * NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION share + * the same bit, we need to make sure that we use this bit here + * only when IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL cannot be + * used. */
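The bit-sharing guard described in the comment above can be seen in isolation in the sketch below (the patch's own code continues right after); the flag names come from the patch, while the helper and its bool parameter are illustrative only:

static u16 map_oce_flags(u32 nl_scan_flags, bool adaptive_dwell)
{
	u16 flags = 0;

	if (nl_scan_flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE)
		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE;

	/*
	 * PROB_REQ_DEFER_SUPP reuses the EXTENDED_DWELL bit, so it may
	 * only be set when extended dwell is not in use, i.e. on
	 * adaptive-dwell firmware.
	 */
	if ((nl_scan_flags &
	     NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) &&
	    adaptive_dwell)
		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP;

	if (nl_scan_flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME)
		flags |= IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME;

	return flags;
}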
if ((params->flags & + NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) && + !WARN_ON_ONCE(!iwl_mvm_is_adaptive_dwell_supported(mvm))) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP; + if ((params->flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME)) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME; + } + return flags; } @@ -1247,6 +1333,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int type) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; + struct iwl_scan_umac_chan_param *chan_param; void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm); struct iwl_scan_req_umac_tail *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * @@ -1254,8 +1341,11 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int uid, i; u32 ssid_bitmap = 0; u8 channel_flags = 0; + u16 gen_flags; struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif); + chan_param = iwl_mvm_get_scan_req_umac_channel(mvm); + lockdep_assert_held(&mvm->mutex); if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS)) @@ -1272,8 +1362,17 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvm->scan_uid_status[uid] = type; cmd->uid = cpu_to_le32(uid); - cmd->general_flags = cpu_to_le16(iwl_mvm_scan_umac_flags(mvm, params, - vif)); + gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif); + cmd->general_flags = cpu_to_le16(gen_flags); + if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) { + if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED) + cmd->v8.num_of_fragments[SCAN_LB_LMAC_IDX] = + IWL_SCAN_NUM_OF_FRAGS; + if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED) + cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] = + IWL_SCAN_NUM_OF_FRAGS; + } + cmd->scan_start_mac_id = scan_vif->id; if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) @@ -1284,16 +1383,8 @@ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; - if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { - cmd->v7.channel_flags = channel_flags; - cmd->v7.n_channels = params->n_channels; - } else if (iwl_mvm_has_new_tx_api(mvm)) { - cmd->v6.channel_flags = channel_flags; - cmd->v6.n_channels = params->n_channels; - } else { - cmd->v1.channel_flags = channel_flags; - cmd->v1.n_channels = params->n_channels; - } + chan_param->flags = channel_flags; + chan_param->count = params->n_channels; iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap); @@ -1732,7 +1823,9 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm) { int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1; - if (iwl_mvm_is_adaptive_dwell_supported(mvm)) + if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) + base_size = IWL_SCAN_REQ_UMAC_SIZE_V8; + else if (iwl_mvm_is_adaptive_dwell_supported(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V7; else if (iwl_mvm_has_new_tx_api(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V6; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 6b2674e02606..80067eb9ea05 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1695,7 +1695,8 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, u32 qmask, enum nl80211_iftype iftype, enum iwl_sta_type type) { - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || + sta->sta_id == IWL_MVM_INVALID_STA) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); if
(WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; @@ -2039,7 +2040,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_MCAST, .sta_id = msta->sta_id, - .tid = IWL_MAX_TID_COUNT, + .tid = 0, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; @@ -2053,6 +2054,17 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return -ENOTSUPP; /* + * In IBSS, ieee80211_check_queues() sets the cab_queue to be + * invalid, so make sure we use the queue we want. + * Note that this is done here as we want to avoid making DQA + * changes in mac80211 layer. + */ + if (vif->type == NL80211_IFTYPE_ADHOC) { + vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; + mvmvif->cab_queue = vif->cab_queue; + } + + /* * While in previous FWs we had to exclude cab queue from TFD queue * mask, now it is needed as any other queue. */ @@ -2079,24 +2091,13 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (iwl_mvm_has_new_tx_api(mvm)) { int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, msta->sta_id, - IWL_MAX_TID_COUNT, + 0, timeout); mvmvif->cab_queue = queue; } else if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_STA_TYPE)) { - /* - * In IBSS, ieee80211_check_queues() sets the cab_queue to be - * invalid, so make sure we use the queue we want. - * Note that this is done here as we want to avoid making DQA - * changes in mac80211 layer. - */ - if (vif->type == NL80211_IFTYPE_ADHOC) { - vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; - mvmvif->cab_queue = vif->cab_queue; - } + IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, &cfg, timeout); - } return 0; } @@ -2115,7 +2116,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, - IWL_MAX_TID_COUNT, 0); + 0, 0); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); if (ret) @@ -2478,28 +2479,12 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* * Note the possible cases: - * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed - * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free - * one and mark it as reserved - * 3. In DQA mode, but no traffic yet on this TID: same treatment as in - * non-DQA mode, since the TXQ hasn't yet been allocated - * Don't support case 3 for new TX path as it is not expected to happen - * and aggregation will be offloaded soon anyway + * 1. An enabled TXQ - TXQ needs to become agg'ed + * 2. 
The TXQ hasn't yet been enabled, so find a free one and mark + * it as reserved */ txq_id = mvmsta->tid_data[tid].txq_id; - if (iwl_mvm_has_new_tx_api(mvm)) { - if (txq_id == IWL_MVM_INVALID_QUEUE) { - ret = -ENXIO; - goto release_locks; - } - } else if (unlikely(mvm->queue_info[txq_id].status == - IWL_MVM_QUEUE_SHARED)) { - ret = -ENXIO; - IWL_DEBUG_TX_QUEUES(mvm, - "Can't start tid %d agg on shared queue!\n", - tid); - goto release_locks; - } else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { + if (txq_id == IWL_MVM_INVALID_QUEUE) { txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); @@ -2508,16 +2493,16 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_ERR(mvm, "Failed to allocate agg queue\n"); goto release_locks; } - /* - * TXQ shouldn't be in inactive mode for non-DQA, so getting - * an inactive queue from iwl_mvm_find_free_queue() is - * certainly a bug - */ - WARN_ON(mvm->queue_info[txq_id].status == - IWL_MVM_QUEUE_INACTIVE); /* TXQ hasn't yet been enabled, so mark it only as reserved */ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; + } else if (unlikely(mvm->queue_info[txq_id].status == + IWL_MVM_QUEUE_SHARED)) { + ret = -ENXIO; + IWL_DEBUG_TX_QUEUES(mvm, + "Can't start tid %d agg on shared queue!\n", + tid); + goto release_locks; } spin_unlock(&mvm->queue_info_lock); @@ -2696,8 +2681,10 @@ out: static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, - u16 txq_id) + struct iwl_mvm_tid_data *tid_data) { + u16 txq_id = tid_data->txq_id; + if (iwl_mvm_has_new_tx_api(mvm)) return; @@ -2709,8 +2696,10 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, * allocated through iwl_mvm_enable_txq, so we can just mark it back as * free. 
*/ - if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) + if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) { mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; + tid_data->txq_id = IWL_MVM_INVALID_QUEUE; + } spin_unlock_bh(&mvm->queue_info_lock); } @@ -2741,7 +2730,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvmsta->agg_tids &= ~BIT(tid); - iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); + iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); switch (tid_data->state) { case IWL_AGG_ON: @@ -2808,7 +2797,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvmsta->agg_tids &= ~BIT(tid); spin_unlock_bh(&mvmsta->lock); - iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); + iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); if (old_state >= IWL_AGG_ON) { iwl_mvm_drain_sta(mvm, mvmsta, true); @@ -3170,8 +3159,9 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, int ret, size; u32 status; + /* This is a valid situation for GTK removal */ if (sta_id == IWL_MVM_INVALID_STA) - return -EINVAL; + return 0; key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK); @@ -3232,17 +3222,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, } sta_id = mvm_sta->sta_id; - if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { - ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, - false); - goto end; - } - /* * It is possible that the 'sta' parameter is NULL, and thus - * there is a need to retrieve the sta from the local station + * there is a need to retrieve the sta from the local station * table. */ if (!sta) { @@ -3257,6 +3239,17 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) return -EINVAL; + } else { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + sta_id = mvmvif->mcast_sta.sta_id; + } + + if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { + ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); + goto end; } /* If the key_offset is not pre-assigned, we need to find a diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 200ab50ec86b..cd91bc44259c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -35,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -198,9 +195,13 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (vif->type != NL80211_IFTYPE_STATION) return false; - if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) + + if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc && + vif->bss_conf.dtim_period) return false; if (errmsg) IWL_ERR(mvm, "%s\n", errmsg); @@ -344,7 +345,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, * and know the dtim period. */ iwl_mvm_te_check_disconnect(mvm, te_data->vif, - "No association and the time event is over already..."); + "No beacon heard and the time event is over already..."); break; default: break; @@ -616,7 +617,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, time_cmd.repeat = 1; time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END | - T2_V2_START_IMMEDIATELY); + TE_V2_START_IMMEDIATELY); if (!wait_for_notif) { iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); @@ -803,7 +804,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, time_cmd.repeat = 1; time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END | - T2_V2_START_IMMEDIATELY); + TE_V2_START_IMMEDIATELY); return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } @@ -913,6 +914,8 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, time_cmd.interval = cpu_to_le32(1); time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_ABSENCE); + if (!apply_time) + time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY); return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index dda77b327c98..795065974d78 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -419,11 +419,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, { struct ieee80211_key_conf *keyconf = info->control.hw_key; u8 *crypto_hdr = skb_frag->data + hdrlen; + enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM; u64 pn; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_CCMP_256: iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; @@ -447,13 +447,16 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: + type = TX_CMD_SEC_GCMP; + /* Fall through */ + case WLAN_CIPHER_SUITE_CCMP_256: /* TODO: Taking the key from the table might introduce a race * when PTK rekeying is done, having old packets with a PN * based on the old key but the message encrypted with a new * one. * Need to handle this. */
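The fall-through in iwl_mvm_set_tx_cmd_crypto then resolves as shown below; this is an illustrative condensation of the hunk above rather than new driver code:

	/* default stays CCM, so CCMP-256 alone gets TX_CMD_SEC_CCM */
	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		type = TX_CMD_SEC_GCMP;
		/* fall through */
	case WLAN_CIPHER_SUITE_CCMP_256:
		/* all three ciphers take the key from the FW table */
		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
		tx_cmd->key[0] = keyconf->hw_key_idx;
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;
	}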
*/ - tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE; + tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE; tx_cmd->key[0] = keyconf->hw_key_idx; iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; @@ -645,7 +648,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || info.control.vif->type == NL80211_IFTYPE_AP || info.control.vif->type == NL80211_IFTYPE_ADHOC) { - sta_id = mvmvif->bcast_sta.sta_id; + if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE) + sta_id = mvmvif->bcast_sta.sta_id; + else + sta_id = mvmvif->mcast_sta.sta_id; + queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr->frame_control); if (queue < 0) @@ -680,6 +687,74 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) } #ifdef CONFIG_INET + +static int +iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, + netdev_features_t netdev_flags, + struct sk_buff_head *mpdus_skb) +{ + struct sk_buff *tmp, *next; + struct ieee80211_hdr *hdr = (void *)skb->data; + char cb[sizeof(skb->cb)]; + u16 i = 0; + unsigned int tcp_payload_len; + unsigned int mss = skb_shinfo(skb)->gso_size; + bool ipv4 = (skb->protocol == htons(ETH_P_IP)); + u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; + + skb_shinfo(skb)->gso_size = num_subframes * mss; + memcpy(cb, skb->cb, sizeof(cb)); + + next = skb_gso_segment(skb, netdev_flags); + skb_shinfo(skb)->gso_size = mss; + if (WARN_ON_ONCE(IS_ERR(next))) + return -EINVAL; + else if (next) + consume_skb(skb); + + while (next) { + tmp = next; + next = tmp->next; + + memcpy(tmp->cb, cb, sizeof(tmp->cb)); + /* + * Compute the length of all the data added for the A-MSDU. + * This will be used to compute the length to write in the TX + * command. We have: SNAP + IP + TCP for n -1 subframes and + * ETH header for n subframes. + */ + tcp_payload_len = skb_tail_pointer(tmp) - + skb_transport_header(tmp) - + tcp_hdrlen(tmp) + tmp->data_len; + + if (ipv4) + ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); + + if (tcp_payload_len > mss) { + skb_shinfo(tmp)->gso_size = mss; + } else { + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc; + + if (ipv4) + ip_send_check(ip_hdr(tmp)); + + qc = ieee80211_get_qos_ctl((void *)tmp->data); + *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; + } + skb_shinfo(tmp)->gso_size = 0; + } + + tmp->prev = NULL; + tmp->next = NULL; + + __skb_queue_tail(mpdus_skb, tmp); + i++; + } + + return 0; +} + static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, @@ -688,14 +763,10 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int mss = skb_shinfo(skb)->gso_size; - struct sk_buff *tmp, *next; - char cb[sizeof(skb->cb)]; unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len; - bool ipv4 = (skb->protocol == htons(ETH_P_IP)); - u16 ip_base_id = ipv4 ? 
ntohs(ip_hdr(skb)->id) : 0; - u16 snap_ip_tcp, pad, i = 0; + u16 snap_ip_tcp, pad; unsigned int dbg_max_amsdu_len; - netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG; + netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG; u8 *qc, tid, txf; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + @@ -705,16 +776,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, if (!sta->max_amsdu_len || !ieee80211_is_data_qos(hdr->frame_control) || - (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) { - num_subframes = 1; - pad = 0; - goto segment; - } - - qc = ieee80211_get_qos_ctl(hdr); - tid = *qc & IEEE80211_QOS_CTL_TID_MASK; - if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) - return -EINVAL; + (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) + return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); /* * Do not build AMSDU for IPv6 with extension headers. @@ -723,22 +786,22 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, if (skb->protocol == htons(ETH_P_IPV6) && ((struct ipv6hdr *)skb_network_header(skb))->nexthdr != IPPROTO_TCP) { - num_subframes = 1; - pad = 0; - netdev_features &= ~NETIF_F_CSUM_MASK; - goto segment; + netdev_flags &= ~NETIF_F_CSUM_MASK; + return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); } + qc = ieee80211_get_qos_ctl(hdr); + tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) + return -EINVAL; + /* * No need to lock amsdu_in_ampdu_allowed since it can't be modified * during an BA session. */ if (info->flags & IEEE80211_TX_CTL_AMPDU && - !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) { - num_subframes = 1; - pad = 0; - goto segment; - } + !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) + return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); max_amsdu_len = sta->max_amsdu_len; @@ -804,56 +867,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * Trick the segmentation function to make it * create SKBs that can fit into one A-MSDU. */ -segment: - skb_shinfo(skb)->gso_size = num_subframes * mss; - memcpy(cb, skb->cb, sizeof(cb)); - - next = skb_gso_segment(skb, netdev_features); - skb_shinfo(skb)->gso_size = mss; - if (WARN_ON_ONCE(IS_ERR(next))) - return -EINVAL; - else if (next) - consume_skb(skb); - - while (next) { - tmp = next; - next = tmp->next; - - memcpy(tmp->cb, cb, sizeof(tmp->cb)); - /* - * Compute the length of all the data added for the A-MSDU. - * This will be used to compute the length to write in the TX - * command. We have: SNAP + IP + TCP for n -1 subframes and - * ETH header for n subframes. 
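[Editor's note] The segmentation refactor in this hunk moves the old open-coded loop into iwl_mvm_tx_tso_segment() unchanged: gso_size is temporarily inflated to num_subframes * mss so skb_gso_segment() emits one super-segment per A-MSDU, then restored, with per-segment IP id and QoS A-MSDU-present fixups. The subframe count itself is plain header/padding arithmetic; a standalone worked example, assuming IPv4/TCP without options and an illustrative HT-class A-MSDU limit:

#include <stdio.h>

int main(void)
{
	unsigned int mss = 1460;		/* TCP payload per subframe */
	unsigned int snap_ip_tcp = 8 + 20 + 20;	/* SNAP + IPv4 + TCP, no options */
	unsigned int max_amsdu_len = 7935;	/* illustrative HT A-MSDU cap */
	/* each subframe: ethernet header + SNAP/IP/TCP + payload, 4-byte padded */
	unsigned int subf_len = 14 + snap_ip_tcp + mss;
	unsigned int pad = (4 - subf_len) & 0x3;
	unsigned int num_subframes = (max_amsdu_len + pad) / (subf_len + pad);

	if (!num_subframes)
		num_subframes = 1;

	/* skb_gso_segment() is then run with gso_size = num_subframes * mss,
	 * so every resulting segment carries one A-MSDU's worth of payload */
	printf("%u subframes per A-MSDU, gso_size %u\n",
	       num_subframes, num_subframes * mss);
	return 0;
}

With these numbers the trick yields 5 subframes per A-MSDU, i.e. each "segment" handed back by the GSO layer covers 7300 bytes of TCP payload.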
- */ - tcp_payload_len = skb_tail_pointer(tmp) - - skb_transport_header(tmp) - - tcp_hdrlen(tmp) + tmp->data_len; - - if (ipv4) - ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); - - if (tcp_payload_len > mss) { - skb_shinfo(tmp)->gso_size = mss; - } else { - if (ieee80211_is_data_qos(hdr->frame_control)) { - qc = ieee80211_get_qos_ctl((void *)tmp->data); - - if (ipv4) - ip_send_check(ip_hdr(tmp)); - *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; - } - skb_shinfo(tmp)->gso_size = 0; - } - - tmp->prev = NULL; - tmp->next = NULL; - - __skb_queue_tail(mpdus_skb, tmp); - i++; - } - - return 0; + return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags, + mpdus_skb); } #else /* CONFIG_INET */ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, @@ -1887,14 +1902,12 @@ int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags) struct iwl_mvm_int_sta *int_sta = sta; struct iwl_mvm_sta *mvm_sta = sta; - if (iwl_mvm_has_new_tx_api(mvm)) { - if (internal) - return iwl_mvm_flush_sta_tids(mvm, int_sta->sta_id, - BIT(IWL_MGMT_TID), flags); + BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) != + offsetof(struct iwl_mvm_sta, sta_id)); + if (iwl_mvm_has_new_tx_api(mvm)) return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, - 0xFF, flags); - } + 0xff | BIT(IWL_MGMT_TID), flags); if (internal) return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index d65e1db7c097..d99d9ea78e4c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -549,12 +549,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); - trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, - table.data1, table.data2, table.data3, - table.blink2, table.ilink1, - table.ilink2, table.bcon_time, table.gp1, - table.gp2, table.fw_rev_type, table.major, - table.minor, table.hw_ver, table.brd_ver); + trace_iwlwifi_dev_ucode_error(trans->dev, &table, table.hw_ver, table.brd_ver); IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0); @@ -800,12 +795,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; - bool remove_mac_queue = true; + bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE; int ret; + if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES)) + return -EINVAL; + if (iwl_mvm_has_new_tx_api(mvm)) { spin_lock_bh(&mvm->queue_info_lock); - mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue); + + if (remove_mac_queue) + mvm->hw_queue_to_mac80211[queue] &= + ~BIT(mac80211_queue); + spin_unlock_bh(&mvm->queue_info_lock); iwl_trans_txq_free(mvm->trans, queue); @@ -1027,14 +1029,18 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) } int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool prev) + bool low_latency, + enum iwl_mvm_low_latency_cause cause) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int res; - bool low_latency; + bool prev; lockdep_assert_held(&mvm->mutex); + prev = iwl_mvm_vif_low_latency(mvmvif); + iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause); + low_latency = iwl_mvm_vif_low_latency(mvmvif); if (low_latency == prev) diff --git 
a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 56fc28750a41..959de2f8bb28 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -8,6 +8,7 @@ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016-2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -36,6 +37,7 @@ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -517,9 +519,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, /* 9000 Series */ - {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)}, @@ -544,11 +546,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, @@ -569,38 +575,146 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x00A0, 
iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x4030, 
iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0064, 
iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)}, @@ -626,11 +740,44 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_cfg_soc)}, + 
{IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)}, @@ -647,10 +794,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)}, /* 22000 Series */ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)}, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index b406b536c850..f8a0234d332c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2616,12 +2616,12 @@ int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { struct dentry *dir = trans->dbgfs_dir; - DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); - DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); - DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); - DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); - DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); - DEBUGFS_ADD_FILE(rfkill, dir, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rx_queue, dir, 0400); + DEBUGFS_ADD_FILE(tx_queue, dir, 0400); + DEBUGFS_ADD_FILE(interrupt, dir, 0600); + DEBUGFS_ADD_FILE(csr, dir, 0200); + DEBUGFS_ADD_FILE(fh_reg, dir, 0400); + DEBUGFS_ADD_FILE(rfkill, dir, 0600); return 0; err: diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 6d0a907d5ba5..fabae0f60683 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -147,7 +147,7 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans, /* Sanity check on number of chunks */ num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd); - if (num_tbs >= trans_pcie->max_tbs) { + if (num_tbs > trans_pcie->max_tbs) { IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); return; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 3f85713c41dc..1a566287993d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -378,7 +378,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, /* Sanity check on number of chunks */ num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); - if (num_tbs >= trans_pcie->max_tbs) { + if (num_tbs > trans_pcie->max_tbs) { IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); /* @todo issue fatal error, it is quite serious situation */ return; diff --git a/drivers/net/wireless/intersil/Kconfig b/drivers/net/wireless/intersil/Kconfig index 9da136049955..e89fce1d4f27 100644 --- a/drivers/net/wireless/intersil/Kconfig +++ b/drivers/net/wireless/intersil/Kconfig @@ -5,8 +5,8 @@ config 
WLAN_VENDOR_INTERSIL If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. if WLAN_VENDOR_INTERSIL diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c index ab6d39e12069..1c6d428515a4 100644 --- a/drivers/net/wireless/intersil/p54/main.c +++ b/drivers/net/wireless/intersil/p54/main.c @@ -27,7 +27,7 @@ #include "lmac.h" static bool modparam_nohwcrypt; -module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); MODULE_DESCRIPTION("Softmac Prism54 common code"); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 1cf22e62e3dd..6afe896e5cb8 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -253,7 +253,7 @@ static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c) static unsigned int hwsim_net_id; -static int hwsim_netgroup; +static struct ida hwsim_netgroup_ida = IDA_INIT; struct hwsim_net { int netgroup; @@ -267,11 +267,13 @@ static inline int hwsim_net_get_netgroup(struct net *net) return hwsim_net->netgroup; } -static inline void hwsim_net_set_netgroup(struct net *net) +static inline int hwsim_net_set_netgroup(struct net *net) { struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); - hwsim_net->netgroup = hwsim_netgroup++; + hwsim_net->netgroup = ida_simple_get(&hwsim_netgroup_ida, + 0, 0, GFP_KERNEL); + return hwsim_net->netgroup >= 0 ? 0 : -ENOMEM; } static inline u32 hwsim_net_get_wmediumd(struct net *net) @@ -493,6 +495,7 @@ static LIST_HEAD(hwsim_radios); static struct workqueue_struct *hwsim_wq; static struct rhashtable hwsim_radios_rht; static int hwsim_radio_idx; +static int hwsim_radios_generation = 1; static struct platform_driver mac80211_hwsim_driver = { .driver = { @@ -637,6 +640,7 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = { [HWSIM_ATTR_RADIO_NAME] = { .type = NLA_STRING }, [HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG }, [HWSIM_ATTR_FREQ] = { .type = NLA_U32 }, + [HWSIM_ATTR_PERM_ADDR] = { .type = NLA_UNSPEC, .len = ETH_ALEN }, }; static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, @@ -2408,6 +2412,7 @@ struct hwsim_new_radio_params { bool destroy_on_close; const char *hwname; bool no_vif; + const u8 *perm_addr; }; static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb, @@ -2572,15 +2577,25 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, skb_queue_head_init(&data->pending); SET_IEEE80211_DEV(hw, data->dev); - eth_zero_addr(addr); - addr[0] = 0x02; - addr[3] = idx >> 8; - addr[4] = idx; - memcpy(data->addresses[0].addr, addr, ETH_ALEN); - memcpy(data->addresses[1].addr, addr, ETH_ALEN); - data->addresses[1].addr[0] |= 0x40; - hw->wiphy->n_addresses = 2; - hw->wiphy->addresses = data->addresses; + if (!param->perm_addr) { + eth_zero_addr(addr); + addr[0] = 0x02; + addr[3] = idx >> 8; + addr[4] = idx; + memcpy(data->addresses[0].addr, addr, ETH_ALEN); + /* Why need here second address ? 
*/ + memcpy(data->addresses[1].addr, addr, ETH_ALEN); + data->addresses[1].addr[0] |= 0x40; + hw->wiphy->n_addresses = 2; + hw->wiphy->addresses = data->addresses; + /* possible address clash is checked at hash table insertion */ + } else { + memcpy(data->addresses[0].addr, param->perm_addr, ETH_ALEN); + /* compatibility with automatically generated mac addr */ + memcpy(data->addresses[1].addr, param->perm_addr, ETH_ALEN); + hw->wiphy->n_addresses = 2; + hw->wiphy->addresses = data->addresses; + } data->channels = param->channels; data->use_chanctx = param->use_chanctx; @@ -2727,6 +2742,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, mutex_init(&data->mutex); data->netgroup = hwsim_net_get_netgroup(net); + data->wmediumd = hwsim_net_get_wmediumd(net); /* Enable frame retransmissions for lossy channels */ hw->max_rates = 4; @@ -2785,13 +2801,17 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht, hwsim_rht_params); if (err < 0) { - pr_debug("mac80211_hwsim: radio index %d already present\n", - idx); + if (info) { + GENL_SET_ERR_MSG(info, "perm addr already present"); + NL_SET_BAD_ATTR(info->extack, + info->attrs[HWSIM_ATTR_PERM_ADDR]); + } spin_unlock_bh(&hwsim_radio_lock); goto failed_final_insert; } list_add_tail(&data->list, &hwsim_radios); + hwsim_radios_generation++; spin_unlock_bh(&hwsim_radio_lock); if (idx > 0) @@ -3210,6 +3230,19 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) param.regd = hwsim_world_regdom_custom[idx]; } + if (info->attrs[HWSIM_ATTR_PERM_ADDR]) { + if (!is_valid_ether_addr( + nla_data(info->attrs[HWSIM_ATTR_PERM_ADDR]))) { + GENL_SET_ERR_MSG(info,"MAC is no valid source addr"); + NL_SET_BAD_ATTR(info->extack, + info->attrs[HWSIM_ATTR_PERM_ADDR]); + return -EINVAL; + } + + + param.perm_addr = nla_data(info->attrs[HWSIM_ATTR_PERM_ADDR]); + } + ret = mac80211_hwsim_new_radio(info, ¶m); kfree(hwname); return ret; @@ -3249,6 +3282,7 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) list_del(&data->list); rhashtable_remove_fast(&hwsim_radios_rht, &data->rht, hwsim_rht_params); + hwsim_radios_generation++; spin_unlock_bh(&hwsim_radio_lock); mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), info); @@ -3305,17 +3339,19 @@ out_err: static int hwsim_dump_radio_nl(struct sk_buff *skb, struct netlink_callback *cb) { - int idx = cb->args[0]; + int last_idx = cb->args[0]; struct mac80211_hwsim_data *data = NULL; - int res; + int res = 0; + void *hdr; spin_lock_bh(&hwsim_radio_lock); + cb->seq = hwsim_radios_generation; - if (idx == hwsim_radio_idx) + if (last_idx >= hwsim_radio_idx-1) goto done; list_for_each_entry(data, &hwsim_radios, list) { - if (data->idx < idx) + if (data->idx <= last_idx) continue; if (!net_eq(wiphy_net(data->hw->wiphy), sock_net(skb->sk))) @@ -3328,14 +3364,25 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb, if (res < 0) break; - idx = data->idx + 1; + last_idx = data->idx; } - cb->args[0] = idx; + cb->args[0] = last_idx; + + /* list changed, but no new element sent, set interrupted flag */ + if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) { + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, &hwsim_genl_family, + NLM_F_MULTI, HWSIM_CMD_GET_RADIO); + if (!hdr) + res = -EMSGSIZE; + genl_dump_check_consistent(cb, hdr); + genlmsg_end(skb, hdr); + } done: spin_unlock_bh(&hwsim_radio_lock); - return skb->len; + return res ?: skb->len; } /* Generic Netlink 
operations array */ @@ -3393,6 +3440,7 @@ static void destroy_radio(struct work_struct *work) struct mac80211_hwsim_data *data = container_of(work, struct mac80211_hwsim_data, destroy_work); + hwsim_radios_generation++; mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL); } @@ -3462,9 +3510,7 @@ failure: static __net_init int hwsim_init_net(struct net *net) { - hwsim_net_set_netgroup(net); - - return 0; + return hwsim_net_set_netgroup(net); } static void __net_exit hwsim_exit_net(struct net *net) @@ -3483,10 +3529,16 @@ static void __net_exit hwsim_exit_net(struct net *net) list_del(&data->list); rhashtable_remove_fast(&hwsim_radios_rht, &data->rht, hwsim_rht_params); - INIT_WORK(&data->destroy_work, destroy_radio); - queue_work(hwsim_wq, &data->destroy_work); + hwsim_radios_generation++; + spin_unlock_bh(&hwsim_radio_lock); + mac80211_hwsim_del_radio(data, + wiphy_name(data->hw->wiphy), + NULL); + spin_lock_bh(&hwsim_radio_lock); } spin_unlock_bh(&hwsim_radio_lock); + + ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net)); } static struct pernet_operations hwsim_net_ops = { @@ -3516,7 +3568,7 @@ static int __init init_mac80211_hwsim(void) spin_lock_init(&hwsim_radio_lock); - hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0); + hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0); if (!hwsim_wq) return -ENOMEM; rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h index a96a79c1eff5..0fe3199f8c72 100644 --- a/drivers/net/wireless/mac80211_hwsim.h +++ b/drivers/net/wireless/mac80211_hwsim.h @@ -68,7 +68,12 @@ enum hwsim_tx_control_flags { * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE * @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters, * returns the radio ID (>= 0) or negative on errors, if successful - * then multicast the result + * then multicast the result, uses optional parameter: + * %HWSIM_ATTR_REG_STRICT_REG, %HWSIM_ATTR_SUPPORT_P2P_DEVICE, + * %HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE, %HWSIM_ATTR_CHANNELS, + * %HWSIM_ATTR_NO_VIF, %HWSIM_ATTR_RADIO_NAME, %HWSIM_ATTR_USE_CHANCTX, + * %HWSIM_ATTR_REG_HINT_ALPHA2, %HWSIM_ATTR_REG_CUSTOM_REG, + * %HWSIM_ATTR_PERM_ADDR * @HWSIM_CMD_DEL_RADIO: destroy a radio, reply is multicasted * @HWSIM_CMD_GET_RADIO: fetch information about existing radios, uses: * %HWSIM_ATTR_RADIO_ID @@ -126,6 +131,7 @@ enum { * @HWSIM_ATTR_FREQ: Frequency at which packet is transmitted or received. * @HWSIM_ATTR_TX_INFO_FLAGS: additional flags for corresponding * rates of %HWSIM_ATTR_TX_INFO + * @HWSIM_ATTR_PERM_ADDR: permanent mac address of new radio * @__HWSIM_ATTR_MAX: enum limit */ @@ -153,6 +159,7 @@ enum { HWSIM_ATTR_FREQ, HWSIM_ATTR_PAD, HWSIM_ATTR_TX_INFO_FLAGS, + HWSIM_ATTR_PERM_ADDR, __HWSIM_ATTR_MAX, }; #define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1) diff --git a/drivers/net/wireless/marvell/Kconfig b/drivers/net/wireless/marvell/Kconfig index 4938c7ec0009..27038901d3ee 100644 --- a/drivers/net/wireless/marvell/Kconfig +++ b/drivers/net/wireless/marvell/Kconfig @@ -5,8 +5,8 @@ config WLAN_VENDOR_MARVELL If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. 
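[Editor's note] The hwsim_dump_radio_nl() rework above resumes an interrupted dump from the index of the last radio actually emitted, so radios deleted between batches are skipped instead of shifting later entries past a positional cursor, and every add/remove bumps hwsim_radios_generation so a changed list can be flagged through the netlink dump sequence numbers. A small userspace model of that resumption pattern; the types and batching are made up for illustration, not the driver's structures:

#include <stdio.h>

#define NRADIOS 5

struct radio { int idx; int alive; };

static struct radio radios[NRADIOS];
static int generation = 1;	/* bumped on every add/remove */

/* Resume by last emitted id: entries removed in the meantime are simply
 * skipped rather than causing live entries to be missed. */
static int dump_batch(int *last_idx, int budget)
{
	int sent = 0;

	for (int i = 0; i < NRADIOS && sent < budget; i++) {
		if (!radios[i].alive || radios[i].idx <= *last_idx)
			continue;
		printf("radio %d (gen %d)\n", radios[i].idx, generation);
		*last_idx = radios[i].idx;
		sent++;
	}
	return sent;
}

int main(void)
{
	int last_idx = -1;
	int seq;

	for (int i = 0; i < NRADIOS; i++)
		radios[i] = (struct radio){ .idx = i, .alive = 1 };

	seq = generation;	/* like cb->seq = hwsim_radios_generation */
	dump_batch(&last_idx, 2);

	radios[2].alive = 0;	/* deletion between batches... */
	generation++;		/* ...changes the generation */

	/* an empty batch plus a changed generation is the "interrupted"
	 * case the new genl_dump_check_consistent() call reports */
	if (!dump_batch(&last_idx, 2) && seq != generation)
		puts("dump interrupted: radio list changed");
	return 0;
}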
if WLAN_VENDOR_MARVELL diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index 8772e3949327..5d75c971004b 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -341,6 +341,38 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, le16_to_cpu(ht_cap->header.len)); mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap); + /* Update HT40 capability from current channel information */ + if (bss_desc->bcn_ht_oper) { + u8 ht_param = bss_desc->bcn_ht_oper->ht_param; + u8 radio = + mwifiex_band_to_radio_type(bss_desc->bss_band); + int freq = + ieee80211_channel_to_frequency(bss_desc->channel, + radio); + struct ieee80211_channel *chan = + ieee80211_get_channel(priv->adapter->wiphy, freq); + + switch (ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + if (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) { + ht_cap->ht_cap.cap_info &= + cpu_to_le16 + (~IEEE80211_HT_CAP_SUP_WIDTH_20_40); + ht_cap->ht_cap.cap_info &= + cpu_to_le16(~IEEE80211_HT_CAP_SGI_40); + } + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + if (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) { + ht_cap->ht_cap.cap_info &= + cpu_to_le16 + (~IEEE80211_HT_CAP_SUP_WIDTH_20_40); + ht_cap->ht_cap.cap_info &= + cpu_to_le16(~IEEE80211_HT_CAP_SGI_40); + } + break; + } + } *buffer += sizeof(struct mwifiex_ie_types_htcap); ret_len += sizeof(struct mwifiex_ie_types_htcap); diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index ce4432c535f0..7f7e9de2db1c 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -95,18 +95,32 @@ u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type) /* This function maps IEEE HT secondary channel type to NL80211 channel type */ -u8 mwifiex_sec_chan_offset_to_chan_type(u8 second_chan_offset) +u8 mwifiex_get_chan_type(struct mwifiex_private *priv) { - switch (second_chan_offset) { - case IEEE80211_HT_PARAM_CHA_SEC_NONE: - return NL80211_CHAN_HT20; - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - return NL80211_CHAN_HT40PLUS; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - return NL80211_CHAN_HT40MINUS; - default: - return NL80211_CHAN_HT20; + struct mwifiex_channel_band channel_band; + int ret; + + ret = mwifiex_get_chan_info(priv, &channel_band); + + if (!ret) { + switch (channel_band.band_config.chan_width) { + case CHAN_BW_20MHZ: + if (IS_11N_ENABLED(priv)) + return NL80211_CHAN_HT20; + else + return NL80211_CHAN_NO_HT; + case CHAN_BW_40MHZ: + if (channel_band.band_config.chan2_offset == + SEC_CHAN_ABOVE) + return NL80211_CHAN_HT40PLUS; + else + return NL80211_CHAN_HT40MINUS; + default: + return NL80211_CHAN_HT20; + } } + + return NL80211_CHAN_HT20; } /* @@ -3937,7 +3951,6 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy, struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev); struct mwifiex_bssdescriptor *curr_bss; struct ieee80211_channel *chan; - u8 second_chan_offset; enum nl80211_channel_type chan_type; enum nl80211_band band; int freq; @@ -3954,10 +3967,7 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy, chan = ieee80211_get_channel(wiphy, freq); if (priv->ht_param_present) { - second_chan_offset = priv->assoc_resp_ht_param & - IEEE80211_HT_PARAM_CHA_SEC_OFFSET; - chan_type = mwifiex_sec_chan_offset_to_chan_type - (second_chan_offset); + chan_type = mwifiex_get_chan_type(priv); 
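[Editor's note] mwifiex_get_chan_type() above replaces the old secondary-offset-only mapping: the channel type is now derived from the band configuration the firmware reports via HostCmd_CMD_STA_CONFIGURE, which also lets a non-HT 20 MHz association be reported as such. A standalone sketch of the resulting mapping; the mwifiex enums are copied from the decl.h hunk, while the NL80211_CHAN_* values are stubbed so the example builds on its own:

#include <stdio.h>

/* Copied from the mwifiex/decl.h hunk above. */
enum mwifiex_chan_width {
	CHAN_BW_20MHZ = 0,
	CHAN_BW_10MHZ,
	CHAN_BW_40MHZ,
};

enum mwifiex_chan_offset {
	SEC_CHAN_NONE = 0,
	SEC_CHAN_ABOVE = 1,
	SEC_CHAN_5MHZ = 2,
	SEC_CHAN_BELOW = 3
};

/* Stand-ins for enum nl80211_channel_type, for a standalone build. */
enum chan_type { NO_HT, HT20, HT40PLUS, HT40MINUS };

static enum chan_type get_chan_type(enum mwifiex_chan_width width,
				    enum mwifiex_chan_offset offset,
				    int ht_enabled)
{
	switch (width) {
	case CHAN_BW_20MHZ:
		return ht_enabled ? HT20 : NO_HT;
	case CHAN_BW_40MHZ:
		return offset == SEC_CHAN_ABOVE ? HT40PLUS : HT40MINUS;
	default:
		return HT20;	/* driver's fallback */
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       get_chan_type(CHAN_BW_20MHZ, SEC_CHAN_NONE, 0),	/* no HT */
	       get_chan_type(CHAN_BW_40MHZ, SEC_CHAN_ABOVE, 1),	/* HT40+ */
	       get_chan_type(CHAN_BW_40MHZ, SEC_CHAN_BELOW, 1));/* HT40- */
	return 0;
}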
cfg80211_chandef_create(chandef, chan, chan_type); } else { cfg80211_chandef_create(chandef, chan, diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 874660052055..7014f440e6f8 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -1529,7 +1529,8 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number); adapter->fw_api_ver = (adapter->fw_release_number >> 16) & 0xff; - adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna); + adapter->number_of_antenna = + le16_to_cpu(hw_spec->number_of_antenna) & 0xf; if (le32_to_cpu(hw_spec->dot_11ac_dev_cap)) { adapter->is_hw_11ac_capable = true; diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h index 188e4c370836..46696ea0b23e 100644 --- a/drivers/net/wireless/marvell/mwifiex/decl.h +++ b/drivers/net/wireless/marvell/mwifiex/decl.h @@ -294,4 +294,21 @@ enum rdwr_status { RDWR_STATUS_DONE = 2 }; +enum mwifiex_chan_width { + CHAN_BW_20MHZ = 0, + CHAN_BW_10MHZ, + CHAN_BW_40MHZ, + CHAN_BW_80MHZ, + CHAN_BW_8080MHZ, + CHAN_BW_160MHZ, + CHAN_BW_5MHZ, +}; + +enum mwifiex_chan_offset { + SEC_CHAN_NONE = 0, + SEC_CHAN_ABOVE = 1, + SEC_CHAN_5MHZ = 2, + SEC_CHAN_BELOW = 3 +}; + #endif /* !_MWIFIEX_DECL_H_ */ diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 9c2cdef54074..c5dc518f768b 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -411,6 +411,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_TDLS_OPER 0x0122 #define HostCmd_CMD_FW_DUMP_EVENT 0x0125 #define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223 +#define HostCmd_CMD_STA_CONFIGURE 0x023f #define HostCmd_CMD_CHAN_REGION_CFG 0x0242 #define HostCmd_CMD_PACKET_AGGR_CTRL 0x0251 @@ -2285,6 +2286,11 @@ struct host_cmd_ds_pkt_aggr_ctrl { __le16 tx_aggr_align; } __packed; +struct host_cmd_ds_sta_configure { + __le16 action; + u8 tlv_buffer[0]; +} __packed; + struct host_cmd_ds_command { __le16 command; __le16 size; @@ -2361,6 +2367,7 @@ struct host_cmd_ds_command { struct host_cmd_ds_gtk_rekey_params rekey; struct host_cmd_ds_chan_region_cfg reg_cfg; struct host_cmd_ds_pkt_aggr_ctrl pkt_aggr_ctrl; + struct host_cmd_ds_sta_configure sta_cfg; } params; } __packed; diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 12e739950332..b6484582845a 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -943,13 +943,26 @@ int mwifiex_set_mac_address(struct mwifiex_private *priv, struct net_device *dev) { int ret; - u64 mac_addr; + u64 mac_addr, old_mac_addr; - if (priv->bss_type != MWIFIEX_BSS_TYPE_P2P) - goto done; + if (priv->bss_type == MWIFIEX_BSS_TYPE_ANY) + return -ENOTSUPP; mac_addr = ether_addr_to_u64(priv->curr_addr); - mac_addr |= BIT_ULL(MWIFIEX_MAC_LOCAL_ADMIN_BIT); + old_mac_addr = mac_addr; + + if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P) + mac_addr |= BIT_ULL(MWIFIEX_MAC_LOCAL_ADMIN_BIT); + + if (mwifiex_get_intf_num(priv->adapter, priv->bss_type) > 1) { + /* Set mac address based on bss_type/bss_num */ + mac_addr ^= BIT_ULL(priv->bss_type + 8); + mac_addr += priv->bss_num; + } + + if (mac_addr == old_mac_addr) + goto done; + u64_to_ether_addr(mac_addr, priv->curr_addr); /* Send request to firmware */ @@ -957,13 +970,14 @@ int 
mwifiex_set_mac_address(struct mwifiex_private *priv, HostCmd_ACT_GEN_SET, 0, NULL, true); if (ret) { + u64_to_ether_addr(old_mac_addr, priv->curr_addr); mwifiex_dbg(priv->adapter, ERROR, "set mac address failed: ret=%d\n", ret); return ret; } done: - memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN); + ether_addr_copy(dev->dev_addr, priv->curr_addr); return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 6b5539b1f4d8..9bde181700dc 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -517,6 +517,18 @@ enum mwifiex_iface_work_flags { MWIFIEX_IFACE_WORK_CARD_RESET, }; +struct mwifiex_band_config { + u8 chan_band:2; + u8 chan_width:2; + u8 chan2_offset:2; + u8 scan_mode:2; +} __packed; + +struct mwifiex_channel_band { + struct mwifiex_band_config band_config; + u8 channel; +}; + struct mwifiex_private { struct mwifiex_adapter *adapter; u8 bss_type; @@ -1280,6 +1292,19 @@ mwifiex_copy_rates(u8 *dest, u32 pos, u8 *src, int len) return pos; } +/* This function return interface number with the same bss_type. + */ +static inline u8 +mwifiex_get_intf_num(struct mwifiex_adapter *adapter, u8 bss_type) +{ + u8 i, num = 0; + + for (i = 0; i < adapter->priv_num; i++) + if (adapter->priv[i] && adapter->priv[i]->bss_type == bss_type) + num++; + return num; +} + /* * This function returns the correct private structure pointer based * upon the BSS type and BSS number. @@ -1544,7 +1569,7 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc); u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type); -u8 mwifiex_sec_chan_offset_to_chan_type(u8 second_chan_offset); +u8 mwifiex_get_chan_type(struct mwifiex_private *priv); struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, const char *name, @@ -1670,6 +1695,8 @@ void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, int cmd_type, struct mwifiex_ds_wakeup_reason *wakeup_reason); +int mwifiex_get_chan_info(struct mwifiex_private *priv, + struct mwifiex_channel_band *channel_band); int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv, struct host_cmd_ds_command *resp, struct host_cmd_ds_wakeup_reason *wakeup_reason); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index 211e47d8b318..4ed10cf82f9a 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -1898,6 +1898,25 @@ static int mwifiex_cmd_get_wakeup_reason(struct mwifiex_private *priv, return 0; } +static int mwifiex_cmd_get_chan_info(struct host_cmd_ds_command *cmd, + u16 cmd_action) +{ + struct host_cmd_ds_sta_configure *sta_cfg_cmd = &cmd->params.sta_cfg; + struct host_cmd_tlv_channel_band *tlv_band_channel = + (struct host_cmd_tlv_channel_band *)sta_cfg_cmd->tlv_buffer; + + cmd->command = cpu_to_le16(HostCmd_CMD_STA_CONFIGURE); + cmd->size = cpu_to_le16(sizeof(*sta_cfg_cmd) + + sizeof(*tlv_band_channel) + S_DS_GEN); + sta_cfg_cmd->action = cpu_to_le16(cmd_action); + memset(tlv_band_channel, 0, sizeof(*tlv_band_channel)); + tlv_band_channel->header.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST); + tlv_band_channel->header.len = cpu_to_le16(sizeof(*tlv_band_channel) - + sizeof(struct mwifiex_ie_types_header)); + + return 0; +} + /* This function check if the command is supported by firmware */ 
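[Editor's note] mwifiex_cmd_get_chan_info() above builds the HostCmd_CMD_STA_CONFIGURE body as a 16-bit action word followed by a single channel-band TLV whose length field counts only the payload after the type/len header. A byte-layout sketch under assumed sizes; the real driver uses __le16 fields, its own mwifiex_ie_types_header, and the kernel's TLV_TYPE_CHANNELBANDLIST constant (the numeric value below is illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed miniature of the mwifiex TLV framing. */
struct tlv_hdr {
	uint16_t type;
	uint16_t len;	/* payload bytes only, header excluded */
} __attribute__((packed));

struct tlv_channel_band {
	struct tlv_hdr hdr;
	uint8_t band_config;	/* chan_band/width/offset bitfields */
	uint8_t channel;
} __attribute__((packed));

#define TLV_TYPE_CHANNELBANDLIST 0x012a	/* illustrative value */

int main(void)
{
	uint8_t buf[32];
	uint16_t action = 0;	/* a GET, as with HostCmd_ACT_GEN_GET */
	struct tlv_channel_band tlv;

	memset(&tlv, 0, sizeof(tlv));
	tlv.hdr.type = TLV_TYPE_CHANNELBANDLIST;
	tlv.hdr.len = sizeof(tlv) - sizeof(struct tlv_hdr);

	memcpy(buf, &action, sizeof(action));
	memcpy(buf + sizeof(action), &tlv, sizeof(tlv));

	/* 2 (action) + 4 (TLV header) + 2 (payload) = 8 bytes */
	printf("command body: %zu bytes\n", sizeof(action) + sizeof(tlv));
	return 0;
}

On the GET path the firmware echoes the TLV back filled in, which is what mwifiex_ret_get_chan_info() copies into struct mwifiex_channel_band.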
static int mwifiex_is_cmd_supported(struct mwifiex_private *priv, u16 cmd_no) { @@ -2210,6 +2229,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, cmd_ptr->command = cpu_to_le16(cmd_no); cmd_ptr->size = cpu_to_le16(S_DS_GEN); break; + case HostCmd_CMD_STA_CONFIGURE: + ret = mwifiex_cmd_get_chan_info(cmd_ptr, cmd_action); + break; default: mwifiex_dbg(priv->adapter, ERROR, "PREP_CMD: unknown cmd- %#x\n", cmd_no); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 1bd4e13b8449..69e3b624adbb 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -1170,6 +1170,22 @@ static int mwifiex_ret_pkt_aggr_ctrl(struct mwifiex_private *priv, return 0; } +static int mwifiex_ret_get_chan_info(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp, + struct mwifiex_channel_band *channel_band) +{ + struct host_cmd_ds_sta_configure *sta_cfg_cmd = &resp->params.sta_cfg; + struct host_cmd_tlv_channel_band *tlv_band_channel; + + tlv_band_channel = + (struct host_cmd_tlv_channel_band *)sta_cfg_cmd->tlv_buffer; + memcpy(&channel_band->band_config, &tlv_band_channel->band_config, + sizeof(struct mwifiex_band_config)); + channel_band->channel = tlv_band_channel->channel; + + return 0; +} + /* * This function handles the command responses. * @@ -1393,6 +1409,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, case HostCmd_CMD_CHAN_REGION_CFG: ret = mwifiex_ret_chan_region_cfg(priv, resp); break; + case HostCmd_CMD_STA_CONFIGURE: + ret = mwifiex_ret_get_chan_info(priv, resp, data_buf); + break; default: mwifiex_dbg(adapter, ERROR, "CMD_RESP: unknown cmd response %#x\n", diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index a6077ab3efc3..5414b755cf82 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -146,7 +146,6 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, size_t beacon_ie_len; struct mwifiex_bss_priv *bss_priv = (void *)bss->priv; const struct cfg80211_bss_ies *ies; - int ret; rcu_read_lock(); ies = rcu_dereference(bss->ies); @@ -190,48 +189,7 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_SPECTRUM_MGMT) bss_desc->sensed_11h = true; - ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc); - if (ret) - return ret; - - /* Update HT40 capability based on current channel information */ - if (bss_desc->bcn_ht_oper && bss_desc->bcn_ht_cap) { - u8 ht_param = bss_desc->bcn_ht_oper->ht_param; - u8 radio = mwifiex_band_to_radio_type(bss_desc->bss_band); - struct ieee80211_supported_band *sband = - priv->wdev.wiphy->bands[radio]; - int freq = ieee80211_channel_to_frequency(bss_desc->channel, - radio); - struct ieee80211_channel *chan = - ieee80211_get_channel(priv->adapter->wiphy, freq); - - switch (ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - if (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) { - sband->ht_cap.cap &= - ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; - sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40; - } else { - sband->ht_cap.cap |= - IEEE80211_HT_CAP_SUP_WIDTH_20_40 | - IEEE80211_HT_CAP_SGI_40; - } - break; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - if (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) { - sband->ht_cap.cap &= - 
~IEEE80211_HT_CAP_SUP_WIDTH_20_40; - sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40; - } else { - sband->ht_cap.cap |= - IEEE80211_HT_CAP_SUP_WIDTH_20_40 | - IEEE80211_HT_CAP_SGI_40; - } - break; - } - } - - return 0; + return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc); } void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv) @@ -1523,3 +1481,15 @@ int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, return status; } + +int mwifiex_get_chan_info(struct mwifiex_private *priv, + struct mwifiex_channel_band *channel_band) +{ + int status = 0; + + status = mwifiex_send_cmd(priv, HostCmd_CMD_STA_CONFIGURE, + HostCmd_ACT_GEN_GET, 0, channel_band, + MWIFIEX_SYNC_CMD); + + return status; +} diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig index 92ce4062f307..ff5fc8987b0a 100644 --- a/drivers/net/wireless/mediatek/Kconfig +++ b/drivers/net/wireless/mediatek/Kconfig @@ -5,8 +5,8 @@ config WLAN_VENDOR_MEDIATEK If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. if WLAN_VENDOR_MEDIATEK diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index c121b502a462..a38d05dea599 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -64,13 +64,13 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev) if (!dir) return NULL; - debugfs_create_u8("led_pin", S_IRUSR | S_IWUSR, dir, &dev->led_pin); - debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg); - debugfs_create_file_unsafe("regval", S_IRUSR | S_IWUSR, dir, dev, + debugfs_create_u8("led_pin", 0600, dir, &dev->led_pin); + debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg); + debugfs_create_file_unsafe("regval", 0600, dir, dev, &fops_regval); - debugfs_create_blob("eeprom", S_IRUSR, dir, &dev->eeprom); + debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom); if (dev->otp.data) - debugfs_create_blob("otp", S_IRUSR, dir, &dev->otp); + debugfs_create_blob("otp", 0400, dir, &dev->otp); debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read); return dir; diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 85f8d324ebf8..4f30cdcd2b53 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -119,6 +119,52 @@ static int mt76_led_init(struct mt76_dev *dev) return devm_led_classdev_register(dev->dev, &dev->led_cdev); } +static void mt76_init_stream_cap(struct mt76_dev *dev, + struct ieee80211_supported_band *sband, + bool vht) +{ + struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap; + int i, nstream = __sw_hweight8(dev->antenna_mask); + struct ieee80211_sta_vht_cap *vht_cap; + u16 mcs_map = 0; + + if (nstream > 1) + ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC; + else + ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC; + + for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) + ht_cap->mcs.rx_mask[i] = i < nstream ? 
0xff : 0; + + if (!vht) + return; + + vht_cap = &sband->vht_cap; + if (nstream > 1) + vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; + else + vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC; + + for (i = 0; i < 8; i++) { + if (i < nstream) + mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2)); + else + mcs_map |= + (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2)); + } + vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); + vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); +} + +void mt76_set_stream_caps(struct mt76_dev *dev, bool vht) +{ + if (dev->cap.has_2ghz) + mt76_init_stream_cap(dev, &dev->sband_2g.sband, false); + if (dev->cap.has_5ghz) + mt76_init_stream_cap(dev, &dev->sband_5g.sband, vht); +} +EXPORT_SYMBOL_GPL(mt76_set_stream_caps); + static int mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband, const struct ieee80211_channel *chan, int n_chan, @@ -128,7 +174,6 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband, struct ieee80211_sta_ht_cap *ht_cap; struct ieee80211_sta_vht_cap *vht_cap; void *chanlist; - u16 mcs_map; int size; size = n_chan * sizeof(*chan); @@ -153,34 +198,20 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband, IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | - IEEE80211_HT_CAP_TX_STBC | (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); - ht_cap->mcs.rx_mask[0] = 0xff; - ht_cap->mcs.rx_mask[1] = 0xff; ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; + mt76_init_stream_cap(dev, sband, vht); + if (!vht) return 0; vht_cap = &sband->vht_cap; vht_cap->vht_supported = true; - - mcs_map = (IEEE80211_VHT_MCS_SUPPORT_0_9 << (0 * 2)) | - (IEEE80211_VHT_MCS_SUPPORT_0_9 << (1 * 2)) | - (IEEE80211_VHT_MCS_NOT_SUPPORTED << (2 * 2)) | - (IEEE80211_VHT_MCS_NOT_SUPPORTED << (3 * 2)) | - (IEEE80211_VHT_MCS_NOT_SUPPORTED << (4 * 2)) | - (IEEE80211_VHT_MCS_NOT_SUPPORTED << (5 * 2)) | - (IEEE80211_VHT_MCS_NOT_SUPPORTED << (6 * 2)) | - (IEEE80211_VHT_MCS_NOT_SUPPORTED << (7 * 2)); - - vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); - vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC | - IEEE80211_VHT_CAP_TXSTBC | IEEE80211_VHT_CAP_RXSTBC_1 | IEEE80211_VHT_CAP_SHORT_GI_80; @@ -262,6 +293,9 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; + wiphy->available_antennas_tx = dev->antenna_mask; + wiphy->available_antennas_rx = dev->antenna_mask; + hw->txq_data_size = sizeof(struct mt76_txq); hw->max_tx_fragments = 16; diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index d2ce15093edd..065ff78059c3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -253,6 +253,8 @@ struct mt76_dev { u32 rev; unsigned long state; + u8 antenna_mask; + struct mt76_sband sband_2g; struct mt76_sband sband_5g; struct debugfs_blob_wrapper eeprom; @@ -423,6 +425,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw, void mt76_set_channel(struct mt76_dev *dev); int mt76_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey); +void mt76_set_stream_caps(struct mt76_dev *dev, bool vht); int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid, u16 ssn, u8 size); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index e62131b88102..783b8122ec3c 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -180,6 +180,7 @@ int mt76x2_eeprom_init(struct mt76x2_dev *dev); int mt76x2_apply_calibration_data(struct mt76x2_dev *dev, int channel); void mt76x2_set_tx_ackto(struct mt76x2_dev *dev); +void mt76x2_phy_set_antenna(struct mt76x2_dev *dev); int mt76x2_phy_start(struct mt76x2_dev *dev); int mt76x2_phy_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c index 612feb593d7d..955ea3e692dd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c @@ -123,11 +123,11 @@ void mt76x2_init_debugfs(struct mt76x2_dev *dev) if (!dir) return; - debugfs_create_u8("temperature", S_IRUSR, dir, &dev->cal.temp); - debugfs_create_bool("tpc", S_IRUSR | S_IWUSR, dir, &dev->enable_tpc); + debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp); + debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc); - debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat); - debugfs_create_file("dfs_stats", S_IRUSR, dir, dev, &fops_dfs_stat); + debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); + debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat); debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir, read_txpower); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c index 9c9bf3e785ba..5bb50027c1e8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c @@ -222,11 +222,10 @@ static int mt76x2_eeprom_load(struct mt76x2_dev *dev) { void *efuse; - int len = MT7662_EEPROM_SIZE; bool found; int ret; - ret = mt76_eeprom_init(&dev->mt76, len); + ret = mt76_eeprom_init(&dev->mt76, MT7662_EEPROM_SIZE); if (ret < 0) return ret; @@ -234,14 +233,15 @@ mt76x2_eeprom_load(struct mt76x2_dev *dev) if (found) found = !mt76x2_check_eeprom(dev); - dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL); - dev->mt76.otp.size = len; + dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, MT7662_EEPROM_SIZE, + GFP_KERNEL); + dev->mt76.otp.size = MT7662_EEPROM_SIZE; if (!dev->mt76.otp.data) return -ENOMEM; efuse = dev->mt76.otp.data; - if (mt76x2_get_efuse_data(dev, efuse, len)) + if (mt76x2_get_efuse_data(dev, efuse, MT7662_EEPROM_SIZE)) goto out; if (found) { @@ -249,7 +249,7 @@ mt76x2_eeprom_load(struct mt76x2_dev *dev) } else { /* FIXME: check if efuse data is complete */ found = true; - memcpy(dev->mt76.eeprom.data, efuse, len); + memcpy(dev->mt76.eeprom.data, efuse, MT7662_EEPROM_SIZE); } out: diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index 9dbf94947324..934c331d995e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -857,6 +857,9 @@ int mt76x2_register_device(struct mt76x2_dev *dev) dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; + /* init antenna configuration */ + dev->mt76.antenna_mask = 3; + ret = mt76_register_device(&dev->mt76, true, mt76x2_rates, ARRAY_SIZE(mt76x2_rates)); if (ret) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index 7ea3d841918e..d18315652583 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -198,8 +198,8 @@ void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi, ccmp_pn[5] = pn >> 24; ccmp_pn[6] = pn >> 32; ccmp_pn[7] = pn >> 40; - txwi->iv = *((u32 *) &ccmp_pn[0]); - txwi->eiv = *((u32 *) &ccmp_pn[1]); + txwi->iv = *((__le32 *)&ccmp_pn[0]); + txwi->eiv = *((__le32 *)&ccmp_pn[1]); } spin_lock_bh(&dev->mt76.lock); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c index 205043b470b2..73c127f92613 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c @@ -336,6 +336,17 @@ mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, int idx = key->keyidx; int ret; + /* fall back to sw encryption for unsupported ciphers */ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + break; + default: + return -EOPNOTSUPP; + } + /* * The hardware does not support per-STA RX GTK, fall back * to software mode for these. @@ -549,6 +560,40 @@ mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) return 0; } +static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, + u32 rx_ant) +{ + struct mt76x2_dev *dev = hw->priv; + + if (!tx_ant || tx_ant > 3 || tx_ant != rx_ant) + return -EINVAL; + + mutex_lock(&dev->mutex); + + dev->chainmask = (tx_ant == 3) ? 0x202 : 0x101; + dev->mt76.antenna_mask = tx_ant; + + mt76_set_stream_caps(&dev->mt76, true); + mt76x2_phy_set_antenna(dev); + + mutex_unlock(&dev->mutex); + + return 0; +} + +static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, + u32 *rx_ant) +{ + struct mt76x2_dev *dev = hw->priv; + + mutex_lock(&dev->mutex); + *tx_ant = dev->mt76.antenna_mask; + *rx_ant = dev->mt76.antenna_mask; + mutex_unlock(&dev->mutex); + + return 0; +} + const struct ieee80211_ops mt76x2_ops = { .tx = mt76x2_tx, .start = mt76x2_start, @@ -573,5 +618,7 @@ const struct ieee80211_ops mt76x2_ops = { .set_coverage_class = mt76x2_set_coverage_class, .get_survey = mt76_get_survey, .set_tim = mt76x2_set_tim, + .set_antenna = mt76x2_set_antenna, + .get_antenna = mt76x2_get_antenna, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c index 15820b11f9db..dfd36d736b06 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c @@ -187,7 +187,7 @@ mt76pci_load_firmware(struct mt76x2_dev *dev) { const struct firmware *fw; const struct mt76x2_fw_header *hdr; - int i, len, ret; + int len, ret; __le32 *cur; u32 offset, val; @@ -240,16 +240,7 @@ mt76pci_load_firmware(struct mt76x2_dev *dev) /* trigger firmware */ mt76_wr(dev, MT_MCU_INT_LEVEL, 2); - for (i = 200; i > 0; i--) { - val = mt76_rr(dev, MT_MCU_COM_REG0); - - if (val & 1) - break; - - msleep(10); - } - - if (!i) { + if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 200)) { dev_err(dev->mt76.dev, "Firmware failed to start\n"); release_firmware(fw); return -ETIMEDOUT; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index 5b742749d5de..fcc37eb7ce0b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -361,30 +361,53 @@ mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper) 
primary_upper); } -static void -mt76x2_set_rx_chains(struct mt76x2_dev *dev) +void mt76x2_phy_set_antenna(struct mt76x2_dev *dev) { u32 val; val = mt76_rr(dev, MT_BBP(AGC, 0)); - val &= ~(BIT(3) | BIT(4)); + val &= ~(BIT(4) | BIT(1)); + switch (dev->mt76.antenna_mask) { + case 1: + /* disable mac DAC control */ + mt76_clear(dev, MT_BBP(IBI, 9), BIT(11)); + mt76_clear(dev, MT_BBP(TXBE, 5), 3); + mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0x3); + mt76_rmw_field(dev, MT_BBP(CORE, 32), GENMASK(21, 20), 2); + /* disable DAC 1 */ + mt76_rmw_field(dev, MT_BBP(CORE, 33), GENMASK(12, 9), 4); - if (dev->chainmask & BIT(1)) - val |= BIT(3); + val &= ~(BIT(3) | BIT(0)); + break; + case 2: + /* disable mac DAC control */ + mt76_clear(dev, MT_BBP(IBI, 9), BIT(11)); + mt76_rmw_field(dev, MT_BBP(TXBE, 5), 3, 1); + mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0xc); + mt76_rmw_field(dev, MT_BBP(CORE, 32), GENMASK(21, 20), 1); + /* disable DAC 0 */ + mt76_rmw_field(dev, MT_BBP(CORE, 33), GENMASK(12, 9), 1); + + val &= ~BIT(3); + val |= BIT(0); + break; + case 3: + default: + /* enable mac DAC control */ + mt76_set(dev, MT_BBP(IBI, 9), BIT(11)); + mt76_set(dev, MT_BBP(TXBE, 5), 3); + mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0xf); + mt76_clear(dev, MT_BBP(CORE, 32), GENMASK(21, 20)); + mt76_clear(dev, MT_BBP(CORE, 33), GENMASK(12, 9)); + val &= ~BIT(0); + val |= BIT(3); + break; + } mt76_wr(dev, MT_BBP(AGC, 0), val); } static void -mt76x2_set_tx_dac(struct mt76x2_dev *dev) -{ - if (dev->chainmask & BIT(1)) - mt76_set(dev, MT_BBP(TXBE, 5), 3); - else - mt76_clear(dev, MT_BBP(TXBE, 5), 3); -} - -static void mt76x2_get_agc_gain(struct mt76x2_dev *dev, u8 *dest) { dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN); @@ -585,10 +608,8 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev, mt76x2_configure_tx_delay(dev, band, bw); mt76x2_phy_set_txpower(dev); - mt76x2_set_rx_chains(dev); mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1); mt76x2_phy_set_bw(dev, chandef->width, ch_group_index); - mt76x2_set_tx_dac(dev); mt76_rmw(dev, MT_EXT_CCA_CFG, (MT_EXT_CCA_CFG_CCA0 | @@ -604,6 +625,8 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev, mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true); + mt76x2_phy_set_antenna(dev); + /* Enable LDPC Rx */ if (mt76xx_rev(dev) >= MT76XX_REV_E3) mt76_set(dev, MT_BBP(RXO, 13), BIT(10)); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h index ce3ab85c8b0f..b9c334d9e5b8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h @@ -321,6 +321,8 @@ #define MT_TX_PWR_CFG_2 0x131c #define MT_TX_PWR_CFG_3 0x1320 #define MT_TX_PWR_CFG_4 0x1324 +#define MT_TX_PIN_CFG 0x1328 +#define MT_TX_PIN_CFG_TXANT GENMASK(3, 0) #define MT_TX_BAND_CFG 0x132c #define MT_TX_BAND_CFG_UPPER_40M BIT(0) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c index 534e4bf9a34c..e46eafc4c436 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c @@ -36,9 +36,12 @@ void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, msta = (struct mt76x2_sta *) control->sta->drv_priv; wcid = &msta->wcid; + /* sw encrypted frames */ + if (!info->control.hw_key && wcid->hw_key_idx != -1) + control->sta = NULL; } - if (vif || (!info->control.hw_key && wcid->hw_key_idx != -1)) { + if (vif && !control->sta) { 
struct mt76x2_vif *mvif; mvif = (struct mt76x2_vif *) vif->drv_priv; diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c index fc008475a03b..991a6a729b1e 100644 --- a/drivers/net/wireless/mediatek/mt7601u/debugfs.c +++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c @@ -160,13 +160,11 @@ void mt7601u_init_debugfs(struct mt7601u_dev *dev) if (!dir) return; - debugfs_create_u8("temperature", S_IRUSR, dir, &dev->raw_temp); - debugfs_create_u32("temp_mode", S_IRUSR, dir, &dev->temp_mode); - - debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg); - debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev, - &fops_regval); - debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat); - debugfs_create_file("eeprom_param", S_IRUSR, dir, dev, - &fops_eeprom_param); + debugfs_create_u8("temperature", 0400, dir, &dev->raw_temp); + debugfs_create_u32("temp_mode", 0400, dir, &dev->temp_mode); + + debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg); + debugfs_create_file("regval", 0600, dir, dev, &fops_regval); + debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); + debugfs_create_file("eeprom_param", 0400, dir, dev, &fops_eeprom_param); } diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c index da6faea092d6..76117b402880 100644 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c @@ -19,6 +19,7 @@ #include <asm/unaligned.h> #include "mt7601u.h" #include "eeprom.h" +#include "mac.h" static bool field_valid(u8 val) @@ -74,7 +75,7 @@ static int mt7601u_efuse_physical_size_check(struct mt7601u_dev *dev) { const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16); - u8 data[map_reads * 16]; + u8 data[round_up(MT_EFUSE_USAGE_MAP_SIZE, 16)]; int ret, i; u32 start = 0, end = 0, cnt_free; @@ -134,27 +135,6 @@ mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom) "Error: device has more than 1 RX/TX stream!\n"); } -static int -mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *eeprom) -{ - const void *src = eeprom + MT_EE_MAC_ADDR; - - ether_addr_copy(dev->macaddr, src); - - if (!is_valid_ether_addr(dev->macaddr)) { - eth_random_addr(dev->macaddr); - dev_info(dev->dev, - "Invalid MAC address, using random address %pM\n", - dev->macaddr); - } - - mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr)); - mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) | - FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); - - return 0; -} - static void mt7601u_set_channel_target_power(struct mt7601u_dev *dev, u8 *eeprom, u8 max_pwr) { @@ -400,7 +380,7 @@ mt7601u_eeprom_init(struct mt7601u_dev *dev) dev_info(dev->dev, "EEPROM ver:%02hhx fae:%02hhx\n", eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]); - mt7601u_set_macaddr(dev, eeprom); + mt7601u_set_macaddr(dev, eeprom + MT_EE_MAC_ADDR); mt7601u_set_chip_cap(dev, eeprom); mt7601u_set_channel_power(dev, eeprom); mt7601u_set_country_reg(dev, eeprom); diff --git a/drivers/net/wireless/mediatek/mt7601u/initvals.h b/drivers/net/wireless/mediatek/mt7601u/initvals.h index ec11ff66969d..2dc6b68e7fb9 100644 --- a/drivers/net/wireless/mediatek/mt7601u/initvals.h +++ b/drivers/net/wireless/mediatek/mt7601u/initvals.h @@ -139,6 +139,7 @@ static const struct mt76_reg_pair mac_common_vals[] = { { MT_TXOP_HLDR_ET, 0x00000002 }, { MT_XIFS_TIME_CFG, 0x33a41010 }, { MT_PWR_PIN_CFG, 0x00000000 }, + { MT_PN_PAD_MODE, 0x00000001 }, }; 
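
Side note on the mt7601u/eeprom.c hunk above: replacing the variable-length array u8 data[map_reads * 16] with u8 data[round_up(MT_EFUSE_USAGE_MAP_SIZE, 16)] turns a runtime-sized stack buffer into a compile-time constant one, and the two sizes are always equal since map_reads is DIV_ROUND_UP of the same quantity. A minimal userspace sketch of the arithmetic, assuming the macros behave like their linux/kernel.h counterparts (MT_EFUSE_USAGE_MAP_SIZE is a stand-in value here, not the driver's real one):

	#include <assert.h>
	#include <stdio.h>

	/* local stand-ins for the linux/kernel.h helpers (y must be a power of 2) */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define round_up(x, y)		((((x) - 1) | ((y) - 1)) + 1)

	#define MT_EFUSE_USAGE_MAP_SIZE	29	/* placeholder, not the driver's value */

	int main(void)
	{
		const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16);

		/* the old VLA size and the new compile-time size always agree */
		assert(map_reads * 16 == round_up(MT_EFUSE_USAGE_MAP_SIZE, 16));
		printf("efuse scratch buffer: %d bytes\n", map_reads * 16);
		return 0;
	}
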
static const struct mt76_reg_pair mac_chip_vals[] = { diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c index d6dc59bb00df..d55d7040a56d 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mac.c +++ b/drivers/net/wireless/mediatek/mt7601u/mac.c @@ -16,6 +16,22 @@ #include "trace.h" #include <linux/etherdevice.h> +void mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *addr) +{ + ether_addr_copy(dev->macaddr, addr); + + if (!is_valid_ether_addr(dev->macaddr)) { + eth_random_addr(dev->macaddr); + dev_info(dev->dev, + "Invalid MAC address, using random address %pM\n", + dev->macaddr); + } + + mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr)); + mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) | + FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); +} + static void mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate) { @@ -464,8 +480,16 @@ u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb, if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) { status->flag |= RX_FLAG_DECRYPTED; - status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; + status->flag |= RX_FLAG_MMIC_STRIPPED; + status->flag |= RX_FLAG_MIC_STRIPPED; + status->flag |= RX_FLAG_ICV_STRIPPED; + status->flag |= RX_FLAG_IV_STRIPPED; } + /* let mac80211 take care of PN validation since apparently + * the hardware does not support it + */ + if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_PN_LEN)) + status->flag &= ~RX_FLAG_IV_STRIPPED; status->chains = BIT(0); rssi = mt7601u_phy_get_rssi(dev, rxwi, rate); diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.h b/drivers/net/wireless/mediatek/mt7601u/mac.h index 2c22d63c63a2..b7aa24656d0e 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mac.h +++ b/drivers/net/wireless/mediatek/mt7601u/mac.h @@ -174,5 +174,6 @@ u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev, struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev); void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat); +void mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *addr); #endif diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c index 43ebd460ba86..3c9ea40d9584 100644 --- a/drivers/net/wireless/mediatek/mt7601u/main.c +++ b/drivers/net/wireless/mediatek/mt7601u/main.c @@ -64,6 +64,9 @@ static int mt7601u_add_interface(struct ieee80211_hw *hw, */ mvif->idx = idx; + if (!ether_addr_equal(dev->macaddr, vif->addr)) + mt7601u_set_macaddr(dev, vif->addr); + if (dev->wcid_mask[wcid / BITS_PER_LONG] & BIT(wcid % BITS_PER_LONG)) return -ENOSPC; dev->wcid_mask[wcid / BITS_PER_LONG] |= BIT(wcid % BITS_PER_LONG); diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c index 65a8004418ea..61705f679856 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mcu.c +++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c @@ -58,8 +58,7 @@ static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev, trace_mt_mcu_msg_send(dev, skb, csum, need_resp); } -static struct sk_buff * -mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len) +static struct sk_buff *mt7601u_mcu_msg_alloc(const void *data, int len) { struct sk_buff *skb; @@ -171,7 +170,7 @@ static int mt7601u_mcu_function_select(struct mt7601u_dev *dev, .value = cpu_to_le32(val), }; - skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg)); if (!skb) return -ENOMEM; 
return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5); @@ -208,7 +207,7 @@ mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val) .value = cpu_to_le32(val), }; - skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg)); if (!skb) return -ENOMEM; return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true); @@ -421,7 +420,7 @@ static int mt7601u_load_firmware(struct mt7601u_dev *dev) MT_USB_DMA_CFG_TX_BULK_EN)); if (firmware_running(dev)) - return 0; + return firmware_request_cache(dev->dev, MT7601U_FIRMWARE); ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev); if (ret) diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h index c7ec40475a5f..9233744451a9 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h +++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h @@ -147,7 +147,8 @@ enum { * @rx_lock: protects @rx_q. * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi. * @mutex: ensures exclusive access from mac80211 callbacks. - * @vendor_req_mutex: protects @vend_buf, ensures atomicity of split writes. + * @vendor_req_mutex: protects @vend_buf, ensures atomicity of read/write + * accesses * @reg_atomic_mutex: ensures atomicity of indirect register accesses * (accesses to RF and BBP). * @hw_atomic_mutex: ensures exclusive access to HW during critical diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.h b/drivers/net/wireless/mediatek/mt7601u/trace.h index 289897300ef0..82c8898b9076 100644 --- a/drivers/net/wireless/mediatek/mt7601u/trace.h +++ b/drivers/net/wireless/mediatek/mt7601u/trace.h @@ -34,7 +34,7 @@ #define REG_PR_FMT "%04x=%08x" #define REG_PR_ARG __entry->reg, __entry->val -DECLARE_EVENT_CLASS(dev_reg_evt, +DECLARE_EVENT_CLASS(dev_reg_evtu, TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val), TP_ARGS(dev, reg, val), TP_STRUCT__entry( @@ -51,12 +51,12 @@ DECLARE_EVENT_CLASS(dev_reg_evt, ) ); -DEFINE_EVENT(dev_reg_evt, reg_read, +DEFINE_EVENT(dev_reg_evtu, reg_read, TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val), TP_ARGS(dev, reg, val) ); -DEFINE_EVENT(dev_reg_evt, reg_write, +DEFINE_EVENT(dev_reg_evtu, reg_write, TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val), TP_ARGS(dev, reg, val) ); diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c index b9e4f6793138..d8b7863f7926 100644 --- a/drivers/net/wireless/mediatek/mt7601u/usb.c +++ b/drivers/net/wireless/mediatek/mt7601u/usb.c @@ -129,15 +129,14 @@ void mt7601u_vendor_reset(struct mt7601u_dev *dev) MT_VEND_DEV_MODE_RESET, 0, NULL, 0); } -u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset) +/* should be called with vendor_req_mutex held */ +static u32 __mt7601u_rr(struct mt7601u_dev *dev, u32 offset) { int ret; u32 val = ~0; WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset); - mutex_lock(&dev->vendor_req_mutex); - ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN, 0, offset, dev->vend_buf, MT_VEND_BUF); if (ret == MT_VEND_BUF) @@ -146,25 +145,41 @@ u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset) dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n", ret, offset); - mutex_unlock(&dev->vendor_req_mutex); - trace_reg_read(dev, offset, val); return val; } -int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req, - const u16 offset, const u32 val) +u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset) { - int ret; + u32 ret; mutex_lock(&dev->vendor_req_mutex); + ret = 
__mt7601u_rr(dev, offset); + mutex_unlock(&dev->vendor_req_mutex); - ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT, - val & 0xffff, offset, NULL, 0); + return ret; +} + +/* should be called with vendor_req_mutex held */ +static int __mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req, + const u16 offset, const u32 val) +{ + int ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT, + val & 0xffff, offset, NULL, 0); if (!ret) ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT, val >> 16, offset + 2, NULL, 0); + trace_reg_write(dev, offset, val); + return ret; +} + +int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req, + const u16 offset, const u32 val) +{ + int ret; + mutex_lock(&dev->vendor_req_mutex); + ret = __mt7601u_vendor_single_wr(dev, req, offset, val); mutex_unlock(&dev->vendor_req_mutex); return ret; @@ -175,23 +190,30 @@ void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val) WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset); mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val); - trace_reg_write(dev, offset, val); } u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val) { - val |= mt7601u_rr(dev, offset) & ~mask; - mt7601u_wr(dev, offset, val); + mutex_lock(&dev->vendor_req_mutex); + val |= __mt7601u_rr(dev, offset) & ~mask; + __mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val); + mutex_unlock(&dev->vendor_req_mutex); + return val; } u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val) { - u32 reg = mt7601u_rr(dev, offset); + u32 reg; + mutex_lock(&dev->vendor_req_mutex); + reg = __mt7601u_rr(dev, offset); val |= reg & ~mask; if (reg != val) - mt7601u_wr(dev, offset, val); + __mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, + offset, val); + mutex_unlock(&dev->vendor_req_mutex); + return val; } diff --git a/drivers/net/wireless/quantenna/Kconfig b/drivers/net/wireless/quantenna/Kconfig index 30943656e989..de84ce125c26 100644 --- a/drivers/net/wireless/quantenna/Kconfig +++ b/drivers/net/wireless/quantenna/Kconfig @@ -5,8 +5,8 @@ config WLAN_VENDOR_QUANTENNA If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. 
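
The mt7601u/usb.c refactor above splits mt7601u_rr() and mt7601u_vendor_single_wr() into locked wrappers around unlocked __-prefixed helpers so that mt7601u_rmw() and mt7601u_rmc() can hold vendor_req_mutex across the entire read-modify-write cycle, instead of locking the read and the write separately and letting another register access slip in between. A minimal pthread sketch of the same pattern (the names mirror the driver's but the bodies are illustrative, not the real USB vendor requests):

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	static pthread_mutex_t vendor_req_mutex = PTHREAD_MUTEX_INITIALIZER;
	static uint32_t fake_reg;	/* stands in for a device register */

	/* unlocked primitives; callers must hold vendor_req_mutex, like the
	 * driver's __mt7601u_rr() and __mt7601u_vendor_single_wr() */
	static uint32_t __reg_rr(void)		{ return fake_reg; }
	static void __reg_wr(uint32_t val)	{ fake_reg = val; }

	/* read-modify-write under one critical section: no other register
	 * access can interleave between the read and the write */
	static uint32_t reg_rmw(uint32_t mask, uint32_t val)
	{
		pthread_mutex_lock(&vendor_req_mutex);
		val |= __reg_rr() & ~mask;
		__reg_wr(val);
		pthread_mutex_unlock(&vendor_req_mutex);
		return val;
	}

	int main(void)
	{
		__reg_wr(0xffff0000);
		printf("rmw -> %08x\n", reg_rmw(0x000000ff, 0x5a)); /* ffff005a */
		return 0;
	}
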
if WLAN_VENDOR_QUANTENNA diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h index 56e5fed92a2a..0a1604683bab 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/bus.h +++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h @@ -59,8 +59,9 @@ struct qtnf_bus { char fwname[32]; struct napi_struct mux_napi; struct net_device mux_dev; - struct completion request_firmware_complete; + struct completion firmware_init_complete; struct workqueue_struct *workqueue; + struct work_struct fw_work; struct work_struct event_work; struct mutex bus_lock; /* lock during command/event processing */ struct dentry *dbg_dir; diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 6f6190964320..f117904d9120 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -127,7 +127,7 @@ static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_bus_priv *priv) spin_unlock_irqrestore(&priv->irq_lock, flags); } -static int qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv) +static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv) { struct pci_dev *pdev = priv->pdev; @@ -148,8 +148,6 @@ static int qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv) pr_warn("legacy PCIE interrupts enabled\n"); pci_intx(pdev, 1); } - - return 0; } static void qtnf_deassert_intx(struct qtnf_pcie_bus_priv *priv) @@ -162,6 +160,17 @@ static void qtnf_deassert_intx(struct qtnf_pcie_bus_priv *priv) qtnf_non_posted_write(cfg, reg); } +static void qtnf_reset_card(struct qtnf_pcie_bus_priv *priv) +{ + const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET); + void __iomem *reg = priv->sysctl_bar + + QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET; + + qtnf_non_posted_write(data, reg); + msleep(QTN_EP_RESET_WAIT_MS); + pci_restore_state(priv->pdev); +} + static void qtnf_ipc_gen_ep_int(void *arg) { const struct qtnf_pcie_bus_priv *priv = arg; @@ -478,10 +487,11 @@ static int alloc_rx_buffers(struct qtnf_pcie_bus_priv *priv) } /* all rx/tx activity should have ceased before calling this function */ -static void free_xfer_buffers(void *data) +static void qtnf_free_xfer_buffers(struct qtnf_pcie_bus_priv *priv) { - struct qtnf_pcie_bus_priv *priv = (struct qtnf_pcie_bus_priv *)data; + struct qtnf_tx_bd *txbd; struct qtnf_rx_bd *rxbd; + struct sk_buff *skb; dma_addr_t paddr; int i; @@ -489,19 +499,26 @@ static void free_xfer_buffers(void *data) for (i = 0; i < priv->rx_bd_num; i++) { if (priv->rx_skb && priv->rx_skb[i]) { rxbd = &priv->rx_bd_vbase[i]; + skb = priv->rx_skb[i]; paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h), le32_to_cpu(rxbd->addr)); pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE, PCI_DMA_FROMDEVICE); - - dev_kfree_skb_any(priv->rx_skb[i]); + dev_kfree_skb_any(skb); + priv->rx_skb[i] = NULL; } } /* free tx buffers */ for (i = 0; i < priv->tx_bd_num; i++) { if (priv->tx_skb && priv->tx_skb[i]) { - dev_kfree_skb_any(priv->tx_skb[i]); + txbd = &priv->tx_bd_vbase[i]; + skb = priv->tx_skb[i]; + paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h), + le32_to_cpu(txbd->addr)); + pci_unmap_single(priv->pdev, paddr, skb->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_any(skb); priv->tx_skb[i] = NULL; } } @@ -937,6 +954,98 @@ static const struct qtnf_bus_ops qtnf_pcie_bus_ops = { .data_rx_stop = qtnf_pcie_data_rx_stop, }; +static int qtnf_dbg_mps_show(struct seq_file *s, void *data) +{ + struct qtnf_bus *bus = dev_get_drvdata(s->private); + struct qtnf_pcie_bus_priv *priv 
= get_bus_priv(bus); + + seq_printf(s, "%d\n", priv->mps); + + return 0; +} + +static int qtnf_dbg_msi_show(struct seq_file *s, void *data) +{ + struct qtnf_bus *bus = dev_get_drvdata(s->private); + struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + + seq_printf(s, "%u\n", priv->msi_enabled); + + return 0; +} + +static int qtnf_dbg_irq_stats(struct seq_file *s, void *data) +{ + struct qtnf_bus *bus = dev_get_drvdata(s->private); + struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + u32 reg = readl(PCIE_HDP_INT_EN(priv->pcie_reg_base)); + u32 status; + + seq_printf(s, "pcie_irq_count(%u)\n", priv->pcie_irq_count); + seq_printf(s, "pcie_irq_tx_count(%u)\n", priv->pcie_irq_tx_count); + status = reg & PCIE_HDP_INT_TX_BITS; + seq_printf(s, "pcie_irq_tx_status(%s)\n", + (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS"); + seq_printf(s, "pcie_irq_rx_count(%u)\n", priv->pcie_irq_rx_count); + status = reg & PCIE_HDP_INT_RX_BITS; + seq_printf(s, "pcie_irq_rx_status(%s)\n", + (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS"); + seq_printf(s, "pcie_irq_uf_count(%u)\n", priv->pcie_irq_uf_count); + status = reg & PCIE_HDP_INT_HHBM_UF; + seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n", + (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS"); + + return 0; +} + +static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data) +{ + struct qtnf_bus *bus = dev_get_drvdata(s->private); + struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + + seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count); + seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count); + seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done); + seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req); + + seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index); + seq_printf(s, "tx_bd_p_index(%u)\n", + readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base)) + & (priv->tx_bd_num - 1)); + seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index); + seq_printf(s, "tx queue len(%u)\n", + CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index, + priv->tx_bd_num)); + + seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index); + seq_printf(s, "rx_bd_p_index(%u)\n", + readl(PCIE_HDP_TX0DMA_CNT(priv->pcie_reg_base)) + & (priv->rx_bd_num - 1)); + seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index); + seq_printf(s, "rx alloc queue len(%u)\n", + CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index, + priv->rx_bd_num)); + + return 0; +} + +static int qtnf_dbg_shm_stats(struct seq_file *s, void *data) +{ + struct qtnf_bus *bus = dev_get_drvdata(s->private); + struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + + seq_printf(s, "shm_ipc_ep_in.tx_packet_count(%zu)\n", + priv->shm_ipc_ep_in.tx_packet_count); + seq_printf(s, "shm_ipc_ep_in.rx_packet_count(%zu)\n", + priv->shm_ipc_ep_in.rx_packet_count); + seq_printf(s, "shm_ipc_ep_out.tx_packet_count(%zu)\n", + priv->shm_ipc_ep_out.tx_timeout_count); + seq_printf(s, "shm_ipc_ep_out.rx_packet_count(%zu)\n", + priv->shm_ipc_ep_out.rx_packet_count); + + return 0; +} + static int qtnf_ep_fw_send(struct qtnf_pcie_bus_priv *priv, uint32_t size, int blk, const u8 *pblk, const u8 *fw) { @@ -1052,181 +1161,102 @@ qtnf_ep_fw_load(struct qtnf_pcie_bus_priv *priv, const u8 *fw, u32 fw_size) return 0; } -static void qtnf_firmware_load(const struct firmware *fw, void *context) -{ - struct qtnf_pcie_bus_priv *priv = (void *)context; - struct pci_dev *pdev = priv->pdev; - struct qtnf_bus *bus = pci_get_drvdata(pdev); - int ret; - - if (!fw) { - pr_err("failed to get firmware %s\n", bus->fwname); - goto fw_load_err; 
- } - - ret = qtnf_ep_fw_load(priv, fw->data, fw->size); - if (ret) { - pr_err("FW upload error\n"); - goto fw_load_err; - } - - if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_DONE, - QTN_FW_DL_TIMEOUT_MS)) { - pr_err("FW bringup timed out\n"); - goto fw_load_err; - } - - bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE; - pr_info("firmware is up and running\n"); - -fw_load_err: - - if (fw) - release_firmware(fw); - - complete(&bus->request_firmware_complete); -} - -static int qtnf_bringup_fw(struct qtnf_bus *bus) +static void qtnf_fw_work_handler(struct work_struct *work) { + struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work); struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); struct pci_dev *pdev = priv->pdev; + const struct firmware *fw; int ret; u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK; - if (flashboot) + if (flashboot) { state |= QTN_RC_FW_FLASHBOOT; + } else { + ret = request_firmware(&fw, bus->fwname, &pdev->dev); + if (ret < 0) { + pr_err("failed to get firmware %s\n", bus->fwname); + goto fw_load_fail; + } + } qtnf_set_state(&priv->bda->bda_rc_state, state); if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY, QTN_FW_DL_TIMEOUT_MS)) { pr_err("card is not ready\n"); - return -ETIMEDOUT; + goto fw_load_fail; } qtnf_clear_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY); if (flashboot) { - pr_info("Booting FW from flash\n"); - - if (!qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_DONE, - QTN_FW_DL_TIMEOUT_MS)) - bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE; + pr_info("booting firmware from flash\n"); + } else { + pr_info("starting firmware upload: %s\n", bus->fwname); - return 0; + ret = qtnf_ep_fw_load(priv, fw->data, fw->size); + release_firmware(fw); + if (ret) { + pr_err("firmware upload error\n"); + goto fw_load_fail; + } } - pr_info("starting firmware upload: %s\n", bus->fwname); - - ret = request_firmware_nowait(THIS_MODULE, 1, bus->fwname, &pdev->dev, - GFP_KERNEL, priv, qtnf_firmware_load); - if (ret < 0) - pr_err("request_firmware_nowait error %d\n", ret); - else - ret = 1; - - return ret; -} - -static void qtnf_reclaim_tasklet_fn(unsigned long data) -{ - struct qtnf_pcie_bus_priv *priv = (void *)data; - - qtnf_pcie_data_tx_reclaim(priv); - qtnf_en_txdone_irq(priv); -} - -static int qtnf_dbg_mps_show(struct seq_file *s, void *data) -{ - struct qtnf_bus *bus = dev_get_drvdata(s->private); - struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_DONE, + QTN_FW_DL_TIMEOUT_MS)) { + pr_err("firmware bringup timed out\n"); + goto fw_load_fail; + } - seq_printf(s, "%d\n", priv->mps); + bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE; + pr_info("firmware is up and running\n"); - return 0; -} + if (qtnf_poll_state(&priv->bda->bda_ep_state, + QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) { + pr_err("firmware runtime failure\n"); + goto fw_load_fail; + } -static int qtnf_dbg_msi_show(struct seq_file *s, void *data) -{ - struct qtnf_bus *bus = dev_get_drvdata(s->private); - struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + ret = qtnf_core_attach(bus); + if (ret) { + pr_err("failed to attach core\n"); + goto fw_load_fail; + } - seq_printf(s, "%u\n", priv->msi_enabled); + qtnf_debugfs_init(bus, DRV_NAME); + qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show); + qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show); + qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats); + qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats); + 
qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats); - return 0; -} + goto fw_load_exit; -static int qtnf_dbg_irq_stats(struct seq_file *s, void *data) -{ - struct qtnf_bus *bus = dev_get_drvdata(s->private); - struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); - u32 reg = readl(PCIE_HDP_INT_EN(priv->pcie_reg_base)); - u32 status; +fw_load_fail: + bus->fw_state = QTNF_FW_STATE_DEAD; - seq_printf(s, "pcie_irq_count(%u)\n", priv->pcie_irq_count); - seq_printf(s, "pcie_irq_tx_count(%u)\n", priv->pcie_irq_tx_count); - status = reg & PCIE_HDP_INT_TX_BITS; - seq_printf(s, "pcie_irq_tx_status(%s)\n", - (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS"); - seq_printf(s, "pcie_irq_rx_count(%u)\n", priv->pcie_irq_rx_count); - status = reg & PCIE_HDP_INT_RX_BITS; - seq_printf(s, "pcie_irq_rx_status(%s)\n", - (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS"); - seq_printf(s, "pcie_irq_uf_count(%u)\n", priv->pcie_irq_uf_count); - status = reg & PCIE_HDP_INT_HHBM_UF; - seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n", - (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS"); - - return 0; +fw_load_exit: + complete(&bus->firmware_init_complete); + put_device(&pdev->dev); } -static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data) +static void qtnf_bringup_fw_async(struct qtnf_bus *bus) { - struct qtnf_bus *bus = dev_get_drvdata(s->private); - struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); - - seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count); - seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count); - seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done); - seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req); - - seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index); - seq_printf(s, "tx_bd_p_index(%u)\n", - readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base)) - & (priv->tx_bd_num - 1)); - seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index); - seq_printf(s, "tx queue len(%u)\n", - CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index, - priv->tx_bd_num)); - - seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index); - seq_printf(s, "rx_bd_p_index(%u)\n", - readl(PCIE_HDP_TX0DMA_CNT(priv->pcie_reg_base)) - & (priv->rx_bd_num - 1)); - seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index); - seq_printf(s, "rx alloc queue len(%u)\n", - CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index, - priv->rx_bd_num)); + struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); + struct pci_dev *pdev = priv->pdev; - return 0; + get_device(&pdev->dev); + INIT_WORK(&bus->fw_work, qtnf_fw_work_handler); + schedule_work(&bus->fw_work); } -static int qtnf_dbg_shm_stats(struct seq_file *s, void *data) +static void qtnf_reclaim_tasklet_fn(unsigned long data) { - struct qtnf_bus *bus = dev_get_drvdata(s->private); - struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); - - seq_printf(s, "shm_ipc_ep_in.tx_packet_count(%zu)\n", - priv->shm_ipc_ep_in.tx_packet_count); - seq_printf(s, "shm_ipc_ep_in.rx_packet_count(%zu)\n", - priv->shm_ipc_ep_in.rx_packet_count); - seq_printf(s, "shm_ipc_ep_out.tx_packet_count(%zu)\n", - priv->shm_ipc_ep_out.tx_timeout_count); - seq_printf(s, "shm_ipc_ep_out.rx_packet_count(%zu)\n", - priv->shm_ipc_ep_out.rx_packet_count); + struct qtnf_pcie_bus_priv *priv = (void *)data; - return 0; + qtnf_pcie_data_tx_reclaim(priv); + qtnf_en_txdone_irq(priv); } static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -1237,10 +1267,8 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) bus = 
devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*pcie_priv), GFP_KERNEL); - if (!bus) { - ret = -ENOMEM; - goto err_init; - } + if (!bus) + return -ENOMEM; pcie_priv = get_bus_priv(bus); @@ -1251,7 +1279,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) pcie_priv->pdev = pdev; strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME); - init_completion(&bus->request_firmware_complete); + init_completion(&bus->firmware_init_complete); mutex_init(&bus->bus_lock); spin_lock_init(&pcie_priv->tx0_lock); spin_lock_init(&pcie_priv->irq_lock); @@ -1267,11 +1295,18 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) pcie_priv->tx_reclaim_done = 0; pcie_priv->tx_reclaim_req = 0; + tasklet_init(&pcie_priv->reclaim_tq, qtnf_reclaim_tasklet_fn, + (unsigned long)pcie_priv); + + init_dummy_netdev(&bus->mux_dev); + netif_napi_add(&bus->mux_dev, &bus->mux_napi, + qtnf_rx_poll, 10); + pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PEARL_PCIE"); if (!pcie_priv->workqueue) { pr_err("failed to alloc bus workqueue\n"); ret = -ENODEV; - goto err_priv; + goto err_init; } if (!pci_is_pcie(pdev)) { @@ -1300,14 +1335,8 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_base; } - pcim_pin_device(pdev); pci_set_master(pdev); - - ret = qtnf_pcie_init_irq(pcie_priv); - if (ret < 0) { - pr_err("irq init failed\n"); - goto err_base; - } + qtnf_pcie_init_irq(pcie_priv); ret = qtnf_pcie_init_memory(pcie_priv); if (ret < 0) { @@ -1315,22 +1344,18 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_base; } + pci_save_state(pdev); + ret = qtnf_pcie_init_shm_ipc(pcie_priv); if (ret < 0) { pr_err("PCIE SHM IPC init failed\n"); goto err_base; } - ret = devm_add_action(&pdev->dev, free_xfer_buffers, (void *)pcie_priv); - if (ret) { - pr_err("custom release callback init failed\n"); - goto err_base; - } - ret = qtnf_pcie_init_xfer(pcie_priv); if (ret) { pr_err("PCIE xfer init failed\n"); - goto err_base; + goto err_ipc; } /* init default irq settings */ @@ -1343,58 +1368,28 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) "qtnf_pcie_irq", (void *)bus); if (ret) { pr_err("failed to request pcie irq %d\n", pdev->irq); - goto err_base; - } - - tasklet_init(&pcie_priv->reclaim_tq, qtnf_reclaim_tasklet_fn, - (unsigned long)pcie_priv); - init_dummy_netdev(&bus->mux_dev); - netif_napi_add(&bus->mux_dev, &bus->mux_napi, - qtnf_rx_poll, 10); - - ret = qtnf_bringup_fw(bus); - if (ret < 0) - goto err_bringup_fw; - else if (ret) - wait_for_completion(&bus->request_firmware_complete); - - if (bus->fw_state != QTNF_FW_STATE_FW_DNLD_DONE) { - pr_err("failed to start FW\n"); - goto err_bringup_fw; - } - - if (qtnf_poll_state(&pcie_priv->bda->bda_ep_state, QTN_EP_FW_QLINK_DONE, - QTN_FW_QLINK_TIMEOUT_MS)) { - pr_err("FW runtime failure\n"); - goto err_bringup_fw; + goto err_xfer; } - ret = qtnf_core_attach(bus); - if (ret) { - pr_err("failed to attach core\n"); - goto err_bringup_fw; - } - - qtnf_debugfs_init(bus, DRV_NAME); - qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show); - qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show); - qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats); - qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats); - qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats); + qtnf_bringup_fw_async(bus); return 0; -err_bringup_fw: - netif_napi_del(&bus->mux_napi); +err_xfer: + qtnf_free_xfer_buffers(pcie_priv); 
+ +err_ipc: + qtnf_pcie_free_shm_ipc(pcie_priv); err_base: flush_workqueue(pcie_priv->workqueue); destroy_workqueue(pcie_priv->workqueue); + netif_napi_del(&bus->mux_napi); -err_priv: +err_init: + tasklet_kill(&pcie_priv->reclaim_tq); pci_set_drvdata(pdev, NULL); -err_init: return ret; } @@ -1407,18 +1402,23 @@ static void qtnf_pcie_remove(struct pci_dev *pdev) if (!bus) return; + wait_for_completion(&bus->firmware_init_complete); + + if (bus->fw_state == QTNF_FW_STATE_ACTIVE) + qtnf_core_detach(bus); + priv = get_bus_priv(bus); - qtnf_core_detach(bus); netif_napi_del(&bus->mux_napi); - flush_workqueue(priv->workqueue); destroy_workqueue(priv->workqueue); tasklet_kill(&priv->reclaim_tq); + qtnf_free_xfer_buffers(priv); qtnf_debugfs_remove(bus); qtnf_pcie_free_shm_ipc(priv); + qtnf_reset_card(priv); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h index c5a4e46d26ef..00bb21a1c47a 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h @@ -46,6 +46,7 @@ /* state transition timeouts */ #define QTN_FW_DL_TIMEOUT_MS 3000 #define QTN_FW_QLINK_TIMEOUT_MS 30000 +#define QTN_EP_RESET_WAIT_MS 1000 #define PCIE_HDP_INT_RX_BITS (0 \ | PCIE_HDP_INT_EP_TXDMA \ diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h index 5b48b425fa7f..0bfe285b6b48 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h @@ -351,5 +351,6 @@ #define QTN_PEARL_IPC_IRQ_WORD(irq) (BIT(irq) | BIT(irq + 16)) #define QTN_PEARL_LHOST_IPC_IRQ (6) +#define QTN_PEARL_LHOST_EP_RESET (7) #endif /* __PEARL_PCIE_H */ diff --git a/drivers/net/wireless/ralink/Kconfig b/drivers/net/wireless/ralink/Kconfig index 41dbf3130e2b..9b79e59ee97b 100644 --- a/drivers/net/wireless/ralink/Kconfig +++ b/drivers/net/wireless/ralink/Kconfig @@ -5,8 +5,8 @@ config WLAN_VENDOR_RALINK If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. if WLAN_VENDOR_RALINK diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index f4b48b77c491..3df8c4b895e7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -37,7 +37,7 @@ * Allow hardware encryption to be disabled. */ static bool modparam_nohwcrypt; -module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); /* diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c index 5cf655ff1430..1172eefd1c1a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c @@ -49,7 +49,7 @@ * Allow hardware encryption to be disabled. 
*/ static bool modparam_nohwcrypt = false; -module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); static bool rt2800pci_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c index a985a5a7945e..6848ebc83534 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c @@ -41,7 +41,7 @@ /* Allow hardware encryption to be disabled. */ static bool modparam_nohwcrypt; -module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); static bool rt2800soc_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index 24fc6d2045ef..d901a41d36e4 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -43,7 +43,7 @@ * Allow hardware encryption to be disabled. */ static bool modparam_nohwcrypt; -module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); static bool rt2800usb_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c index ac2572943ed0..0eee479583b8 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c @@ -606,7 +606,7 @@ static struct dentry *rt2x00debug_create_file_driver(const char *name, data += sprintf(data, "version:\t%s\n", DRV_VERSION); blob->size = strlen(blob->data); - return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob); + return debugfs_create_blob(name, 0400, intf->driver_folder, blob); } static struct dentry *rt2x00debug_create_file_chipset(const char *name, @@ -647,7 +647,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name, blob->size = strlen(blob->data); - return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob); + return debugfs_create_blob(name, 0400, intf->driver_folder, blob); } void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) @@ -682,13 +682,13 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) if (IS_ERR(intf->chipset_entry) || !intf->chipset_entry) goto exit; - intf->dev_flags = debugfs_create_file("dev_flags", S_IRUSR, + intf->dev_flags = debugfs_create_file("dev_flags", 0400, intf->driver_folder, intf, &rt2x00debug_fop_dev_flags); if (IS_ERR(intf->dev_flags) || !intf->dev_flags) goto exit; - intf->cap_flags = debugfs_create_file("cap_flags", S_IRUSR, + intf->cap_flags = debugfs_create_file("cap_flags", 0400, intf->driver_folder, intf, &rt2x00debug_fop_cap_flags); if (IS_ERR(intf->cap_flags) || !intf->cap_flags) @@ -699,27 +699,28 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) if (IS_ERR(intf->register_folder) || !intf->register_folder) goto exit; -#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ -({ \ - if (debug->__name.read) { \ - (__intf)->__name##_off_entry = \ - debugfs_create_u32(__stringify(__name) "_offset", \ - S_IRUSR | S_IWUSR, \ - (__intf)->register_folder, \ - &(__intf)->offset_##__name); \ - if 
(IS_ERR((__intf)->__name##_off_entry) \ - || !(__intf)->__name##_off_entry) \ - goto exit; \ - \ - (__intf)->__name##_val_entry = \ - debugfs_create_file(__stringify(__name) "_value", \ - S_IRUSR | S_IWUSR, \ - (__intf)->register_folder, \ - (__intf), &rt2x00debug_fop_##__name); \ - if (IS_ERR((__intf)->__name##_val_entry) \ - || !(__intf)->__name##_val_entry) \ - goto exit; \ - } \ +#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ +({ \ + if (debug->__name.read) { \ + (__intf)->__name##_off_entry = \ + debugfs_create_u32(__stringify(__name) "_offset", \ + 0600, \ + (__intf)->register_folder, \ + &(__intf)->offset_##__name); \ + if (IS_ERR((__intf)->__name##_off_entry) || \ + !(__intf)->__name##_off_entry) \ + goto exit; \ + \ + (__intf)->__name##_val_entry = \ + debugfs_create_file(__stringify(__name) "_value", \ + 0600, \ + (__intf)->register_folder, \ + (__intf), \ + &rt2x00debug_fop_##__name); \ + if (IS_ERR((__intf)->__name##_val_entry) || \ + !(__intf)->__name##_val_entry) \ + goto exit; \ + } \ }) RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, csr); @@ -736,8 +737,8 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) goto exit; intf->queue_frame_dump_entry = - debugfs_create_file("dump", S_IRUSR, intf->queue_folder, - intf, &rt2x00debug_fop_queue_dump); + debugfs_create_file("dump", 0400, intf->queue_folder, + intf, &rt2x00debug_fop_queue_dump); if (IS_ERR(intf->queue_frame_dump_entry) || !intf->queue_frame_dump_entry) goto exit; @@ -746,14 +747,15 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) init_waitqueue_head(&intf->frame_dump_waitqueue); intf->queue_stats_entry = - debugfs_create_file("queue", S_IRUSR, intf->queue_folder, - intf, &rt2x00debug_fop_queue_stats); + debugfs_create_file("queue", 0400, intf->queue_folder, + intf, &rt2x00debug_fop_queue_stats); #ifdef CONFIG_RT2X00_LIB_CRYPTO if (rt2x00_has_cap_hw_crypto(rt2x00dev)) intf->crypto_stats_entry = - debugfs_create_file("crypto", S_IRUGO, intf->queue_folder, - intf, &rt2x00debug_fop_crypto_stats); + debugfs_create_file("crypto", 0444, intf->queue_folder, + intf, + &rt2x00debug_fop_crypto_stats); #endif return; diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index 234310200759..cb0e1196f2c2 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -40,7 +40,7 @@ * Allow hardware encryption to be disabled. */ static bool modparam_nohwcrypt = false; -module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); /* diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c index 9a212823f42c..319ec4f2d9d2 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c @@ -38,7 +38,7 @@ * Allow hardware encryption to be disabled. 
*/ static bool modparam_nohwcrypt; -module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); /* diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 0133fcd4601b..7f9b16b97ea3 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -2815,9 +2815,11 @@ static int __init init_ray_cs(void) proc_mkdir("driver/ray_cs", NULL); proc_create("driver/ray_cs/ray_cs", 0, NULL, &ray_cs_proc_fops); - proc_create("driver/ray_cs/essid", S_IWUSR, NULL, &ray_cs_essid_proc_fops); - proc_create_data("driver/ray_cs/net_type", S_IWUSR, NULL, &int_proc_fops, &net_type); - proc_create_data("driver/ray_cs/translate", S_IWUSR, NULL, &int_proc_fops, &translate); + proc_create("driver/ray_cs/essid", 0200, NULL, &ray_cs_essid_proc_fops); + proc_create_data("driver/ray_cs/net_type", 0200, NULL, &int_proc_fops, + &net_type); + proc_create_data("driver/ray_cs/translate", 0200, NULL, &int_proc_fops, + &translate); #endif if (translate != 0) translate = 1; diff --git a/drivers/net/wireless/realtek/Kconfig b/drivers/net/wireless/realtek/Kconfig index 8a8ba2003964..3db988e689d7 100644 --- a/drivers/net/wireless/realtek/Kconfig +++ b/drivers/net/wireless/realtek/Kconfig @@ -5,8 +5,8 @@ config WLAN_VENDOR_REALTEK If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. 
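
The permission hunks scattered through this series (rt2x00, ray_cs, the MediaTek debugfs files, bonding) all replace symbolic mode macros with octal literals, which checkpatch.pl prefers because the resulting mode is readable at a glance. The equivalences can be verified in a few lines of userspace C; S_IRUGO is a kernel-only shorthand, so it is re-derived here from the POSIX bits:

	#include <sys/stat.h>
	#include <assert.h>

	/* S_IRUGO is kernel-only; spell it out from the POSIX owner/group/other bits */
	#define S_IRUGO	(S_IRUSR | S_IRGRP | S_IROTH)

	int main(void)
	{
		assert(S_IRUGO == 0444);			/* world-readable */
		assert((S_IRUGO | S_IWUSR) == 0644);		/* + owner write */
		assert((S_IRUSR | S_IWUSR) == 0600);		/* owner read/write */
		assert(S_IRUSR == 0400 && S_IWUSR == 0200);	/* owner-only read / write */
		return 0;
	}
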
if WLAN_VENDOR_REALTEK diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index 121b94f09714..9a1d15b3ce45 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c @@ -1450,6 +1450,7 @@ static int rtl8187_probe(struct usb_interface *intf, goto err_free_dev; } mutex_init(&priv->io_mutex); + mutex_init(&priv->conf_mutex); SET_IEEE80211_DEV(dev, &intf->dev); usb_set_intfdata(intf, dev); @@ -1625,7 +1626,6 @@ static int rtl8187_probe(struct usb_interface *intf, printk(KERN_ERR "rtl8187: Cannot register device\n"); goto err_free_dmabuf; } - mutex_init(&priv->conf_mutex); skb_queue_head_init(&priv->b_tx_status.queue); wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n", diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 7806a4d2b1fc..718a73c623a7 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -4431,7 +4431,7 @@ void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv) timeout = page_thresh; else if (rtl8xxxu_dma_agg_pages <= 6) dev_err(&priv->udev->dev, - "%s: dma_agg_pages=%i too small, minium is 6\n", + "%s: dma_agg_pages=%i too small, minimum is 6\n", __func__, rtl8xxxu_dma_agg_pages); else dev_err(&priv->udev->dev, diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index d6c03bd5cc65..762a29cdf7ad 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -244,6 +244,9 @@ static void _rtl_init_hw_vht_capab(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + if (!(rtlpriv->cfg->spec_ver & RTL_SPEC_SUPPORT_VHT)) + return; + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE || rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) { u16 mcs_map; @@ -397,6 +400,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); + ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); /* swlps or hwlps has been set in diff chip in init_sw_vars */ if (rtlpriv->psc.swctrl_lps) { @@ -886,8 +890,7 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw, tcb_desc->packet_bw = HT_CHANNEL_WIDTH_20_40; - if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE || - rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8821AE) { + if (rtlpriv->cfg->spec_ver & RTL_SPEC_SUPPORT_VHT) { if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC || mac->opmode == NL80211_IFTYPE_MESH_POINT) { @@ -1546,7 +1549,6 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx, /* EAPOL is seens as in-4way */ rtlpriv->btcoexist.btc_info.in_4way = true; rtlpriv->btcoexist.btc_info.in_4way_ts = jiffies; - rtlpriv->btcoexist.btc_info.in_4way_ts = jiffies; RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, "802.1X %s EAPOL pkt!!\n", (is_tx) ? 
"Tx" : "Rx"); @@ -1594,7 +1596,11 @@ static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw) struct rtl_tx_report *tx_report = &rtlpriv->tx_report; u16 sn; - sn = atomic_inc_return(&tx_report->sn) & 0x0FFF; + /* SW_DEFINE[11:8] are reserved (driver fills zeros) + * SW_DEFINE[7:2] are used by driver + * SW_DEFINE[1:0] are reserved for firmware (driver fills zeros) + */ + sn = (atomic_inc_return(&tx_report->sn) & 0x003F) << 2; tx_report->last_sent_sn = sn; tx_report->last_sent_time = jiffies; @@ -1622,14 +1628,23 @@ void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_tx_report *tx_report = &rtlpriv->tx_report; u16 sn; + u8 st, retry; - sn = ((tmp_buf[7] & 0x0F) << 8) | tmp_buf[6]; + if (rtlpriv->cfg->spec_ver & RTL_SPEC_EXT_C2H) { + sn = GET_TX_REPORT_SN_V2(tmp_buf); + st = GET_TX_REPORT_ST_V2(tmp_buf); + retry = GET_TX_REPORT_RETRY_V2(tmp_buf); + } else { + sn = GET_TX_REPORT_SN_V1(tmp_buf); + st = GET_TX_REPORT_ST_V1(tmp_buf); + retry = GET_TX_REPORT_RETRY_V1(tmp_buf); + } tx_report->last_recv_sn = sn; RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG, "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n", - tmp_buf[0], sn, tmp_buf[2]); + st, sn, retry); } EXPORT_SYMBOL_GPL(rtl_tx_report_handler); @@ -1643,7 +1658,8 @@ bool rtl_check_tx_report_acked(struct ieee80211_hw *hw) if (time_before(tx_report->last_sent_time + 3 * HZ, jiffies)) { RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_WARNING, - "Check TX-Report timeout!!\n"); + "Check TX-Report timeout!! s_sn=0x%X r_sn=0x%X\n", + tx_report->last_sent_sn, tx_report->last_recv_sn); return true; /* 3 sec. (timeout) seen as acked */ } @@ -2629,6 +2645,11 @@ EXPORT_SYMBOL_GPL(rtl_global_var); static int __init rtl_core_module_init(void) { + BUILD_BUG_ON(TX_PWR_BY_RATE_NUM_RATE < TX_PWR_BY_RATE_NUM_SECTION); + BUILD_BUG_ON(MAX_RATE_SECTION_NUM != MAX_RATE_SECTION); + BUILD_BUG_ON(MAX_BASE_NUM_IN_PHY_REG_PG_24G != MAX_RATE_SECTION); + BUILD_BUG_ON(MAX_BASE_NUM_IN_PHY_REG_PG_5G != (MAX_RATE_SECTION - 1)); + if (rtl_rate_control_register()) pr_err("rtl: Unable to register rtl_rc, use default RC !!\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c index fd3b1fb35dff..59553db020ef 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c @@ -1104,7 +1104,7 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, } if ((type == 1) || (type == 2) || (type == 9) || (type == 11) || - (type == 101) || (type == 102) || (type == 109) || (type == 101)) { + (type == 101) || (type == 102) || (type == 109) || (type == 111)) { if (!coex_sta->force_lps_on) { /* Native power save TDMA, only for A2DP-only case * 1/2/9/11 while wifi noisy threshold > 30 @@ -1436,6 +1436,7 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma; } +static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, u8 wifi_status) { diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index 4907c2ffadfe..73ec31972944 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -833,9 +833,9 @@ static void btc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoex, 
btc8723b2ant_set_dac_swing_reg(btcoex, 0x18); } -void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist, - bool force_exec, bool dac_swing_on, - u32 dac_swing_lvl) +static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist, + bool force_exec, bool dac_swing_on, + u32 dac_swing_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index 0b26419881c0..202597cf8915 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -426,39 +426,6 @@ static void btc8821a1ant_query_bt_info(struct btc_coexist *btcoexist) btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } -bool btc8821a1ant_is_wifi_status_changed(struct btc_coexist *btcoexist) -{ - static bool pre_wifi_busy = true; - static bool pre_under_4way = true; - static bool pre_bt_hs_on = true; - bool wifi_busy = false, under_4way = false, bt_hs_on = false; - bool wifi_connected = false; - - btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, - &wifi_connected); - btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); - btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); - btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, - &under_4way); - - if (wifi_connected) { - if (wifi_busy != pre_wifi_busy) { - pre_wifi_busy = wifi_busy; - return true; - } - if (under_4way != pre_under_4way) { - pre_under_4way = under_4way; - return true; - } - if (bt_hs_on != pre_bt_hs_on) { - pre_bt_hs_on = bt_hs_on; - return true; - } - } - - return false; -} - static void btc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist) { struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index d5f282cb9d24..2202d5e18977 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -359,7 +359,7 @@ static void btc8821a2ant_query_bt_info(struct btc_coexist *btcoexist) btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } -bool btc8821a2ant_is_wifi_status_changed(struct btc_coexist *btcoexist) +static bool btc8821a2ant_is_wifi_status_changed(struct btc_coexist *btcoexist) { static bool pre_wifi_busy = true; static bool pre_under_4way = true; @@ -1517,7 +1517,7 @@ static void btc8821a2ant_action_bt_inquiry(struct btc_coexist *btcoexist) btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); } -void btc8821a2ant_action_wifi_link_process(struct btc_coexist *btcoexist) +static void btc8821a2ant_action_wifi_link_process(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 u8tmpa, u8tmpb; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.c new file mode 100644 index 000000000000..951b8c1e0153 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.c @@ -0,0 +1,55 @@ +/****************************************************************************** + * + * Copyright(c) 2016-2017 Realtek Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + *****************************************************************************/ +#include "halbt_precomp.h" + +void ex_hal8822b_wifi_only_hw_config(struct wifi_only_cfg *wifionlycfg) +{ + /*BB control*/ + halwifionly_phy_set_bb_reg(wifionlycfg, 0x4c, 0x01800000, 0x2); + /*SW control*/ + halwifionly_phy_set_bb_reg(wifionlycfg, 0xcb4, 0xff, 0x77); + /*antenna mux switch */ + halwifionly_phy_set_bb_reg(wifionlycfg, 0x974, 0x300, 0x3); + + halwifionly_phy_set_bb_reg(wifionlycfg, 0x1990, 0x300, 0x0); + + halwifionly_phy_set_bb_reg(wifionlycfg, 0xcbc, 0x80000, 0x0); + /*switch to WL side controller and gnt_wl gnt_bt debug signal */ + halwifionly_phy_set_bb_reg(wifionlycfg, 0x70, 0xff000000, 0x0e); + /*gnt_wl=1 , gnt_bt=0*/ + halwifionly_phy_set_bb_reg(wifionlycfg, 0x1704, 0xffffffff, 0x7700); + halwifionly_phy_set_bb_reg(wifionlycfg, 0x1700, 0xffffffff, 0xc00f0038); +} + +void ex_hal8822b_wifi_only_scannotify(struct wifi_only_cfg *wifionlycfg, + u8 is_5g) +{ + hal8822b_wifi_only_switch_antenna(wifionlycfg, is_5g); +} + +void ex_hal8822b_wifi_only_switchbandnotify(struct wifi_only_cfg *wifionlycfg, + u8 is_5g) +{ + hal8822b_wifi_only_switch_antenna(wifionlycfg, is_5g); +} + +void hal8822b_wifi_only_switch_antenna(struct wifi_only_cfg *wifionlycfg, + u8 is_5g) +{ + if (is_5g) + halwifionly_phy_set_bb_reg(wifionlycfg, 0xcbc, 0x300, 0x1); + else + halwifionly_phy_set_bb_reg(wifionlycfg, 0xcbc, 0x300, 0x2); +} diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.h new file mode 100644 index 000000000000..6ec356542eea --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.h @@ -0,0 +1,25 @@ +/****************************************************************************** + * + * Copyright(c) 2016-2017 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + *****************************************************************************/ +#ifndef __INC_HAL8822BWIFIONLYHWCFG_H +#define __INC_HAL8822BWIFIONLYHWCFG_H + +void ex_hal8822b_wifi_only_hw_config(struct wifi_only_cfg *wifionlycfg); +void ex_hal8822b_wifi_only_scannotify(struct wifi_only_cfg *wifionlycfg, + u8 is_5g); +void ex_hal8822b_wifi_only_switchbandnotify(struct wifi_only_cfg *wifionlycfg, + u8 is_5g); +void hal8822b_wifi_only_switch_antenna(struct wifi_only_cfg *wifionlycfg, + u8 is_5g); +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 1404729441a2..8b6b07a936f5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -363,6 +363,22 @@ static void halbtc_normal_lps(struct btc_coexist *btcoexist) } } +static void halbtc_pre_normal_lps(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + + if (btcoexist->bt_info.bt_ctrl_lps) { + btcoexist->bt_info.bt_lps_on = false; + rtl_lps_leave(rtlpriv->mac80211.hw); + } +} + +static void halbtc_post_normal_lps(struct btc_coexist *btcoexist) +{ + if (btcoexist->bt_info.bt_ctrl_lps) + btcoexist->bt_info.bt_ctrl_lps = false; +} + static void halbtc_leave_low_power(struct btc_coexist *btcoexist) { } @@ -577,6 +593,9 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf) tmp = true; *bool_tmp = tmp; break; + case BTC_GET_BL_WIFI_DUAL_BAND_CONNECTED: + *u8_tmp = BTC_MULTIPORT_SCC; + break; case BTC_GET_BL_WIFI_BUSY: if (halbtc_is_wifi_busy(rtlpriv)) *bool_tmp = true; @@ -637,6 +656,9 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf) case BTC_GET_BL_IS_ASUS_8723B: *bool_tmp = false; break; + case BTC_GET_BL_RF4CE_CONNECTED: + *bool_tmp = false; + break; case BTC_GET_S4_WIFI_RSSI: *s32_tmp = halbtc_get_wifi_rssi(rtlpriv); break; @@ -677,6 +699,21 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf) case BTC_GET_U4_BT_FORBIDDEN_SLOT_VAL: *u32_tmp = halbtc_get_bt_forbidden_slot_val(btcoexist); break; + case BTC_GET_U4_WIFI_IQK_TOTAL: + *u32_tmp = + btcoexist->btc_phydm_query_phy_counter(btcoexist, + DM_INFO_IQK_ALL); + break; + case BTC_GET_U4_WIFI_IQK_OK: + *u32_tmp = + btcoexist->btc_phydm_query_phy_counter(btcoexist, + DM_INFO_IQK_OK); + break; + case BTC_GET_U4_WIFI_IQK_FAIL: + *u32_tmp = + btcoexist->btc_phydm_query_phy_counter(btcoexist, + DM_INFO_IQK_NG); + break; case BTC_GET_U1_WIFI_DOT11_CHNL: *u8_tmp = rtlphy->current_channel; break; @@ -788,6 +825,12 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf) case BTC_SET_ACT_NORMAL_LPS: halbtc_normal_lps(btcoexist); break; + case BTC_SET_ACT_PRE_NORMAL_LPS: + halbtc_pre_normal_lps(btcoexist); + break; + case BTC_SET_ACT_POST_NORMAL_LPS: + halbtc_post_normal_lps(btcoexist); + break; case BTC_SET_ACT_DISABLE_LOW_POWER: halbtc_disable_low_power(btcoexist, *bool_tmp); break; @@ -1039,6 +1082,28 @@ static void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id, cmd_len, cmd_buf); } +void halbtc_send_wifi_port_id_cmd(void *bt_context) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 cmd_buf[1] = {0}; /* port id [2:0] = 0 */ + + rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, H2C_BT_PORT_ID, + 1, cmd_buf); +} + +void halbtc_set_default_port_id_cmd(void *bt_context) +{ + struct btc_coexist *btcoexist = 
(struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct ieee80211_hw *hw = rtlpriv->mac80211.hw; + + if (!rtlpriv->cfg->ops->set_default_port_id_cmd) + return; + + rtlpriv->cfg->ops->set_default_port_id_cmd(hw); +} + static void halbtc_set_bt_reg(void *btc_context, u8 reg_type, u32 offset, u32 set_val) { @@ -1079,6 +1144,11 @@ static void halbtc_display_dbg_msg(void *bt_context, u8 disp_type, } } +static u32 halbtc_get_bt_reg(void *btc_context, u8 reg_type, u32 offset) +{ + return 0; +} + static bool halbtc_under_ips(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -1097,6 +1167,25 @@ static bool halbtc_under_ips(struct btc_coexist *btcoexist) return false; } +static +u32 halbtc_get_phydm_version(void *btc_context) +{ + return 0; +} + +static +void halbtc_phydm_modify_ra_pcr_threshold(void *btc_context, + u8 ra_offset_direction, + u8 ra_threshold_offset) +{ +} + +static +u32 halbtc_phydm_query_phy_counter(void *btc_context, enum dm_info_query dm_id) +{ + return 0; +} + static u8 halbtc_get_ant_det_val_from_bt(void *btc_context) { struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context; @@ -1210,6 +1299,7 @@ bool exhalbtc_initlize_variables(struct rtl_priv *rtlpriv) btcoexist->btc_get = halbtc_get; btcoexist->btc_set = halbtc_set; btcoexist->btc_set_bt_reg = halbtc_set_bt_reg; + btcoexist->btc_get_bt_reg = halbtc_get_bt_reg; btcoexist->bt_info.bt_ctrl_buf_size = false; btcoexist->bt_info.agg_buf_size = 5; @@ -1220,6 +1310,10 @@ bool exhalbtc_initlize_variables(struct rtl_priv *rtlpriv) halbtc_get_bt_coex_supported_feature; btcoexist->btc_get_bt_coex_supported_version = halbtc_get_bt_coex_supported_version; + btcoexist->btc_get_bt_phydm_version = halbtc_get_phydm_version; + btcoexist->btc_phydm_modify_ra_pcr_threshold = + halbtc_phydm_modify_ra_pcr_threshold; + btcoexist->btc_phydm_query_phy_counter = halbtc_phydm_query_phy_counter; btcoexist->btc_get_ant_det_val_from_bt = halbtc_get_ant_det_val_from_bt; btcoexist->btc_get_ble_scan_type_from_bt = halbtc_get_ble_scan_type_from_bt; @@ -1525,7 +1619,8 @@ void exhalbtc_scan_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg, void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action) { - u8 asso_type; + u8 asso_type, asso_type_v2; + bool wifi_under_5g; if (!halbtc_is_bt_coexist_available(btcoexist)) return; @@ -1533,10 +1628,17 @@ void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action) if (btcoexist->manual_control) return; - if (action) + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + + if (action) { asso_type = BTC_ASSOCIATE_START; - else + asso_type_v2 = wifi_under_5g ? BTC_ASSOCIATE_5G_START : + BTC_ASSOCIATE_START; + } else { asso_type = BTC_ASSOCIATE_FINISH; + asso_type_v2 = wifi_under_5g ? 
BTC_ASSOCIATE_5G_FINISH : + BTC_ASSOCIATE_FINISH; + } halbtc_leave_low_power(btcoexist); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h index 8ed217656539..9eae87d19120 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h @@ -62,6 +62,8 @@ #define BTC_ANT_PATH_WIFI 0 #define BTC_ANT_PATH_BT 1 #define BTC_ANT_PATH_PTA 2 +#define BTC_ANT_PATH_WIFI5G 3 +#define BTC_ANT_PATH_AUTO 4 /* dual Antenna definition */ #define BTC_ANT_WIFI_AT_MAIN 0 #define BTC_ANT_WIFI_AT_AUX 1 @@ -154,6 +156,7 @@ struct btc_board_info { u8 rfe_type; u8 ant_div_cfg; + u8 customer_id; }; enum btc_dbg_opcode { @@ -204,6 +207,7 @@ enum btc_wifi_traffic_dir { enum btc_wifi_pnp { BTC_WIFI_PNP_WAKE_UP = 0x0, BTC_WIFI_PNP_SLEEP = 0x1, + BTC_WIFI_PNP_SLEEP_KEEP_ANT = 0x2, BTC_WIFI_PNP_MAX }; @@ -250,6 +254,7 @@ enum btc_get_type { BTC_GET_BL_HS_OPERATION, BTC_GET_BL_HS_CONNECTING, BTC_GET_BL_WIFI_CONNECTED, + BTC_GET_BL_WIFI_DUAL_BAND_CONNECTED, BTC_GET_BL_WIFI_BUSY, BTC_GET_BL_WIFI_SCAN, BTC_GET_BL_WIFI_LINK, @@ -333,6 +338,7 @@ enum btc_set_type { BTC_SET_ACT_GET_BT_RSSI, BTC_SET_ACT_AGGREGATE_CTRL, BTC_SET_ACT_ANTPOSREGRISTRY_CTRL, + BTC_SET_MIMO_PS_MODE, /********* for 1Ant **********/ /* type bool */ @@ -347,8 +353,11 @@ enum btc_set_type { BTC_SET_ACT_LEAVE_LPS, BTC_SET_ACT_ENTER_LPS, BTC_SET_ACT_NORMAL_LPS, + BTC_SET_ACT_PRE_NORMAL_LPS, + BTC_SET_ACT_POST_NORMAL_LPS, BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT, BTC_SET_ACT_DISABLE_LOW_POWER, + BTC_SET_BL_BT_LNA_CONSTRAIN_LEVEL, BTC_SET_ACT_UPDATE_RAMASK, BTC_SET_ACT_SEND_MIMO_PS, /* BT Coex related */ @@ -383,6 +392,7 @@ enum btc_notify_type_lps { enum btc_notify_type_scan { BTC_SCAN_FINISH = 0x0, BTC_SCAN_START = 0x1, + BTC_SCAN_START_2G = 0x2, BTC_SCAN_MAX }; @@ -397,6 +407,8 @@ enum btc_notify_type_switchband { enum btc_notify_type_associate { BTC_ASSOCIATE_FINISH = 0x0, BTC_ASSOCIATE_START = 0x1, + BTC_ASSOCIATE_5G_FINISH = 0x2, + BTC_ASSOCIATE_5G_START = 0x3, BTC_ASSOCIATE_MAX }; @@ -435,6 +447,107 @@ enum btc_notify_type_stack_operation { BTC_STACK_OP_MAX }; +enum { + BTC_CCK_1, + BTC_CCK_2, + BTC_CCK_5_5, + BTC_CCK_11, + BTC_OFDM_6, + BTC_OFDM_9, + BTC_OFDM_12, + BTC_OFDM_18, + BTC_OFDM_24, + BTC_OFDM_36, + BTC_OFDM_48, + BTC_OFDM_54, + BTC_MCS_0, + BTC_MCS_1, + BTC_MCS_2, + BTC_MCS_3, + BTC_MCS_4, + BTC_MCS_5, + BTC_MCS_6, + BTC_MCS_7, + BTC_MCS_8, + BTC_MCS_9, + BTC_MCS_10, + BTC_MCS_11, + BTC_MCS_12, + BTC_MCS_13, + BTC_MCS_14, + BTC_MCS_15, + BTC_MCS_16, + BTC_MCS_17, + BTC_MCS_18, + BTC_MCS_19, + BTC_MCS_20, + BTC_MCS_21, + BTC_MCS_22, + BTC_MCS_23, + BTC_MCS_24, + BTC_MCS_25, + BTC_MCS_26, + BTC_MCS_27, + BTC_MCS_28, + BTC_MCS_29, + BTC_MCS_30, + BTC_MCS_31, + BTC_VHT_1SS_MCS_0, + BTC_VHT_1SS_MCS_1, + BTC_VHT_1SS_MCS_2, + BTC_VHT_1SS_MCS_3, + BTC_VHT_1SS_MCS_4, + BTC_VHT_1SS_MCS_5, + BTC_VHT_1SS_MCS_6, + BTC_VHT_1SS_MCS_7, + BTC_VHT_1SS_MCS_8, + BTC_VHT_1SS_MCS_9, + BTC_VHT_2SS_MCS_0, + BTC_VHT_2SS_MCS_1, + BTC_VHT_2SS_MCS_2, + BTC_VHT_2SS_MCS_3, + BTC_VHT_2SS_MCS_4, + BTC_VHT_2SS_MCS_5, + BTC_VHT_2SS_MCS_6, + BTC_VHT_2SS_MCS_7, + BTC_VHT_2SS_MCS_8, + BTC_VHT_2SS_MCS_9, + BTC_VHT_3SS_MCS_0, + BTC_VHT_3SS_MCS_1, + BTC_VHT_3SS_MCS_2, + BTC_VHT_3SS_MCS_3, + BTC_VHT_3SS_MCS_4, + BTC_VHT_3SS_MCS_5, + BTC_VHT_3SS_MCS_6, + BTC_VHT_3SS_MCS_7, + BTC_VHT_3SS_MCS_8, + BTC_VHT_3SS_MCS_9, + BTC_VHT_4SS_MCS_0, + BTC_VHT_4SS_MCS_1, + BTC_VHT_4SS_MCS_2, + BTC_VHT_4SS_MCS_3, + BTC_VHT_4SS_MCS_4, + 
BTC_VHT_4SS_MCS_5, + BTC_VHT_4SS_MCS_6, + BTC_VHT_4SS_MCS_7, + BTC_VHT_4SS_MCS_8, + BTC_VHT_4SS_MCS_9, + BTC_MCS_32, + BTC_UNKNOWN, + BTC_PKT_MGNT, + BTC_PKT_CTRL, + BTC_PKT_UNKNOWN, + BTC_PKT_NOT_FOR_ME, + BTC_RATE_MAX +}; + +enum { + BTC_MULTIPORT_SCC, + BTC_MULTIPORT_MCC_2CHANNEL, + BTC_MULTIPORT_MCC_2BAND, + BTC_MULTIPORT_MAX +}; + struct btc_bt_info { bool bt_disabled; u8 rssi_adjust_for_agc_table_on; @@ -454,6 +567,7 @@ struct btc_bt_info { u16 bt_hci_ver; u16 bt_real_fw_ver; u8 bt_fw_ver; + u32 bt_get_fw_ver; bool bt_disable_low_pwr; @@ -525,6 +639,7 @@ struct btc_bt_link_info { bool pan_exist; bool pan_only; bool slave_role; + bool acl_busy; }; enum btc_antenna_pos { @@ -625,8 +740,15 @@ struct btc_coexist { void (*btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset, u32 value); + u32 (*btc_get_bt_reg)(void *btc_context, u8 reg_type, u32 offset); u32 (*btc_get_bt_coex_supported_feature)(void *btcoexist); u32 (*btc_get_bt_coex_supported_version)(void *btcoexist); + u32 (*btc_get_bt_phydm_version)(void *btcoexist); + void (*btc_phydm_modify_ra_pcr_threshold)(void *btcoexist, + u8 ra_offset_direction, + u8 ra_threshold_offset); + u32 (*btc_phydm_query_phy_counter)(void *btcoexist, + enum dm_info_query dm_id); u8 (*btc_get_ant_det_val_from_bt)(void *btcoexist); u8 (*btc_get_ble_scan_type_from_bt)(void *btcoexist); u32 (*btc_get_ble_scan_para_from_bt)(void *btcoexist, u8 scan_type); @@ -691,6 +813,8 @@ void exhalbtc_lps_leave(struct btc_coexist *btcoexist); void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist); void exhalbtc_set_single_ant_path(struct btc_coexist *btcoexist, u8 single_ant_path); +void halbtc_send_wifi_port_id_cmd(void *bt_context); +void halbtc_set_default_port_id_cmd(void *bt_context); /* The following are used by wifi_only case */ enum wifionly_chip_interface { diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c index 35b50be633f1..fd13d4ef53b8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.c +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c @@ -50,6 +50,11 @@ static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = { {11, 0, 0, 28} }; +static const struct rtl_efuse_ops efuse_ops = { + .efuse_onebyte_read = efuse_one_byte_read, + .efuse_logical_map_read = efuse_shadow_read, +}; + static void efuse_shadow_read_1byte(struct ieee80211_hw *hw, u16 offset, u8 *value); static void efuse_shadow_read_2byte(struct ieee80211_hw *hw, u16 offset, @@ -1364,3 +1369,11 @@ void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen) *pfwlen = fwlen; } EXPORT_SYMBOL_GPL(rtl_fill_dummy); + +void rtl_efuse_ops_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->efuse.efuse_ops = &efuse_ops; +} +EXPORT_SYMBOL_GPL(rtl_efuse_ops_init); diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h index 952fdc288f0e..dfa31c13fc7a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.h +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h @@ -116,5 +116,5 @@ void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen); void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer, u32 size); void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size); - +void rtl_efuse_ops_init(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 01ccf8884831..57bb8f049e59 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ 
b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -31,7 +31,6 @@ #include "efuse.h" #include <linux/interrupt.h> #include <linux/export.h> -#include <linux/kmemleak.h> #include <linux/module.h> MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); @@ -2238,6 +2237,7 @@ int rtl_pci_probe(struct pci_dev *pdev, rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data); rtlpriv->intf_ops = &rtl_pci_ops; rtlpriv->glb_var = &rtl_global_var; + rtl_efuse_ops_init(hw); /* MEM map */ err = pci_request_regions(pdev, KBUILD_MODNAME); diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c index d1cb7d405618..6c78c6dabbdf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rc.c @@ -42,6 +42,23 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv, struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_sta_info *sta_entry = NULL; u16 wireless_mode = 0; + u8 nss; + struct ieee80211_tx_rate rate; + + switch (get_rf_type(rtlphy)) { + case RF_4T4R: + nss = 4; + break; + case RF_3T3R: + nss = 3; + break; + case RF_2T2R: + nss = 2; + break; + default: + nss = 1; + break; + } /* *this rate is no use for true rate, firmware @@ -66,28 +83,51 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv, } else if (wireless_mode == WIRELESS_MODE_G) { return G_MODE_MAX_RIX; } else if (wireless_mode == WIRELESS_MODE_N_24G) { - if (get_rf_type(rtlphy) != RF_2T2R) + if (nss == 1) return N_MODE_MCS7_RIX; else return N_MODE_MCS15_RIX; } else if (wireless_mode == WIRELESS_MODE_AC_24G) { - return AC_MODE_MCS9_RIX; + if (sta->bandwidth == IEEE80211_STA_RX_BW_20) { + ieee80211_rate_set_vht(&rate, + AC_MODE_MCS8_RIX, + nss); + goto out; + } else { + ieee80211_rate_set_vht(&rate, + AC_MODE_MCS9_RIX, + nss); + goto out; + } } return 0; } else { if (wireless_mode == WIRELESS_MODE_A) { return A_MODE_MAX_RIX; } else if (wireless_mode == WIRELESS_MODE_N_5G) { - if (get_rf_type(rtlphy) != RF_2T2R) + if (nss == 1) return N_MODE_MCS7_RIX; else return N_MODE_MCS15_RIX; } else if (wireless_mode == WIRELESS_MODE_AC_5G) { - return AC_MODE_MCS9_RIX; + if (sta->bandwidth == IEEE80211_STA_RX_BW_20) { + ieee80211_rate_set_vht(&rate, + AC_MODE_MCS8_RIX, + nss); + goto out; + } else { + ieee80211_rate_set_vht(&rate, + AC_MODE_MCS9_RIX, + nss); + goto out; + } } return 0; } } + +out: + return rate.idx; } static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv, @@ -111,9 +151,6 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv, } rate->count = tries; rate->idx = rix >= 0x00 ? 
rix : 0x00; - if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE && - wireless_mode == WIRELESS_MODE_AC_5G) - rate->idx += 0x10;/*2NSS for 8812AE*/ if (!not_data) { if (txrc->short_preamble) @@ -126,10 +163,10 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv, if (sta && sta->vht_cap.vht_supported) rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; } else { - if (mac->bw_40) - rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; if (mac->bw_80) rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; + else if (mac->bw_40) + rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; } if (sgi_20 || sgi_40 || sgi_80) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h index f2d9c6116e5c..8379a3e5198c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h @@ -142,7 +142,7 @@ /*wait power state to suspend*/}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), 0 \ - /*0x04[12:11] = 2b'01enable WL suspend*/}, + /*0x04[12:11] = 2b'00 disable WL suspend*/}, #define RTL8188EE_TRANS_CARDEMU_TO_CARDDIS \ {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ @@ -179,7 +179,7 @@ /*wait power state to suspend*/}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0 \ - /*0x04[12:11] = 2b'01enable WL suspend*/}, + /*0x04[12:11] = 2b'00 disable WL suspend*/}, #define RTL8188EE_TRANS_CARDEMU_TO_PDN \ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c index 015476e3f7e5..f3bff66e85d0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c @@ -32,7 +32,6 @@ #include "../rtl8192ce/def.h" #include "fw_common.h" #include <linux/export.h> -#include <linux/kmemleak.h> static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c index 9cff6bc4049c..cf551785eb08 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c @@ -299,9 +299,6 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, writeVal = 0x00000000; if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) writeVal = writeVal - 0x06060606; - else if (rtlpriv->dm.dynamic_txhighpower_lvl == - TXHIGHPWRLEVEL_BT2) - writeVal = writeVal; *(p_outwriteval + rf) = writeVal; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index ac4a82de40c7..9ab56827124e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -427,7 +427,6 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb) (u32)hdr->addr1[0], (u32)hdr->addr1[1], (u32)hdr->addr1[2], (u32)hdr->addr1[3], (u32)hdr->addr1[4], (u32)hdr->addr1[5]); - memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status)); ieee80211_rx(hw, skb); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h index 781eeaa6af49..c570801508cc 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h @@ -134,7 +134,7 @@ /*wait power state to suspend*/ \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_SDIO , PWR_CMD_POLLING, BIT(1), BIT(1)}, \ - /*0x04[12:11] = 2b'01enable WL suspend*/ \ + /*0x04[12:11] = 2b'00 disable WL suspend*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(3) | BIT(4), 0}, @@ -181,7 +181,7 @@ /*Lock small LDO Register*/ \ {0x00CC, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(2), 0}, \ - /*0x04[12:11] = 2b'01enable WL suspend*/ \ + /*0x04[12:11] = 2b'00 disable WL suspend*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(3) | BIT(4), 0}, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h index 4ac7db526f15..e6c3aac3e9fd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h @@ -135,7 +135,7 @@ /*wait power state to suspend*/ \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)},\ - /*0x04[12:11] = 2b'01enable WL suspend*/ \ + /*0x04[12:11] = 2b'00 disable WL suspend*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, @@ -172,7 +172,7 @@ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO,\ PWR_CMD_POLLING, BIT(1), BIT(1)},\ - /*0x04[12:11] = 2b'00enable WL suspend*/ \ + /*0x04[12:11] = 2b'00 disable WL suspend*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ PWR_CMD_WRITE, BIT(3)|BIT(4), 0},\ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index f9ccd13c79f9..e7bbbc95cdb1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -1125,7 +1125,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw) /* Configuration Space offset 0x70f BIT7 is used to control L0S */ tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f); - _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7)); + _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) | + ASPM_L1_LATENCY << 3); /* Configuration Space offset 0x719 Bit3 is for L1 * BIT4 is for clock request diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h index 0fee5e0e55c2..3367cfbc9502 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h @@ -204,7 +204,7 @@ /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ - /*0x04[12:11] = 2b'01enable WL suspend*/ \ + /*0x04[12:11] = 2b'00 disable WL suspend*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, @@ -251,7 +251,7 @@ /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/ \ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ - /*0x04[12:11] = 2b'01enable WL suspend*/ \ + /*0x04[12:11] = 2b'00 disable WL suspend*/ \ {0x0005, 
PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, \ /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index b11365a5ee1f..9111ba7ff0a1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c @@ -1475,7 +1475,7 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw, } } else if (method == MIX_MODE) { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "pDM_Odm->DefaultOfdmIndex=%d, pDM_Odm->Aboslute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n", + "pDM_Odm->DefaultOfdmIndex=%d, pDM_Odm->Absolute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n", rtldm->default_ofdm_index, rtldm->absolute_ofdm_swing_idx[rf_path], rf_path); @@ -1750,7 +1750,7 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter( /*Record delta swing for mix mode power tracking*/ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", + "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]); RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, @@ -1766,7 +1766,7 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter( /*Record delta swing for mix mode power tracking*/ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n", + "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n", rtldm->absolute_ofdm_swing_idx[RF90_PATH_B]); } else { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, @@ -1782,7 +1782,7 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter( -1 * delta_swing_table_idx_tdown_a[delta]; /* Record delta swing for mix mode power tracking*/ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", + "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]); RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, @@ -1799,7 +1799,7 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter( /*Record delta swing for mix mode power tracking*/ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n", + "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n", rtldm->absolute_ofdm_swing_idx[RF90_PATH_B]); } @@ -2115,7 +2115,7 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw, } } else if (method == MIX_MODE) { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "pDM_Odm->DefaultOfdmIndex=%d,pDM_Odm->Aboslute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n", + "pDM_Odm->DefaultOfdmIndex=%d,pDM_Odm->Absolute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n", rtldm->default_ofdm_index, rtldm->absolute_ofdm_swing_idx[rf_path], rf_path); @@ -2329,7 +2329,7 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter( /*Record delta swing for mix mode power tracking*/ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", + "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]); } else { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, @@ -2345,7 +2345,7 @@ void 
rtl8821ae_dm_txpower_tracking_callback_thermalmeter( -1 * delta_swing_table_idx_tdown_a[delta]; /* Record delta swing for mix mode power tracking*/ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", + "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h index 36b3e91d996e..6dd575435c63 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h @@ -531,7 +531,7 @@ extern struct wlan_pwr_cfg rtl8812_leave_lps_flow /*0x23[4] = 1b'0 12H LDO enter normal mode*/}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, 0 \ - /*0x04[12:11] = 2b'01enable WL suspend*/}, + /*0x04[12:11] = 2b'00 disable WL suspend*/}, #define RTL8821A_TRANS_CARDEMU_TO_CARDDIS \ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ @@ -572,7 +572,7 @@ extern struct wlan_pwr_cfg rtl8812_leave_lps_flow /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, 0 \ - /*0x04[12:11] = 2b'01enable WL suspend*/},\ + /*0x04[12:11] = 2b'00 disable WL suspend*/},\ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0 \ /*0x23[4] = 1b'0 12H LDO enter normal mode*/}, \ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index ab5d462b1a3a..9bb3d9dfce79 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -328,6 +328,7 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = { .alt_fw_name = "rtlwifi/rtl8821aefw.bin", .ops = &rtl8821ae_hal_ops, .mod_params = &rtl8821ae_mod_params, + .spec_ver = RTL_SPEC_SUPPORT_VHT, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN, .maps[SYS_CLK] = REG_SYS_CLKR, diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 46dcb7fef195..d27e33960e77 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -154,10 +154,21 @@ enum rtl8192c_h2c_cmd { MAX_H2CCMD }; +enum { + H2C_BT_PORT_ID = 0x71, +}; + +#define GET_TX_REPORT_SN_V1(c2h) (c2h[6]) +#define GET_TX_REPORT_ST_V1(c2h) (c2h[0] & 0xC0) +#define GET_TX_REPORT_RETRY_V1(c2h) (c2h[2] & 0x3F) +#define GET_TX_REPORT_SN_V2(c2h) (c2h[6]) +#define GET_TX_REPORT_ST_V2(c2h) (c2h[7] & 0xC0) +#define GET_TX_REPORT_RETRY_V2(c2h) (c2h[8] & 0x3F) + #define MAX_TX_COUNT 4 #define MAX_REGULATION_NUM 4 #define MAX_RF_PATH_NUM 4 -#define MAX_RATE_SECTION_NUM 6 +#define MAX_RATE_SECTION_NUM 6 /* = MAX_RATE_SECTION */ #define MAX_2_4G_BANDWIDTH_NUM 4 #define MAX_5G_BANDWIDTH_NUM 4 #define MAX_RF_PATH 4 @@ -167,8 +178,9 @@ enum rtl8192c_h2c_cmd { #define TX_PWR_BY_RATE_NUM_BAND 2 #define TX_PWR_BY_RATE_NUM_RF 4 #define TX_PWR_BY_RATE_NUM_SECTION 12 -#define MAX_BASE_NUM_IN_PHY_REG_PG_24G 6 -#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 5 +#define TX_PWR_BY_RATE_NUM_RATE 84 /* >= TX_PWR_BY_RATE_NUM_SECTION */ +#define MAX_BASE_NUM_IN_PHY_REG_PG_24G 6 /* MAX_RATE_SECTION */ +#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 5 /* MAX_RATE_SECTION -1 */ #define 
BUFDESC_SEG_NUM 1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */ @@ -264,6 +276,7 @@ enum rate_section { HT_MCS8_MCS15, VHT_1SSMCS0_1SSMCS9, VHT_2SSMCS0_2SSMCS9, + MAX_RATE_SECTION, }; enum intf_type { @@ -278,6 +291,13 @@ enum radio_path { RF90_PATH_D = 3, }; +enum radio_mask { + RF_MASK_A = BIT(0), + RF_MASK_B = BIT(1), + RF_MASK_C = BIT(2), + RF_MASK_D = BIT(3), +}; + enum regulation_txpwr_lmt { TXPWR_LMT_FCC = 0, TXPWR_LMT_MKK = 1, @@ -536,6 +556,7 @@ enum rt_oem_id { RT_CID_NETGEAR = 36, RT_CID_PLANEX = 37, RT_CID_CC_C = 38, + RT_CID_LENOVO_CHINA = 40, }; enum hw_descs { @@ -571,6 +592,7 @@ enum ht_channel_width { HT_CHANNEL_WIDTH_20 = 0, HT_CHANNEL_WIDTH_20_40 = 1, HT_CHANNEL_WIDTH_80 = 2, + HT_CHANNEL_WIDTH_MAX, }; /* Ref: 802.11i sepc D10.0 7.3.2.25.1 @@ -952,6 +974,40 @@ enum package_type { enum rtl_spec_ver { RTL_SPEC_NEW_RATEID = BIT(0), /* use ratr_table_mode_new */ + RTL_SPEC_SUPPORT_VHT = BIT(1), /* support VHT */ + RTL_SPEC_EXT_C2H = BIT(2), /* extend FW C2H (e.g. TX REPORT) */ +}; + +enum dm_info_query { + DM_INFO_FA_OFDM, + DM_INFO_FA_CCK, + DM_INFO_FA_TOTAL, + DM_INFO_CCA_OFDM, + DM_INFO_CCA_CCK, + DM_INFO_CCA_ALL, + DM_INFO_CRC32_OK_VHT, + DM_INFO_CRC32_OK_HT, + DM_INFO_CRC32_OK_LEGACY, + DM_INFO_CRC32_OK_CCK, + DM_INFO_CRC32_ERROR_VHT, + DM_INFO_CRC32_ERROR_HT, + DM_INFO_CRC32_ERROR_LEGACY, + DM_INFO_CRC32_ERROR_CCK, + DM_INFO_EDCCA_FLAG, + DM_INFO_OFDM_ENABLE, + DM_INFO_CCK_ENABLE, + DM_INFO_CRC32_OK_HT_AGG, + DM_INFO_CRC32_ERROR_HT_AGG, + DM_INFO_DBG_PORT_0, + DM_INFO_CURR_IGI, + DM_INFO_RSSI_MIN, + DM_INFO_RSSI_MAX, + DM_INFO_CLM_RATIO, + DM_INFO_NHM_RATIO, + DM_INFO_IQK_ALL, + DM_INFO_IQK_OK, + DM_INFO_IQK_NG, + DM_INFO_SIZE, }; struct octet_string { @@ -1277,7 +1333,7 @@ struct rtl_phy { u32 tx_power_by_rate_offset[TX_PWR_BY_RATE_NUM_BAND] [TX_PWR_BY_RATE_NUM_RF] [TX_PWR_BY_RATE_NUM_RF] - [TX_PWR_BY_RATE_NUM_SECTION]; + [TX_PWR_BY_RATE_NUM_RATE]; u8 txpwr_by_rate_base_24g[TX_PWR_BY_RATE_NUM_RF] [TX_PWR_BY_RATE_NUM_RF] [MAX_BASE_NUM_IN_PHY_REG_PG_24G]; @@ -1794,6 +1850,7 @@ struct rtl_dm { #define EFUSE_MAX_LOGICAL_SIZE 512 struct rtl_efuse { + const struct rtl_efuse_ops *efuse_ops; bool autoLoad_ok; bool bootfromefuse; u16 max_physical_size; @@ -1899,6 +1956,12 @@ struct rtl_efuse { u8 channel_plan; }; +struct rtl_efuse_ops { + int (*efuse_onebyte_read)(struct ieee80211_hw *hw, u16 addr, u8 *data); + void (*efuse_logical_map_read)(struct ieee80211_hw *hw, u8 type, + u16 offset, u32 *value); +}; + struct rtl_tx_report { atomic_t sn; u16 last_sent_sn; @@ -2231,6 +2294,7 @@ struct rtl_hal_ops { void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw); void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); + void (*set_default_port_id_cmd)(struct ieee80211_hw *hw); bool (*get_btc_status) (void); bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr); u32 (*rx_command_packet)(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/rsi/Kconfig b/drivers/net/wireless/rsi/Kconfig index 7c5e4ca4e3d0..976c21866230 100644 --- a/drivers/net/wireless/rsi/Kconfig +++ b/drivers/net/wireless/rsi/Kconfig @@ -5,14 +5,15 @@ config WLAN_VENDOR_RSI If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. 
If you say Y, you will be asked for your specific card in the following questions. if WLAN_VENDOR_RSI config RSI_91X tristate "Redpine Signals Inc 91x WLAN driver support" + select BT_HCIRSI if RSI_COEX depends on MAC80211 ---help--- This option enables support for RSI 1x1 devices. @@ -42,4 +43,14 @@ config RSI_USB This option enables the USB bus support in rsi drivers. Select M (recommended), if you have an RSI 1x1 wireless module. +config RSI_COEX + bool "Redpine Signals WLAN BT Coexistence support" + depends on BT && RSI_91X + depends on !(BT=m && RSI_91X=y) + default y + ---help--- + This option enables the WLAN BT coex support in rsi drivers. + Select Y (recommended), if you want to use this feature + and you have an RS9113 module. + endif # WLAN_VENDOR_RSI diff --git a/drivers/net/wireless/rsi/Makefile b/drivers/net/wireless/rsi/Makefile index 47c45908d894..ff87121a5928 100644 --- a/drivers/net/wireless/rsi/Makefile +++ b/drivers/net/wireless/rsi/Makefile @@ -5,6 +5,7 @@ rsi_91x-y += rsi_91x_mac80211.o rsi_91x-y += rsi_91x_mgmt.o rsi_91x-y += rsi_91x_hal.o rsi_91x-y += rsi_91x_ps.o +rsi_91x-$(CONFIG_RSI_COEX) += rsi_91x_coex.o rsi_91x-$(CONFIG_RSI_DEBUGFS) += rsi_91x_debugfs.o rsi_usb-y += rsi_91x_usb.o rsi_91x_usb_ops.o diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c new file mode 100644 index 000000000000..d055099dadf1 --- /dev/null +++ b/drivers/net/wireless/rsi/rsi_91x_coex.c @@ -0,0 +1,179 @@ +/** + * Copyright (c) 2018 Redpine Signals Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */ + +#include "rsi_main.h" +#include "rsi_coex.h" +#include "rsi_mgmt.h" +#include "rsi_hal.h" + +static enum rsi_coex_queues rsi_coex_determine_coex_q + (struct rsi_coex_ctrl_block *coex_cb) +{ + enum rsi_coex_queues q_num = RSI_COEX_Q_INVALID; + + if (skb_queue_len(&coex_cb->coex_tx_qs[RSI_COEX_Q_COMMON]) > 0) + q_num = RSI_COEX_Q_COMMON; + if (skb_queue_len(&coex_cb->coex_tx_qs[RSI_COEX_Q_BT]) > 0) + q_num = RSI_COEX_Q_BT; + if (skb_queue_len(&coex_cb->coex_tx_qs[RSI_COEX_Q_WLAN]) > 0) + q_num = RSI_COEX_Q_WLAN; + + return q_num; +} + +static void rsi_coex_sched_tx_pkts(struct rsi_coex_ctrl_block *coex_cb) +{ + enum rsi_coex_queues coex_q = RSI_COEX_Q_INVALID; + struct sk_buff *skb; + + do { + coex_q = rsi_coex_determine_coex_q(coex_cb); + rsi_dbg(INFO_ZONE, "queue = %d\n", coex_q); + + if (coex_q == RSI_COEX_Q_BT) { + skb = skb_dequeue(&coex_cb->coex_tx_qs[RSI_COEX_Q_BT]); + rsi_send_bt_pkt(coex_cb->priv, skb); + } + } while (coex_q != RSI_COEX_Q_INVALID); +} + +static void rsi_coex_scheduler_thread(struct rsi_common *common) +{ + struct rsi_coex_ctrl_block *coex_cb = + (struct rsi_coex_ctrl_block *)common->coex_cb; + u32 timeout = EVENT_WAIT_FOREVER; + + do { + rsi_wait_event(&coex_cb->coex_tx_thread.event, timeout); + rsi_reset_event(&coex_cb->coex_tx_thread.event); + + rsi_coex_sched_tx_pkts(coex_cb); + } while (atomic_read(&coex_cb->coex_tx_thread.thread_done) == 0); + + complete_and_exit(&coex_cb->coex_tx_thread.completion, 0); +} + +int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg) +{ + u8 msg_type = msg[RSI_RX_DESC_MSG_TYPE_OFFSET]; + + switch (msg_type) { + case COMMON_CARD_READY_IND: + rsi_dbg(INFO_ZONE, "common card ready received\n"); + rsi_handle_card_ready(common, msg); + break; + case SLEEP_NOTIFY_IND: + rsi_dbg(INFO_ZONE, "sleep notify received\n"); + rsi_mgmt_pkt_recv(common, msg); + break; + } + + return 0; +} + +static inline int rsi_map_coex_q(u8 hal_queue) +{ + switch (hal_queue) { + case RSI_COEX_Q: + return RSI_COEX_Q_COMMON; + case RSI_WLAN_Q: + return RSI_COEX_Q_WLAN; + case RSI_BT_Q: + return RSI_COEX_Q_BT; + } + return RSI_COEX_Q_INVALID; +} + +int rsi_coex_send_pkt(void *priv, struct sk_buff *skb, u8 hal_queue) +{ + struct rsi_common *common = (struct rsi_common *)priv; + struct rsi_coex_ctrl_block *coex_cb = + (struct rsi_coex_ctrl_block *)common->coex_cb; + struct skb_info *tx_params = NULL; + enum rsi_coex_queues coex_q; + int status; + + coex_q = rsi_map_coex_q(hal_queue); + if (coex_q == RSI_COEX_Q_INVALID) { + rsi_dbg(ERR_ZONE, "Invalid coex queue\n"); + return -EINVAL; + } + if (coex_q != RSI_COEX_Q_COMMON && + coex_q != RSI_COEX_Q_WLAN) { + skb_queue_tail(&coex_cb->coex_tx_qs[coex_q], skb); + rsi_set_event(&coex_cb->coex_tx_thread.event); + return 0; + } + if (common->iface_down) { + tx_params = + (struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data; + + if (!(tx_params->flags & INTERNAL_MGMT_PKT)) { + rsi_indicate_tx_status(common->priv, skb, -EINVAL); + return 0; + } + } + + /* Send packet to hal */ + if (skb->priority == MGMT_SOFT_Q) + status = rsi_send_mgmt_pkt(common, skb); + else + status = rsi_send_data_pkt(common, skb); + + return status; +} + +int rsi_coex_attach(struct rsi_common *common) +{ + struct rsi_coex_ctrl_block *coex_cb; + int cnt; + + coex_cb = kzalloc(sizeof(*coex_cb), GFP_KERNEL); + if (!coex_cb) + return -ENOMEM; + + common->coex_cb = (void *)coex_cb; + coex_cb->priv = common; + + /* Initialize co-ex queues */ + for (cnt = 0; cnt < NUM_COEX_TX_QUEUES; cnt++) + skb_queue_head_init(&coex_cb->coex_tx_qs[cnt]); + 
rsi_init_event(&coex_cb->coex_tx_thread.event); + + /* Initialize co-ex thread */ + if (rsi_create_kthread(common, + &coex_cb->coex_tx_thread, + rsi_coex_scheduler_thread, + "Coex-Tx-Thread")) { + rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__); + return -EINVAL; + } + return 0; +} + +void rsi_coex_detach(struct rsi_common *common) +{ + struct rsi_coex_ctrl_block *coex_cb = + (struct rsi_coex_ctrl_block *)common->coex_cb; + int cnt; + + rsi_kill_thread(&coex_cb->coex_tx_thread); + + for (cnt = 0; cnt < NUM_COEX_TX_QUEUES; cnt++) + skb_queue_purge(&coex_cb->coex_tx_qs[cnt]); + + kfree(coex_cb); +} diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index d0d2201830e8..5dafd2e1306c 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -17,6 +17,7 @@ #include "rsi_mgmt.h" #include "rsi_common.h" #include "rsi_hal.h" +#include "rsi_coex.h" /** * rsi_determine_min_weight_queue() - This function determines the queue with @@ -301,14 +302,23 @@ void rsi_core_qos_processor(struct rsi_common *common) mutex_unlock(&common->tx_lock); break; } - - if (q_num == MGMT_SOFT_Q) { - status = rsi_send_mgmt_pkt(common, skb); - } else if (q_num == MGMT_BEACON_Q) { + if (q_num == MGMT_BEACON_Q) { status = rsi_send_pkt_to_bus(common, skb); dev_kfree_skb(skb); } else { - status = rsi_send_data_pkt(common, skb); +#ifdef CONFIG_RSI_COEX + if (common->coex_mode > 1) { + status = rsi_coex_send_pkt(common, skb, + RSI_WLAN_Q); + } else { +#endif + if (q_num == MGMT_SOFT_Q) + status = rsi_send_mgmt_pkt(common, skb); + else + status = rsi_send_data_pkt(common, skb); +#ifdef CONFIG_RSI_COEX + } +#endif } if (status) { diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 1176de646942..de608ae365a4 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -15,6 +15,7 @@ */ #include <linux/firmware.h> +#include <net/bluetooth/bluetooth.h> #include "rsi_mgmt.h" #include "rsi_hal.h" #include "rsi_sdio.h" @@ -24,6 +25,7 @@ static struct ta_metadata metadata_flash_content[] = { {"flash_content", 0x00010000}, {"rsi/rs9113_wlan_qspi.rps", 0x00010000}, + {"rsi/rs9113_wlan_bt_dual_mode.rps", 0x00010000}, }; int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb) @@ -31,8 +33,15 @@ int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb) struct rsi_hw *adapter = common->priv; int status; + if (common->coex_mode > 1) + mutex_lock(&common->tx_bus_mutex); + status = adapter->host_intf_ops->write_pkt(common->priv, skb->data, skb->len); + + if (common->coex_mode > 1) + mutex_unlock(&common->tx_bus_mutex); + return status; } @@ -296,8 +305,7 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) if (status) goto err; - status = adapter->host_intf_ops->write_pkt(common->priv, skb->data, - skb->len); + status = rsi_send_pkt_to_bus(common, skb); if (status) rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n", __func__); @@ -342,8 +350,7 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, goto err; rsi_prepare_mgmt_desc(common, skb); - status = adapter->host_intf_ops->write_pkt(common->priv, - (u8 *)skb->data, skb->len); + status = rsi_send_pkt_to_bus(common, skb); if (status) rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__); @@ -352,6 +359,43 @@ err: return status; } +int rsi_send_bt_pkt(struct rsi_common *common, struct sk_buff *skb) +{ + int status = -EINVAL; + u8 header_size = 0; + struct 
rsi_bt_desc *bt_desc; + u8 queueno = ((skb->data[1] >> 4) & 0xf); + + if (queueno == RSI_BT_MGMT_Q) { + status = rsi_send_pkt_to_bus(common, skb); + if (status) + rsi_dbg(ERR_ZONE, "%s: Failed to write bt mgmt pkt\n", + __func__); + goto out; + } + header_size = FRAME_DESC_SZ; + if (header_size > skb_headroom(skb)) { + rsi_dbg(ERR_ZONE, "%s: Not enough headroom\n", __func__); + status = -ENOSPC; + goto out; + } + skb_push(skb, header_size); + memset(skb->data, 0, header_size); + bt_desc = (struct rsi_bt_desc *)skb->data; + + rsi_set_len_qno(&bt_desc->len_qno, (skb->len - FRAME_DESC_SZ), + RSI_BT_DATA_Q); + bt_desc->bt_pkt_type = cpu_to_le16(bt_cb(skb)->pkt_type); + + status = rsi_send_pkt_to_bus(common, skb); + if (status) + rsi_dbg(ERR_ZONE, "%s: Failed to write bt pkt\n", __func__); + +out: + dev_kfree_skb(skb); + return status; +} + int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = (struct rsi_hw *)common->priv; @@ -926,10 +970,6 @@ int rsi_hal_device_init(struct rsi_hw *adapter) { struct rsi_common *common = adapter->priv; - common->coex_mode = RSI_DEV_COEX_MODE_WIFI_ALONE; - common->oper_mode = RSI_DEV_OPMODE_WIFI_ALONE; - adapter->device_model = RSI_DEV_9113; - switch (adapter->device_model) { case RSI_DEV_9113: if (rsi_load_firmware(adapter)) { diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index 0cb8e68bab58..1485a0c89df2 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -18,8 +18,10 @@ #include <linux/module.h> #include <linux/firmware.h> +#include <net/rsi_91x.h> #include "rsi_mgmt.h" #include "rsi_common.h" +#include "rsi_coex.h" #include "rsi_hal.h" u32 rsi_zone_enabled = /* INFO_ZONE | @@ -34,6 +36,14 @@ u32 rsi_zone_enabled = /* INFO_ZONE | 0; EXPORT_SYMBOL_GPL(rsi_zone_enabled); +#ifdef CONFIG_RSI_COEX +static struct rsi_proto_ops g_proto_ops = { + .coex_send_pkt = rsi_coex_send_pkt, + .get_host_intf = rsi_get_host_intf, + .set_bt_context = rsi_set_bt_context, +}; +#endif + /** * rsi_dbg() - This function outputs informational messages. * @zone: Zone of interest for output message. @@ -60,8 +70,24 @@ EXPORT_SYMBOL_GPL(rsi_dbg); static char *opmode_str(int oper_mode) { switch (oper_mode) { - case RSI_DEV_OPMODE_WIFI_ALONE: + case DEV_OPMODE_WIFI_ALONE: return "Wi-Fi alone"; + case DEV_OPMODE_BT_ALONE: + return "BT EDR alone"; + case DEV_OPMODE_BT_LE_ALONE: + return "BT LE alone"; + case DEV_OPMODE_BT_DUAL: + return "BT Dual"; + case DEV_OPMODE_STA_BT: + return "Wi-Fi STA + BT EDR"; + case DEV_OPMODE_STA_BT_LE: + return "Wi-Fi STA + BT LE"; + case DEV_OPMODE_STA_BT_DUAL: + return "Wi-Fi STA + BT DUAL"; + case DEV_OPMODE_AP_BT: + return "Wi-Fi AP + BT EDR"; + case DEV_OPMODE_AP_BT_DUAL: + return "Wi-Fi AP + BT DUAL"; } return "Unknown"; @@ -137,16 +163,19 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common, * * Return: 0 on success, -1 on failure. 
*/ -int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len) +int rsi_read_pkt(struct rsi_common *common, u8 *rx_pkt, s32 rcv_pkt_len) { u8 *frame_desc = NULL, extended_desc = 0; u32 index, length = 0, queueno = 0; u16 actual_length = 0, offset; struct sk_buff *skb = NULL; +#ifdef CONFIG_RSI_COEX + u8 bt_pkt_type; +#endif index = 0; do { - frame_desc = &common->rx_data_pkt[index]; + frame_desc = &rx_pkt[index]; actual_length = *(u16 *)&frame_desc[0]; offset = *(u16 *)&frame_desc[2]; @@ -160,8 +189,15 @@ int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len) switch (queueno) { case RSI_COEX_Q: - rsi_mgmt_pkt_recv(common, (frame_desc + offset)); +#ifdef CONFIG_RSI_COEX + if (common->coex_mode > 1) + rsi_coex_recv_pkt(common, frame_desc + offset); + else +#endif + rsi_mgmt_pkt_recv(common, + (frame_desc + offset)); break; + case RSI_WIFI_DATA_Q: skb = rsi_prepare_skb(common, (frame_desc + offset), @@ -177,6 +213,25 @@ int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len) rsi_mgmt_pkt_recv(common, (frame_desc + offset)); break; +#ifdef CONFIG_RSI_COEX + case RSI_BT_MGMT_Q: + case RSI_BT_DATA_Q: +#define BT_RX_PKT_TYPE_OFST 14 +#define BT_CARD_READY_IND 0x89 + bt_pkt_type = frame_desc[offset + BT_RX_PKT_TYPE_OFST]; + if (bt_pkt_type == BT_CARD_READY_IND) { + rsi_dbg(INFO_ZONE, "BT Card ready recvd\n"); + if (rsi_bt_ops.attach(common, &g_proto_ops)) + rsi_dbg(ERR_ZONE, + "Failed to attach BT module\n"); + } else { + if (common->bt_adapter) + rsi_bt_ops.recv_pkt(common->bt_adapter, + frame_desc + offset); + } + break; +#endif + default: rsi_dbg(ERR_ZONE, "%s: pkt from invalid queue: %d\n", __func__, queueno); @@ -217,13 +272,29 @@ static void rsi_tx_scheduler_thread(struct rsi_common *common) complete_and_exit(&common->tx_thread.completion, 0); } +#ifdef CONFIG_RSI_COEX +enum rsi_host_intf rsi_get_host_intf(void *priv) +{ + struct rsi_common *common = (struct rsi_common *)priv; + + return common->priv->rsi_host_intf; +} + +void rsi_set_bt_context(void *priv, void *bt_context) +{ + struct rsi_common *common = (struct rsi_common *)priv; + + common->bt_adapter = bt_context; +} +#endif + /** * rsi_91x_init() - This function initializes os interface operations. * @void: Void. * * Return: Pointer to the adapter structure on success, NULL on failure . 
*/ -struct rsi_hw *rsi_91x_init(void) +struct rsi_hw *rsi_91x_init(u16 oper_mode) { struct rsi_hw *adapter = NULL; struct rsi_common *common = NULL; @@ -251,6 +322,7 @@ struct rsi_hw *rsi_91x_init(void) mutex_init(&common->mutex); mutex_init(&common->tx_lock); mutex_init(&common->rx_lock); + mutex_init(&common->tx_bus_mutex); if (rsi_create_kthread(common, &common->tx_thread, @@ -265,6 +337,43 @@ struct rsi_hw *rsi_91x_init(void) timer_setup(&common->roc_timer, rsi_roc_timeout, 0); init_completion(&common->wlan_init_completion); common->init_done = true; + adapter->device_model = RSI_DEV_9113; + common->oper_mode = oper_mode; + + /* Determine coex mode */ + switch (common->oper_mode) { + case DEV_OPMODE_STA_BT_DUAL: + case DEV_OPMODE_STA_BT: + case DEV_OPMODE_STA_BT_LE: + case DEV_OPMODE_BT_ALONE: + case DEV_OPMODE_BT_LE_ALONE: + case DEV_OPMODE_BT_DUAL: + common->coex_mode = 2; + break; + case DEV_OPMODE_AP_BT_DUAL: + case DEV_OPMODE_AP_BT: + common->coex_mode = 4; + break; + case DEV_OPMODE_WIFI_ALONE: + common->coex_mode = 1; + break; + default: + common->oper_mode = 1; + common->coex_mode = 1; + } + rsi_dbg(INFO_ZONE, "%s: oper_mode = %d, coex_mode = %d\n", + __func__, common->oper_mode, common->coex_mode); + + adapter->device_model = RSI_DEV_9113; +#ifdef CONFIG_RSI_COEX + if (common->coex_mode > 1) { + if (rsi_coex_attach(common)) { + rsi_dbg(ERR_ZONE, "Failed to init coex module\n"); + goto err; + } + } +#endif + return adapter; err: @@ -292,6 +401,16 @@ void rsi_91x_deinit(struct rsi_hw *adapter) for (ii = 0; ii < NUM_SOFT_QUEUES; ii++) skb_queue_purge(&common->tx_queue[ii]); +#ifdef CONFIG_RSI_COEX + if (common->coex_mode > 1) { + if (common->bt_adapter) { + rsi_bt_ops.detach(common->bt_adapter); + common->bt_adapter = NULL; + } + rsi_coex_detach(common); + } +#endif + common->init_done = false; kfree(common); diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 46c9d5470dfb..c21fca750fd4 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1791,7 +1791,7 @@ out: return -EINVAL; } -static int rsi_handle_card_ready(struct rsi_common *common, u8 *msg) +int rsi_handle_card_ready(struct rsi_common *common, u8 *msg) { switch (common->fsm_state) { case FSM_CARD_NOT_READY: diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index b0cf41195051..d76e69c0beaa 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -18,8 +18,17 @@ #include <linux/module.h> #include "rsi_sdio.h" #include "rsi_common.h" +#include "rsi_coex.h" #include "rsi_hal.h" +/* Default operating mode is wlan STA + BT */ +static u16 dev_oper_mode = DEV_OPMODE_STA_BT_DUAL; +module_param(dev_oper_mode, ushort, 0444); +MODULE_PARM_DESC(dev_oper_mode, + "1[Wi-Fi], 4[BT], 8[BT LE], 5[Wi-Fi STA + BT classic]\n" + "9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n" + "6[AP + BT classic], 14[AP + BT classic + BT LE]"); + /** * rsi_sdio_set_cmd52_arg() - This function prepares cmd 52 read/write arg. 
* @rw: Read/write @@ -567,7 +576,7 @@ static int rsi_sdio_load_data_master_write(struct rsi_hw *adapter, { u32 num_blocks, offset, i; u16 msb_address, lsb_address; - u8 temp_buf[block_size]; + u8 *temp_buf; int status; num_blocks = instructions_sz / block_size; @@ -576,11 +585,15 @@ static int rsi_sdio_load_data_master_write(struct rsi_hw *adapter, rsi_dbg(INFO_ZONE, "ins_size: %d, num_blocks: %d\n", instructions_sz, num_blocks); + temp_buf = kmalloc(block_size, GFP_KERNEL); + if (!temp_buf) + return -ENOMEM; + /* Loading DM ms word in the sdio slave */ status = rsi_sdio_master_access_msword(adapter, msb_address); if (status < 0) { rsi_dbg(ERR_ZONE, "%s: Unable to set ms word reg\n", __func__); - return status; + goto out_free; } for (offset = 0, i = 0; i < num_blocks; i++, offset += block_size) { @@ -592,7 +605,7 @@ static int rsi_sdio_load_data_master_write(struct rsi_hw *adapter, temp_buf, block_size); if (status < 0) { rsi_dbg(ERR_ZONE, "%s: failed to write\n", __func__); - return status; + goto out_free; } rsi_dbg(INFO_ZONE, "%s: loading block: %d\n", __func__, i); base_address += block_size; @@ -607,7 +620,7 @@ static int rsi_sdio_load_data_master_write(struct rsi_hw *adapter, rsi_dbg(ERR_ZONE, "%s: Unable to set ms word reg\n", __func__); - return status; + goto out_free; } } } @@ -623,12 +636,16 @@ static int rsi_sdio_load_data_master_write(struct rsi_hw *adapter, temp_buf, instructions_sz % block_size); if (status < 0) - return status; + goto out_free; rsi_dbg(INFO_ZONE, "Written Last Block in Address 0x%x Successfully\n", offset | RSI_SD_REQUEST_MASTER); } - return 0; + + status = 0; +out_free: + kfree(temp_buf); + return status; } #define FLASH_SIZE_ADDR 0x04000016 @@ -636,11 +653,14 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, u32 *read_buf, u16 size) { u32 addr_on_bus, *data; - u32 align[2] = {}; u16 ms_addr; int status; - data = PTR_ALIGN(&align[0], 8); + data = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL); + if (!data) + return -ENOMEM; + + data = PTR_ALIGN(data, 8); ms_addr = (addr >> 16); status = rsi_sdio_master_access_msword(adapter, ms_addr); @@ -648,7 +668,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); - return status; + goto err; } addr &= 0xFFFF; @@ -666,7 +686,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, (u8 *)data, 4); if (status < 0) { rsi_dbg(ERR_ZONE, "%s: AHB register read failed\n", __func__); - return status; + goto err; } if (size == 2) { if ((addr & 0x3) == 0) @@ -688,17 +708,23 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, *read_buf = *data; } - return 0; +err: + kfree(data); + return status; } static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, unsigned long addr, unsigned long data, u16 size) { - unsigned long data1[2], *data_aligned; + unsigned long *data_aligned; int status; - data_aligned = PTR_ALIGN(&data1[0], 8); + data_aligned = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL); + if (!data_aligned) + return -ENOMEM; + + data_aligned = PTR_ALIGN(data_aligned, 8); if (size == 2) { *data_aligned = ((data << 16) | (data & 0xFFFF)); @@ -717,6 +743,7 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); + kfree(data_aligned); return -EIO; } addr = addr & 0xFFFF; @@ -726,12 +753,12 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, (adapter, (addr | 
RSI_SD_REQUEST_MASTER), (u8 *)data_aligned, size); - if (status < 0) { + if (status < 0) rsi_dbg(ERR_ZONE, "%s: Unable to do AHB reg write\n", __func__); - return status; - } - return 0; + + kfree(data_aligned); + return status; } /** @@ -754,6 +781,8 @@ static int rsi_sdio_host_intf_write_pkt(struct rsi_hw *adapter, int status; queueno = ((pkt[1] >> 4) & 0xf); + if (queueno == RSI_BT_MGMT_Q || queueno == RSI_BT_DATA_Q) + queueno = RSI_BT_Q; num_blocks = len / block_size; @@ -922,14 +951,16 @@ static int rsi_probe(struct sdio_func *pfunction, const struct sdio_device_id *id) { struct rsi_hw *adapter; + struct rsi_91x_sdiodev *sdev; + int status; rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__); - adapter = rsi_91x_init(); + adapter = rsi_91x_init(dev_oper_mode); if (!adapter) { rsi_dbg(ERR_ZONE, "%s: Failed to init os intf ops\n", __func__); - return 1; + return -EINVAL; } adapter->rsi_host_intf = RSI_HOST_INTF_SDIO; adapter->host_intf_ops = &sdio_host_intf_ops; @@ -937,39 +968,61 @@ static int rsi_probe(struct sdio_func *pfunction, if (rsi_init_sdio_interface(adapter, pfunction)) { rsi_dbg(ERR_ZONE, "%s: Failed to init sdio interface\n", __func__); - goto fail; + status = -EIO; + goto fail_free_adapter; } + sdev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; + rsi_init_event(&sdev->rx_thread.event); + status = rsi_create_kthread(adapter->priv, &sdev->rx_thread, + rsi_sdio_rx_thread, "SDIO-RX-Thread"); + if (status) { + rsi_dbg(ERR_ZONE, "%s: Unable to init rx thrd\n", __func__); + goto fail_kill_thread; + } + skb_queue_head_init(&sdev->rx_q.head); + sdev->rx_q.num_rx_pkts = 0; + sdio_claim_host(pfunction); if (sdio_claim_irq(pfunction, rsi_handle_interrupt)) { rsi_dbg(ERR_ZONE, "%s: Failed to request IRQ\n", __func__); sdio_release_host(pfunction); - goto fail; + status = -EIO; + goto fail_claim_irq; } sdio_release_host(pfunction); rsi_dbg(INIT_ZONE, "%s: Registered Interrupt handler\n", __func__); if (rsi_hal_device_init(adapter)) { rsi_dbg(ERR_ZONE, "%s: Failed in device init\n", __func__); - sdio_claim_host(pfunction); - sdio_release_irq(pfunction); - sdio_disable_func(pfunction); - sdio_release_host(pfunction); - goto fail; + status = -EINVAL; + goto fail_dev_init; } rsi_dbg(INFO_ZONE, "===> RSI Device Init Done <===\n"); if (rsi_sdio_master_access_msword(adapter, MISC_CFG_BASE_ADDR)) { rsi_dbg(ERR_ZONE, "%s: Unable to set ms word reg\n", __func__); - return -EIO; + status = -EIO; + goto fail_dev_init; } adapter->priv->hibernate_resume = false; adapter->priv->reinit_hw = false; return 0; -fail: + +fail_dev_init: + sdio_claim_host(pfunction); + sdio_release_irq(pfunction); + sdio_release_host(pfunction); +fail_claim_irq: + rsi_kill_thread(&sdev->rx_thread); +fail_kill_thread: + sdio_claim_host(pfunction); + sdio_disable_func(pfunction); + sdio_release_host(pfunction); +fail_free_adapter: rsi_91x_deinit(adapter); rsi_dbg(ERR_ZONE, "%s: Failed in probe...Exiting\n", __func__); - return 1; + return status; } static void ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data, @@ -1065,6 +1118,8 @@ static void rsi_disconnect(struct sdio_func *pfunction) return; dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; + + rsi_kill_thread(&dev->rx_thread); sdio_claim_host(pfunction); sdio_release_irq(pfunction); sdio_release_host(pfunction); diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index 8e2a95c486b0..612c211e21a1 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ 
-16,6 +16,7 @@ */ #include <linux/firmware.h> +#include <net/rsi_91x.h> #include "rsi_sdio.h" #include "rsi_common.h" @@ -59,6 +60,43 @@ int rsi_sdio_master_access_msword(struct rsi_hw *adapter, u16 ms_word) return status; } +void rsi_sdio_rx_thread(struct rsi_common *common) +{ + struct rsi_hw *adapter = common->priv; + struct rsi_91x_sdiodev *sdev = adapter->rsi_dev; + struct sk_buff *skb; + int status; + + do { + rsi_wait_event(&sdev->rx_thread.event, EVENT_WAIT_FOREVER); + rsi_reset_event(&sdev->rx_thread.event); + + while (true) { + if (atomic_read(&sdev->rx_thread.thread_done)) + goto out; + + skb = skb_dequeue(&sdev->rx_q.head); + if (!skb) + break; + if (sdev->rx_q.num_rx_pkts > 0) + sdev->rx_q.num_rx_pkts--; + status = rsi_read_pkt(common, skb->data, skb->len); + if (status) { + rsi_dbg(ERR_ZONE, "Failed to read the packet\n"); + dev_kfree_skb(skb); + break; + } + dev_kfree_skb(skb); + } + } while (1); + +out: + rsi_dbg(INFO_ZONE, "%s: Terminated SDIO RX thread\n", __func__); + skb_queue_purge(&sdev->rx_q.head); + atomic_inc(&sdev->rx_thread.thread_done); + complete_and_exit(&sdev->rx_thread.completion, 0); +} + /** * rsi_process_pkt() - This Function reads rx_blocks register and figures out * the size of the rx pkt. @@ -75,6 +113,10 @@ static int rsi_process_pkt(struct rsi_common *common) u32 rcv_pkt_len = 0; int status = 0; u8 value = 0; + struct sk_buff *skb; + + if (dev->rx_q.num_rx_pkts >= RSI_MAX_RX_PKTS) + return 0; num_blks = ((adapter->interrupt_status & 1) | ((adapter->interrupt_status >> RECV_NUM_BLOCKS) << 1)); @@ -102,27 +144,24 @@ static int rsi_process_pkt(struct rsi_common *common) rcv_pkt_len = (num_blks * 256); - common->rx_data_pkt = kmalloc(rcv_pkt_len, GFP_KERNEL); - if (!common->rx_data_pkt) { - rsi_dbg(ERR_ZONE, "%s: Failed in memory allocation\n", - __func__); + skb = dev_alloc_skb(rcv_pkt_len); + if (!skb) return -ENOMEM; - } - status = rsi_sdio_host_intf_read_pkt(adapter, - common->rx_data_pkt, - rcv_pkt_len); + status = rsi_sdio_host_intf_read_pkt(adapter, skb->data, rcv_pkt_len); if (status) { rsi_dbg(ERR_ZONE, "%s: Failed to read packet from card\n", __func__); - goto fail; + dev_kfree_skb(skb); + return status; } + skb_put(skb, rcv_pkt_len); + skb_queue_tail(&dev->rx_q.head, skb); + dev->rx_q.num_rx_pkts++; - status = rsi_read_pkt(common, rcv_pkt_len); + rsi_set_event(&dev->rx_thread.event); -fail: - kfree(common->rx_data_pkt); - return status; + return 0; } /** diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 8f8443833348..7b8bae313aa9 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -16,8 +16,20 @@ */ #include <linux/module.h> +#include <net/rsi_91x.h> #include "rsi_usb.h" #include "rsi_hal.h" +#include "rsi_coex.h" + +/* Default operating mode is wlan STA + BT */ +static u16 dev_oper_mode = DEV_OPMODE_STA_BT_DUAL; +module_param(dev_oper_mode, ushort, 0444); +MODULE_PARM_DESC(dev_oper_mode, + "1[Wi-Fi], 4[BT], 8[BT LE], 5[Wi-Fi STA + BT classic]\n" + "9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n" + "6[AP + BT classic], 14[AP + BT classic + BT LE]"); + +static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num); /** * rsi_usb_card_write() - This function writes to the USB Card. 
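The net effect of the rsi_91x_sdio_ops.c changes above is to split receive work in two: rsi_process_pkt() now only reads the block burst into an skb, queues it, and kicks the event, while rsi_sdio_rx_thread() drains the queue and runs rsi_read_pkt() outside interrupt context, with RSI_MAX_RX_PKTS (64) acting as a crude cap on queue depth. A hedged pthread sketch of that handoff -- the driver uses its own rsi_thread/rsi_event primitives and sk_buff queues, not pthreads or a bare counter:

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_RX_PKTS 64    /* same cap as RSI_MAX_RX_PKTS */

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t event = PTHREAD_COND_INITIALIZER;
    static int num_rx_pkts;   /* depth counter, like rx_q.num_rx_pkts */
    static int done;

    /* producer: what rsi_process_pkt() does after reading a burst */
    static void enqueue_pkt(void)
    {
        pthread_mutex_lock(&lock);
        if (num_rx_pkts < MAX_RX_PKTS) {   /* over the cap: skip, as the
                                            * driver returns early      */
            num_rx_pkts++;
            pthread_cond_signal(&event);   /* rsi_set_event() */
        }
        pthread_mutex_unlock(&lock);
    }

    /* consumer: the loop rsi_sdio_rx_thread() runs */
    static void *rx_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!done) {
            while (num_rx_pkts > 0) {
                num_rx_pkts--;
                pthread_mutex_unlock(&lock);
                /* rsi_read_pkt(common, skb->data, skb->len) goes here */
                pthread_mutex_lock(&lock);
            }
            pthread_cond_wait(&event, &lock);  /* rsi_wait_event() */
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, rx_thread, NULL);
        enqueue_pkt();

        pthread_mutex_lock(&lock);
        done = 1;                          /* thread_done + final kick */
        pthread_cond_signal(&event);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        puts("rx thread exited");
        return 0;
    }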
@@ -103,41 +115,41 @@ static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface, struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; __le16 buffer_size; - int ii, bep_found = 0; + int ii, bin_found = 0, bout_found = 0; iface_desc = &(interface->altsetting[0]); for (ii = 0; ii < iface_desc->desc.bNumEndpoints; ++ii) { endpoint = &(iface_desc->endpoint[ii].desc); - if ((!(dev->bulkin_endpoint_addr)) && + if (!dev->bulkin_endpoint_addr[bin_found] && (endpoint->bEndpointAddress & USB_DIR_IN) && - ((endpoint->bmAttributes & - USB_ENDPOINT_XFERTYPE_MASK) == + ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK)) { buffer_size = endpoint->wMaxPacketSize; - dev->bulkin_size = buffer_size; - dev->bulkin_endpoint_addr = + dev->bulkin_size[bin_found] = buffer_size; + dev->bulkin_endpoint_addr[bin_found] = endpoint->bEndpointAddress; + bin_found++; } - if (!dev->bulkout_endpoint_addr[bep_found] && + if (!dev->bulkout_endpoint_addr[bout_found] && !(endpoint->bEndpointAddress & USB_DIR_IN) && ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == - USB_ENDPOINT_XFER_BULK)) { - dev->bulkout_endpoint_addr[bep_found] = - endpoint->bEndpointAddress; + USB_ENDPOINT_XFER_BULK)) { buffer_size = endpoint->wMaxPacketSize; - dev->bulkout_size[bep_found] = buffer_size; - bep_found++; + dev->bulkout_endpoint_addr[bout_found] = + endpoint->bEndpointAddress; + dev->bulkout_size[bout_found] = buffer_size; + bout_found++; } - if (bep_found >= MAX_BULK_EP) + if (bin_found >= MAX_BULK_EP || bout_found >= MAX_BULK_EP) break; } - if (!(dev->bulkin_endpoint_addr) && - (dev->bulkout_endpoint_addr[0])) + if (!(dev->bulkin_endpoint_addr[0]) && + dev->bulkout_endpoint_addr[0]) return -EINVAL; return 0; @@ -247,13 +259,33 @@ static int rsi_usb_reg_write(struct usb_device *usbdev, */ static void rsi_rx_done_handler(struct urb *urb) { - struct rsi_hw *adapter = urb->context; - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rx_usb_ctrl_block *rx_cb = urb->context; + struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)rx_cb->data; + int status = -EINVAL; if (urb->status) - return; + goto out; + + if (urb->actual_length <= 0) { + rsi_dbg(INFO_ZONE, "%s: Zero length packet\n", __func__); + goto out; + } + if (skb_queue_len(&dev->rx_q) >= RSI_MAX_RX_PKTS) { + rsi_dbg(INFO_ZONE, "Max RX packets reached\n"); + goto out; + } + skb_put(rx_cb->rx_skb, urb->actual_length); + skb_queue_tail(&dev->rx_q, rx_cb->rx_skb); rsi_set_event(&dev->rx_thread.event); + status = 0; + +out: + if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num)) + rsi_dbg(ERR_ZONE, "%s: Failed in urb submission", __func__); + + if (status) + dev_kfree_skb(rx_cb->rx_skb); } /** @@ -262,20 +294,34 @@ static void rsi_rx_done_handler(struct urb *urb) * * Return: 0 on success, a negative error code on failure. 
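The endpoint-scan rework above keeps separate bin_found/bout_found cursors so that a part exposing two bulk IN pipes (WLAN and BT) fills per-endpoint arrays rather than the old single bulkin slot. The classification test itself is the standard USB descriptor check; a self-contained sketch with the usual constant values (direction bit 0x80, transfer-type mask 0x03, bulk type 0x02) and made-up endpoint descriptors:

    #include <stdint.h>
    #include <stdio.h>

    #define USB_DIR_IN                 0x80
    #define USB_ENDPOINT_XFERTYPE_MASK 0x03
    #define USB_ENDPOINT_XFER_BULK     0x02

    /* Returns +1 for bulk IN, -1 for bulk OUT, 0 for anything else. */
    static int classify_ep(uint8_t bEndpointAddress, uint8_t bmAttributes)
    {
        if ((bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
            USB_ENDPOINT_XFER_BULK)
            return 0;
        return (bEndpointAddress & USB_DIR_IN) ? 1 : -1;
    }

    int main(void)
    {
        /* invented descriptors: 0x81 = EP1 IN, 0x02 = EP2 OUT */
        printf("0x81/0x02 -> %d\n", classify_ep(0x81, 0x02)); /*  1 */
        printf("0x02/0x02 -> %d\n", classify_ep(0x02, 0x02)); /* -1 */
        printf("0x83/0x03 -> %d\n", classify_ep(0x83, 0x03)); /*  0: interrupt */
        return 0;
    }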
*/ -static int rsi_rx_urb_submit(struct rsi_hw *adapter) +static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num) { struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; - struct urb *urb = dev->rx_usb_urb[0]; + struct rx_usb_ctrl_block *rx_cb = &dev->rx_cb[ep_num - 1]; + struct urb *urb = rx_cb->rx_urb; int status; + struct sk_buff *skb; + u8 dword_align_bytes = 0; + +#define RSI_MAX_RX_USB_PKT_SIZE 3000 + skb = dev_alloc_skb(RSI_MAX_RX_USB_PKT_SIZE); + if (!skb) + return -ENOMEM; + skb_reserve(skb, MAX_DWORD_ALIGN_BYTES); + dword_align_bytes = (unsigned long)skb->data & 0x3f; + if (dword_align_bytes > 0) + skb_push(skb, dword_align_bytes); + urb->transfer_buffer = skb->data; + rx_cb->rx_skb = skb; usb_fill_bulk_urb(urb, dev->usbdev, usb_rcvbulkpipe(dev->usbdev, - dev->bulkin_endpoint_addr), + dev->bulkin_endpoint_addr[ep_num - 1]), urb->transfer_buffer, - 3000, + RSI_MAX_RX_USB_PKT_SIZE, rsi_rx_done_handler, - adapter); + rx_cb); status = usb_submit_urb(urb, GFP_KERNEL); if (status) @@ -487,11 +533,51 @@ static void rsi_deinit_usb_interface(struct rsi_hw *adapter) struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; rsi_kill_thread(&dev->rx_thread); - usb_free_urb(dev->rx_usb_urb[0]); - kfree(adapter->priv->rx_data_pkt); + + usb_free_urb(dev->rx_cb[0].rx_urb); + if (adapter->priv->coex_mode > 1) + usb_free_urb(dev->rx_cb[1].rx_urb); + kfree(dev->tx_buffer); } +static int rsi_usb_init_rx(struct rsi_hw *adapter) +{ + struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rx_usb_ctrl_block *rx_cb; + u8 idx, num_rx_cb; + + num_rx_cb = (adapter->priv->coex_mode > 1 ? 2 : 1); + + for (idx = 0; idx < num_rx_cb; idx++) { + rx_cb = &dev->rx_cb[idx]; + + rx_cb->rx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!rx_cb->rx_urb) { + rsi_dbg(ERR_ZONE, "Failed alloc rx urb[%d]\n", idx); + goto err; + } + rx_cb->ep_num = idx + 1; + rx_cb->data = (void *)dev; + } + skb_queue_head_init(&dev->rx_q); + rsi_init_event(&dev->rx_thread.event); + if (rsi_create_kthread(adapter->priv, &dev->rx_thread, + rsi_usb_rx_thread, "RX-Thread")) { + rsi_dbg(ERR_ZONE, "%s: Unable to init rx thrd\n", __func__); + goto err; + } + + return 0; + +err: + usb_free_urb(dev->rx_cb[0].rx_urb); + if (adapter->priv->coex_mode > 1) + usb_free_urb(dev->rx_cb[1].rx_urb); + + return -1; +} + /** * rsi_init_usb_interface() - This function initializes the usb interface. * @adapter: Pointer to the adapter structure. 
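The buffer setup in the new rsi_rx_urb_submit() deserves a word: it reserves MAX_DWORD_ALIGN_BYTES (64) of headroom and then pushes the data pointer back by (addr & 0x3f), which rounds it down to the previous 64-byte boundary -- the reserve guarantees the push always has room. The same arithmetic with bare pointers, assuming malloc in place of skb allocation:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        enum { ALIGN = 64, HEADROOM = 64, PKT = 3000 };
        unsigned char *buf = malloc(HEADROOM + PKT);

        if (!buf)
            return 1;

        /* skb_reserve(skb, MAX_DWORD_ALIGN_BYTES): data starts 64 in */
        unsigned char *data = buf + HEADROOM;

        /* dword_align_bytes = (unsigned long)skb->data & 0x3f;
         * skb_push(skb, dword_align_bytes): move data back into the
         * headroom so it lands on a 64-byte boundary. */
        size_t misalign = (uintptr_t)data & (ALIGN - 1);
        data -= misalign;

        printf("misalign %zu, data %% 64 = %zu\n",
               misalign, (size_t)((uintptr_t)data & (ALIGN - 1)));
        free(buf);
        return 0;
    }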
@@ -503,7 +589,6 @@ static int rsi_init_usb_interface(struct rsi_hw *adapter, struct usb_interface *pfunction) { struct rsi_91x_usbdev *rsi_dev; - struct rsi_common *common = adapter->priv; int status; rsi_dev = kzalloc(sizeof(*rsi_dev), GFP_KERNEL); @@ -512,49 +597,37 @@ static int rsi_init_usb_interface(struct rsi_hw *adapter, adapter->rsi_dev = rsi_dev; rsi_dev->usbdev = interface_to_usbdev(pfunction); + rsi_dev->priv = (void *)adapter; - if (rsi_find_bulk_in_and_out_endpoints(pfunction, adapter)) - return -EINVAL; + if (rsi_find_bulk_in_and_out_endpoints(pfunction, adapter)) { + status = -EINVAL; + goto fail_eps; + } adapter->device = &pfunction->dev; usb_set_intfdata(pfunction, adapter); - common->rx_data_pkt = kmalloc(2048, GFP_KERNEL); - if (!common->rx_data_pkt) { - rsi_dbg(ERR_ZONE, "%s: Failed to allocate memory\n", - __func__); - return -ENOMEM; - } - rsi_dev->tx_buffer = kmalloc(2048, GFP_KERNEL); if (!rsi_dev->tx_buffer) { status = -ENOMEM; - goto fail_tx; + goto fail_eps; } - rsi_dev->rx_usb_urb[0] = usb_alloc_urb(0, GFP_KERNEL); - if (!rsi_dev->rx_usb_urb[0]) { + + if (rsi_usb_init_rx(adapter)) { + rsi_dbg(ERR_ZONE, "Failed to init RX handle\n"); status = -ENOMEM; goto fail_rx; } - rsi_dev->rx_usb_urb[0]->transfer_buffer = adapter->priv->rx_data_pkt; + rsi_dev->tx_blk_size = 252; adapter->block_size = rsi_dev->tx_blk_size; /* Initializing function callbacks */ - adapter->rx_urb_submit = rsi_rx_urb_submit; adapter->check_hw_queue_status = rsi_usb_check_queue_status; adapter->determine_event_timeout = rsi_usb_event_timeout; adapter->rsi_host_intf = RSI_HOST_INTF_USB; adapter->host_intf_ops = &usb_host_intf_ops; - rsi_init_event(&rsi_dev->rx_thread.event); - status = rsi_create_kthread(common, &rsi_dev->rx_thread, - rsi_usb_rx_thread, "RX-Thread"); - if (status) { - rsi_dbg(ERR_ZONE, "%s: Unable to init rx thrd\n", __func__); - goto fail_thread; - } - #ifdef CONFIG_RSI_DEBUGFS /* In USB, one less than the MAX_DEBUGFS_ENTRIES entries is required */ adapter->num_debugfs_entries = (MAX_DEBUGFS_ENTRIES - 1); @@ -563,12 +636,12 @@ static int rsi_init_usb_interface(struct rsi_hw *adapter, rsi_dbg(INIT_ZONE, "%s: Enabled the interface\n", __func__); return 0; -fail_thread: - usb_free_urb(rsi_dev->rx_usb_urb[0]); fail_rx: kfree(rsi_dev->tx_buffer); -fail_tx: - kfree(common->rx_data_pkt); + +fail_eps: + kfree(rsi_dev); + return status; } @@ -662,7 +735,7 @@ static int rsi_probe(struct usb_interface *pfunction, rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__); - adapter = rsi_91x_init(); + adapter = rsi_91x_init(dev_oper_mode); if (!adapter) { rsi_dbg(ERR_ZONE, "%s: Failed to init os intf ops\n", __func__); @@ -698,10 +771,16 @@ static int rsi_probe(struct usb_interface *pfunction, rsi_dbg(INIT_ZONE, "%s: Device Init Done\n", __func__); } - status = rsi_rx_urb_submit(adapter); + status = rsi_rx_urb_submit(adapter, WLAN_EP); if (status) goto err1; + if (adapter->priv->coex_mode > 1) { + status = rsi_rx_urb_submit(adapter, BT_EP); + if (status) + goto err1; + } + return 0; err1: rsi_deinit_usb_interface(adapter); diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index 465692b3c351..b1687d22f73f 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -30,31 +30,32 @@ void rsi_usb_rx_thread(struct rsi_common *common) struct rsi_hw *adapter = common->priv; struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; int status; + struct sk_buff *skb; do { 
rsi_wait_event(&dev->rx_thread.event, EVENT_WAIT_FOREVER); + rsi_reset_event(&dev->rx_thread.event); - if (atomic_read(&dev->rx_thread.thread_done)) - goto out; + while (true) { + if (atomic_read(&dev->rx_thread.thread_done)) + goto out; - mutex_lock(&common->rx_lock); - status = rsi_read_pkt(common, 0); - if (status) { - rsi_dbg(ERR_ZONE, "%s: Failed To read data", __func__); - mutex_unlock(&common->rx_lock); - return; - } - mutex_unlock(&common->rx_lock); - rsi_reset_event(&dev->rx_thread.event); - if (adapter->rx_urb_submit(adapter)) { - rsi_dbg(ERR_ZONE, - "%s: Failed in urb submission", __func__); - return; + skb = skb_dequeue(&dev->rx_q); + if (!skb) + break; + status = rsi_read_pkt(common, skb->data, 0); + if (status) { + rsi_dbg(ERR_ZONE, "%s: Failed To read data", + __func__); + break; + } + dev_kfree_skb(skb); } } while (1); out: rsi_dbg(INFO_ZONE, "%s: Terminated thread\n", __func__); + skb_queue_purge(&dev->rx_q); complete_and_exit(&dev->rx_thread.completion, 0); } diff --git a/drivers/net/wireless/rsi/rsi_coex.h b/drivers/net/wireless/rsi/rsi_coex.h new file mode 100644 index 000000000000..0fdc67f37a56 --- /dev/null +++ b/drivers/net/wireless/rsi/rsi_coex.h @@ -0,0 +1,37 @@ +/** + * Copyright (c) 2018 Redpine Signals Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
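One detail of the rewritten USB rx loop above is ordering: the event is reset immediately after rsi_wait_event() returns, before the queue is drained. A packet that lands mid-drain re-sets the event, so the next wait returns at once instead of sleeping on work that is already queued; resetting after the drain, as the old single-shot code effectively did, could strand such a packet until the next wakeup. A single-threaded sketch of just that flag discipline, with plain ints standing in for rsi_event and the skb queue:

    #include <stdio.h>

    static int event_flag;          /* stands in for rsi_event     */
    static int queued;              /* stands in for dev->rx_q len */

    static void on_urb_done(void)   /* rsi_rx_done_handler() side  */
    {
        queued++;
        event_flag = 1;             /* rsi_set_event()             */
    }

    static void rx_pass(void)       /* one iteration of the thread */
    {
        int injected = 0;

        event_flag = 0;             /* rsi_reset_event() first ... */
        while (queued > 0) {        /* ... then drain the queue    */
            queued--;
            if (!injected) {        /* a packet lands mid-drain    */
                injected = 1;
                on_urb_done();
            }
        }
        /* queue drained, but event_flag is still 1: the next wait
         * returns immediately -- no lost wakeup. */
        printf("queued=%d event=%d\n", queued, event_flag);
    }

    int main(void)
    {
        on_urb_done();              /* first completion            */
        rx_pass();                  /* prints queued=0 event=1     */
        return 0;
    }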
+ */ + +#ifndef __RSI_COEX_H__ +#define __RSI_COEX_H__ + +#include "rsi_common.h" + +#ifdef CONFIG_RSI_COEX +#define COMMON_CARD_READY_IND 0 +#define NUM_COEX_TX_QUEUES 5 + +struct rsi_coex_ctrl_block { + struct rsi_common *priv; + struct sk_buff_head coex_tx_qs[NUM_COEX_TX_QUEUES]; + struct rsi_thread coex_tx_thread; +}; + +int rsi_coex_attach(struct rsi_common *common); +void rsi_coex_detach(struct rsi_common *common); +int rsi_coex_send_pkt(void *priv, struct sk_buff *skb, u8 proto_type); +int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg); +#endif +#endif diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h index d07dbba61727..d9ff3b8be86e 100644 --- a/drivers/net/wireless/rsi/rsi_common.h +++ b/drivers/net/wireless/rsi/rsi_common.h @@ -62,6 +62,7 @@ static inline int rsi_create_kthread(struct rsi_common *common, u8 *name) { init_completion(&thread->completion); + atomic_set(&thread->thread_done, 0); thread->task = kthread_run(func_ptr, common, "%s", name); if (IS_ERR(thread->task)) return (int)PTR_ERR(thread->task); @@ -80,9 +81,9 @@ static inline int rsi_kill_thread(struct rsi_thread *handle) void rsi_mac80211_detach(struct rsi_hw *hw); u16 rsi_get_connected_channel(struct ieee80211_vif *vif); -struct rsi_hw *rsi_91x_init(void); +struct rsi_hw *rsi_91x_init(u16 oper_mode); void rsi_91x_deinit(struct rsi_hw *adapter); -int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len); +int rsi_read_pkt(struct rsi_common *common, u8 *rx_pkt, s32 rcv_pkt_len); #ifdef CONFIG_PM int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan); #endif diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index a09d36b6b765..786dccd0b732 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -17,6 +17,17 @@ #ifndef __RSI_HAL_H__ #define __RSI_HAL_H__ +/* Device Operating modes */ +#define DEV_OPMODE_WIFI_ALONE 1 +#define DEV_OPMODE_BT_ALONE 4 +#define DEV_OPMODE_BT_LE_ALONE 8 +#define DEV_OPMODE_BT_DUAL 12 +#define DEV_OPMODE_STA_BT 5 +#define DEV_OPMODE_STA_BT_LE 9 +#define DEV_OPMODE_STA_BT_DUAL 13 +#define DEV_OPMODE_AP_BT 6 +#define DEV_OPMODE_AP_BT_DUAL 14 + #define FLASH_WRITE_CHUNK_SIZE (4 * 1024) #define FLASH_SECTOR_SIZE (4 * 1024) @@ -103,6 +114,7 @@ #define FW_FLASH_OFFSET 0x820 #define LMAC_VER_OFFSET (FW_FLASH_OFFSET + 0x200) +#define MAX_DWORD_ALIGN_BYTES 64 struct bl_header { __le32 flags; @@ -145,8 +157,18 @@ struct rsi_data_desc { u8 sta_id; } __packed; +struct rsi_bt_desc { + __le16 len_qno; + __le16 reserved1; + __le32 reserved2; + __le32 reserved3; + __le16 reserved4; + __le16 bt_pkt_type; +} __packed; + int rsi_hal_device_init(struct rsi_hw *adapter); int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb); int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb); +int rsi_send_bt_pkt(struct rsi_common *common, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 8cab630af4a5..ef4fa323694b 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -20,6 +20,7 @@ #include <linux/string.h> #include <linux/skbuff.h> #include <net/mac80211.h> +#include <net/rsi_91x.h> struct rsi_sta { struct ieee80211_sta *sta; @@ -85,10 +86,6 @@ extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...); #define MGMT_HW_Q 10 #define BEACON_HW_Q 11 -/* Queue information */ -#define RSI_COEX_Q 0x0 -#define RSI_WIFI_MGMT_Q 0x4 
-#define RSI_WIFI_DATA_Q 0x5 #define IEEE80211_MGMT_FRAME 0x00 #define IEEE80211_CTL_FRAME 0x04 @@ -115,6 +112,7 @@ extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...); #define RSI_WOW_NO_CONNECTION BIT(1) #define RSI_DEV_9113 1 +#define RSI_MAX_RX_PKTS 64 struct version_info { u16 major; @@ -209,6 +207,7 @@ struct rsi_common { struct rsi_hw *priv; struct vif_priv vif_info[RSI_MAX_VIFS]; + void *coex_cb; bool mgmt_q_block; struct version_info lmac_ver; @@ -273,6 +272,8 @@ struct rsi_common { u8 obm_ant_sel_val; int tx_power; u8 ant_in_use; + /* Mutex used for writing packet to bus */ + struct mutex tx_bus_mutex; bool hibernate_resume; bool reinit_hw; u8 wow_flags; @@ -291,11 +292,8 @@ struct rsi_common { bool p2p_enabled; struct timer_list roc_timer; struct ieee80211_vif *roc_vif; -}; -enum host_intf { - RSI_HOST_INTF_SDIO = 0, - RSI_HOST_INTF_USB + void *bt_adapter; }; struct eepromrw_info { @@ -322,7 +320,7 @@ struct rsi_hw { struct device *device; u8 sc_nvifs; - enum host_intf rsi_host_intf; + enum rsi_host_intf rsi_host_intf; u16 block_size; enum ps_state ps_state; struct rsi_ps_info ps_info; @@ -343,7 +341,6 @@ struct rsi_hw { void *rsi_dev; struct rsi_host_intf_ops *host_intf_ops; int (*check_hw_queue_status)(struct rsi_hw *adapter, u8 q_num); - int (*rx_urb_submit)(struct rsi_hw *adapter); int (*determine_event_timeout)(struct rsi_hw *adapter); }; @@ -367,4 +364,8 @@ struct rsi_host_intf_ops { u8 *fw); int (*reinit_device)(struct rsi_hw *adapter); }; + +enum rsi_host_intf rsi_get_host_intf(void *priv); +void rsi_set_bt_context(void *priv, void *bt_context); + #endif diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 389094a3f91c..cf6567ae5bbe 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -57,12 +57,14 @@ #define WOW_PATTERN_SIZE 256 /* Receive Frame Types */ +#define RSI_RX_DESC_MSG_TYPE_OFFSET 2 #define TA_CONFIRM_TYPE 0x01 #define RX_DOT11_MGMT 0x02 #define TX_STATUS_IND 0x04 #define BEACON_EVENT_IND 0x08 #define PROBEREQ_CONFIRM 2 #define CARD_READY_IND 0x00 +#define SLEEP_NOTIFY_IND 0x06 #define RSI_DELETE_PEER 0x0 #define RSI_ADD_PEER 0x1 @@ -638,6 +640,7 @@ static inline void rsi_set_len_qno(__le16 *addr, u16 len, u8 qno) *addr = cpu_to_le16(len | ((qno & 7) << 12)); } +int rsi_handle_card_ready(struct rsi_common *common, u8 *msg); int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg); int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode, u8 *mac_addr, u8 vap_id, u8 vap_status); diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index 49c549ba6682..ead8e7c4df3a 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -46,6 +46,8 @@ enum sdio_interrupt_type { #define PKT_BUFF_AVAILABLE 1 #define FW_ASSERT_IND 2 +#define RSI_MASTER_REG_BUF_SIZE 12 + #define RSI_DEVICE_BUFFER_STATUS_REGISTER 0xf3 #define RSI_FN1_INT_REGISTER 0xf9 #define RSI_INT_ENABLE_REGISTER 0x04 @@ -105,6 +107,11 @@ struct receive_info { u32 buf_available_counter; }; +struct rsi_sdio_rx_q { + u8 num_rx_pkts; + struct sk_buff_head head; +}; + struct rsi_91x_sdiodev { struct sdio_func *pfunction; struct task_struct *sdio_irq_task; @@ -117,6 +124,8 @@ struct rsi_91x_sdiodev { u16 tx_blk_size; u8 write_fail; bool buff_status_updated; + struct rsi_sdio_rx_q rx_q; + struct rsi_thread rx_thread; }; void rsi_interrupt_handler(struct rsi_hw *adapter); @@ -131,4 +140,5 @@ int rsi_sdio_master_access_msword(struct rsi_hw 
*adapter, u16 ms_word); void rsi_sdio_ack_intr(struct rsi_hw *adapter, u8 int_bit); int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter); int rsi_sdio_check_buffer_status(struct rsi_hw *adapter, u8 q_num); +void rsi_sdio_rx_thread(struct rsi_common *common); #endif diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index 891daea2d932..a88d59295a98 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -31,7 +31,7 @@ #define USB_VENDOR_REGISTER_WRITE 0x16 #define RSI_USB_TX_HEAD_ROOM 128 -#define MAX_RX_URBS 1 +#define MAX_RX_URBS 2 #define MAX_BULK_EP 8 #define WLAN_EP 1 #define BT_EP 2 @@ -39,19 +39,28 @@ #define RSI_USB_BUF_SIZE 4096 #define RSI_USB_CTRL_BUF_SIZE 0x04 +struct rx_usb_ctrl_block { + u8 *data; + struct urb *rx_urb; + struct sk_buff *rx_skb; + u8 ep_num; +}; + struct rsi_91x_usbdev { + void *priv; struct rsi_thread rx_thread; u8 endpoint; struct usb_device *usbdev; struct usb_interface *pfunction; - struct urb *rx_usb_urb[MAX_RX_URBS]; + struct rx_usb_ctrl_block rx_cb[MAX_RX_URBS]; u8 *tx_buffer; - __le16 bulkin_size; - u8 bulkin_endpoint_addr; + __le16 bulkin_size[MAX_BULK_EP]; + u8 bulkin_endpoint_addr[MAX_BULK_EP]; __le16 bulkout_size[MAX_BULK_EP]; u8 bulkout_endpoint_addr[MAX_BULK_EP]; u32 tx_blk_size; u8 write_fail; + struct sk_buff_head rx_q; }; static inline int rsi_usb_check_queue_status(struct rsi_hw *adapter, u8 q_num) diff --git a/drivers/net/wireless/st/Kconfig b/drivers/net/wireless/st/Kconfig index 969b4f6e53b5..ff69a80a9633 100644 --- a/drivers/net/wireless/st/Kconfig +++ b/drivers/net/wireless/st/Kconfig @@ -5,8 +5,8 @@ config WLAN_VENDOR_ST If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. 
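With two bulk IN pipes, the rsi_91x_usbdev structure above grows per-endpoint address/size arrays and one rx control block per URB. Endpoint numbers are 1-based (WLAN_EP is 1, BT_EP is 2) while the arrays are 0-based, hence the recurring ep_num - 1 in the USB hunks earlier. A trivial sketch of that indexing; the endpoint addresses are made up:

    #include <stdio.h>

    #define MAX_RX_URBS 2
    #define WLAN_EP     1
    #define BT_EP       2

    struct rx_cb { unsigned char ep_num; };

    int main(void)
    {
        unsigned char bulkin_addr[MAX_RX_URBS] = { 0x81, 0x82 }; /* invented */
        struct rx_cb rx_cb[MAX_RX_URBS];
        int ep;

        for (ep = WLAN_EP; ep <= BT_EP; ep++)
            rx_cb[ep - 1].ep_num = ep;      /* 1-based -> 0-based */

        printf("WLAN urb reads EP 0x%02x, BT urb reads EP 0x%02x\n",
               bulkin_addr[rx_cb[WLAN_EP - 1].ep_num - 1],
               bulkin_addr[rx_cb[BT_EP - 1].ep_num - 1]);
        return 0;
    }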
if WLAN_VENDOR_ST diff --git a/drivers/net/wireless/st/cw1200/debug.c b/drivers/net/wireless/st/cw1200/debug.c index 34f97c31eecf..295cb1a29f25 100644 --- a/drivers/net/wireless/st/cw1200/debug.c +++ b/drivers/net/wireless/st/cw1200/debug.c @@ -398,15 +398,15 @@ int cw1200_debug_init(struct cw1200_common *priv) if (!d->debugfs_phy) goto err; - if (!debugfs_create_file("status", S_IRUSR, d->debugfs_phy, + if (!debugfs_create_file("status", 0400, d->debugfs_phy, priv, &fops_status)) goto err; - if (!debugfs_create_file("counters", S_IRUSR, d->debugfs_phy, + if (!debugfs_create_file("counters", 0400, d->debugfs_phy, priv, &fops_counters)) goto err; - if (!debugfs_create_file("wsm_dumps", S_IWUSR, d->debugfs_phy, + if (!debugfs_create_file("wsm_dumps", 0200, d->debugfs_phy, priv, &fops_wsm_dumps)) goto err; diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c index a186d1df1f29..90dc979f260b 100644 --- a/drivers/net/wireless/st/cw1200/main.c +++ b/drivers/net/wireless/st/cw1200/main.c @@ -46,7 +46,7 @@ MODULE_ALIAS("cw1200_core"); /* Accept MAC address of the form macaddr=0x00,0x80,0xE1,0x30,0x40,0x50 */ static u8 cw1200_mac_template[ETH_ALEN] = {0x02, 0x80, 0xe1, 0x00, 0x00, 0x00}; -module_param_array_named(macaddr, cw1200_mac_template, byte, NULL, S_IRUGO); +module_param_array_named(macaddr, cw1200_mac_template, byte, NULL, 0444); MODULE_PARM_DESC(macaddr, "Override platform_data MAC address"); static char *cw1200_sdd_path; diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig index 92fbd6597e34..366c687445ad 100644 --- a/drivers/net/wireless/ti/Kconfig +++ b/drivers/net/wireless/ti/Kconfig @@ -5,8 +5,8 @@ config WLAN_VENDOR_TI If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. 
if WLAN_VENDOR_TI diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 037defd10b91..bd8641ad953b 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -122,8 +122,7 @@ static int wl1251_fetch_nvs(struct wl1251 *wl) goto out; } - wl->nvs_len = fw->size; - wl->nvs = kmemdup(fw->data, wl->nvs_len, GFP_KERNEL); + wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL); if (!wl->nvs) { wl1251_error("could not allocate memory for the nvs file"); @@ -131,6 +130,8 @@ static int wl1251_fetch_nvs(struct wl1251 *wl) goto out; } + wl->nvs_len = fw->size; + ret = 0; out: @@ -202,13 +203,6 @@ static int wl1251_chip_wakeup(struct wl1251 *wl) goto out; } - if (wl->nvs == NULL && !wl->use_eeprom) { - /* No NVS from netlink, try to get it from the filesystem */ - ret = wl1251_fetch_nvs(wl); - if (ret < 0) - goto out; - } - out: return ret; } @@ -1446,6 +1440,61 @@ static int wl1251_read_eeprom_mac(struct wl1251 *wl) return 0; } +#define NVS_OFF_MAC_LEN 0x19 +#define NVS_OFF_MAC_ADDR_LO 0x1a +#define NVS_OFF_MAC_ADDR_HI 0x1b +#define NVS_OFF_MAC_DATA 0x1c + +static int wl1251_check_nvs_mac(struct wl1251 *wl) +{ + if (wl->nvs_len < 0x24) + return -ENODATA; + + /* length is 2 and data address is 0x546c (ANDed with 0xfffe) */ + if (wl->nvs[NVS_OFF_MAC_LEN] != 2 || + wl->nvs[NVS_OFF_MAC_ADDR_LO] != 0x6d || + wl->nvs[NVS_OFF_MAC_ADDR_HI] != 0x54) + return -EINVAL; + + return 0; +} + +static int wl1251_read_nvs_mac(struct wl1251 *wl) +{ + u8 mac[ETH_ALEN]; + int i, ret; + + ret = wl1251_check_nvs_mac(wl); + if (ret) + return ret; + + /* MAC is stored in reverse order */ + for (i = 0; i < ETH_ALEN; i++) + mac[i] = wl->nvs[NVS_OFF_MAC_DATA + ETH_ALEN - i - 1]; + + /* 00:00:20:07:03:09 is in example file wl1251-nvs.bin, so invalid */ + if (ether_addr_equal_unaligned(mac, "\x00\x00\x20\x07\x03\x09")) + return -EINVAL; + + memcpy(wl->mac_addr, mac, ETH_ALEN); + return 0; +} + +static int wl1251_write_nvs_mac(struct wl1251 *wl) +{ + int i, ret; + + ret = wl1251_check_nvs_mac(wl); + if (ret) + return ret; + + /* MAC is stored in reverse order */ + for (i = 0; i < ETH_ALEN; i++) + wl->nvs[NVS_OFF_MAC_DATA + i] = wl->mac_addr[ETH_ALEN - i - 1]; + + return 0; +} + static int wl1251_register_hw(struct wl1251 *wl) { int ret; @@ -1489,8 +1538,33 @@ int wl1251_init_ieee80211(struct wl1251 *wl) wl->hw->queues = 4; + if (wl->nvs == NULL && !wl->use_eeprom) { + ret = wl1251_fetch_nvs(wl); + if (ret < 0) + goto out; + } + if (wl->use_eeprom) - wl1251_read_eeprom_mac(wl); + ret = wl1251_read_eeprom_mac(wl); + else + ret = wl1251_read_nvs_mac(wl); + + if (ret == 0 && !is_valid_ether_addr(wl->mac_addr)) + ret = -EINVAL; + + if (ret < 0) { + /* + * In case our MAC address is not correctly set, + * we use a random but Nokia MAC. 
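The wl1251 helpers above treat the NVS blob as a tiny typed record: byte 0x19 must hold length 2, bytes 0x1a/0x1b the data-address bytes 0x6d/0x54, and the six MAC bytes sit at 0x1c in reverse order, with the well-known dummy MAC from the example wl1251-nvs.bin rejected. A userspace sketch of the same extraction over a synthetic buffer; the MAC value is invented:

    #include <stdio.h>
    #include <string.h>

    #define NVS_OFF_MAC_LEN     0x19
    #define NVS_OFF_MAC_ADDR_LO 0x1a
    #define NVS_OFF_MAC_ADDR_HI 0x1b
    #define NVS_OFF_MAC_DATA    0x1c
    #define ETH_ALEN            6

    static int read_nvs_mac(const unsigned char *nvs, size_t len,
                            unsigned char mac[ETH_ALEN])
    {
        int i;

        if (len < 0x24)
            return -1;
        /* length must be 2, data address bytes 0x6d/0x54 */
        if (nvs[NVS_OFF_MAC_LEN] != 2 ||
            nvs[NVS_OFF_MAC_ADDR_LO] != 0x6d ||
            nvs[NVS_OFF_MAC_ADDR_HI] != 0x54)
            return -1;
        /* MAC is stored in reverse order */
        for (i = 0; i < ETH_ALEN; i++)
            mac[i] = nvs[NVS_OFF_MAC_DATA + ETH_ALEN - i - 1];
        /* the example wl1251-nvs.bin MAC is a known dummy */
        if (!memcmp(mac, "\x00\x00\x20\x07\x03\x09", ETH_ALEN))
            return -1;
        return 0;
    }

    int main(void)
    {
        unsigned char nvs[0x24] = { 0 };
        unsigned char mac[ETH_ALEN];

        nvs[NVS_OFF_MAC_LEN]     = 2;
        nvs[NVS_OFF_MAC_ADDR_LO] = 0x6d;
        nvs[NVS_OFF_MAC_ADDR_HI] = 0x54;
        /* invented MAC 02:80:e1:aa:bb:cc, stored reversed */
        memcpy(&nvs[NVS_OFF_MAC_DATA], "\xcc\xbb\xaa\xe1\x80\x02", ETH_ALEN);

        if (read_nvs_mac(nvs, sizeof(nvs), mac) == 0)
            printf("MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }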
+ */ + static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf}; + memcpy(wl->mac_addr, nokia_oui, 3); + get_random_bytes(wl->mac_addr + 3, 3); + if (!wl->use_eeprom) + wl1251_write_nvs_mac(wl); + wl1251_warning("MAC address in eeprom or nvs data is not valid"); + wl1251_warning("Setting random MAC address: %pM", wl->mac_addr); + } ret = wl1251_register_hw(wl); if (ret) @@ -1511,7 +1585,6 @@ struct ieee80211_hw *wl1251_alloc_hw(void) struct ieee80211_hw *hw; struct wl1251 *wl; int i; - static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf}; hw = ieee80211_alloc_hw(sizeof(*wl), &wl1251_ops); if (!hw) { @@ -1561,13 +1634,6 @@ struct ieee80211_hw *wl1251_alloc_hw(void) INIT_WORK(&wl->irq_work, wl1251_irq_work); INIT_WORK(&wl->tx_work, wl1251_tx_work); - /* - * In case our MAC address is not correctly set, - * we use a random but Nokia MAC. - */ - memcpy(wl->mac_addr, nokia_oui, 3); - get_random_bytes(wl->mac_addr + 3, 3); - wl->state = WL1251_STATE_OFF; mutex_init(&wl->mutex); spin_lock_init(&wl->wl_lock); diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c index de2fa6705574..12ed14ebc307 100644 --- a/drivers/net/wireless/ti/wl1251/tx.c +++ b/drivers/net/wireless/ti/wl1251/tx.c @@ -221,10 +221,8 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb, struct sk_buff *newskb = skb_copy_expand(skb, 0, 3, GFP_KERNEL); - if (unlikely(newskb == NULL)) { - wl1251_error("Can't allocate skb!"); + if (unlikely(newskb == NULL)) return -EINVAL; - } tx_hdr = (struct tx_double_buffer_desc *) newskb->data; diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 0cf3b4013dd6..ca0f936fc119 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -2092,54 +2092,51 @@ static struct platform_driver wl18xx_driver = { }; module_platform_driver(wl18xx_driver); -module_param_named(ht_mode, ht_mode_param, charp, S_IRUSR); +module_param_named(ht_mode, ht_mode_param, charp, 0400); MODULE_PARM_DESC(ht_mode, "Force HT mode: wide or siso20"); -module_param_named(board_type, board_type_param, charp, S_IRUSR); +module_param_named(board_type, board_type_param, charp, 0400); MODULE_PARM_DESC(board_type, "Board type: fpga, hdk (default), evb, com8 or " "dvp"); -module_param_named(checksum, checksum_param, bool, S_IRUSR); +module_param_named(checksum, checksum_param, bool, 0400); MODULE_PARM_DESC(checksum, "Enable TCP checksum: boolean (defaults to false)"); -module_param_named(dc2dc, dc2dc_param, int, S_IRUSR); +module_param_named(dc2dc, dc2dc_param, int, 0400); MODULE_PARM_DESC(dc2dc, "External DC2DC: u8 (defaults to 0)"); -module_param_named(n_antennas_2, n_antennas_2_param, int, S_IRUSR); +module_param_named(n_antennas_2, n_antennas_2_param, int, 0400); MODULE_PARM_DESC(n_antennas_2, "Number of installed 2.4GHz antennas: 1 (default) or 2"); -module_param_named(n_antennas_5, n_antennas_5_param, int, S_IRUSR); +module_param_named(n_antennas_5, n_antennas_5_param, int, 0400); MODULE_PARM_DESC(n_antennas_5, "Number of installed 5GHz antennas: 1 (default) or 2"); -module_param_named(low_band_component, low_band_component_param, int, - S_IRUSR); +module_param_named(low_band_component, low_band_component_param, int, 0400); MODULE_PARM_DESC(low_band_component, "Low band component: u8 " "(default is 0x01)"); module_param_named(low_band_component_type, low_band_component_type_param, - int, S_IRUSR); + int, 0400); MODULE_PARM_DESC(low_band_component_type, "Low band component type: u8 " "(default is 0x05 or 0x06 
depending on the board_type)"); -module_param_named(high_band_component, high_band_component_param, int, - S_IRUSR); +module_param_named(high_band_component, high_band_component_param, int, 0400); MODULE_PARM_DESC(high_band_component, "High band component: u8, " "(default is 0x01)"); module_param_named(high_band_component_type, high_band_component_type_param, - int, S_IRUSR); + int, 0400); MODULE_PARM_DESC(high_band_component_type, "High band component type: u8 " "(default is 0x09)"); module_param_named(pwr_limit_reference_11_abg, - pwr_limit_reference_11_abg_param, int, S_IRUSR); + pwr_limit_reference_11_abg_param, int, 0400); MODULE_PARM_DESC(pwr_limit_reference_11_abg, "Power limit reference: u8 " "(default is 0xc8)"); -module_param_named(num_rx_desc, - num_rx_desc_param, int, S_IRUSR); +module_param_named(num_rx_desc, num_rx_desc_param, int, 0400); MODULE_PARM_DESC(num_rx_desc_param, "Number of Rx descriptors: u8 (default is 32)"); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 09714034dbf1..3a51ab116e79 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -6630,20 +6630,20 @@ EXPORT_SYMBOL_GPL(wlcore_remove); u32 wl12xx_debug_level = DEBUG_NONE; EXPORT_SYMBOL_GPL(wl12xx_debug_level); -module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR); +module_param_named(debug_level, wl12xx_debug_level, uint, 0600); MODULE_PARM_DESC(debug_level, "wl12xx debugging level"); module_param_named(fwlog, fwlog_param, charp, 0); MODULE_PARM_DESC(fwlog, "FW logger options: continuous, dbgpins or disable"); -module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR); +module_param(fwlog_mem_blocks, int, 0600); MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks"); -module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR); +module_param(bug_on_recovery, int, 0600); MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery"); -module_param(no_recovery, int, S_IRUSR | S_IWUSR); +module_param(no_recovery, int, 0600); MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. 
FW will remain stuck."); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index f8a1fea64e25..1f727babbea0 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -469,7 +469,7 @@ static void __exit wl1271_exit(void) module_init(wl1271_init); module_exit(wl1271_exit); -module_param(dump, bool, S_IRUSR | S_IWUSR); +module_param(dump, bool, 0600); MODULE_PARM_DESC(dump, "Enable sdio read/write dumps."); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c index b72e2101488b..d31eb775e023 100644 --- a/drivers/net/wireless/ti/wlcore/sysfs.c +++ b/drivers/net/wireless/ti/wlcore/sysfs.c @@ -80,7 +80,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev, return count; } -static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR, +static DEVICE_ATTR(bt_coex_state, 0644, wl1271_sysfs_show_bt_coex_state, wl1271_sysfs_store_bt_coex_state); @@ -103,8 +103,7 @@ static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev, return len; } -static DEVICE_ATTR(hw_pg_ver, S_IRUGO, - wl1271_sysfs_show_hw_pg_ver, NULL); +static DEVICE_ATTR(hw_pg_ver, 0444, wl1271_sysfs_show_hw_pg_ver, NULL); static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, @@ -139,7 +138,7 @@ static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, } static const struct bin_attribute fwlog_attr = { - .attr = {.name = "fwlog", .mode = S_IRUSR}, + .attr = { .name = "fwlog", .mode = 0400 }, .read = wl1271_sysfs_read_fwlog, }; diff --git a/drivers/net/wireless/zydas/Kconfig b/drivers/net/wireless/zydas/Kconfig index a58c0f65e376..b327f86f05be 100644 --- a/drivers/net/wireless/zydas/Kconfig +++ b/drivers/net/wireless/zydas/Kconfig @@ -5,8 +5,8 @@ config WLAN_VENDOR_ZYDAS If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about cards. If you say Y, you will be asked for + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for your specific card in the following questions. 
if WLAN_VENDOR_ZYDAS diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c index b785742bfd9e..b01b44a5d16e 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c @@ -509,7 +509,6 @@ void zd_mac_tx_failed(struct urb *urb) int found = 0; int i, position = 0; - q = &mac->ack_wait_queue; spin_lock_irqsave(&q->lock, flags); skb_queue_walk(q, skb) { diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c index b1cf7c6f407a..ef5887037b22 100644 --- a/drivers/net/xen-netback/rx.c +++ b/drivers/net/xen-netback/rx.c @@ -419,7 +419,7 @@ static void xenvif_rx_extra_slot(struct xenvif_queue *queue, BUG(); } -void xenvif_rx_skb(struct xenvif_queue *queue) +static void xenvif_rx_skb(struct xenvif_queue *queue) { struct xenvif_pkt_state pkt; diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index a56d3eab35dd..e1aef253601e 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -224,7 +224,7 @@ static void xenvif_debugfs_addif(struct xenvif *vif) snprintf(filename, sizeof(filename), "io_ring_q%d", i); pfile = debugfs_create_file(filename, - S_IRUSR | S_IWUSR, + 0600, vif->xenvif_dbg_root, &vif->queues[i], &xenvif_dbg_io_ring_ops_fops); @@ -235,7 +235,7 @@ static void xenvif_debugfs_addif(struct xenvif *vif) if (vif->ctrl_irq) { pfile = debugfs_create_file("ctrl", - S_IRUSR, + 0400, vif->xenvif_dbg_root, vif, &xenvif_dbg_ctrl_ops_fops); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8328d395e332..4dd0668003e7 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -2005,7 +2005,10 @@ static void netback_changed(struct xenbus_device *dev, case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: + break; + case XenbusStateUnknown: + wake_up_all(&module_unload_q); break; case XenbusStateInitWait: @@ -2110,9 +2113,9 @@ static ssize_t store_rxbuf(struct device *dev, return len; } -static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf); -static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf); -static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL); +static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf); +static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf); +static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL); static struct attribute *xennet_dev_attrs[] = { &dev_attr_rxbuf_min.attr, @@ -2136,7 +2139,9 @@ static int xennet_remove(struct xenbus_device *dev) xenbus_switch_state(dev, XenbusStateClosing); wait_event(module_unload_q, xenbus_read_driver_state(dev->otherend) == - XenbusStateClosing); + XenbusStateClosing || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown); xenbus_switch_state(dev, XenbusStateClosed); wait_event(module_unload_q, |